OnionrUtils fully removed (but not fully bug-free)

flow now uses a daemon thread for displaying output
Branch: master
Author: Kevin Froman, 2019-06-25 18:07:35 -05:00
Parent: 909c002dc4
Commit: c7e06205b7
50 changed files with 280 additions and 330 deletions
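The change is almost entirely mechanical: helpers that used to live on a shared OnionrUtils instance (reached through core._utils) become plain functions in modules under the onionrutils package. A minimal sketch of the calling convention used throughout the diff below; the block_hash value is a placeholder:

    from onionrutils import epoch, stringvalidators

    block_hash = 'a' * 64  # placeholder hex digest, for illustration only

    # Old style (removed in this commit):
    #   now = core._utils.getEpoch()
    #   ok = core._utils.validateHash(block_hash)

    # New style: module-level functions, no shared utils instance required
    now = epoch.get_epoch()
    ok = stringvalidators.validate_hash(block_hash)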

View File

@ -23,11 +23,12 @@ from gevent import Timeout
import flask
from flask import request, Response, abort, send_from_directory
import core
import onionrutils, onionrexceptions, onionrcrypto, blockimporter, onionrevents as events, logger, config, onionrblockapi
import onionrexceptions, onionrcrypto, blockimporter, onionrevents as events, logger, config, onionrblockapi
import httpapi
from httpapi import friendsapi, profilesapi, configapi, miscpublicapi
from onionrservices import httpheaders
import onionr
from onionrutils import bytesconverter, stringvalidators, epoch, mnemonickeys
config.reload()
class FDSafeHandler(WSGIHandler):
@ -98,7 +99,7 @@ class PublicAPI:
resp = httpheaders.set_default_onionr_http_headers(resp)
# Network API version
resp.headers['X-API'] = onionr.API_VERSION
self.lastRequest = clientAPI._core._utils.getRoundedEpoch(roundS=5)
self.lastRequest = epoch.get_rounded_epoch(roundS=5)
return resp
@app.route('/')
@ -177,9 +178,8 @@ class API:
self.debug = debug
self._core = onionrInst.onionrCore
self.startTime = self._core._utils.getEpoch()
self.startTime = epoch.get_epoch()
self._crypto = onionrcrypto.OnionrCrypto(self._core)
self._utils = onionrutils.OnionrUtils(self._core)
app = flask.Flask(__name__)
bindPort = int(config.get('client.client.port', 59496))
self.bindPort = bindPort
@ -334,7 +334,7 @@ class API:
@app.route('/getblockbody/<name>')
def getBlockBodyData(name):
resp = ''
if self._core._utils.validateHash(name):
if stringvalidators.validate_hash(name):
try:
resp = onionrblockapi.Block(name, decrypt=True).bcontent
except TypeError:
@ -346,7 +346,7 @@ class API:
@app.route('/getblockdata/<name>')
def getData(name):
resp = ""
if self._core._utils.validateHash(name):
if stringvalidators.validate_hash(name):
if name in self._core.getBlockList():
try:
resp = self.getBlockData(name, decrypt=True)
@ -371,7 +371,7 @@ class API:
def site(name):
bHash = name
resp = 'Not Found'
if self._core._utils.validateHash(bHash):
if stringvalidators.validate_hash(bHash):
try:
resp = onionrblockapi.Block(bHash).bcontent
except onionrexceptions.NoDataAvailable:
@ -432,7 +432,7 @@ class API:
@app.route('/getHumanReadable/<name>')
def getHumanReadable(name):
return Response(self._core._utils.getHumanReadableID(name))
return Response(mnemonickeys.get_human_readable_ID(name))
@app.route('/insertblock', methods=['POST'])
def insertBlock():
@ -497,13 +497,13 @@ class API:
def getUptime(self):
while True:
try:
return self._utils.getEpoch() - self.startTime
return epoch.get_epoch() - self.startTime
except (AttributeError, NameError):
# Don't error on race condition with startup
pass
def getBlockData(self, bHash, decrypt=False, raw=False, headerOnly=False):
assert self._core._utils.validateHash(bHash)
assert stringvalidators.validate_hash(bHash)
bl = onionrblockapi.Block(bHash, core=self._core)
if decrypt:
bl.decrypt()
@ -520,8 +520,8 @@ class API:
pass
else:
validSig = False
signer = onionrutils.bytes_to_str(bl.signer)
if bl.isSigned() and onionrutils.stringvalidators.validate_pub_key(signer) and bl.isSigner(signer):
signer = bytesconverter.bytes_to_str(bl.signer)
if bl.isSigned() and stringvalidators.validate_pub_key(signer) and bl.isSigner(signer):
validSig = True
bl.bheader['validSig'] = validSig
bl.bheader['meta'] = ''

View File

@ -27,7 +27,7 @@ from communicatorutils import downloadblocks, lookupblocks, lookupadders
from communicatorutils import servicecreator, connectnewpeers, uploadblocks
from communicatorutils import daemonqueuehandler, announcenode, deniableinserts
from communicatorutils import cooldownpeer, housekeeping, netcheck
from onionrutils import localcommand
from onionrutils import localcommand, epoch, basicrequests
from etc import humanreadabletime
import onionrservices, onionr, onionrproofs
@ -91,7 +91,7 @@ class OnionrCommunicatorDaemon:
plugins.reload()
# time app started running for info/statistics purposes
self.startTime = self._core._utils.getEpoch()
self.startTime = epoch.get_epoch()
if developmentMode:
OnionrCommunicatorTimers(self, self.heartbeat, 30)
@ -310,9 +310,9 @@ class OnionrCommunicatorDaemon:
if len(data) > 0:
url += '&data=' + data
self._core.setAddressInfo(peer, 'lastConnectAttempt', self._core._utils.getEpoch()) # mark the time we're trying to request this peer
self._core.setAddressInfo(peer, 'lastConnectAttempt', epoch.get_epoch()) # mark the time we're trying to request this peer
retData = self._core._utils.doGetRequest(url, port=self.proxyPort)
retData = basicrequests.do_get_request(self._core, url, port=self.proxyPort)
# if request failed, (error), mark peer offline
if retData == False:
try:
@ -324,7 +324,7 @@ class OnionrCommunicatorDaemon:
except ValueError:
pass
else:
self._core.setAddressInfo(peer, 'lastConnect', self._core._utils.getEpoch())
self._core.setAddressInfo(peer, 'lastConnect', epoch.get_epoch())
self.getPeerProfileInstance(peer).addScore(1)
return retData # If returnHeaders, returns tuple of data, headers. if not, just data string
@ -341,7 +341,7 @@ class OnionrCommunicatorDaemon:
return retData
def getUptime(self):
return self._core._utils.getEpoch() - self.startTime
return epoch.get_epoch() - self.startTime
def heartbeat(self):
'''Show a heartbeat debug message'''

View File

@ -20,6 +20,7 @@
import base64
import onionrproofs, logger
from etc import onionrvalues
from onionrutils import basicrequests
def announce_node(daemon):
'''Announce our node to our peers'''
@ -75,8 +76,8 @@ def announce_node(daemon):
daemon.announceCache[peer] = data['random']
if not announceFail:
logger.info('Announcing node to ' + url)
if daemon._core._utils.doPostRequest(url, data) == 'Success':
logger.info('Successfully introduced node to ' + peer)
if basicrequests.do_post_request(daemon._core, url, data) == 'Success':
logger.info('Successfully introduced node to ' + peer, terminal=True)
retData = True
daemon._core.setAddressInfo(peer, 'introduced', 1)
daemon._core.setAddressInfo(peer, 'powValue', data['random'])

View File

@ -20,7 +20,7 @@
import time, sys
import onionrexceptions, logger, onionrpeers
from utils import networkmerger
from onionrutils import stringvalidators
from onionrutils import stringvalidators, epoch
# secrets module was added into standard lib in 3.6+
if sys.version_info[0] == 3 and sys.version_info[1] < 6:
from dependencies import secrets
@ -75,7 +75,7 @@ def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
if address not in comm_inst.onlinePeers:
logger.info('Connected to ' + address, terminal=True)
comm_inst.onlinePeers.append(address)
comm_inst.connectTimes[address] = comm_inst._core._utils.getEpoch()
comm_inst.connectTimes[address] = epoch.get_epoch()
retData = address
# add peer to profile list if they're not in it

View File

@ -17,6 +17,7 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
from onionrutils import epoch
def cooldown_peer(comm_inst):
'''Randomly add an online peer to cooldown, so we can connect a new one'''
onlinePeerAmount = len(comm_inst.onlinePeers)
@ -28,7 +29,7 @@ def cooldown_peer(comm_inst):
# Remove peers from cooldown that have been there long enough
tempCooldown = dict(comm_inst.cooldownPeer)
for peer in tempCooldown:
if (comm_inst._core._utils.getEpoch() - tempCooldown[peer]) >= cooldownTime:
if (epoch.get_epoch() - tempCooldown[peer]) >= cooldownTime:
del comm_inst.cooldownPeer[peer]
# Cool down a peer, if we have max connections alive for long enough
@ -38,7 +39,7 @@ def cooldown_peer(comm_inst):
while finding:
try:
toCool = min(tempConnectTimes, key=tempConnectTimes.get)
if (comm_inst._core._utils.getEpoch() - tempConnectTimes[toCool]) < minTime:
if (epoch.get_epoch() - tempConnectTimes[toCool]) < minTime:
del tempConnectTimes[toCool]
else:
finding = False
@ -46,6 +47,6 @@ def cooldown_peer(comm_inst):
break
else:
comm_inst.removeOnlinePeer(toCool)
comm_inst.cooldownPeer[toCool] = comm_inst._core._utils.getEpoch()
comm_inst.cooldownPeer[toCool] = epoch.get_epoch()
comm_inst.decrementThreadCount('cooldown_peer')

View File

@ -19,7 +19,7 @@
'''
import communicator, onionrexceptions
import logger, onionrpeers
from onionrutils import blockmetadata
from onionrutils import blockmetadata, stringvalidators, validatemetadata
def download_blocks_from_communicator(comm_inst):
assert isinstance(comm_inst, communicator.OnionrCommunicatorDaemon)
@ -48,7 +48,7 @@ def download_blocks_from_communicator(comm_inst):
continue
if comm_inst._core._blacklist.inBlacklist(blockHash):
continue
if comm_inst._core._utils.storageCounter.isFull():
if comm_inst._core.storage_counter.isFull():
break
comm_inst.currentDownloading.append(blockHash) # So we can avoid concurrent downloading in other threads of same block
if len(blockPeers) == 0:
@ -75,7 +75,7 @@ def download_blocks_from_communicator(comm_inst):
content = content.decode() # decode here because sha3Hash needs bytes above
metas = blockmetadata.get_block_metadata_from_data(content) # returns tuple(metadata, meta), meta is also in metadata
metadata = metas[0]
if comm_inst._core._utils.validateMetadata(metadata, metas[2]): # check if metadata is valid, and verify nonce
if validatemetadata.validate_metadata(comm_inst._core, metadata, metas[2]): # check if metadata is valid, and verify nonce
if comm_inst._core._crypto.verifyPow(content): # check if POW is enough/correct
logger.info('Attempting to save block %s...' % blockHash[:12])
try:

View File

@ -20,6 +20,7 @@
import sqlite3
import logger
from onionrusers import onionrusers
from onionrutils import epoch
def clean_old_blocks(comm_inst):
'''Delete old blocks if our disk allocation is full/near full, and also expired blocks'''
@ -29,7 +30,7 @@ def clean_old_blocks(comm_inst):
comm_inst._core.removeBlock(bHash)
logger.info('Deleted block: %s' % (bHash,))
while comm_inst._core._utils.storageCounter.isFull():
while comm_inst._core.storage_counter.isFull():
oldest = comm_inst._core.getBlockList()[0]
comm_inst._core._blacklist.addToDB(oldest)
comm_inst._core.removeBlock(oldest)
@ -41,7 +42,7 @@ def clean_keys(comm_inst):
'''Delete expired forward secrecy keys'''
conn = sqlite3.connect(comm_inst._core.peerDB, timeout=10)
c = conn.cursor()
time = comm_inst._core._utils.getEpoch()
time = epoch.get_epoch()
deleteKeys = []
for entry in c.execute("SELECT * FROM forwardKeys WHERE expire <= ?", (time,)):

View File

@ -18,6 +18,8 @@
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import logger, onionrproofs
from onionrutils import stringvalidators, epoch
def lookup_blocks_from_communicator(comm_inst):
logger.info('Looking up new blocks...')
tryAmount = 2
@ -34,7 +36,7 @@ def lookup_blocks_from_communicator(comm_inst):
if not comm_inst.isOnline:
break
# check if disk allocation is used
if comm_inst._core._utils.storageCounter.isFull():
if comm_inst._core.storage_counter.isFull():
logger.debug('Not looking up new blocks due to maximum amount of allowed disk space used')
break
peer = comm_inst.pickOnlinePeer() # select random online peer
@ -60,11 +62,11 @@ def lookup_blocks_from_communicator(comm_inst):
logger.warn('Could not get new blocks from %s.' % peer, error = error)
newBlocks = False
else:
comm_inst.dbTimestamps[peer] = comm_inst._core._utils.getRoundedEpoch(roundS=60)
comm_inst.dbTimestamps[peer] = epoch.get_rounded_epoch(roundS=60)
if newBlocks != False:
# if request was a success
for i in newBlocks.split('\n'):
if comm_inst._core._utils.validateHash(i):
if stringvalidators.validate_hash(i):
# if newline separated string is valid hash
if not i in existingBlocks:
# if block does not exist on disk and is not already in block queue

View File

@ -20,14 +20,14 @@
'''
import logger
from utils import netutils
from onionrutils import localcommand
from onionrutils import localcommand, epoch
def net_check(comm_inst):
'''Check if we are connected to the internet or not when we can't connect to any peers'''
rec = False # for detecting if we have received incoming connections recently
c = comm_inst._core
if len(comm_inst.onlinePeers) == 0:
try:
if (c._utils.getEpoch() - int(localcommand.local_command(c, '/lastconnect'))) <= 60:
if (epoch.get_epoch() - int(localcommand.local_command(c, '/lastconnect'))) <= 60:
comm_inst.isOnline = True
rec = True
except ValueError:

View File

@ -20,16 +20,17 @@
import logger
from communicatorutils import proxypicker
import onionrblockapi as block
from onionrutils import localcommand
from onionrutils import localcommand, stringvalidators, basicrequests
def upload_blocks_from_communicator(comm_inst):
# when inserting a block, we try to upload it to a few peers to add some deniability
triedPeers = []
finishedUploads = []
comm_inst.blocksToUpload = comm_inst._core._crypto.randomShuffle(comm_inst.blocksToUpload)
core = comm_inst._core
comm_inst.blocksToUpload = core._crypto.randomShuffle(comm_inst.blocksToUpload)
if len(comm_inst.blocksToUpload) != 0:
for bl in comm_inst.blocksToUpload:
if not comm_inst._core._utils.validateHash(bl):
if not stringvalidators.validate_hash(bl):
logger.warn('Requested to upload invalid block')
comm_inst.decrementThreadCount('uploadBlock')
return
@ -42,8 +43,8 @@ def upload_blocks_from_communicator(comm_inst):
data = {'block': block.Block(bl).getRaw()}
proxyType = proxypicker.pick_proxy(peer)
logger.info("Uploading block to " + peer)
if not comm_inst._core._utils.doPostRequest(url, data=data, proxyType=proxyType) == False:
localcommand.local_command(comm_inst._core, 'waitforshare/' + bl, post=True)
if not basicrequests.do_post_request(core, url, data=data, proxyType=proxyType) == False:
localcommand.local_command(core, 'waitforshare/' + bl, post=True)
finishedUploads.append(bl)
for x in finishedUploads:
try:

View File

@ -30,6 +30,7 @@ import dbcreator, onionrstorage, serializeddata, subprocesspow
from etc import onionrvalues, powchoice
from onionrutils import localcommand, stringvalidators, bytesconverter, epoch
from onionrutils import blockmetadata
import storagecounter
class Core:
def __init__(self, torPort=0):
@ -41,76 +42,76 @@ class Core:
if not self.dataDir.endswith('/'):
self.dataDir += '/'
try:
self.onionrInst = None
self.queueDB = self.dataDir + 'queue.db'
self.peerDB = self.dataDir + 'peers.db'
self.blockDB = self.dataDir + 'blocks.db'
self.blockDataLocation = self.dataDir + 'blocks/'
self.blockDataDB = self.blockDataLocation + 'block-data.db'
self.publicApiHostFile = self.dataDir + 'public-host.txt'
self.privateApiHostFile = self.dataDir + 'private-host.txt'
self.addressDB = self.dataDir + 'address.db'
self.hsAddress = ''
self.i2pAddress = config.get('i2p.own_addr', None)
self.bootstrapFileLocation = 'static-data/bootstrap-nodes.txt'
self.bootstrapList = []
self.requirements = onionrvalues.OnionrValues()
self.torPort = torPort
self.dataNonceFile = self.dataDir + 'block-nonces.dat'
self.dbCreate = dbcreator.DBCreator(self)
self.forwardKeysFile = self.dataDir + 'forward-keys.db'
self.keyStore = simplekv.DeadSimpleKV(self.dataDir + 'cachedstorage.dat', refresh_seconds=5)
# Socket data, defined here because of multithreading constraints with gevent
self.killSockets = False
self.startSocket = {}
self.socketServerConnData = {}
self.socketReasons = {}
self.socketServerResponseData = {}
#try:
self.usageFile = self.dataDir + 'disk-usage.txt'
self.config = config
self.maxBlockSize = 10000000 # max block size in bytes
self.usageFile = self.dataDir + 'disk-usage.txt'
self.config = config
self.onionrInst = None
self.queueDB = self.dataDir + 'queue.db'
self.peerDB = self.dataDir + 'peers.db'
self.blockDB = self.dataDir + 'blocks.db'
self.blockDataLocation = self.dataDir + 'blocks/'
self.blockDataDB = self.blockDataLocation + 'block-data.db'
self.publicApiHostFile = self.dataDir + 'public-host.txt'
self.privateApiHostFile = self.dataDir + 'private-host.txt'
self.addressDB = self.dataDir + 'address.db'
self.hsAddress = ''
self.i2pAddress = config.get('i2p.own_addr', None)
self.bootstrapFileLocation = 'static-data/bootstrap-nodes.txt'
self.bootstrapList = []
self.requirements = onionrvalues.OnionrValues()
self.torPort = torPort
self.dataNonceFile = self.dataDir + 'block-nonces.dat'
self.dbCreate = dbcreator.DBCreator(self)
self.forwardKeysFile = self.dataDir + 'forward-keys.db'
self.keyStore = simplekv.DeadSimpleKV(self.dataDir + 'cachedstorage.dat', refresh_seconds=5)
self.storage_counter = storagecounter.StorageCounter(self)
# Socket data, defined here because of multithreading constraints with gevent
self.killSockets = False
self.startSocket = {}
self.socketServerConnData = {}
self.socketReasons = {}
self.socketServerResponseData = {}
self.maxBlockSize = 10000000 # max block size in bytes
if not os.path.exists(self.dataDir):
os.mkdir(self.dataDir)
if not os.path.exists(self.dataDir + 'blocks/'):
os.mkdir(self.dataDir + 'blocks/')
if not os.path.exists(self.blockDB):
self.createBlockDB()
if not os.path.exists(self.forwardKeysFile):
self.dbCreate.createForwardKeyDB()
if not os.path.exists(self.peerDB):
self.createPeerDB()
if not os.path.exists(self.addressDB):
self.createAddressDB()
if not os.path.exists(self.dataDir):
os.mkdir(self.dataDir)
if not os.path.exists(self.dataDir + 'blocks/'):
os.mkdir(self.dataDir + 'blocks/')
if not os.path.exists(self.blockDB):
self.createBlockDB()
if not os.path.exists(self.forwardKeysFile):
self.dbCreate.createForwardKeyDB()
if not os.path.exists(self.peerDB):
self.createPeerDB()
if not os.path.exists(self.addressDB):
self.createAddressDB()
if os.path.exists(self.dataDir + '/hs/hostname'):
with open(self.dataDir + '/hs/hostname', 'r') as hs:
self.hsAddress = hs.read().strip()
if os.path.exists(self.dataDir + '/hs/hostname'):
with open(self.dataDir + '/hs/hostname', 'r') as hs:
self.hsAddress = hs.read().strip()
# Load bootstrap address list
if os.path.exists(self.bootstrapFileLocation):
with open(self.bootstrapFileLocation, 'r') as bootstrap:
bootstrap = bootstrap.read()
for i in bootstrap.split('\n'):
self.bootstrapList.append(i)
else:
logger.warn('Warning: address bootstrap file not found ' + self.bootstrapFileLocation)
# Load bootstrap address list
if os.path.exists(self.bootstrapFileLocation):
with open(self.bootstrapFileLocation, 'r') as bootstrap:
bootstrap = bootstrap.read()
for i in bootstrap.split('\n'):
self.bootstrapList.append(i)
else:
logger.warn('Warning: address bootstrap file not found ' + self.bootstrapFileLocation)
self.use_subprocess = powchoice.use_subprocess(self)
# Initialize the crypto object
self._crypto = onionrcrypto.OnionrCrypto(self)
self._blacklist = onionrblacklist.OnionrBlackList(self)
self.serializer = serializeddata.SerializedData(self)
self.use_subprocess = powchoice.use_subprocess(self)
self._utils = onionrutils.OnionrUtils(self)
# Initialize the crypto object
self._crypto = onionrcrypto.OnionrCrypto(self)
self._blacklist = onionrblacklist.OnionrBlackList(self)
self.serializer = serializeddata.SerializedData(self)
except Exception as error:
logger.error('Failed to initialize core Onionr library.', error=error)
logger.fatal('Cannot recover from error.')
sys.exit(1)
# except Exception as error:
# print(str(error))
# logger.error('Failed to initialize core Onionr library.', error=error, terminal=True)
# logger.fatal('Cannot recover from error.', terminal=True)
# sys.exit(1)
return
def refreshFirstStartVars(self):
@ -313,7 +314,7 @@ class Core:
encryptType must be specified to encrypt a block
'''
allocationReachedMessage = 'Cannot insert block, disk allocation reached.'
if self._utils.storageCounter.isFull():
if self.storage_counter.isFull():
logger.error(allocationReachedMessage)
return False
retData = False
@ -439,7 +440,7 @@ class Core:
localcommand.local_command(self, '/waitforshare/' + retData, post=True, maxWait=5)
self.daemonQueueAdd('uploadBlock', retData)
self.addToBlockDB(retData, selfInsert=True, dataSaved=True)
blockmetadata.process_block_metadata(retData)
blockmetadata.process_block_metadata(self, retData)
if retData != False:
if plaintextPeer == onionrvalues.DENIABLE_PEER_ADDRESS:

View File

@ -1,5 +1,5 @@
import os, sqlite3
import onionrutils
from onionrutils import epoch, blockmetadata
def add_to_block_DB(core_inst, newHash, selfInsert=False, dataSaved=False):
'''
Add a hash value to the block db
@ -9,11 +9,11 @@ def add_to_block_DB(core_inst, newHash, selfInsert=False, dataSaved=False):
if not os.path.exists(core_inst.blockDB):
raise Exception('Block db does not exist')
if onionrutils.has_block(core_inst, newHash):
if blockmetadata.has_block(core_inst, newHash):
return
conn = sqlite3.connect(core_inst.blockDB, timeout=30)
c = conn.cursor()
currentTime = core_inst._utils.getEpoch() + core_inst._crypto.secrets.randbelow(301)
currentTime = epoch.get_epoch() + core_inst._crypto.secrets.randbelow(301)
if selfInsert or dataSaved:
selfInsert = 1
else:

View File

@ -1,9 +1,10 @@
import sqlite3
from onionrutils import epoch
def get_expired_blocks(core_inst):
'''Returns a list of expired blocks'''
conn = sqlite3.connect(core_inst.blockDB, timeout=30)
c = conn.cursor()
date = int(core_inst._utils.getEpoch())
date = int(epoch.get_epoch())
execute = 'SELECT hash FROM hashes WHERE expire <= %s ORDER BY dateReceived;' % (date,)

View File

@ -1,6 +1,6 @@
import sqlite3, os
import onionrevents as events
from onionrutils import localcommand
from onionrutils import localcommand, epoch
def daemon_queue(core_inst):
'''
@ -38,7 +38,7 @@ def daemon_queue_add(core_inst, command, data='', responseID=''):
retData = True
date = core_inst._utils.getEpoch()
date = epoch.get_epoch()
conn = sqlite3.connect(core_inst.queueDB, timeout=30)
c = conn.cursor()
t = (command, data, date, responseID)

View File

@ -1,5 +1,6 @@
import sqlite3
import logger
from onionrutils import epoch
def list_peers(core_inst, randomOrder=True, getPow=False, trust=0):
'''
Return a list of public keys (misleading function name)
@ -56,7 +57,7 @@ def list_adders(core_inst, randomOrder=True, i2p=True, recent=0):
testList = list(addressList) # create new list to iterate
for address in testList:
try:
if recent > 0 and (core_inst._utils.getEpoch() - core_inst.getAddressInfo(address, 'lastConnect')) > recent:
if recent > 0 and (epoch.get_epoch() - core_inst.getAddressInfo(address, 'lastConnect')) > recent:
raise TypeError # If there is no last-connected date or it was too long ago, don't add peer to list if recent is not 0
except TypeError:
addressList.remove(address)

View File

@ -18,7 +18,8 @@
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
from flask import Response, abort
import config, onionrutils
import config
from onionrutils import bytesconverter, stringvalidators
def get_public_block_list(clientAPI, publicAPI, request):
# Provide a list of our blocks, with a date offset
dateAdjust = request.args.get('date')
@ -33,7 +34,7 @@ def get_public_block_list(clientAPI, publicAPI, request):
def get_block_data(clientAPI, publicAPI, data):
'''data is the block hash in hex'''
resp = ''
if clientAPI._utils.validateHash(data):
if stringvalidators.validate_hash(data):
if not clientAPI._core.config.get('general.hide_created_blocks', True) or data not in publicAPI.hideBlocks:
if data in clientAPI._core.getBlockList():
block = clientAPI.getBlockData(data, raw=True)
@ -41,7 +42,7 @@ def get_block_data(clientAPI, publicAPI, data):
block = block.encode() # Encode in case data is binary
except AttributeError:
abort(404)
block = onionrutils.str_to_bytes(block)
block = bytesconverter.str_to_bytes(block)
resp = block
if len(resp) == 0:
abort(404)

View File

@ -17,20 +17,20 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
from onionrutils import bytesconverter
import onionrcrypto
class KeyManager:
def __init__(self, crypto):
assert isinstance(crypto, onionrcrypto.OnionrCrypto)
self._core = crypto._core
self._utils = self._core._utils
self.keyFile = crypto._keyFile
self.crypto = crypto
def addKey(self, pubKey=None, privKey=None):
if type(pubKey) is type(None) and type(privKey) is type(None):
pubKey, privKey = self.crypto.generatePubKey()
pubKey = self.crypto._core._utils.bytesToStr(pubKey)
privKey = self.crypto._core._utils.bytesToStr(privKey)
pubKey = bytesconverter.bytes_to_str(pubKey)
privKey = bytesconverter.bytes_to_str(privKey)
try:
if pubKey in self.getPubkeyList():
raise ValueError('Pubkey already in list: %s' % (pubKey,))

View File

@ -32,7 +32,6 @@ if sys.version_info[0] == 2 or sys.version_info[1] < MIN_PY_VERSION:
import os, base64, random, shutil, time, platform, signal
from threading import Thread
import api, core, config, logger, onionrplugins as plugins, onionrevents as events
import onionrutils
import netcontroller
from netcontroller import NetController
from onionrblockapi import Block
@ -51,6 +50,7 @@ class Onionr:
Main Onionr class. This is for the CLI program, and does not handle much of the logic.
In general, external programs and plugins should not use this class.
'''
self.API_VERSION = API_VERSION
self.userRunDir = os.getcwd() # Directory user runs the program from
self.killed = False

View File

@ -18,6 +18,7 @@
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import sqlite3, os, logger
from onionrutils import epoch, bytesconverter
class OnionrBlackList:
def __init__(self, coreInst):
self.blacklistDB = coreInst.dataDir + 'blacklist.db'
@ -28,7 +29,7 @@ class OnionrBlackList:
return
def inBlacklist(self, data):
hashed = self._core._utils.bytesToStr(self._core._crypto.sha3Hash(data))
hashed = bytesconverter.bytes_to_str(self._core._crypto.sha3Hash(data))
retData = False
if not hashed.isalnum():
@ -56,7 +57,7 @@ class OnionrBlackList:
def deleteExpired(self, dataType=0):
'''Delete expired entries'''
deleteList = []
curTime = self._core._utils.getEpoch()
curTime = epoch.get_epoch()
try:
int(dataType)
@ -98,7 +99,7 @@ class OnionrBlackList:
2=pubkey
'''
# we hash the data so we can remove data entirely from our node's disk
hashed = self._core._utils.bytesToStr(self._core._crypto.sha3Hash(data))
hashed = bytesconverter.bytes_to_str(self._core._crypto.sha3Hash(data))
if len(hashed) > 64:
raise Exception("Hashed data is too large")
@ -115,7 +116,7 @@ class OnionrBlackList:
if self.inBlacklist(hashed):
return
insert = (hashed,)
blacklistDate = self._core._utils.getEpoch()
blacklistDate = epoch.get_epoch()
try:
self._dbExecute("INSERT INTO blacklist (hash, dataType, blacklistDate, expire) VALUES(?, ?, ?, ?);", (str(hashed), dataType, blacklistDate, expire))
except sqlite3.IntegrityError:

View File

@ -21,7 +21,7 @@
import core as onionrcore, logger, config, onionrexceptions, nacl.exceptions
import json, os, sys, datetime, base64, onionrstorage
from onionrusers import onionrusers
from onionrutils import stringvalidators
from onionrutils import stringvalidators, epoch
class Block:
blockCacheOrder = list() # NEVER write your own code that writes to this!
@ -89,7 +89,7 @@ class Block:
# Check for replay attacks
try:
if self.core._utils.getEpoch() - self.core.getBlockDate(self.hash) < 60:
if epoch.get_epoch() - self.core.getBlockDate(self.hash) < 60:
assert self.core._crypto.replayTimestampValidation(self.bmetadata['rply'])
except (AssertionError, KeyError, TypeError) as e:
if not self.bypassReplayCheck:

View File

@ -19,12 +19,13 @@
'''
import sys
import logger
from onionrutils import stringvalidators
def ban_block(o_inst):
try:
ban = sys.argv[2]
except IndexError:
ban = logger.readline('Enter a block hash:')
if o_inst.onionrUtils.validateHash(ban):
if stringvalidators.validate_hash(ban):
if not o_inst.onionrCore._blacklist.inBlacklist(ban):
try:
o_inst.onionrCore._blacklist.addToDB(ban)

View File

@ -19,6 +19,7 @@
'''
import sys, os
import logger, onionrstorage
from onionrutils import stringvalidators
def doExport(o_inst, bHash):
exportDir = o_inst.dataDir + 'block-export/'
if not os.path.exists(exportDir):
@ -34,7 +35,7 @@ def doExport(o_inst, bHash):
def export_block(o_inst):
exportDir = o_inst.dataDir + 'block-export/'
try:
assert o_inst.onionrUtils.validateHash(sys.argv[2])
assert stringvalidators.validate_hash(sys.argv[2])
except (IndexError, AssertionError):
logger.error('No valid block hash specified.', terminal=True)
sys.exit(1)

View File

@ -21,6 +21,7 @@
import base64, sys, os
import logger
from onionrblockapi import Block
from onionrutils import stringvalidators
def add_file(o_inst, singleBlock=False, blockType='bin'):
'''
Adds a file to the onionr network
@ -60,7 +61,7 @@ def getFile(o_inst):
if os.path.exists(fileName):
logger.error("File already exists", terminal=True)
return
if not o_inst.onionrUtils.validateHash(bHash):
if not stringvalidators.validate_hash(bHash):
logger.error('Block hash is invalid', terminal=True)
return

View File

@ -21,7 +21,7 @@ import os, binascii, base64, hashlib, time, sys, hmac, secrets
import nacl.signing, nacl.encoding, nacl.public, nacl.hash, nacl.pwhash, nacl.utils, nacl.secret
import unpaddedbase32
import logger, onionrproofs
from onionrutils import stringvalidators
from onionrutils import stringvalidators, epoch, bytesconverter
import onionrexceptions, keymanager, core, onionrutils
import config
config.reload()
@ -95,10 +95,10 @@ class OnionrCrypto:
def pubKeyEncrypt(self, data, pubkey, encodedData=False):
'''Encrypt to a public key (Curve25519, taken from base32 Ed25519 pubkey)'''
pubkey = unpaddedbase32.repad(onionrutils.str_to_bytes(pubkey))
pubkey = unpaddedbase32.repad(bytesconverter.str_to_bytes(pubkey))
retVal = ''
box = None
data = onionrutils.str_to_bytes(data)
data = bytesconverter.str_to_bytes(data)
pubkey = nacl.signing.VerifyKey(pubkey, encoder=nacl.encoding.Base32Encoder()).to_curve25519_public_key()
@ -182,7 +182,7 @@ class OnionrCrypto:
def generateDeterministic(self, passphrase, bypassCheck=False):
'''Generate a Ed25519 public key pair from a password'''
passStrength = self.deterministicRequirement
passphrase = onionrutils.str_to_bytes(passphrase) # Convert to bytes if not already
passphrase = bytesconverter.str_to_bytes(passphrase) # Convert to bytes if not already
# Validate passphrase length
if not bypassCheck:
if len(passphrase) < passStrength:
@ -202,7 +202,7 @@ class OnionrCrypto:
if pubkey == '':
pubkey = self.pubKey
prev = ''
pubkey = onionrutils.str_to_bytes(pubkey)
pubkey = bytesconverter.str_to_bytes(pubkey)
for i in range(self.HASH_ID_ROUNDS):
try:
prev = prev.encode()
@ -266,7 +266,7 @@ class OnionrCrypto:
@staticmethod
def replayTimestampValidation(timestamp):
if core.Core()._utils.getEpoch() - int(timestamp) > 2419200:
if epoch.get_epoch() - int(timestamp) > 2419200:
return False
else:
return True

View File

@ -19,6 +19,7 @@
'''
import sqlite3
import core, config, logger
from onionrutils import epoch
config.reload()
class PeerProfiles:
'''
@ -106,7 +107,7 @@ def peerCleanup(coreInst):
if PeerProfiles(address, coreInst).score < minScore:
coreInst.removeAddress(address)
try:
if (int(coreInst._utils.getEpoch()) - int(coreInst.getPeerInfo(address, 'dateSeen'))) >= 600:
if (int(epoch.get_epoch()) - int(coreInst.getPeerInfo(address, 'dateSeen'))) >= 600:
expireTime = 600
else:
expireTime = 86400

View File

@ -170,9 +170,6 @@ class pluginapi:
def get_core(self):
return self.core
def get_utils(self):
return self.get_core()._utils
def get_crypto(self):
return self.get_core()._crypto

View File

@ -30,10 +30,7 @@ def getDifficultyModifier(coreOrUtilsInst=None):
'''
classInst = coreOrUtilsInst
retData = 0
if isinstance(classInst, core.Core):
useFunc = classInst._utils.storageCounter.getPercent
else:
useFunc = core.Core()._utils.storageCounter.getPercent
useFunc = classInst.storage_counter.getPercent
percentUse = useFunc()

View File

@ -21,7 +21,7 @@ import time
import stem
import core
from . import connectionserver, bootstrapservice
from onionrutils import stringvalidators
from onionrutils import stringvalidators, basicrequests
class OnionrServices:
'''
@ -47,7 +47,7 @@ class OnionrServices:
base_url = 'http://%s/' % (address,)
socks = self._core.config.get('tor.socksport')
for x in range(BOOTSTRAP_TRIES):
if self._core._utils.doGetRequest(base_url + 'ping', port=socks, ignoreAPI=True) == 'pong!':
if basicrequests.do_get_request(self._core, base_url + 'ping', port=socks, ignoreAPI=True) == 'pong!':
# if bootstrap server is online, tell them our service address
connectionserver.ConnectionServer(peer, address, core_inst=self._core)
else:

View File

@ -24,7 +24,7 @@ from flask import Flask, Response
import core
from netcontroller import getOpenPort
from . import httpheaders
from onionrutils import stringvalidators
from onionrutils import stringvalidators, epoch
def bootstrap_client_service(peer, core_inst=None, bootstrap_timeout=300):
'''
@ -77,7 +77,7 @@ def bootstrap_client_service(peer, core_inst=None, bootstrap_timeout=300):
# Create the v3 onion service
response = controller.create_ephemeral_hidden_service({80: bootstrap_port}, key_type = 'NEW', key_content = 'ED25519-V3', await_publication = True)
core_inst.insertBlock(response.service_id, header='con', sign=True, encryptType='asym',
asymPeer=peer, disableForward=True, expire=(core_inst._utils.getEpoch() + bootstrap_timeout))
asymPeer=peer, disableForward=True, expire=(epoch.get_epoch() + bootstrap_timeout))
# Run the bootstrap server
try:
http_server.serve_forever()

View File

@ -24,7 +24,7 @@ import core, logger, httpapi
import onionrexceptions
from netcontroller import getOpenPort
import api
from onionrutils import stringvalidators
from onionrutils import stringvalidators, basicrequests
from . import httpheaders
class ConnectionServer:
@ -72,7 +72,7 @@ class ConnectionServer:
try:
for x in range(3):
attempt = self.core_inst._utils.doPostRequest('http://' + address + '/bs/' + response.service_id, port=socks)
attempt = basicrequests.do_post_request(self.core_inst, 'http://' + address + '/bs/' + response.service_id, port=socks)
if attempt == 'success':
break
else:

View File

@ -18,7 +18,7 @@
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import core, sys, sqlite3, os, dbcreator, onionrexceptions
from onionrutils import bytesconverter
from onionrutils import bytesconverter, stringvalidators
DB_ENTRY_SIZE_LIMIT = 10000 # Will be a config option
@ -66,7 +66,7 @@ def deleteBlock(coreInst, blockHash):
def store(coreInst, data, blockHash=''):
assert isinstance(coreInst, core.Core)
assert coreInst._utils.validateHash(blockHash)
assert stringvalidators.validate_hash(blockHash)
ourHash = coreInst._crypto.sha3Hash(data)
if blockHash != '':
assert ourHash == blockHash
@ -81,7 +81,7 @@ def store(coreInst, data, blockHash=''):
def getData(coreInst, bHash):
assert isinstance(coreInst, core.Core)
assert coreInst._utils.validateHash(bHash)
assert stringvalidators.validate_hash(bHash)
bHash = bytesconverter.bytes_to_str(bHash)

View File

@ -1,5 +1,6 @@
import sys, sqlite3
import onionrexceptions, onionrstorage
from onionrutils import stringvalidators
def remove_block(core_inst, block):
'''
remove a block from this node (does not automatically blacklist)
@ -7,7 +8,7 @@ def remove_block(core_inst, block):
**You may want blacklist.addToDB(blockHash)
'''
if core_inst._utils.validateHash(block):
if stringvalidators.validate_hash(block):
conn = sqlite3.connect(core_inst.blockDB, timeout=30)
c = conn.cursor()
t = (block,)
@ -15,6 +16,6 @@ def remove_block(core_inst, block):
conn.commit()
conn.close()
dataSize = sys.getsizeof(onionrstorage.getData(core_inst, block))
core_inst._utils.storageCounter.removeBytes(dataSize)
core_inst.storage_counter.removeBytes(dataSize)
else:
raise onionrexceptions.InvalidHexHash

View File

@ -19,7 +19,7 @@ def set_data(core_inst, data):
try:
onionrstorage.getData(core_inst, dataHash)
except onionrexceptions.NoDataAvailable:
if core_inst._utils.storageCounter.addBytes(dataSize) != False:
if core_inst.storage_counter.addBytes(dataSize) != False:
onionrstorage.store(core_inst, data, blockHash=dataHash)
conn = sqlite3.connect(core_inst.blockDB, timeout=30)
c = conn.cursor()

View File

@ -20,7 +20,7 @@
import os, json, onionrexceptions
import unpaddedbase32
from onionrusers import onionrusers
from onionrutils import bytesconverter
from onionrutils import bytesconverter, epoch
class ContactManager(onionrusers.OnionrUser):
def __init__(self, coreInst, publicKey, saveUser=False, recordExpireSeconds=5):
@ -42,7 +42,7 @@ class ContactManager(onionrusers.OnionrUser):
dataFile.write(data)
def _loadData(self):
self.lastRead = self._core._utils.getEpoch()
self.lastRead = epoch.get_epoch()
retData = {}
if os.path.exists(self.dataFile):
with open(self.dataFile, 'r') as dataFile:
@ -62,7 +62,7 @@ class ContactManager(onionrusers.OnionrUser):
if self.deleted:
raise onionrexceptions.ContactDeleted
if (self._core._utils.getEpoch() - self.lastRead >= self.recordExpire) or forceReload:
if (epoch.get_epoch() - self.lastRead >= self.recordExpire) or forceReload:
self.data = self._loadData()
try:
return self.data[key]

View File

@ -18,8 +18,7 @@
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import logger, onionrexceptions, json, sqlite3, time
from onionrutils import stringvalidators, bytesconverter
from onionrutils import stringvalidators, bytesconverter, epoch
import unpaddedbase32
import nacl.exceptions
@ -28,7 +27,7 @@ def deleteExpiredKeys(coreInst):
conn = sqlite3.connect(coreInst.forwardKeysFile, timeout=10)
c = conn.cursor()
curTime = coreInst._utils.getEpoch()
curTime = epoch.get_epoch()
c.execute("DELETE from myForwardKeys where expire <= ?", (curTime,))
conn.commit()
conn.execute("VACUUM")
@ -40,7 +39,7 @@ def deleteTheirExpiredKeys(coreInst, pubkey):
c = conn.cursor()
# Prepare the insert
command = (pubkey, coreInst._utils.getEpoch())
command = (pubkey, epoch.get_epoch())
c.execute("DELETE from forwardKeys where peerKey = ? and expire <= ?", command)
@ -160,10 +159,10 @@ class OnionrUser:
conn = sqlite3.connect(self._core.forwardKeysFile, timeout=10)
c = conn.cursor()
# Prepare the insert
time = self._core._utils.getEpoch()
time = epoch.get_epoch()
newKeys = self._core._crypto.generatePubKey()
newPub = self._core._utils.bytesToStr(newKeys[0])
newPriv = self._core._utils.bytesToStr(newKeys[1])
newPub = bytesconverter.bytes_to_str(newKeys[0])
newPriv = bytesconverter.bytes_to_str(newKeys[1])
command = (self.publicKey, newPub, newPriv, time, expire + time)
@ -178,7 +177,7 @@ class OnionrUser:
conn = sqlite3.connect(self._core.forwardKeysFile, timeout=10)
c = conn.cursor()
pubkey = self.publicKey
pubkey = self._core._utils.bytesToStr(pubkey)
pubkey = bytesconverter.bytes_to_str(pubkey)
command = (pubkey,)
keyList = [] # list of tuples containing pub, private for peer
@ -192,7 +191,7 @@ class OnionrUser:
return list(keyList)
def addForwardKey(self, newKey, expire=DEFAULT_KEY_EXPIRE):
newKey = self._core._utils.bytesToStr(unpaddedbase32.repad(bytesconverter.str_to_bytes(newKey)))
newKey = bytesconverter.bytes_to_str(unpaddedbase32.repad(bytesconverter.str_to_bytes(newKey)))
if not stringvalidators.validate_pub_key(newKey):
# Do not add if something went wrong with the key
raise onionrexceptions.InvalidPubkey(newKey)
@ -201,7 +200,7 @@ class OnionrUser:
c = conn.cursor()
# Get the time we're inserting the key at
timeInsert = self._core._utils.getEpoch()
timeInsert = epoch.get_epoch()
# Look at our current keys for duplicate key data or time
for entry in self._getForwardKeys():

View File

@ -1,120 +0,0 @@
'''
Onionr - Private P2P Communication
OnionrUtils offers various useful functions to Onionr. Relatively misc.
'''
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
# Misc functions that do not fit in the main api, but are useful
import sys, os, sqlite3, binascii, time, base64, json, glob, shutil, math, re, urllib.parse, string
import requests
import nacl.signing, nacl.encoding
import unpaddedbase32
import onionrexceptions, config, logger
import onionrevents
import storagecounter
from etc import pgpwords, onionrvalues
from . import localcommand, blockmetadata, basicrequests, validatemetadata
from . import stringvalidators
config.reload()
class OnionrUtils:
'''
Various useful functions for validating things, etc functions, connectivity
'''
def __init__(self, coreInstance):
#self.fingerprintFile = 'data/own-fingerprint.txt' #TODO Remove since probably not needed
self._core = coreInstance # onionr core instance
self.avoidDupe = [] # list used to prevent duplicate requests per peer for certain actions
self.peerProcessing = {} # dict of current peer actions: peer, actionList
self.storageCounter = storagecounter.StorageCounter(self._core) # used to keep track of how much data onionr is using on disk
return
def escapeAnsi(self, line):
'''
Remove ANSI escape codes from a string with regex
taken or adapted from: https://stackoverflow.com/a/38662876 by user https://stackoverflow.com/users/802365/%c3%89douard-lopez
cc-by-sa-3 license https://creativecommons.org/licenses/by-sa/3.0/
'''
ansi_escape = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]')
return ansi_escape.sub('', line)
def validateHash(self, data, length=64):
'''
Validate if a string is a valid hash hex digest (does not compare, just checks length and charset)
'''
return stringvalidators.validate_hash(self, data, length)
def getEpoch(self):
'''returns epoch'''
return math.floor(time.time())
def doPostRequest(self, url, data={}, port=0, proxyType='tor'):
'''
Do a POST request through a local tor or i2p instance
'''
return basicrequests.do_post_request(self, url, data, port, proxyType)
def doGetRequest(self, url, port=0, proxyType='tor', ignoreAPI=False, returnHeaders=False):
'''
Do a get request through a local tor or i2p instance
'''
return basicrequests.do_get_request(self, url, port, proxyType, ignoreAPI, returnHeaders)
def size(path='.'):
'''
Returns the size of a folder's contents in bytes
'''
total = 0
if os.path.exists(path):
if os.path.isfile(path):
total = os.path.getsize(path)
else:
for entry in os.scandir(path):
if entry.is_file():
total += entry.stat().st_size
elif entry.is_dir():
total += size(entry.path)
return total
def humanSize(num, suffix='B'):
'''
Converts from bytes to a human readable format.
'''
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1024.0:
return "%.1f %s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f %s%s" % (num, 'Yi', suffix)
def has_block(core_inst, hash):
'''
Check for new block in the list
'''
conn = sqlite3.connect(core_inst.blockDB)
c = conn.cursor()
if not stringvalidators.validate_hash(hash):
raise Exception("Invalid hash")
for result in c.execute("SELECT COUNT() FROM hashes WHERE hash = ?", (hash,)):
if result[0] >= 1:
conn.commit()
conn.close()
return True
else:
conn.commit()
conn.close()
return False

View File

@ -1,12 +1,12 @@
import requests
import logger, onionrexceptions
def do_post_request(utils_inst, url, data={}, port=0, proxyType='tor'):
def do_post_request(core_inst, url, data={}, port=0, proxyType='tor'):
'''
Do a POST request through a local tor or i2p instance
'''
if proxyType == 'tor':
if port == 0:
port = utils_inst._core.torPort
port = core_inst.torPort
proxies = {'http': 'socks4a://127.0.0.1:' + str(port), 'https': 'socks4a://127.0.0.1:' + str(port)}
elif proxyType == 'i2p':
proxies = {'http': 'http://127.0.0.1:4444'}
@ -24,11 +24,11 @@ def do_post_request(utils_inst, url, data={}, port=0, proxyType='tor'):
retData = False
return retData
def do_get_request(utils_inst, url, port=0, proxyType='tor', ignoreAPI=False, returnHeaders=False):
def do_get_request(core_inst, url, port=0, proxyType='tor', ignoreAPI=False, returnHeaders=False):
'''
Do a get request through a local tor or i2p instance
'''
API_VERSION = utils_inst._core.onionrInst.API_VERSION
API_VERSION = core_inst.onionrInst.API_VERSION
retData = False
if proxyType == 'tor':
if port == 0:
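With the utils wrapper gone, basicrequests takes the Core instance directly as its first argument, as the signature change above shows. A hedged caller sketch; the .onion URL and SOCKS port are illustrative, and core stands in for an existing Core instance:

    from onionrutils import basicrequests

    socks_port = 9050  # illustrative local Tor SOCKS port

    # Was: core._utils.doGetRequest(url, port=socks_port, ignoreAPI=True)
    pong = basicrequests.do_get_request(core, 'http://example.onion/ping',
                                        port=socks_port, ignoreAPI=True)
    if pong == 'pong!':
        print('peer is reachable')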

View File

@ -1,9 +1,9 @@
import json
import json, sqlite3
import logger, onionrevents
from onionrusers import onionrusers
from etc import onionrvalues
import onionrblockapi
from . import epoch
from . import epoch, stringvalidators, bytesconverter
def get_block_metadata_from_data(blockData):
'''
accepts block contents as string, returns a tuple of
@ -33,24 +33,24 @@ def get_block_metadata_from_data(blockData):
meta = metadata['meta']
return (metadata, meta, data)
def process_block_metadata(utils_inst, blockHash):
def process_block_metadata(core_inst, blockHash):
'''
Read metadata from a block and cache it to the block database
'''
curTime = epoch.get_rounded_epoch(roundS=60)
myBlock = onionrblockapi.Block(blockHash, utils_inst._core)
myBlock = onionrblockapi.Block(blockHash, core_inst)
if myBlock.isEncrypted:
myBlock.decrypt()
if (myBlock.isEncrypted and myBlock.decrypted) or (not myBlock.isEncrypted):
blockType = myBlock.getMetadata('type') # we would use myBlock.getType() here, but it is bugged with encrypted blocks
signer = utils_inst.bytesToStr(myBlock.signer)
signer = bytesconverter.bytes_to_str(myBlock.signer)
valid = myBlock.verifySig()
if myBlock.getMetadata('newFSKey') is not None:
onionrusers.OnionrUser(utils_inst._core, signer).addForwardKey(myBlock.getMetadata('newFSKey'))
onionrusers.OnionrUser(core_inst, signer).addForwardKey(myBlock.getMetadata('newFSKey'))
try:
if len(blockType) <= 10:
utils_inst._core.updateBlockInfo(blockHash, 'dataType', blockType)
core_inst.updateBlockInfo(blockHash, 'dataType', blockType)
except TypeError:
logger.warn("Missing block information")
pass
@ -61,9 +61,28 @@ def process_block_metadata(utils_inst, blockHash):
except (AssertionError, ValueError, TypeError) as e:
expireTime = onionrvalues.OnionrValues().default_expire + curTime
finally:
utils_inst._core.updateBlockInfo(blockHash, 'expire', expireTime)
core_inst.updateBlockInfo(blockHash, 'expire', expireTime)
if not blockType is None:
utils_inst._core.updateBlockInfo(blockHash, 'dataType', blockType)
onionrevents.event('processblocks', data = {'block': myBlock, 'type': blockType, 'signer': signer, 'validSig': valid}, onionr = utils_inst._core.onionrInst)
core_inst.updateBlockInfo(blockHash, 'dataType', blockType)
onionrevents.event('processblocks', data = {'block': myBlock, 'type': blockType, 'signer': signer, 'validSig': valid}, onionr = core_inst.onionrInst)
else:
pass
pass
def has_block(core_inst, hash):
'''
Check for new block in the list
'''
conn = sqlite3.connect(core_inst.blockDB)
c = conn.cursor()
if not stringvalidators.validate_hash(hash):
raise Exception("Invalid hash")
for result in c.execute("SELECT COUNT() FROM hashes WHERE hash = ?", (hash,)):
if result[0] >= 1:
conn.commit()
conn.close()
return True
else:
conn.commit()
conn.close()
return False
return False

View File

@ -6,6 +6,6 @@ def get_rounded_epoch(roundS=60):
epoch = get_epoch()
return epoch - (epoch % roundS)
def get_epoch(self):
def get_epoch():
'''returns epoch'''
return math.floor(time.time())
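The only change to epoch.py is dropping the stray self parameter that get_epoch carried over from the old class method. For reference, a usage sketch (timestamps are illustrative):

    from onionrutils import epoch

    now = epoch.get_epoch()                      # e.g. 1561500123
    minute = epoch.get_rounded_epoch(roundS=60)  # e.g. 1561500120, floored to the minute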

View File

@ -0,0 +1,10 @@
import re
def escape_ANSI(line):
'''
Remove ANSI escape codes from a string with regex
taken or adapted from: https://stackoverflow.com/a/38662876 by user https://stackoverflow.com/users/802365/%c3%89douard-lopez
cc-by-sa-3 license https://creativecommons.org/licenses/by-sa/3.0/
'''
ansi_escape = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]')
return ansi_escape.sub('', line)
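The ANSI-escape stripper moves out of OnionrUtils into its own small module. A quick usage sketch:

    from onionrutils import escapeansi

    colored = '\x1b[31merror:\x1b[0m something went wrong'
    print(escapeansi.escape_ANSI(colored))  # -> 'error: something went wrong'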

View File

@ -1,6 +1,7 @@
import base64, string, onionrutils
import base64, string
import unpaddedbase32, nacl.signing, nacl.encoding
def validate_hash(utils_inst, data, length=64):
from onionrutils import bytesconverter
def validate_hash(data, length=64):
'''
Validate if a string is a valid hash hex digest (does not compare, just checks length and charset)
'''
@ -25,7 +26,7 @@ def validate_pub_key(key):
if type(key) is type(None):
return False
# Accept keys that have no = padding
key = unpaddedbase32.repad(onionrutils.str_to_bytes(key))
key = unpaddedbase32.repad(bytesconverter.str_to_bytes(key))
retVal = False
try:

View File

@ -1,7 +1,7 @@
import json
import logger, onionrexceptions
from etc import onionrvalues
from onionrutils import stringvalidators
from onionrutils import stringvalidators, epoch
def validate_metadata(core_inst, metadata, blockData):
'''Validate metadata meets onionr spec (does not validate proof value computation), take in either dictionary or json string'''
# TODO, make this check sane sizes
@ -37,18 +37,18 @@ def validate_metadata(core_inst, metadata, blockData):
if not stringvalidators.is_integer_string(metadata[i]):
logger.warn('Block metadata time stamp is not integer string or int')
break
isFuture = (metadata[i] - core_inst.getEpoch())
isFuture = (metadata[i] - epoch.get_epoch())
if isFuture > maxClockDifference:
logger.warn('Block timestamp is skewed to the future over the max %s: %s' % (maxClockDifference, isFuture))
break
if (core_inst.getEpoch() - metadata[i]) > maxAge:
if (epoch.get_epoch() - metadata[i]) > maxAge:
logger.warn('Block is outdated: %s' % (metadata[i],))
break
elif i == 'expire':
try:
assert int(metadata[i]) > core_inst.getEpoch()
assert int(metadata[i]) > epoch.get_epoch()
except AssertionError:
logger.warn('Block is expired: %s less than %s' % (metadata[i], core_inst.getEpoch()))
logger.warn('Block is expired: %s less than %s' % (metadata[i], epoch.get_epoch()))
break
elif i == 'encryptType':
try:

View File

@ -23,7 +23,7 @@ import locale, sys, os, threading, json
locale.setlocale(locale.LC_ALL, '')
import onionrservices, logger
from onionrservices import bootstrapservice
from onionrutils import stringvalidators
from onionrutils import stringvalidators, epoch, basicrequests
plugin_name = 'esoteric'
PLUGIN_VERSION = '0.0.0'
@ -58,8 +58,8 @@ class Esoteric:
else:
message += '\n'
except EOFError:
message = json.dumps({'m': message, 't': self.myCore._utils.getEpoch()})
print(self.myCore._utils.doPostRequest('http://%s/esoteric/sendto' % (self.transport,), port=self.socks, data=message))
message = json.dumps({'m': message, 't': epoch.get_epoch()})
print(basicrequests.do_post_request(self.myCore, 'http://%s/esoteric/sendto' % (self.transport,), port=self.socks, data=message))
message = ''
except KeyboardInterrupt:
self.shutdown = True
@ -78,7 +78,7 @@ class Esoteric:
self.socks = self.myCore.config.get('tor.socksport')
print('connected with', peer, 'on', peer_transport_address)
if self.myCore._utils.doGetRequest('http://%s/ping' % (peer_transport_address,), ignoreAPI=True, port=self.socks) == 'pong!':
if basicrequests.do_get_request(self.myCore, 'http://%s/ping' % (peer_transport_address,), ignoreAPI=True, port=self.socks) == 'pong!':
print('connected', peer_transport_address)
threading.Thread(target=self._sender_loop).start()

View File

@ -22,6 +22,7 @@
import threading, time, locale, sys, os
from onionrblockapi import Block
import logger, config
from onionrutils import escapeansi, epoch
locale.setlocale(locale.LC_ALL, '')
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
@ -43,7 +44,7 @@ class OnionrFlow:
logger.warn("Please note: everything said here is public, even if a random channel name is used.", terminal=True)
message = ""
self.flowRunning = True
newThread = threading.Thread(target=self.showOutput)
newThread = threading.Thread(target=self.showOutput, daemon=True)
newThread.start()
try:
self.channel = logger.readline("Enter a channel name or none for default:")
@ -59,7 +60,7 @@ class OnionrFlow:
else:
if message == "q":
self.flowRunning = False
expireTime = self.myCore._utils.getEpoch() + 43200
expireTime = epoch.get_epoch() + 43200
if len(message) > 0:
logger.info('Inserting message as block...', terminal=True)
self.myCore.insertBlock(message, header='txt', expire=expireTime, meta={'ch': self.channel})
@ -83,7 +84,7 @@ class OnionrFlow:
logger.info('\n------------------------', prompt = False, terminal=True)
content = block.getContent()
# Escape new lines, remove trailing whitespace, and escape ansi sequences
content = self.myCore._utils.escapeAnsi(content.replace('\n', '\\n').replace('\r', '\\r').strip())
content = escapeansi.escape_ANSI(content.replace('\n', '\\n').replace('\r', '\\r').strip())
logger.info(block.getDate().strftime("%m/%d %H:%M") + ' - ' + logger.colors.reset + content, prompt = False, terminal=True)
self.alreadyOutputed.append(block.getHash())
time.sleep(5)
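The second half of the commit message ("flow now uses a daemon thread for displaying output") is the one-word change above: the output thread is started with daemon=True, so it no longer keeps the process alive once the interactive prompt exits. A minimal sketch of the behavior, independent of Onionr:

    import threading, time

    def show_output():
        while True:        # poll-and-print loop, like OnionrFlow.showOutput
            time.sleep(5)

    # daemon=True: the thread is killed when the main thread returns,
    # instead of blocking interpreter shutdown indefinitely.
    threading.Thread(target=show_output, daemon=True).start()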

View File

@ -22,7 +22,7 @@
import logger, config
import os, sys, json, time, random, shutil, base64, getpass, datetime, re
from onionrblockapi import Block
from onionrutils import importnewblocks, stringvalidators,
from onionrutils import importnewblocks, stringvalidators
plugin_name = 'pluginmanager'
@ -397,7 +397,7 @@ def commandInstallPlugin():
return True
valid_hash = pluginapi.get_utils().validateHash(pkobh)
valid_hash = stringvalidators.validate_hash(pkobh)
real_block = False
valid_key = stringvalidators.validate_pub_key(pkobh)
real_key = False
@ -485,7 +485,7 @@ def commandAddRepository():
blockhash = sys.argv[2]
if pluginapi.get_utils().validateHash(blockhash):
if stringvalidators.validate_hash(blockhash):
if Block.exists(blockhash):
try:
blockContent = json.loads(Block(blockhash, core = pluginapi.get_core()).getContent())
@ -521,7 +521,7 @@ def commandRemoveRepository():
blockhash = sys.argv[2]
if pluginapi.get_utils().validateHash(blockhash):
if stringvalidators.validate_hash(blockhash):
if blockhash in getRepositories():
try:
removeRepository(blockhash)

View File

@ -21,6 +21,7 @@ import sys, os, json
from flask import Response, request, redirect, Blueprint, abort
import core
from onionrusers import contactmanager
from onionrutils import stringvalidators
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
import loadinbox, sentboxdb
@ -34,7 +35,7 @@ def mail_ping():
@flask_blueprint.route('/mail/deletemsg/<block>', methods=['POST'])
def mail_delete(block):
if not c._utils.validateHash(block):
if not stringvalidators.validate_hash(block):
abort(504)
existing = kv.get('deleted_mail')
if existing is None:

View File

@ -23,7 +23,7 @@ import logger, config, threading, time, datetime
from onionrblockapi import Block
import onionrexceptions
from onionrusers import onionrusers
from onionrutils import stringvalidators
from onionrutils import stringvalidators, escapeansi
import locale, sys, os, json
locale.setlocale(locale.LC_ALL, '')
@ -148,7 +148,7 @@ class OnionrMail:
print('')
if cancel != '-q':
try:
print(draw_border(self.myCore._utils.escapeAnsi(readBlock.bcontent.decode().strip())))
print(draw_border(escapeansi.escape_ANSI(readBlock.bcontent.decode().strip())))
except ValueError:
logger.warn('Error presenting message. This is usually due to a malformed or blank message.', terminal=True)
pass
@ -187,7 +187,7 @@ class OnionrMail:
else:
logger.info('Sent to: ' + self.sentMessages[self.sentboxList[int(choice)]][1], terminal=True)
# Print ansi escaped sent message
logger.info(self.myCore._utils.escapeAnsi(self.sentMessages[self.sentboxList[int(choice)]][0]), terminal=True)
logger.info(escapeansi.escape_ANSI(self.sentMessages[self.sentboxList[int(choice)]][0]), terminal=True)
input('Press enter to continue...')
finally:
if choice == '-q':

View File

@ -19,6 +19,7 @@
'''
import sqlite3, os
import core
from onionrutils import epoch
class SentBox:
def __init__(self, mycore):
assert isinstance(mycore, core.Core)
@ -60,7 +61,7 @@ class SentBox:
def addToSent(self, blockID, peer, message, subject=''):
self.connect()
args = (blockID, peer, message, subject, self.core._utils.getEpoch())
args = (blockID, peer, message, subject, epoch.get_epoch())
self.cursor.execute('INSERT INTO sent VALUES(?, ?, ?, ?, ?)', args)
self.conn.commit()
self.close()

View File

@ -17,7 +17,8 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
def checkNetwork(utilsInst, torPort=0):
from onionrutils import basicrequests
def checkNetwork(core_inst, torPort=0):
'''Check if we are connected to the internet (through Tor)'''
retData = False
connectURLs = []
@ -26,7 +27,7 @@ def checkNetwork(utilsInst, torPort=0):
connectURLs = connectTest.read().split(',')
for url in connectURLs:
if utilsInst.doGetRequest(url, port=torPort, ignoreAPI=True) != False:
if basicrequests.do_get_request(core_inst, url, port=torPort, ignoreAPI=True) != False:
retData = True
break
except FileNotFoundError:

onionr/utils/sizeutils.py (new file, 27 lines)
View File

@ -0,0 +1,27 @@
import sqlite3, os
from onionrutils import stringvalidators
def human_size(num, suffix='B'):
'''
Converts from bytes to a human readable format.
'''
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1024.0:
return "%.1f %s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f %s%s" % (num, 'Yi', suffix)
def size(path='.'):
'''
Returns the size of a folder's contents in bytes
'''
total = 0
if os.path.exists(path):
if os.path.isfile(path):
total = os.path.getsize(path)
else:
for entry in os.scandir(path):
if entry.is_file():
total += entry.stat().st_size
elif entry.is_dir():
total += size(entry.path)
return total
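size and humanSize from the deleted OnionrUtils land here as free functions (both were already missing self on the old class, so behavior is unchanged); the sqlite3 and stringvalidators imports in the new file appear unused. A usage sketch with an illustrative path:

    from utils import sizeutils

    used = sizeutils.size('data/blocks/')  # recursive on-disk size in bytes
    print(sizeutils.human_size(used))      # e.g. '12.3 KB'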