refactoring

master
Kevin Froman 2019-08-09 15:41:27 -05:00
parent 7c02f6fff1
commit daff149acc
14 changed files with 36 additions and 41 deletions

View File

@@ -27,9 +27,8 @@ from communicator import onlinepeers
 from coredb import keydb
 
 def announce_node(daemon):
     '''Announce our node to our peers'''
-    ov = onionrvalues.OnionrValues()
-    retData = False
-    announceFail = False
+    ret_data = False
+    announce_fail = False
     # Do not let announceCache get too large
     if len(daemon.announceCache) >= 10000:
@@ -57,7 +56,7 @@ def announce_node(daemon):
     if ourID != 1:
         existingRand = bytesconverter.bytes_to_str(keydb.transportinfo.get_address_info(peer, 'powValue'))
         # Reset existingRand if it no longer meets the minimum POW
-        if type(existingRand) is type(None) or not existingRand.endswith('0' * ov.announce_pow):
+        if type(existingRand) is type(None) or not existingRand.endswith('0' * onionrvalues.ANNOUNCE_POW):
             existingRand = ''
 
     if peer in daemon.announceCache:
@@ -66,22 +65,22 @@ def announce_node(daemon):
         data['random'] = existingRand
     else:
         daemon.announceProgress[peer] = True
-        proof = onionrproofs.DataPOW(combinedNodes, forceDifficulty=ov.announce_pow)
+        proof = onionrproofs.DataPOW(combinedNodes, forceDifficulty=onionrvalues.ANNOUNCE_POW)
         del daemon.announceProgress[peer]
         try:
             data['random'] = base64.b64encode(proof.waitForResult()[1])
         except TypeError:
             # Happens when we failed to produce a proof
             logger.error("Failed to produce a pow for announcing to " + peer)
-            announceFail = True
+            announce_fail = True
         else:
             daemon.announceCache[peer] = data['random']
-    if not announceFail:
+    if not announce_fail:
         logger.info('Announcing node to ' + url)
         if basicrequests.do_post_request(url, data, port=daemon.shared_state.get(NetController).socksPort) == 'Success':
             logger.info('Successfully introduced node to ' + peer, terminal=True)
-            retData = True
+            ret_data = True
             keydb.transportinfo.set_address_info(peer, 'introduced', 1)
             keydb.transportinfo.set_address_info(peer, 'powValue', data['random'])
     daemon.decrementThreadCount('announce_node')
-    return retData
+    return ret_data
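
The suffix check on powValue above survives the refactor unchanged apart from the constant's new home. A minimal standalone sketch of that check, assuming ANNOUNCE_POW = 5 as the onionrvalues diff below defines it; the helper name is illustrative, not from the codebase:

# Illustrative sketch of the announce POW suffix check used above.
# ANNOUNCE_POW mirrors the value onionrvalues defines in this commit;
# meets_announce_pow is a hypothetical helper, not part of Onionr.
ANNOUNCE_POW = 5

def meets_announce_pow(pow_value) -> bool:
    """True if a stored proof value still ends in enough zero characters."""
    if pow_value is None:
        return False
    return pow_value.endswith('0' * ANNOUNCE_POW)

assert meets_announce_pow('abc00000')   # five trailing zeroes: accepted
assert not meets_announce_pow('abc0')   # too few zeroes: reset to ''
assert not meets_announce_pow(None)     # no stored proof yet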

View File

@@ -29,15 +29,13 @@ DEVELOPMENT_MODE = True
 MAX_BLOCK_TYPE_LENGTH = 15
 
+# Begin OnionrValues migrated values
+ANNOUNCE_POW = 5
+DEFAULT_EXPIRE = 2592000
+BLOCK_METADATA_LENGTHS = {'meta': 1000, 'sig': 200, 'signer': 200, 'time': 10, 'pow': 1000, 'encryptType': 4, 'expire': 14}
+
 platform = platform.system()
 if platform == 'Windows':
     SCRIPT_NAME = 'run-windows.bat'
 else:
     SCRIPT_NAME = 'onionr.sh'
 
-class OnionrValues:
-    def __init__(self):
-        self.passwordLength = 20
-        self.blockMetadataLengths = {'meta': 1000, 'sig': 200, 'signer': 200, 'time': 10, 'pow': 1000, 'encryptType': 4, 'expire': 14} #TODO properly refine values to minimum needed
-        self.default_expire = 2592000
-        self.announce_pow = 5
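
Everything else in this commit follows from this file: attributes read off a throwaway OnionrValues instance become module-level constants. A before/after sketch of the caller-side access pattern; it assumes the refactored onionrvalues module above is importable:

# Before: each caller constructed a throwaway instance.
#   ov = onionrvalues.OnionrValues()
#   difficulty = ov.announce_pow
# After: callers read the module-level constants directly.
import onionrvalues  # assumes the module shown above is on the path

difficulty = onionrvalues.ANNOUNCE_POW         # was ov.announce_pow
expire = onionrvalues.DEFAULT_EXPIRE           # was ov.default_expire
lengths = onionrvalues.BLOCK_METADATA_LENGTHS  # was ov.blockMetadataLengths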

View File

@@ -55,7 +55,7 @@ def handle_announce(request):
         powHash = powHash.decode()
     except AttributeError:
         pass
-    if powHash.startswith('0' * onionrvalues.OnionrValues().announce_pow):
+    if powHash.startswith('0' * onionrvalues.ANNOUNCE_POW):
         newNode = bytesconverter.bytes_to_str(newNode)
         announce_queue = deadsimplekv.DeadSimpleKV(filepaths.announce_cache)
         announce_queue_list = announce_queue.get('new_peers')

View File

@@ -13,7 +13,6 @@ def insert_block(data, header='txt', sign=False, encryptType='', symKey='', asym
     encryptType must be specified to encrypt a block
     '''
     use_subprocess = powchoice.use_subprocess(config)
-    requirements = onionrvalues.OnionrValues()
     storage_counter = storagecounter.StorageCounter()
     allocationReachedMessage = 'Cannot insert block, disk allocation reached.'
     if storage_counter.isFull():

View File

@@ -57,7 +57,7 @@ def process_block_metadata(blockHash: str):
             expireTime = myBlock.getHeader('expire')
             assert len(str(int(expireTime))) < 20 # test that expire time is an integer of sane length (for epoch)
         except (AssertionError, ValueError, TypeError) as e:
-            expireTime = onionrvalues.OnionrValues().default_expire + curTime
+            expireTime = onionrvalues.DEFAULT_EXPIRE + curTime
         finally:
             blockmetadb.update_block_info(blockHash, 'expire', expireTime)
     onionrevents.event('processblocks', data = {'block': myBlock, 'type': blockType, 'signer': signer, 'validSig': valid})
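
The except branch falls back to "expire 30 days from now": DEFAULT_EXPIRE is 2592000 seconds. A minimal sketch of that fallback, with the constant copied from the onionrvalues diff; the helper function is hypothetical:

# Sketch of the expire-time fallback above. DEFAULT_EXPIRE is copied
# from the onionrvalues diff (2592000 s == 30 days).
from time import time

DEFAULT_EXPIRE = 2592000

def fallback_expire_epoch(cur_time=None) -> int:
    """Epoch timestamp 30 days after cur_time (defaults to now)."""
    if cur_time is None:
        cur_time = int(time())
    return DEFAULT_EXPIRE + cur_time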

View File

@@ -59,12 +59,12 @@ def local_command(command, data='', silent = True, post=False, postData = {}, ma
     try:
         if post:
-            retData = requests.post(payload, data=postData, headers={'token': config.get('client.webpassword'), 'Connection':'close'}, timeout=(maxWait, maxWait)).text
+            ret_data = requests.post(payload, data=postData, headers={'token': config.get('client.webpassword'), 'Connection':'close'}, timeout=(maxWait, maxWait)).text
         else:
-            retData = requests.get(payload, headers={'token': config.get('client.webpassword'), 'Connection':'close'}, timeout=(maxWait, maxWait)).text
+            ret_data = requests.get(payload, headers={'token': config.get('client.webpassword'), 'Connection':'close'}, timeout=(maxWait, maxWait)).text
     except Exception as error:
         if not silent:
             logger.error('Failed to make local request (command: %s):%s' % (command, error), terminal=True)
-        retData = False
+        ret_data = False
 
-    return retData
+    return ret_data

View File

@@ -24,9 +24,8 @@ from onionrutils import stringvalidators, epoch, bytesconverter
 import config, filepaths, onionrcrypto
 
 def validate_metadata(metadata, blockData):
     '''Validate metadata meets onionr spec (does not validate proof value computation), take in either dictionary or json string'''
     # TODO, make this check sane sizes
-    requirements = onionrvalues.OnionrValues()
-    retData = False
+    ret_data = False
     maxClockDifference = 120
     # convert to dict if it is json string
@@ -37,11 +36,11 @@ def validate_metadata(metadata, blockData):
         pass
 
     # Validate metadata dict for invalid keys to sizes that are too large
-    maxAge = config.get("general.max_block_age", onionrvalues.OnionrValues().default_expire)
+    maxAge = config.get("general.max_block_age", onionrvalues.DEFAULT_EXPIRE)
     if type(metadata) is dict:
         for i in metadata:
             try:
-                requirements.blockMetadataLengths[i]
+                onionrvalues.BLOCK_METADATA_LENGTHS[i]
             except KeyError:
                 logger.warn('Block has invalid metadata key ' + i)
                 break
@@ -51,7 +50,7 @@ def validate_metadata(metadata, blockData):
                     testData = len(testData)
                 except (TypeError, AttributeError) as e:
                     testData = len(str(testData))
-                if requirements.blockMetadataLengths[i] < testData:
+                if onionrvalues.BLOCK_METADATA_LENGTHS[i] < testData:
                     logger.warn('Block metadata key ' + i + ' exceeded maximum size')
                     break
             if i == 'time':
@@ -84,16 +83,16 @@ def validate_metadata(metadata, blockData):
             try:
                 with open(filepaths.data_nonce_file, 'r') as nonceFile:
                     if nonce in nonceFile.read():
-                        retData = False # we've seen that nonce before, so we can't pass metadata
+                        ret_data = False # we've seen that nonce before, so we can't pass metadata
                         raise onionrexceptions.DataExists
             except FileNotFoundError:
-                retData = True
+                ret_data = True
             except onionrexceptions.DataExists:
-                # do not set retData to True, because nonce has been seen before
+                # do not set ret_data to True, because nonce has been seen before
                 pass
             else:
-                retData = True
+                ret_data = True
         else:
             logger.warn('In call to utils.validateMetadata, metadata must be JSON string or a dictionary object')
 
-    return retData
+    return ret_data
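
For reference, the key/size validation the loop above performs reduces to the following standalone sketch. BLOCK_METADATA_LENGTHS is copied from the onionrvalues diff; the helper function is hypothetical, not part of Onionr:

# Standalone sketch of the metadata key/size check above.
BLOCK_METADATA_LENGTHS = {'meta': 1000, 'sig': 200, 'signer': 200,
                          'time': 10, 'pow': 1000, 'encryptType': 4,
                          'expire': 14}

def metadata_key_ok(key, value) -> bool:
    """Reject unknown keys and values larger than their allowed length."""
    if key not in BLOCK_METADATA_LENGTHS:
        return False  # invalid metadata key
    try:
        size = len(value)
    except TypeError:
        size = len(str(value))  # non-sized values are measured as strings
    return size <= BLOCK_METADATA_LENGTHS[key]

assert metadata_key_ok('time', 1565380887)  # 10 digits fits 'time': 10
assert not metadata_key_ok('bogus', 'x')    # unknown key is rejected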