refactor downloadblocks, removed old main.js, correct replay protection check

master
Kevin Froman 2019-07-03 21:28:16 -05:00
parent 873740c5ad
commit bcc475dc61
4 changed files with 24 additions and 767 deletions


@@ -21,6 +21,21 @@ import communicator, onionrexceptions
 import logger, onionrpeers
 from onionrutils import blockmetadata, stringvalidators, validatemetadata
 
+def _should_download(comm_inst, block_hash):
+    ret_data = True
+    if block_hash in comm_inst._core.getBlockList():
+        #logger.debug('Block %s is already saved.' % (block_hash,))
+        ret_data = False
+    else:
+        if comm_inst._core._blacklist.inBlacklist(block_hash):
+            ret_data = False
+    if ret_data is False:
+        try:
+            del comm_inst.blockQueue[block_hash]
+        except KeyError:
+            pass
+    return ret_data
+
 def download_blocks_from_communicator(comm_inst):
     assert isinstance(comm_inst, communicator.OnionrCommunicatorDaemon)
     for blockHash in list(comm_inst.blockQueue):
@@ -32,24 +47,18 @@ def download_blocks_from_communicator(comm_inst):
         except KeyError:
             blockPeers = []
         removeFromQueue = True
-        if comm_inst.shutdown or not comm_inst.isOnline:
-            # Exit loop if shutting down or offline
+
+        if not _should_download(comm_inst, blockHash):
+            continue
+
+        if comm_inst.shutdown or not comm_inst.isOnline or comm_inst._core.storage_counter.isFull():
+            # Exit loop if shutting down or offline, or disk allocation reached
             break
         # Do not download blocks being downloaded or that are already saved (edge cases)
         if blockHash in comm_inst.currentDownloading:
             #logger.debug('Already downloading block %s...' % blockHash)
             continue
-        if blockHash in comm_inst._core.getBlockList():
-            #logger.debug('Block %s is already saved.' % (blockHash,))
-            try:
-                del comm_inst.blockQueue[blockHash]
-            except KeyError:
-                pass
-            continue
-        if comm_inst._core._blacklist.inBlacklist(blockHash):
-            continue
-        if comm_inst._core.storage_counter.isFull():
-            break
+
         comm_inst.currentDownloading.append(blockHash) # So we can avoid concurrent downloading in other threads of same block
         if len(blockPeers) == 0:
             peerUsed = comm_inst.pickOnlinePeer()
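The refactor above moves the "skip this hash?" decision out of the download loop and into the new _should_download helper: a block is skipped, and dropped from the download queue, when it is already stored locally or blacklisted. The snippet below is a minimal, self-contained sketch of that logic; the _Stub* classes are illustrative stand-ins for the real communicator and core objects, not Onionr code.

# Minimal sketch of the new skip logic; _StubBlacklist/_StubCore/_StubComm are
# illustrative stand-ins, not Onionr classes.

class _StubBlacklist:
    def __init__(self, entries):
        self.entries = set(entries)

    def inBlacklist(self, block_hash):
        return block_hash in self.entries


class _StubCore:
    def __init__(self, saved, blacklisted):
        self._saved = set(saved)
        self._blacklist = _StubBlacklist(blacklisted)

    def getBlockList(self):
        return self._saved


class _StubComm:
    def __init__(self, saved, blacklisted, queue):
        self._core = _StubCore(saved, blacklisted)
        self.blockQueue = dict(queue)


def _should_download(comm_inst, block_hash):
    # A block is worth downloading only if it is neither saved nor blacklisted;
    # otherwise it is removed from the queue so it is not retried.
    ret_data = True
    if block_hash in comm_inst._core.getBlockList():
        ret_data = False
    elif comm_inst._core._blacklist.inBlacklist(block_hash):
        ret_data = False
    if ret_data is False:
        try:
            del comm_inst.blockQueue[block_hash]
        except KeyError:
            pass
    return ret_data


comm = _StubComm(saved={'aa'}, blacklisted={'bb'}, queue={'aa': [], 'bb': [], 'cc': []})
print(_should_download(comm, 'aa'))  # False: already saved, removed from queue
print(_should_download(comm, 'bb'))  # False: blacklisted, removed from queue
print(_should_download(comm, 'cc'))  # True: still queued for download
print(sorted(comm.blockQueue))       # ['cc']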


@@ -68,6 +68,7 @@ class PublicEndpoints:
         @public_endpoints_bp.route('/announce', methods=['post'])
         def accept_announce():
+            '''Accept announcements with pow token to prevent spam'''
             resp = announce.handle_announce(client_API, request)
             return resp
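The new docstring refers to the proof-of-work token that announcements must carry; the actual validation happens in announce.handle_announce, which this diff does not show. Purely as a generic illustration of that kind of spam control (not Onionr's scheme), a PoW token check can require the hash of the announced address plus the token to have a number of leading zero bits:

import hashlib

DIFFICULTY_BITS = 16  # arbitrary example difficulty

def pow_token_is_valid(address: str, token: str) -> bool:
    # Accept only if sha3_256(address + token) starts with DIFFICULTY_BITS zero bits.
    digest = hashlib.sha3_256((address + token).encode()).digest()
    return int.from_bytes(digest, 'big') >> (256 - DIFFICULTY_BITS) == 0

def mine_token(address: str) -> str:
    # What an announcing peer would do: brute-force a token that passes the check.
    nonce = 0
    while not pow_token_is_valid(address, str(nonce)):
        nonce += 1
    return str(nonce)

token = mine_token('example.onion')
assert pow_token_is_valid('example.onion', token)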


@@ -89,7 +89,7 @@ class Block:
             # Check for replay attacks
             try:
-                if epoch.get_epoch() - self.core.getBlockDate(self.hash) < 60:
+                if epoch.get_epoch() - self.core.getBlockDate(self.hash) > 60:
                     assert self.core._crypto.replayTimestampValidation(self.bmetadata['rply'])
             except (AssertionError, KeyError, TypeError) as e:
                 if not self.bypassReplayCheck:
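The corrected comparison changes when the replay timestamp ('rply') in the block metadata is enforced: it is now required for blocks more than 60 seconds old, while very fresh blocks pass without it. A rough sketch of that control flow follows; validate_replay_timestamp is a stand-in for Onionr's replayTimestampValidation, whose internals are not part of this diff.

import time

REPLAY_GRACE_SECONDS = 60  # matches the constant used in the diff

def validate_replay_timestamp(rply_timestamp: float, max_age: int = 3600) -> bool:
    # Stand-in check: reject replay timestamps from the future or older than max_age seconds.
    now = time.time()
    return now - max_age <= rply_timestamp <= now

def passes_replay_check(block_date_epoch: int, rply_timestamp: float) -> bool:
    # Blocks younger than the grace period are accepted as-is; older blocks
    # must carry a valid replay timestamp, mirroring the corrected '>' comparison.
    if int(time.time()) - block_date_epoch > REPLAY_GRACE_SECONDS:
        return validate_replay_timestamp(rply_timestamp)
    return True

# Example: a freshly dated block passes even without a meaningful replay timestamp,
# while an hour-old block does not.
print(passes_replay_check(int(time.time()), 0.0))          # True
print(passes_replay_check(int(time.time()) - 3600, 0.0))   # False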

File diff suppressed because one or more lines are too long