cleanup lookupblocks
parent e174b4a636
commit 63e676ed6f
@@ -1,4 +1,4 @@
-"""Onionr - Private P2P Communication
+"""Onionr - Private P2P Communication.
 
 Lookup new blocks with the communicator using a random connected peer
 """
@@ -8,7 +8,7 @@ import logger
 import onionrproofs
 from onionrutils import stringvalidators, epoch
 from communicator import peeraction, onlinepeers
-from coredb import blockmetadb
+from coredb.blockmetadb import get_block_list
 from utils import reconstructhash
 from onionrblocks import onionrblacklist
 import onionrexceptions
@@ -36,20 +36,24 @@ def lookup_blocks_from_communicator(comm_inst):
     logger.info('Looking up new blocks')
     tryAmount = 2
     newBlocks = ''
-    existingBlocks = blockmetadb.get_block_list()  # List of existing saved blocks
-    triedPeers = []  # list of peers we've tried this time around
-    maxBacklog = 1560  # Max amount of *new* block hashes to have already in queue, to avoid memory exhaustion
-    lastLookupTime = 0  # Last time we looked up a particular peer's list
+    # List of existing saved blocks
+    existingBlocks = get_block_list()
+    triedPeers = []  # list of peers we've tried this time around
+    # Max amount of *new* block hashes to have in queue
+    maxBacklog = 1560
+    lastLookupTime = 0  # Last time we looked up a particular peer's list
     new_block_count = 0
     for i in range(tryAmount):
-        listLookupCommand = 'getblocklist'  # This is defined here to reset it each time
+        # Defined here to reset it each time, time offset is added later
+        listLookupCommand = 'getblocklist'
         if len(comm_inst.blockQueue) >= maxBacklog:
             break
         if not comm_inst.isOnline:
             break
         # check if disk allocation is used
         if comm_inst.storage_counter.is_full():
-            logger.debug('Not looking up new blocks due to maximum amount of allowed disk space used')
+            logger.debug(
+                'Not looking up new blocks due to maximum amount of disk used')
             break
         try:
             # select random online peer
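A note on the new comment above listLookupCommand: the base command string is rebuilt at the top of every loop pass because a '?date=' query suffix is appended to it later in the iteration, and a string defined outside the loop would accumulate suffixes. A minimal standalone sketch of that pattern (build_lookup_commands is a hypothetical helper for illustration, not part of Onionr):

def build_lookup_commands(last_seen_times):
    """Show why the base command must be reset on every pass."""
    commands = []
    for last_seen in last_seen_times:
        list_lookup_command = 'getblocklist'  # reset, or suffixes would pile up
        list_lookup_command += '?date=%s' % (last_seen,)
        commands.append(list_lookup_command)
    return commands

assert build_lookup_commands([0, 1700000000]) == [
    'getblocklist?date=0', 'getblocklist?date=1700000000']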
@@ -65,32 +69,41 @@ def lookup_blocks_from_communicator(comm_inst):
             continue
         triedPeers.append(peer)
 
-        # Get the last time we looked up a peer's stamp to only fetch blocks since then.
+        # Get the last time we looked up a peer's stamp,
+        # to only fetch blocks since then.
         # Saved in memory only for privacy reasons
         try:
             lastLookupTime = comm_inst.dbTimestamps[peer]
         except KeyError:
-            lastLookupTime = epoch.get_epoch() - config.get("general.max_block_age", onionrvalues.DEFAULT_EXPIRE)
+            lastLookupTime = epoch.get_epoch() - \
+                config.get("general.max_block_age",
+                           onionrvalues.DEFAULT_EXPIRE)
         listLookupCommand += '?date=%s' % (lastLookupTime,)
         try:
-            newBlocks = peeraction.peer_action(comm_inst, peer, listLookupCommand)  # get list of new block hashes
+            newBlocks = peeraction.peer_action(
+                comm_inst,
+                peer, listLookupCommand)  # get list of new block hashes
         except Exception as error:
-            logger.warn('Could not get new blocks from %s.' % peer, error = error)
+            logger.warn(
+                f'Could not get new blocks from {peer}.',
+                error=error)
             newBlocks = False
         else:
             comm_inst.dbTimestamps[peer] = epoch.get_rounded_epoch(roundS=60)
-        if newBlocks != False:
+
+        if newBlocks != False:  # noqa
             # if request was a success
             for i in newBlocks.split('\n'):
                 if stringvalidators.validate_hash(i):
                     i = reconstructhash.reconstruct_hash(i)
                     # if newline separated string is valid hash
-                    if not i in existingBlocks:
-                        # if block does not exist on disk and is not already in block queue
+
+                    # if block does not exist on disk + is not already in queue
+                    if i not in existingBlocks:
                         if i not in comm_inst.blockQueue:
-                            if onionrproofs.hashMeetsDifficulty(i) and not blacklist.inBlacklist(i):
+                            if onionrproofs.hashMeetsDifficulty(i) and \
+                                    not blacklist.inBlacklist(i):
                                 if len(comm_inst.blockQueue) <= 1000000:
-                                    comm_inst.blockQueue[i] = [peer]  # add blocks to download queue
+                                    # add blocks to download queue
+                                    comm_inst.blockQueue[i] = [peer]
                                     new_block_count += 1
                         else:
                             if peer not in comm_inst.blockQueue[i]:
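On a successful request the peer's lookup stamp is saved with epoch.get_rounded_epoch(roundS=60), and only in memory, per the "privacy reasons" comment: rounding the stamp down to the minute means the stored value reveals less about exactly when the node polled a given peer. A rough standalone stand-in for that helper (the real one lives in onionrutils.epoch; this sketch only assumes the rounding behavior implied by the call site):

import time

def get_rounded_epoch(round_s=60):
    # Current Unix time, rounded down to the nearest round_s seconds
    epoch = int(time.time())
    return epoch - (epoch % round_s)

print(get_rounded_epoch())  # e.g. 1700000040 rather than 1700000071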
@@ -100,7 +113,9 @@ def lookup_blocks_from_communicator(comm_inst):
         block_string = ""
         if new_block_count > 1:
             block_string = "s"
-        logger.info('Discovered %s new block%s' % (new_block_count, block_string), terminal=True)
-        comm_inst.download_blocks_timer.count = int(comm_inst.download_blocks_timer.frequency * 0.99)
+        logger.info(
+            f'Discovered {new_block_count} new block{block_string}',
+            terminal=True)
+        comm_inst.download_blocks_timer.count = \
+            int(comm_inst.download_blocks_timer.frequency * 0.99)
     comm_inst.decrementThreadCount('lookup_blocks_from_communicator')
     return
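The "# noqa" added to the newBlocks comparison looks like it marks the explicit "!= False" as deliberate rather than a style slip: on a transport error newBlocks is set to the sentinel False, while a successful request that happens to return no hashes yields an empty string, and a plain truthiness test would conflate the two. A small sketch of that distinction (fetch_list is illustrative, not Onionr's API; E712 is the pycodestyle rule the marker silences):

def fetch_list(fail=False):
    if fail:
        return False  # transport failure is signalled with the sentinel False
    return ''  # a successful request may legitimately return zero hashes

result = fetch_list()
if result != False:  # noqa: E712 -- '' must still count as a success
    hashes = [h for h in result.split('\n') if h]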