improved logging messages to be less spammy
commit 065e97ab11
parent 8082570b7f
14 changed files with 129 additions and 129 deletions
@@ -72,7 +72,7 @@ def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
                 # Add a peer to our list if it isn't already since it successfully connected
                 networkmerger.mergeAdders(address, comm_inst._core)
                 if address not in comm_inst.onlinePeers:
-                    logger.info('Connected to ' + address)
+                    logger.info('Connected to ' + address, terminal=True)
                     comm_inst.onlinePeers.append(address)
                     comm_inst.connectTimes[address] = comm_inst._core._utils.getEpoch()
                 retData = address
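The terminal=True argument added here implies that Onionr's logger module separates messages that are only written to the log file from messages that are also echoed to the user's terminal, which is how this commit keeps routine output quiet while still surfacing connection events. A minimal sketch of that split, with hypothetical names (LOG_FILE and the message format are assumptions, not Onionr's actual logger):

import time

LOG_FILE = 'output.log'  # assumed log file path, not Onionr's real one

def info(message, terminal=False):
    # Hypothetical sketch: always persist the message, optionally echo it.
    line = '[%s] %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), message)
    with open(LOG_FILE, 'a') as f:
        f.write(line + '\n')  # every message goes to the log file
    if terminal:
        print(line)  # only flagged messages reach the terminal

With a flag like this, callers opt individual messages into terminal output instead of silencing the logger globally.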
@@ -110,6 +110,7 @@ def download_blocks_from_communicator(comm_inst):
         if removeFromQueue:
             try:
                 del comm_inst.blockQueue[blockHash] # remove from block queue both if success or false
+                logger.info('%s blocks remaining in queue' % [len(comm_inst.blockQueue)])
             except KeyError:
                 pass
             comm_inst.currentDownloading.remove(blockHash)
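The del is wrapped in try/except KeyError, presumably because the same hash can be cleared from the queue more than once, so a missing key is treated as already handled rather than as an error. Note also that the count is interpolated as a one-element list, so the message renders with brackets around the number, e.g. '[5] blocks remaining in queue'. A simplified, hypothetical illustration of the guard:

block_queue = {'abc123': ['peer1']}  # hypothetical queue: block hash -> list of peers

def remove_from_queue(block_hash):
    try:
        del block_queue[block_hash]
        print('%s blocks remaining in queue' % [len(block_queue)])
    except KeyError:
        pass  # already removed elsewhere; nothing left to do

remove_from_queue('abc123')  # removes the hash, prints '[0] blocks remaining in queue'
remove_from_queue('abc123')  # second call is silently ignored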
@@ -19,61 +19,65 @@
 '''
 import logger, onionrproofs
 def lookup_blocks_from_communicator(comm_inst):
     logger.info('Looking up new blocks...')
     tryAmount = 2
     newBlocks = ''
     existingBlocks = comm_inst._core.getBlockList()
     triedPeers = [] # list of peers we've tried this time around
     maxBacklog = 1560 # Max amount of *new* block hashes to have already in queue, to avoid memory exhaustion
     lastLookupTime = 0 # Last time we looked up a particular peer's list
+    new_block_count = 0
     for i in range(tryAmount):
         listLookupCommand = 'getblocklist' # This is defined here to reset it each time
         if len(comm_inst.blockQueue) >= maxBacklog:
             break
         if not comm_inst.isOnline:
             break
         # check if disk allocation is used
         if comm_inst._core._utils.storageCounter.isFull():
             logger.debug('Not looking up new blocks due to maximum amount of allowed disk space used')
             break
         peer = comm_inst.pickOnlinePeer() # select random online peer
         # if we've already tried all the online peers this time around, stop
         if peer in triedPeers:
             if len(comm_inst.onlinePeers) == len(triedPeers):
                 break
             else:
                 continue
         triedPeers.append(peer)

         # Get the last time we looked up a peer's stamp to only fetch blocks since then.
         # Saved in memory only for privacy reasons
         try:
             lastLookupTime = comm_inst.dbTimestamps[peer]
         except KeyError:
             lastLookupTime = 0
         else:
             listLookupCommand += '?date=%s' % (lastLookupTime,)
         try:
             newBlocks = comm_inst.peerAction(peer, listLookupCommand) # get list of new block hashes
         except Exception as error:
             logger.warn('Could not get new blocks from %s.' % peer, error = error)
             newBlocks = False
         else:
             comm_inst.dbTimestamps[peer] = comm_inst._core._utils.getRoundedEpoch(roundS=60)
         if newBlocks != False:
             # if request was a success
             for i in newBlocks.split('\n'):
                 if comm_inst._core._utils.validateHash(i):
                     # if newline seperated string is valid hash
                     if not i in existingBlocks:
                         # if block does not exist on disk and is not already in block queue
                         if i not in comm_inst.blockQueue:
                             if onionrproofs.hashMeetsDifficulty(i) and not comm_inst._core._blacklist.inBlacklist(i):
                                 if len(comm_inst.blockQueue) <= 1000000:
                                     comm_inst.blockQueue[i] = [peer] # add blocks to download queue
+                                    new_block_count += 1
                         else:
                             if peer not in comm_inst.blockQueue[i]:
                                 if len(comm_inst.blockQueue[i]) < 10:
                                     comm_inst.blockQueue[i].append(peer)
+    if new_block_count > 0:
+        logger.info('Discovered %s new blocks' % (new_block_count,), terminal=True)
     comm_inst.decrementThreadCount('lookupBlocks')
     return
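This hunk is the heart of the "less spammy" change: rather than logging inside the lookup loop, the function counts newly queued hashes in new_block_count and emits a single summary line at the end, and only when the count is non-zero. The count-then-summarize pattern distilled into a self-contained sketch (function and variable names are illustrative, not Onionr's API):

def queue_new_hashes(hashes, block_queue, peer):
    # Count-then-summarize: accumulate during the loop, log once at the end.
    new_block_count = 0
    for h in hashes:
        if h not in block_queue:
            block_queue[h] = [peer]  # first peer known to hold this block
            new_block_count += 1
    if new_block_count > 0:
        print('Discovered %s new blocks' % (new_block_count,))

queue = {}
queue_new_hashes(['a1', 'b2', 'a1'], queue, 'peer1')  # prints 'Discovered 2 new blocks'
queue_new_hashes(['a1'], queue, 'peer2')  # nothing new, so nothing is printed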