bug fixes

parent 1faae80aaf
commit e346c09228
@@ -89,7 +89,7 @@ class OnionrCommunicatorDaemon:
         OnionrCommunicatorTimers(self, self.lookupBlocks, 7, requiresPeer=True, maxThreads=1)
         OnionrCommunicatorTimers(self, self.getBlocks, 10, requiresPeer=True)
         OnionrCommunicatorTimers(self, self.clearOfflinePeer, 58)
-        OnionrCommunicatorTimers(self, self.daemonTools.cleanOldBlocks, 650)
+        OnionrCommunicatorTimers(self, self.daemonTools.cleanOldBlocks, 65)
         OnionrCommunicatorTimers(self, self.lookupKeys, 60, requiresPeer=True)
         OnionrCommunicatorTimers(self, self.lookupAdders, 60, requiresPeer=True)
         netCheckTimer = OnionrCommunicatorTimers(self, self.daemonTools.netCheck, 600)

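The third argument to OnionrCommunicatorTimers reads as a run interval in seconds, so this hunk makes cleanOldBlocks fire roughly every minute instead of every ~11 minutes. A minimal sketch of the periodic-dispatch pattern these registrations imply; the class and method names below are illustrative, not the real OnionrCommunicatorTimers API:

    class PeriodicTimer:
        '''Illustrative stand-in: run timerFunction every `frequency` seconds.'''
        def __init__(self, timerFunction, frequency, requiresPeer=False, maxThreads=None):
            self.timerFunction = timerFunction
            self.frequency = frequency        # seconds between runs
            self.requiresPeer = requiresPeer  # skip while no peers are online
            self.count = 0

        def tick(self, onlinePeers):
            '''Called about once per second by the daemon's main loop.'''
            self.count += 1
            if self.count < self.frequency:
                return
            self.count = 0
            if self.requiresPeer and len(onlinePeers) == 0:
                return                        # nothing to talk to yet
            self.timerFunction()
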
@@ -152,7 +152,7 @@ class OnionrCommunicatorDaemon:
             if not self.isOnline:
                 break
             if self._core._utils.storageCounter.isFull():
-                logger.warn('Not looking up new blocks due to maximum amount of allowed disk space used')
+                logger.debug('Not looking up new blocks due to maximum amount of allowed disk space used')
                 break
             peer = self.pickOnlinePeer() # select random online peer
             # if we've already tried all the online peers this time around, stop

@@ -187,6 +187,7 @@ class OnionrCommunicatorDaemon:
     def getBlocks(self):
         '''download new blocks in queue'''
         for blockHash in self.blockQueue:
+            removeFromQueue = True
             if self.shutdown or not self.isOnline:
                 # Exit loop if shutting down or offline
                 break

@@ -198,6 +199,8 @@ class OnionrCommunicatorDaemon:
                 logger.debug('%s is already saved' % (blockHash,))
                 self.blockQueue.remove(blockHash)
                 continue
+            if self._core._utils.storageCounter.isFull():
+                break
             self.currentDownloading.append(blockHash) # So we can avoid concurrent downloading in other threads of same block
             logger.info("Attempting to download %s..." % blockHash)
             peerUsed = self.pickOnlinePeer()

@@ -225,6 +228,7 @@ class OnionrCommunicatorDaemon:
                         self._core.setData(content)
                     except onionrexceptions.DiskAllocationReached:
                         logger.error("Reached disk allocation allowance, cannot save this block.")
+                        removeFromQueue = False
                     else:
                         self._core.addToBlockDB(blockHash, dataSaved=True)
                         self._core._utils.processBlockMetadata(blockHash) # caches block metadata values to block database

@@ -246,6 +250,7 @@ class OnionrCommunicatorDaemon:
                 # Punish peer for sharing invalid block (not always malicious, but is bad regardless)
                 onionrpeers.PeerProfiles(peerUsed, self._core).addScore(-50)
                 logger.warn('Block hash validation failed for ' + blockHash + ' got ' + tempHash)
+            if removeFromQueue:
                 self.blockQueue.remove(blockHash) # remove from block queue both if success or false
             self.currentDownloading.remove(blockHash)
         self.decrementThreadCount('getBlocks')

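Taken together, the getBlocks hunks above introduce a per-block removeFromQueue flag: a hash is only dropped from the download queue once it has been stored or rejected, not when the disk allocation is hit, so blocked downloads can be retried later. A condensed, self-contained sketch of that control flow, with placeholders for the parts the diff does not show (storage_full, fetch_block and store_block are not Onionr functions):

    class DiskAllocationReached(Exception):
        '''Stand-in for onionrexceptions.DiskAllocationReached.'''

    def drain_queue(block_queue, storage_full, fetch_block, store_block):
        '''Only forget a hash once it was actually handled.'''
        for block_hash in list(block_queue):
            remove_from_queue = True          # assume we can drop the hash afterwards
            if storage_full():
                break                         # out of space; leave everything queued
            try:
                store_block(block_hash, fetch_block(block_hash))
            except DiskAllocationReached:
                remove_from_queue = False     # keep it queued, retry when space frees up
            if remove_from_queue:
                block_queue.remove(block_hash)
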
@@ -58,7 +58,7 @@ class DaemonTools:
     def netCheck(self):
         '''Check if we are connected to the internet or not when we can't connect to any peers'''
         if len(self.daemon.onlinePeers) != 0:
-            if not self.daemon._core._utils.checkNetwork():
+            if not self.daemon._core._utils.checkNetwork(torPort=self.daemon.proxyPort):
                 logger.warn('Network check failed, are you connected to the internet?')
                 self.daemon.isOnline = False
         self.daemon.decrementThreadCount('netCheck')

@@ -90,7 +90,11 @@ def peerCleanup(coreInst):
         if PeerProfiles(address, coreInst).score < minScore:
             coreInst.removeAddress(address)
             try:
-                coreInst._blacklist.addToDB(address, dataType=1, expire=300)
+                if (self.coreInst._utils.getEpoch() - coreInst.getPeerInfo(address, 4)) >= 600:
+                    expireTime = 600
+                else:
+                    expireTime = 86400
+                coreInst._blacklist.addToDB(address, dataType=1, expire=expireTime)
             except sqlite3.IntegrityError: #TODO just make sure its not a unique constraint issue
                 pass
             logger.warn('Removed address ' + address + '.')

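The peerCleanup hunk replaces the fixed 300-second blacklist entry with an expiry chosen from how recently the peer was seen; assuming getPeerInfo(address, 4) returns the peer's last-seen epoch time, an address that has been silent for ten minutes or more gets a short 600-second entry, while one seen recently (and therefore actively misbehaving) is blacklisted for a full day. The rule in isolation:

    import time

    def blacklist_expiry(last_seen, now=None):
        '''Sketch of the new expiry rule: stale addresses get a short ban, fresh ones a day-long one.'''
        now = time.time() if now is None else now
        if now - last_seen >= 600:   # not seen for ten minutes or more
            return 600
        return 86400                 # seen recently, so keep it blacklisted for a day
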
@@ -631,7 +631,7 @@ class OnionrUtils:
             pass
         return data

-    def checkNetwork(self):
+    def checkNetwork(self, torPort=0):
         '''Check if we are connected to the internet (through Tor)'''
         retData = False
         connectURLs = []

@@ -640,7 +640,7 @@ class OnionrUtils:
                 connectURLs = connectTest.read().split(',')

             for url in connectURLs:
-                if self.doGetRequest(url) != False:
+                if self.doGetRequest(url, port=torPort) != False:
                     retData = True
                     break
         except FileNotFoundError:

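These two OnionrUtils hunks, together with the DaemonTools change above, thread a Tor SOCKS port through the connectivity check: netCheck passes self.daemon.proxyPort, checkNetwork accepts it as torPort and forwards it to doGetRequest. The diff does not show doGetRequest itself; a reduced sketch of the resulting flow, assuming the port is used as a local socks5h proxy:

    import requests

    def do_get_request(url, port=0):
        '''Illustrative stand-in for doGetRequest: fetch url through a local Tor SOCKS port.'''
        proxies = {'http': 'socks5h://127.0.0.1:%s' % port,
                   'https': 'socks5h://127.0.0.1:%s' % port}
        try:
            return requests.get(url, proxies=proxies, timeout=30).text
        except requests.RequestException:
            return False

    def check_network(tor_port=0, connect_urls=()):
        '''Mirror of the patched checkNetwork: online as soon as any test URL answers over Tor.'''
        for url in connect_urls:
            if do_get_request(url, port=tor_port) != False:
                return True
        return False
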
@@ -51,7 +51,7 @@
     },

     "allocations":{
-        "disk": 800,
+        "disk": 10000000000,
         "netTotal": 1000000000,
         "blockCache": 5000000,
         "blockCacheTotal": 50000000

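The config hunk raises allocations.disk from 800 to 10000000000, which only makes sense if the value is read as bytes: 800 bytes would trip the storageCounter.isFull() guards used in the communicator almost immediately, while 10000000000 is roughly 10 GB. A minimal sketch of a byte-based counter consistent with those guards; the class below is illustrative, not Onionr's storagecounter module:

    class StorageCounter:
        '''Track used bytes against the configured disk allocation.'''
        def __init__(self, allocation_bytes):
            self.allocation = allocation_bytes   # e.g. 10000000000 from allocations.disk
            self.used = 0

        def addBytes(self, amount):
            self.used += amount

        def isFull(self):
            return self.used >= self.allocation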