* do not save blocks if disk allocation reached

* improved some commenting
* bug fixes
master
Kevin Froman 2018-08-22 23:59:41 -05:00
parent 53577a4c10
commit dd5cb99155
No known key found for this signature in database
GPG Key ID: 0D414D0FE405B63B
5 changed files with 47 additions and 24 deletions

View File

@@ -39,7 +39,11 @@ def importBlockFromData(content, coreInst):
     if coreInst._utils.validateMetadata(metadata, metas[2]): # check if metadata is valid
         if coreInst._crypto.verifyPow(content): # check if POW is enough/correct
             logger.info('Block passed proof, saving.')
-            blockHash = coreInst.setData(content)
-            coreInst.addToBlockDB(blockHash, dataSaved=True)
-            coreInst._utils.processBlockMetadata(blockHash) # caches block metadata values to block database
+            try:
+                blockHash = coreInst.setData(content)
+            except onionrexceptions.DiskAllocationReached:
+                pass
+            else:
+                coreInst.addToBlockDB(blockHash, dataSaved=True)
+                coreInst._utils.processBlockMetadata(blockHash) # caches block metadata values to block database
             retData = True
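
The try/except/else added above is the heart of this change: the block is recorded as saved only when the disk write actually happened. A minimal standalone sketch of the same pattern (the wrapper name is hypothetical; setData, addToBlockDB, processBlockMetadata and DiskAllocationReached are from the diff):

import onionrexceptions

def import_verified_block(coreInst, content):
    '''Hypothetical wrapper showing the save pattern above.'''
    try:
        blockHash = coreInst.setData(content)  # raises DiskAllocationReached when storage is full
    except onionrexceptions.DiskAllocationReached:
        return False  # block dropped; never marked as saved
    else:
        # runs only if setData raised nothing
        coreInst.addToBlockDB(blockHash, dataSaved=True)
        coreInst._utils.processBlockMetadata(blockHash)
        return True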

View File

@@ -117,14 +117,14 @@ class OnionrCommunicatorDaemon:
                 pass
         logger.info('Goodbye.')
-        self._core._utils.localCommand('shutdown')
+        self._core._utils.localCommand('shutdown') # shutdown the api
         time.sleep(0.5)
 
     def lookupKeys(self):
         '''Lookup new keys'''
         logger.debug('Looking up new keys...')
         tryAmount = 1
-        for i in range(tryAmount):
+        for i in range(tryAmount): # amount of times to ask peers for new keys
             # Download new key list from random online peers
             peer = self.pickOnlinePeer()
             newKeys = self.peerAction(peer, action='kex')
@@ -151,6 +151,10 @@ class OnionrCommunicatorDaemon:
         existingBlocks = self._core.getBlockList()
         triedPeers = [] # list of peers we've tried this time around
         for i in range(tryAmount):
+            # check if disk allocation is used
+            if self._core._utils.storageCounter.isFull():
+                logger.warn('Not looking up new blocks because the maximum amount of allowed disk space is used')
+                break
             peer = self.pickOnlinePeer() # select random online peer
             # if we've already tried all the online peers this time around, stop
             if peer in triedPeers:
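
The two early exits added to this loop can be read as a single predicate. A sketch (the helper name is hypothetical; isFull is the method used by the calling code above):

def should_continue_lookup(storageCounter, peer, triedPeers):
    '''Hypothetical predicate condensing the early exits above.'''
    if storageCounter.isFull():
        return False  # disk allocation reached; new hashes could not be stored anyway
    if peer in triedPeers:
        return False  # every online peer has been asked this round
    return True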
@@ -165,7 +169,7 @@ class OnionrCommunicatorDaemon:
             if newDBHash != self._core.getAddressInfo(peer, 'DBHash'):
                 self._core.setAddressInfo(peer, 'DBHash', newDBHash)
                 try:
-                    newBlocks = self.peerAction(peer, 'getBlockHashes')
+                    newBlocks = self.peerAction(peer, 'getBlockHashes') # get list of new block hashes
                 except Exception as error:
                     logger.warn("could not get new blocks from " + peer, error=error)
                     newBlocks = False
@@ -177,7 +181,7 @@ class OnionrCommunicatorDaemon:
                         if i not in existingBlocks:
                             # if block does not exist on disk and is not already in block queue
                             if i not in self.blockQueue and not self._core._blacklist.inBlacklist(i):
-                                self.blockQueue.append(i)
+                                self.blockQueue.append(i) # add blocks to download queue
         self.decrementThreadCount('lookupBlocks')
         return
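
For reference, the queueing rule in this hunk reduces to three membership checks. A sketch (the function form and the newline-separated hash list are assumptions; inBlacklist is from the diff):

def queue_new_hashes(newBlocks, existingBlocks, blockQueue, blacklist):
    '''Hypothetical helper: queue only hashes that are not on disk,
    not already queued, and not blacklisted.'''
    for i in newBlocks.split('\n'):  # assumed: peers return newline-separated hashes
        if i in existingBlocks:
            continue  # already saved to disk
        if i in blockQueue or blacklist.inBlacklist(i):
            continue  # already queued, or deliberately rejected
        blockQueue.append(i)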
@@ -185,7 +189,9 @@ class OnionrCommunicatorDaemon:
         '''download new blocks in queue'''
         for blockHash in self.blockQueue:
             if self.shutdown:
+                # Exit loop if shutting down
                 break
+            # Do not download blocks being downloaded or that are already saved (edge cases)
             if blockHash in self.currentDownloading:
                 logger.debug('ALREADY DOWNLOADING ' + blockHash)
                 continue
@@ -193,7 +199,7 @@ class OnionrCommunicatorDaemon:
                 logger.debug('%s is already saved' % (blockHash,))
                 self.blockQueue.remove(blockHash)
                 continue
-            self.currentDownloading.append(blockHash)
+            self.currentDownloading.append(blockHash) # so other threads don't download the same block concurrently
             logger.info("Attempting to download %s..." % blockHash)
             peerUsed = self.pickOnlinePeer()
             content = self.peerAction(peerUsed, 'getData', data=blockHash) # block content from random peer (includes metadata)
@@ -216,7 +222,11 @@ class OnionrCommunicatorDaemon:
                     if self._core._utils.validateMetadata(metadata, metas[2]): # check if metadata is valid, and verify nonce
                         if self._core._crypto.verifyPow(content): # check if POW is enough/correct
                             logger.info('Block passed proof, saving.')
-                            self._core.setData(content)
-                            self._core.addToBlockDB(blockHash, dataSaved=True)
-                            self._core._utils.processBlockMetadata(blockHash) # caches block metadata values to block database
+                            try:
+                                self._core.setData(content)
+                            except onionrexceptions.DiskAllocationReached:
+                                logger.error("Reached disk allocation allowance, cannot save additional blocks.")
+                            else:
+                                self._core.addToBlockDB(blockHash, dataSaved=True)
+                                self._core._utils.processBlockMetadata(blockHash) # caches block metadata values to block database
                         else:
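
currentDownloading acts as a claim list so threads do not fetch the same block twice; the diff shows the append, while removal once the download finishes is assumed to happen elsewhere. A sketch of that guard (the helper name is hypothetical):

def claim_block(blockHash, currentDownloading, savedBlocks):
    '''Hypothetical guard mirroring the checks above.'''
    if blockHash in currentDownloading:
        return False  # another thread is already fetching this block
    if blockHash in savedBlocks:
        return False  # already on disk; nothing to do
    currentDownloading.append(blockHash)  # claim it before the network call
    return True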

View File

@@ -50,6 +50,7 @@ class Core:
         self.dbCreate = dbcreator.DBCreator(self)
+        self.usageFile = 'data/disk-usage.txt'
         self.config = config
 
         if not os.path.exists('data/'):
             os.mkdir('data/')
@@ -256,6 +257,8 @@
             Set the data associated with a hash
         '''
         data = data
+        dataSize = sys.getsizeof(data)
         if not type(data) is bytes:
             data = data.encode()
@@ -268,15 +271,17 @@
             pass # TODO: properly check if block is already saved elsewhere
             #raise Exception("Data is already set for " + dataHash)
         else:
-            blockFile = open(blockFileName, 'wb')
-            blockFile.write(data)
-            blockFile.close()
-            conn = sqlite3.connect(self.blockDB)
-            c = conn.cursor()
-            c.execute("UPDATE hashes SET dataSaved=1 WHERE hash = '" + dataHash + "';")
-            conn.commit()
-            conn.close()
+            if self._utils.storageCounter.addBytes(dataSize) != False:
+                blockFile = open(blockFileName, 'wb')
+                blockFile.write(data)
+                blockFile.close()
+                conn = sqlite3.connect(self.blockDB)
+                c = conn.cursor()
+                c.execute("UPDATE hashes SET dataSaved=1 WHERE hash = '" + dataHash + "';")
+                conn.commit()
+                conn.close()
+            else:
+                raise onionrexceptions.DiskAllocationReached
         return dataHash
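
setData now consults a storage counter: addBytes must return something other than False on success (hence the != False check) and signal failure by returning False. A minimal sketch of such a counter; only the method names (isFull, addBytes) and the usage-file path come from the diff, everything else is an assumption:

class StorageCounter:
    '''Minimal sketch; the limit source and file format are assumptions.'''
    def __init__(self, usageFile='data/disk-usage.txt', limitBytes=1000000000):
        self.dataFile = usageFile
        self.limit = limitBytes  # assumed: the real limit likely comes from config

    def getAmount(self):
        '''Read the running byte total from disk, defaulting to zero.'''
        try:
            with open(self.dataFile) as f:
                return int(f.read())
        except (FileNotFoundError, ValueError):
            return 0

    def isFull(self):
        return self.getAmount() >= self.limit

    def addBytes(self, amount):
        '''Return the new running total, or False if the write would
        exceed the limit (matching the != False check in setData).'''
        newAmount = self.getAmount() + amount
        if newAmount > self.limit:
            return False
        with open(self.dataFile, 'w') as f:
            f.write(str(newAmount))
        return newAmount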

View File

@@ -58,3 +58,8 @@ class MissingPort(Exception):
 class InvalidAddress(Exception):
     pass
+
+# file exceptions
+class DiskAllocationReached(Exception):
+    pass
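
DiskAllocationReached must subclass Exception to be raisable at all; under Python 3, raising a class that does not derive from BaseException is a TypeError. A quick usage sketch:

import onionrexceptions

try:
    raise onionrexceptions.DiskAllocationReached
except onionrexceptions.DiskAllocationReached:
    pass  # the caller decides whether to log or silently drop the block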

View File

@@ -23,7 +23,7 @@ import nacl.signing, nacl.encoding
 from onionrblockapi import Block
 import onionrexceptions
 from defusedxml import minidom
-import pgpwords
+import pgpwords, storagecounter
 if sys.version_info < (3, 6):
     try:
         import sha3
@@ -40,9 +40,9 @@ class OnionrUtils:
         self._core = coreInstance
         self.timingToken = ''
         self.avoidDupe = [] # list used to prevent duplicate requests per peer for certain actions
         self.peerProcessing = {} # dict of current peer actions: peer, actionList
+        self.storageCounter = storagecounter.StorageCounter(self._core)
         config.reload()
         return
@@ -647,7 +647,6 @@ class OnionrUtils:
                 if self.doGetRequest(url) != False:
                     retData = True
                     break
             except FileNotFoundError:
                 pass
         return retData