* uploads now only remove from upload queue on 'exists' for efficiency-security trade-off

fixes #26
master
Kevin Froman 2019-08-11 23:00:08 -05:00
parent 97d04440ee
commit 26b0a05d66
4 changed files with 19 additions and 10 deletions

View File

@@ -44,7 +44,7 @@ def importBlockFromData(content):
     try:
         blockHash = onionrstorage.set_data(content)
     except onionrexceptions.DiskAllocationReached:
-        pass
+        logger.warn('Failed to save block due to full disk allocation')
     else:
         blockmetadb.add_to_block_DB(blockHash, dataSaved=True)
         blockmetadata.process_block_metadata(blockHash) # caches block metadata values to block database

View File

@@ -38,11 +38,13 @@ class UploadQueue:
     '''
     def __init__(self, communicator: 'OnionrCommunicatorDaemon'):
+        '''Start the UploadQueue object, loading left over uploads into queue
+        and registering save shutdown function
+        '''
         self.communicator = communicator
         cache = deadsimplekv.DeadSimpleKV(UPLOAD_MEMORY_FILE)
         self.store_obj = cache
         cache: list = cache.get('uploads')
         if cache == None:
             cache = []
@@ -52,7 +54,7 @@ class UploadQueue:
         atexit.register(self.save)

     def save(self):
+        '''Saves to disk on shutdown or if called manually'''
         bl: list = self.communicator.blocksToUpload
-        self.store_obj.put('uploads', bl)
-        self.store_obj.flush()
+        if len(bl) > 0:
+            self.store_obj.put('uploads', bl)
+            self.store_obj.flush()

View File

@@ -45,9 +45,13 @@ def upload_blocks_from_communicator(comm_inst):
             data = {'block': block.Block(bl).getRaw()}
             proxyType = proxypicker.pick_proxy(peer)
             logger.info("Uploading block to " + peer, terminal=True)
-            if not basicrequests.do_post_request(url, data=data, proxyType=proxyType) == False:
-                localcommand.local_command('waitforshare/' + bl, post=True)
-                finishedUploads.append(bl)
+            resp = basicrequests.do_post_request(url, data=data, proxyType=proxyType)
+            if not resp == False:
+                if resp == 'success':
+                    localcommand.local_command('waitforshare/' + bl, post=True)
+                    finishedUploads.append(bl)
+                elif resp == 'exists':
+                    finishedUploads.append(bl)
     for x in finishedUploads:
         try:
             comm_inst.blocksToUpload.remove(x)

View File

@@ -33,10 +33,13 @@ def accept_upload(request):
         if blockimporter.importBlockFromData(data):
             resp = 'success'
         else:
+            resp = 'failure'
             logger.warn('Error encountered importing uploaded block')
     except onionrexceptions.BlacklistedBlock:
         logger.debug('uploaded block is blacklisted')
-        pass
+        resp = 'failure'
+    except onionrexceptions.DataExists:
+        resp = 'exists'
     if resp == 'failure':
         abort(400)
     resp = Response(resp)