refactoring core databases to not use core anymore

branch master
author Kevin Froman 2019-07-17 11:58:40 -05:00
parent 000538ddc8
commit bf8a9c4f27
9 changed files with 39 additions and 52 deletions

View File

@@ -19,6 +19,7 @@
 '''
 import core, onionrexceptions, logger
 from onionrutils import validatemetadata, blockmetadata
+from coredb import blockmetadb
 def importBlockFromData(content, coreInst):
     retData = False
@@ -45,7 +46,7 @@ def importBlockFromData(content, coreInst):
         except onionrexceptions.DiskAllocationReached:
             pass
         else:
-            coreInst.addToBlockDB(blockHash, dataSaved=True)
+            blockmetadb.add_to_block_DB(blockHash, dataSaved=True)
             blockmetadata.process_block_metadata(coreInst, blockHash) # caches block metadata values to block database
             retData = True
     return retData
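
The pattern in this hunk repeats throughout the commit: call sites stop reaching through a shared coreInst object and instead call a stateless function from coredb.blockmetadb. A minimal sketch of the shape of that refactor, with illustrative names rather than Onionr's real modules:

# Before: the database helper is a method, so every caller must carry a Core instance.
class Core:
    def __init__(self, block_db_path):
        self.blockDB = block_db_path

    def addToBlockDB(self, new_hash, dataSaved=False):
        ...  # open self.blockDB and insert new_hash

# After: a module-level function resolves the database path itself,
# so callers only need an import, not a Core object.
BLOCK_META_DB = 'block-metadata.db'  # hypothetical path constant standing in for coredb.dbfiles

def add_to_block_db(new_hash, data_saved=False):
    ...  # open BLOCK_META_DB and insert new_hash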

View File

@@ -82,7 +82,7 @@ def download_blocks_from_communicator(comm_inst):
                 logger.error('Reached disk allocation allowance, cannot save block %s.' % (blockHash,))
                 removeFromQueue = False
             else:
-                comm_inst._core.addToBlockDB(blockHash, dataSaved=True)
+                blockmetadb.add_to_block_DB(blockHash, dataSaved=True) # add block to meta db
                 blockmetadata.process_block_metadata(comm_inst._core, blockHash) # caches block metadata values to block database
         else:
             logger.warn('POW failed for block %s.' % (blockHash,))

View File

@@ -26,7 +26,7 @@ def clean_old_blocks(comm_inst):
     '''Delete old blocks if our disk allocation is full/near full, and also expired blocks'''
     # Delete expired blocks
-    for bHash in comm_inst._core.getExpiredBlocks():
+    for bHash in blockmetadb.get_expired_blocks():
         comm_inst._core._blacklist.addToDB(bHash)
         comm_inst._core.removeBlock(bHash)
         logger.info('Deleted block: %s' % (bHash,))

View File

@@ -164,14 +164,6 @@ class Core:
         '''
         self.dbCreate.createBlockDB()
-    def addToBlockDB(self, newHash, selfInsert=False, dataSaved=False):
-        '''
-            Add a hash value to the block db
-            Should be in hex format!
-        '''
-        coredb.blockmetadb.add.add_to_block_DB(self, newHash, selfInsert, dataSaved)
     def setData(self, data):
         '''
             Set the data assciated with a hash
@@ -267,27 +259,6 @@ class Core:
         '''
         return coredb.keydb.transportinfo.set_address_info(self, address, key, data)
-    def getExpiredBlocks(self):
-        '''Returns a list of expired blocks'''
-        return coredb.blockmetadb.expiredblocks.get_expired_blocks(self)
-    def updateBlockInfo(self, hash, key, data):
-        '''
-            sets info associated with a block
-            hash - the hash of a block
-            dateReceived - the date the block was recieved, not necessarily when it was created
-            decrypted - if we can successfully decrypt the block (does not describe its current state)
-            dataType - data type of the block
-            dataFound - if the data has been found for the block
-            dataSaved - if the data has been saved for the block
-            sig - optional signature by the author (not optional if author is specified)
-            author - multi-round partial sha3-256 hash of authors public key
-            dateClaimed - timestamp claimed inside the block, only as trustworthy as the block author is
-            expire - expire date for a block
-        '''
-        return coredb.blockmetadb.updateblockinfo.update_block_info(self, hash, key, data)
     def insertBlock(self, data, header='txt', sign=False, encryptType='', symKey='', asymPeer='', meta = {}, expire=None, disableForward=False):
         '''
             Inserts a block into the network
@@ -422,8 +393,8 @@ class Core:
                     self.daemonQueueAdd('uploadBlock', retData)
                 else:
                     pass
-            self.addToBlockDB(retData, selfInsert=True, dataSaved=True)
-            blockmetadata.process_block_metadata(self, retData)
+            coredb.blockmetadb.add_to_block_DB(retData, selfInsert=True, dataSaved=True)
+            coredb.blockmetadata.process_block_metadata(self, retData)
         if retData != False:
             if plaintextPeer == onionrvalues.DENIABLE_PEER_ADDRESS:
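
Note that the wrapper methods removed here (addToBlockDB, getExpiredBlocks, updateBlockInfo) were already thin delegations into coredb; the commit drops the indirection and points callers at the module functions directly. The dbfiles module those functions use is not shown in this diff; presumably it exposes database locations as plain module attributes instead of Core instance attributes such as self.blockDB. A hedged sketch of what such a module could look like:

# coredb/dbfiles.py: hypothetical contents, not part of this diff
import os

# Assumed data directory, for illustration only; the real path comes from Onionr's own configuration.
home = os.path.expanduser('~/.onionr/')
block_meta_db = home + 'block-metadata.db'  # referenced in this commit as dbfiles.block_meta_db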

View File

@@ -17,22 +17,21 @@
     You should have received a copy of the GNU General Public License
     along with this program. If not, see <https://www.gnu.org/licenses/>.
 '''
-import os, sqlite3
+import os, sqlite3, secrets
 from onionrutils import epoch, blockmetadata
-def add_to_block_DB(core_inst, newHash, selfInsert=False, dataSaved=False):
+from .. import dbfiles
+def add_to_block_DB(newHash, selfInsert=False, dataSaved=False):
     '''
         Add a hash value to the block db
         Should be in hex format!
     '''
-    if not os.path.exists(core_inst.blockDB):
-        raise Exception('Block db does not exist')
-    if blockmetadata.has_block(core_inst, newHash):
+    if blockmetadata.has_block(newHash):
         return
-    conn = sqlite3.connect(core_inst.blockDB, timeout=30)
+    conn = sqlite3.connect(dbfiles.block_meta_db, timeout=30)
     c = conn.cursor()
-    currentTime = epoch.get_epoch() + core_inst._crypto.secrets.randbelow(301)
+    currentTime = epoch.get_epoch() + secrets.randbelow(301)
     if selfInsert or dataSaved:
         selfInsert = 1
     else:
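
One behavioral detail the rewrite preserves: the stored receive time is epoch.get_epoch() + secrets.randbelow(301), i.e. fuzzed forward by up to 300 seconds so the database does not record exactly when this node first saw a block; the only change is importing the stdlib secrets module directly instead of reaching through core_inst._crypto. A self-contained sketch of the same idea, assuming a hashes table with the column names used elsewhere in this commit:

import sqlite3, secrets
from time import time

conn = sqlite3.connect(':memory:')  # stand-in for dbfiles.block_meta_db
conn.execute('CREATE TABLE hashes (hash TEXT PRIMARY KEY, dateReceived INTEGER, dataSaved INTEGER)')

def add_to_block_db(new_hash, data_saved=False):
    # Fuzz the receive time by 0-300 seconds so the exact arrival moment is not stored.
    current_time = int(time()) + secrets.randbelow(301)
    conn.execute('INSERT OR IGNORE INTO hashes (hash, dateReceived, dataSaved) VALUES (?, ?, ?)',
                 (new_hash, current_time, int(data_saved)))
    conn.commit()

add_to_block_db('ab' * 32, data_saved=True)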

View File

@@ -19,9 +19,10 @@
 '''
 import sqlite3
 from onionrutils import epoch
-def get_expired_blocks(core_inst):
+from .. import dbfiles
+def get_expired_blocks():
     '''Returns a list of expired blocks'''
-    conn = sqlite3.connect(core_inst.blockDB, timeout=30)
+    conn = sqlite3.connect(dbfiles.block_meta_db, timeout=30)
     c = conn.cursor()
     date = int(epoch.get_epoch())
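
The body of get_expired_blocks is cut off by the hunk, but the surrounding lines suggest it compares a stored expiry timestamp against the current epoch. A minimal sketch of that kind of query, assuming the hashes table carries the expire column listed in update_block_info below:

import sqlite3, time

def get_expired_blocks(db_path):
    '''Return block hashes whose expire timestamp has passed (assumed schema).'''
    conn = sqlite3.connect(db_path, timeout=30)
    date = int(time.time())
    rows = conn.execute('SELECT hash FROM hashes WHERE expire > 0 AND expire <= ?;', (date,))
    expired = [row[0] for row in rows]
    conn.close()
    return expired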

View File

@@ -17,13 +17,27 @@
     You should have received a copy of the GNU General Public License
     along with this program. If not, see <https://www.gnu.org/licenses/>.
 '''
 import sqlite3
-def update_block_info(core_inst, hash, key, data):
+from .. import dbfiles
+def update_block_info(hash, key, data):
+    '''
+        sets info associated with a block
+        hash - the hash of a block
+        dateReceived - the date the block was recieved, not necessarily when it was created
+        decrypted - if we can successfully decrypt the block (does not describe its current state)
+        dataType - data type of the block
+        dataFound - if the data has been found for the block
+        dataSaved - if the data has been saved for the block
+        sig - optional signature by the author (not optional if author is specified)
+        author - multi-round partial sha3-256 hash of authors public key
+        dateClaimed - timestamp claimed inside the block, only as trustworthy as the block author is
+        expire - expire date for a block
+    '''
     if key not in ('dateReceived', 'decrypted', 'dataType', 'dataFound', 'dataSaved', 'sig', 'author', 'dateClaimed', 'expire'):
         return False
-    conn = sqlite3.connect(core_inst.blockDB, timeout=30)
+    conn = sqlite3.connect(dbfiles.block_meta_db, timeout=30)
     c = conn.cursor()
     args = (data, hash)
     c.execute("UPDATE hashes SET " + key + " = ? where hash = ?;", args)
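
The key whitelist above is what keeps this function safe: SQLite can bind values through ? placeholders but not column names, so the column has to be spliced into the SQL string, and rejecting anything outside the fixed tuple stops that splice from becoming an injection point. A compact illustration of the same guard:

import sqlite3

ALLOWED_COLUMNS = ('dateReceived', 'decrypted', 'dataType', 'dataFound',
                   'dataSaved', 'sig', 'author', 'dateClaimed', 'expire')

def update_block_info(db_path, block_hash, key, data):
    # Identifiers cannot be bound parameters, so the column name must come
    # from a fixed whitelist before being concatenated into the statement.
    if key not in ALLOWED_COLUMNS:
        return False
    conn = sqlite3.connect(db_path, timeout=30)
    conn.execute('UPDATE hashes SET ' + key + ' = ? WHERE hash = ?;', (data, block_hash))
    conn.commit()
    conn.close()
    return True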

View File

@@ -23,6 +23,7 @@ from onionrusers import onionrusers
 from etc import onionrvalues
 import onionrblockapi
 from . import epoch, stringvalidators, bytesconverter
+from coredb import dbfiles, blockmetadb
 def get_block_metadata_from_data(blockData):
     '''
         accepts block contents as string, returns a tuple of
@@ -70,7 +71,7 @@ def process_block_metadata(core_inst, blockHash):
         try:
             if len(blockType) <= 10:
-                core_inst.updateBlockInfo(blockHash, 'dataType', blockType)
+                blockmetadb.update_block_info(blockHash, 'dataType', blockType)
         except TypeError:
             logger.warn("Missing block information")
             pass
@@ -81,18 +82,18 @@ def process_block_metadata(core_inst, blockHash):
             except (AssertionError, ValueError, TypeError) as e:
                 expireTime = onionrvalues.OnionrValues().default_expire + curTime
             finally:
-                core_inst.updateBlockInfo(blockHash, 'expire', expireTime)
+                blockmetadb.update_block_info(blockHash, 'expire', expireTime)
             if not blockType is None:
-                core_inst.updateBlockInfo(blockHash, 'dataType', blockType)
+                blockmetadb.update_block_info(blockHash, 'dataType', blockType)
             onionrevents.event('processblocks', data = {'block': myBlock, 'type': blockType, 'signer': signer, 'validSig': valid}, onionr = core_inst.onionrInst)
         else:
             pass
-def has_block(core_inst, hash):
+def has_block(hash):
     '''
         Check for new block in the list
     '''
-    conn = sqlite3.connect(core_inst.blockDB)
+    conn = sqlite3.connect(dbfiles.block_meta_db)
     c = conn.cursor()
     if not stringvalidators.validate_hash(hash):
         raise Exception("Invalid hash")
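
has_block now resolves the database through dbfiles.block_meta_db and validates the hash before querying. A hedged sketch of an equivalent existence check, with the validation reduced to a simple hex and length test (assuming sha3-256 hex digests) in place of stringvalidators.validate_hash:

import sqlite3

def has_block(db_path, block_hash):
    # Reject malformed input before it reaches the query.
    if len(block_hash) != 64 or not all(c in '0123456789abcdef' for c in block_hash):
        raise ValueError('Invalid hash')
    conn = sqlite3.connect(db_path)
    row = conn.execute('SELECT 1 FROM hashes WHERE hash = ? LIMIT 1;', (block_hash,)).fetchone()
    conn.close()
    return row is not None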

View File

@@ -40,7 +40,7 @@ def import_new_blocks(core_inst=None, scanDir=''):
         with open(block, 'rb') as newBlock:
             block = block.replace(scanDir, '').replace('.dat', '')
             if core_inst._crypto.sha3Hash(newBlock.read()) == block.replace('.dat', ''):
-                core_inst.addToBlockDB(block.replace('.dat', ''), dataSaved=True)
+                blockmetadb.add_to_block_DB(block.replace('.dat', ''), dataSaved=True)
                 logger.info('Imported block %s.' % block, terminal=True)
                 blockmetadata.process_block_metadata(core_inst, block)
             else:
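
The import path above only accepts a block file when the SHA3 hash of its contents matches the hash embedded in its filename, then registers it with blockmetadb.add_to_block_DB. That verification step is easy to reproduce with the standard library; a sketch assuming SHA3-256 digests and block files named <hex hash>.dat:

import hashlib, os

def verify_block_file(path):
    '''Return the block hash if the file content matches its filename, otherwise None.'''
    expected = os.path.basename(path).replace('.dat', '')
    with open(path, 'rb') as f:
        actual = hashlib.sha3_256(f.read()).hexdigest()
    return expected if actual == expected else None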