dbstorage largely complete

master
Kevin Froman 2019-01-05 16:16:36 -06:00
parent 7eddb0a879
commit 84fdb23b1c
6 changed files with 49 additions and 28 deletions

View File

@ -273,7 +273,9 @@ class OnionrCommunicatorDaemon:
pass
# Punish peer for sharing invalid block (not always malicious, but is bad regardless)
onionrpeers.PeerProfiles(peerUsed, self._core).addScore(-50)
logger.warn('Block hash validation failed for ' + blockHash + ' got ' + tempHash)
if tempHash != 'ed55e34cb828232d6c14da0479709bfa10a0923dca2b380496e6b2ed4f7a0253':
# Dumb hack for a 404 response from a peer. Don't log it if it's a 404, since it's likely not malicious or a critical error.
logger.warn('Block hash validation failed for ' + blockHash + ' got ' + tempHash)
if removeFromQueue:
try:
self.blockQueue.remove(blockHash) # remove from block queue both if success or false
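
The hunk above skips the noisy warning when the mismatched hash is just the hash of a peer's 404 response. A minimal standalone sketch of the same guard with the magic value lifted into a named constant (the helper name, the constant name, and the list-typed blockQueue are assumptions, not part of this commit):

import logger, onionrpeers  # same modules the daemon already uses

# Hypothetical name for the hard-coded value above: the sha3 hash that a
# peer's 404 response happens to produce.
HASH_OF_404_RESPONSE = 'ed55e34cb828232d6c14da0479709bfa10a0923dca2b380496e6b2ed4f7a0253'

def punishInvalidBlock(commInst, peerUsed, blockHash, tempHash, removeFromQueue=True):
    '''Hypothetical helper restating the guard above outside the daemon class.'''
    # Punish the peer; sharing an invalid block is bad even when it is not malicious
    onionrpeers.PeerProfiles(peerUsed, commInst._core).addScore(-50)
    if tempHash != HASH_OF_404_RESPONSE:
        # Only warn on real mismatches; a 404 body is probably not an attack
        logger.warn('Block hash validation failed for %s, got %s' % (blockHash, tempHash))
    if removeFromQueue:
        try:
            commInst.blockQueue.remove(blockHash)  # drop it whether or not the download worked
        except ValueError:
            pass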

View File

@ -22,7 +22,7 @@ from onionrblockapi import Block
import onionrutils, onionrcrypto, onionrproofs, onionrevents as events, onionrexceptions, onionrvalues
import onionrblacklist, onionrchat, onionrusers
import dbcreator
import dbcreator, onionrstorage
if sys.version_info < (3, 6):
try:
@ -278,6 +278,7 @@ class Core:
Simply return the data associated to a hash
'''
'''
try:
# logger.debug('Opening %s' % (str(self.blockDataLocation) + str(hash) + '.dat'))
dataFile = open(self.blockDataLocation + hash + '.dat', 'rb')
@ -285,6 +286,8 @@ class Core:
dataFile.close()
except FileNotFoundError:
data = False
'''
data = onionrstorage.getData(self, hash)
return data
@ -309,9 +312,10 @@ class Core:
#raise Exception("Data is already set for " + dataHash)
else:
if self._utils.storageCounter.addBytes(dataSize) != False:
blockFile = open(blockFileName, 'wb')
blockFile.write(data)
blockFile.close()
#blockFile = open(blockFileName, 'wb')
#blockFile.write(data)
#blockFile.close()
onionrstorage.store(self, data, blockHash=dataHash)
conn = sqlite3.connect(self.blockDB, timeout=10)
c = conn.cursor()
c.execute("UPDATE hashes SET dataSaved=1 WHERE hash = ?;", (dataHash,))

View File

@ -92,11 +92,11 @@ class DBCreator:
expire int - block expire date in epoch
'''
if os.path.exists(self.core.blockDB):
raise Exception("Block database already exists")
raise FileExistsError("Block database already exists")
conn = sqlite3.connect(self.core.blockDB)
c = conn.cursor()
c.execute('''CREATE TABLE hashes(
hash text distinct not null,
hash text not null,
dateReceived int,
decrypted int,
dataType text,
@ -114,11 +114,11 @@ class DBCreator:
def createBlockDataDB(self):
if os.path.exists(self.core.blockDataDB):
raise Exception("Block data database already exists")
raise FileExistsError("Block data database already exists")
conn = sqlite3.connect(self.core.blockDataDB)
c = conn.cursor()
c.execute('''CREATE TABLE blockData(
hash text distinct not null,
hash text not null,
data blob not null
);
''')
@ -130,7 +130,7 @@ class DBCreator:
Create the forward secrecy key db (*for *OUR* keys*)
'''
if os.path.exists(self.core.forwardKeysFile):
raise Exception("Block database already exists")
raise FileExistsError("Forward key database already exists")
conn = sqlite3.connect(self.core.forwardKeysFile)
c = conn.cursor()
c.execute('''CREATE TABLE myForwardKeys(

View File

@ -19,7 +19,7 @@
'''
import core as onionrcore, logger, config, onionrexceptions, nacl.exceptions, onionrusers
import json, os, sys, datetime, base64
import json, os, sys, datetime, base64, onionrstorage
class Block:
blockCacheOrder = list() # NEVER write your own code that writes to this!
@ -164,13 +164,13 @@ class Block:
filelocation = self.core.dataDir + 'blocks/%s.dat' % self.getHash()
if readfile:
with open(filelocation, 'rb') as f:
blockdata = f.read().decode()
blockdata = onionrstorage.getData(self.core, self.getHash()).decode()
#with open(filelocation, 'rb') as f:
#blockdata = f.read().decode()
self.blockFile = filelocation
else:
self.blockFile = None
# parse block
self.raw = str(blockdata)
self.bheader = json.loads(self.getRaw()[:self.getRaw().index('\n')])
@ -241,8 +241,9 @@ class Block:
try:
if self.isValid() is True:
if (not self.getBlockFile() is None) and (recreate is True):
with open(self.getBlockFile(), 'wb') as blockFile:
blockFile.write(self.getRaw().encode())
onionrstorage.store(self.core, self.getRaw().encode())
#with open(self.getBlockFile(), 'wb') as blockFile:
# blockFile.write(self.getRaw().encode())
else:
self.hash = self.getCore().insertBlock(self.getContent(), header = self.getType(), sign = sign, meta = self.getMetadata(), expire = self.getExpire())
if self.hash != False:
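
Block.update() and Block.save() now go through onionrstorage instead of opening blocks/<hash>.dat directly. A minimal read-path sketch, assuming a locally configured Core (Core() needs the Onionr data directory in place) and the 64-character hash of a block that is already stored; the hash value here is only a placeholder:

import json
import core as onionrcore, onionrstorage

myCore = onionrcore.Core()
someHash = '0' * 64  # placeholder; use a real block hash

raw = onionrstorage.getData(myCore, someHash)  # bytes from the blockData table or a .dat file
if raw:
    text = raw.decode()
    bheader = json.loads(text[:text.index('\n')])  # same header parse Block.update() performs
    bcontent = text[text.index('\n') + 1:]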

View File

@ -17,12 +17,19 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import core, sys, sqlite3, os
import core, sys, sqlite3, os, dbcreator
DB_ENTRY_SIZE_LIMIT = 10000 # Will be a config option
def dbCreate(coreInst):
try:
dbcreator.DBCreator(coreInst).createBlockDataDB()
except FileExistsError:
pass
def _dbInsert(coreInst, blockHash, data):
assert isinstance(core, core.Core)
assert isinstance(coreInst, core.Core)
dbCreate(coreInst)
conn = sqlite3.connect(coreInst.blockDataDB, timeout=10)
c = conn.cursor()
data = (blockHash, data)
@ -31,6 +38,8 @@ def _dbInsert(coreInst, blockHash, data):
conn.close()
def _dbFetch(coreInst, blockHash):
assert isinstance(coreInst, core.Core)
dbCreate(coreInst)
conn = sqlite3.connect(coreInst.blockDataDB, timeout=10)
c = conn.cursor()
for i in c.execute('SELECT data from blockData where hash = ?', (blockHash,)):
@ -39,20 +48,24 @@ def _dbFetch(coreInst, blockHash):
conn.close()
return None
def store(coreInst, blockHash, data):
def store(coreInst, data, blockHash=''):
assert isinstance(coreInst, core.Core)
assert self._core._utils.validateHash(blockHash)
assert self._core._crypto.sha3Hash(data) == blockHash
ourHash = coreInst._crypto.sha3Hash(data)
if blockHash != '':
    assert coreInst._utils.validateHash(blockHash)
    assert ourHash == blockHash
else:
    blockHash = ourHash
if DB_ENTRY_SIZE_LIMIT >= sys.getsizeof(data):
_dbInsert(coreInst, blockHash, data)
else:
with open('%s/%s.dat' % (coreInst.blockDataLocation, blockHash), 'w') as blockFile:
with open('%s/%s.dat' % (coreInst.blockDataLocation, blockHash), 'wb') as blockFile:
blockFile.write(data)
def getData(coreInst, bHash):
assert isinstance(coreInst, core.Core)
assert self._core._utils.validateHash(blockHash)
assert coreInst._utils.validateHash(bHash)
# First check DB for data entry by hash
# if no entry, check disk
@ -60,8 +73,8 @@ def getData(coreInst, bHash):
retData = ''
fileLocation = '%s/%s.dat' % (coreInst.blockDataLocation, bHash)
if os.path.exists(fileLocation):
with open(fileLocation, 'r') as block:
with open(fileLocation, 'rb') as block:
retData = block.read()
else:
retData = _dbFetch(coreInst, bHash)
return
return retData
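
The routing rule in the new module is just a size check: payloads at or under DB_ENTRY_SIZE_LIMIT go into the blockData table, larger ones become <hash>.dat files, and getData checks the disk path first before falling back to the database. A stripped-down standalone sketch of that pattern using plain hashlib and sqlite3 instead of the Core helpers (names here are illustrative, not part of this commit):

import hashlib, os, sqlite3, sys

DB_ENTRY_SIZE_LIMIT = 10000  # same threshold as above; will be a config option

def storeBlob(dbPath, dataDir, data):
    '''Small payloads go into sqlite, large ones to <hash>.dat on disk.'''
    blockHash = hashlib.sha3_256(data).hexdigest()
    if sys.getsizeof(data) <= DB_ENTRY_SIZE_LIMIT:
        conn = sqlite3.connect(dbPath, timeout=10)
        c = conn.cursor()
        c.execute('CREATE TABLE IF NOT EXISTS blockData(hash text not null, data blob not null)')
        c.execute('INSERT INTO blockData (hash, data) VALUES (?, ?)', (blockHash, data))
        conn.commit()
        conn.close()
    else:
        with open(os.path.join(dataDir, blockHash + '.dat'), 'wb') as blockFile:
            blockFile.write(data)
    return blockHash

def getBlob(dbPath, dataDir, blockHash):
    '''Check the disk path first, then fall back to the database, like getData().'''
    fileLocation = os.path.join(dataDir, blockHash + '.dat')
    if os.path.exists(fileLocation):
        with open(fileLocation, 'rb') as blockFile:
            return blockFile.read()
    conn = sqlite3.connect(dbPath, timeout=10)
    c = conn.cursor()
    retData = None
    for row in c.execute('SELECT data FROM blockData WHERE hash = ?', (blockHash,)):
        retData = row[0]
        break
    conn.close()
    return retData

A storeBlob followed by a getBlob with the returned hash round-trips the original bytes regardless of which side of the size threshold the payload landed on.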

View File

@ -54,9 +54,10 @@ class OnionrFlow:
self.flowRunning = False
expireTime = self.myCore._utils.getEpoch() + 43200
if len(message) > 0:
insertBL = Block(content = message, type = 'txt', expire=expireTime, core = self.myCore)
insertBL.setMetadata('ch', self.channel)
insertBL.save()
self.myCore.insertBlock(message, header='txt', expire=expireTime)
#insertBL = Block(content = message, type = 'txt', expire=expireTime, core = self.myCore)
#insertBL.setMetadata('ch', self.channel)
#insertBL.save()
logger.info("Flow is exiting, goodbye")
return