renamed onionr dir and bugfixes/linting progress
This commit is contained in:
parent 2b996da17f
commit 720efe4fca
226 changed files with 179 additions and 142 deletions
src/onionrstorage/__init__.py (new executable file, 106 additions)
@@ -0,0 +1,106 @@
'''
    Onionr - Private P2P Communication

    This file handles block storage, providing an abstraction for storing blocks between the file system and the database
'''
import sys
import sqlite3
import os
from onionrutils import bytesconverter
from onionrutils import stringvalidators
from coredb import dbfiles
import filepaths
import onionrcrypto
import onionrexceptions
from onionrsetup import dbcreator
from onionrcrypto import hashers
from . import setdata
'''
    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program. If not, see <https://www.gnu.org/licenses/>.
'''

DB_ENTRY_SIZE_LIMIT = 10000  # Will be a config option

set_data = setdata.set_data


def _dbInsert(blockHash, data):
    conn = sqlite3.connect(dbfiles.block_data_db, timeout=10)
    c = conn.cursor()
    data = (blockHash, data)
    c.execute('INSERT INTO blockData (hash, data) VALUES(?, ?);', data)
    conn.commit()
    conn.close()


def _dbFetch(blockHash):
    conn = sqlite3.connect(dbfiles.block_data_db, timeout=10)
    c = conn.cursor()
    for i in c.execute('SELECT data from blockData where hash = ?', (blockHash,)):
        return i[0]
    conn.commit()
    conn.close()
    return None


def deleteBlock(blockHash):
    # Call removeblock.remove_block instead if you also want the stored byte count updated
    if os.path.exists('%s/%s.dat' % (filepaths.block_data_location, blockHash)):
        os.remove('%s/%s.dat' % (filepaths.block_data_location, blockHash))
        return True
    conn = sqlite3.connect(dbfiles.block_data_db, timeout=10)
    c = conn.cursor()
    data = (blockHash,)
    c.execute('DELETE FROM blockData where hash = ?', data)
    conn.commit()
    conn.close()
    return True


def store(data, blockHash=''):
    if not stringvalidators.validate_hash(blockHash):
        raise ValueError
    ourHash = hashers.sha3_hash(data)
    if blockHash != '':
        if not ourHash == blockHash:
            raise ValueError('Hash specified does not meet internal hash check')
    else:
        blockHash = ourHash

    if DB_ENTRY_SIZE_LIMIT >= sys.getsizeof(data):
        _dbInsert(blockHash, data)
    else:
        with open('%s/%s.dat' % (filepaths.block_data_location, blockHash), 'wb') as blockFile:
            blockFile.write(data)


def getData(bHash):
    if not stringvalidators.validate_hash(bHash):
        raise ValueError

    bHash = bytesconverter.bytes_to_str(bHash)

    # Check the disk for the block file first;
    # if the file does not exist, check the database.
    # If the block is in neither place, raise an exception.
    retData = None
    fileLocation = '%s/%s.dat' % (filepaths.block_data_location, bHash)
    not_found_msg = "Block data not found for: "
    if os.path.exists(fileLocation):
        with open(fileLocation, 'rb') as block:
            retData = block.read()
    else:
        retData = _dbFetch(bHash)
        if retData is None:
            raise onionrexceptions.NoDataAvailable(not_found_msg + str(bHash))
    return retData
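
store() keeps a block at or below DB_ENTRY_SIZE_LIMIT (as measured by sys.getsizeof) in the blockData SQLite table and writes anything larger to <hash>.dat under the block data directory; getData() checks the disk path first and falls back to the database. Below is a minimal standalone sketch of that size-based routing, using placeholder paths and stdlib SHA3 hashing rather than the real Onionr config and onionrcrypto:

# Standalone sketch of the size-based routing used by store()/getData() above.
# Paths, table name, and hashing here are placeholders, not the real Onionr setup.
import os
import sqlite3
import sys
import hashlib

DB_ENTRY_SIZE_LIMIT = 10000  # mirrors the module constant; small blocks go to SQLite
BLOCK_DIR = '/tmp/block-data-example'
BLOCK_DB = '/tmp/block-data-example.db'

def _ensure_storage():
    os.makedirs(BLOCK_DIR, exist_ok=True)
    with sqlite3.connect(BLOCK_DB) as conn:
        conn.execute('CREATE TABLE IF NOT EXISTS blockData (hash TEXT, data BLOB)')

def store_example(data: bytes) -> str:
    _ensure_storage()
    block_hash = hashlib.sha3_256(data).hexdigest()
    if DB_ENTRY_SIZE_LIMIT >= sys.getsizeof(data):
        # Small block: keep it in the database
        with sqlite3.connect(BLOCK_DB) as conn:
            conn.execute('INSERT INTO blockData (hash, data) VALUES(?, ?)',
                         (block_hash, data))
    else:
        # Large block: write it to disk as <hash>.dat
        with open(os.path.join(BLOCK_DIR, block_hash + '.dat'), 'wb') as f:
            f.write(data)
    return block_hash

def get_data_example(block_hash: str) -> bytes:
    # Disk first, then the database, matching getData() above
    path = os.path.join(BLOCK_DIR, block_hash + '.dat')
    if os.path.exists(path):
        with open(path, 'rb') as f:
            return f.read()
    with sqlite3.connect(BLOCK_DB) as conn:
        for row in conn.execute('SELECT data FROM blockData WHERE hash = ?', (block_hash,)):
            return row[0]
    raise LookupError('Block data not found for: ' + block_hash)

if __name__ == '__main__':
    h = store_example(b'hello onionr')
    print(h, get_data_example(h))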
src/onionrstorage/removeblock.py (new file, 23 additions)
@@ -0,0 +1,23 @@
import sys, sqlite3
import onionrexceptions, onionrstorage
from onionrutils import stringvalidators
from coredb import dbfiles
from onionrblocks import storagecounter


def remove_block(block):
    '''
        Remove a block from this node (does not automatically blacklist it)

        **You may also want to call blacklist.addToDB(blockHash)**
    '''
    if stringvalidators.validate_hash(block):
        conn = sqlite3.connect(dbfiles.block_meta_db, timeout=30)
        c = conn.cursor()
        t = (block,)
        c.execute('DELETE FROM hashes WHERE hash = ?;', t)
        conn.commit()
        conn.close()
        dataSize = sys.getsizeof(onionrstorage.getData(block))
        storagecounter.StorageCounter().remove_bytes(dataSize)
    else:
        raise onionrexceptions.InvalidHexHash
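
remove_block() only deletes the hash row from the block metadata database and subtracts the block's size from the storage counter; it does not delete the stored data (deleteBlock() above does that) and, as its docstring notes, it does not blacklist the hash. A hypothetical cleanup helper combining those pieces might look like the sketch below; the blacklist call is assumed from the docstring and is not part of this commit:

# Hypothetical helper tying remove_block() and deleteBlock() together.
import onionrstorage
from onionrstorage import removeblock


def purge_block(block_hash: str):
    # Update the metadata DB and storage counter first, since remove_block()
    # still needs to read the block data to measure its size.
    removeblock.remove_block(block_hash)
    # Then delete the stored data itself (.dat file or blockData row).
    onionrstorage.deleteBlock(block_hash)
    # Optionally blacklist so the block is not fetched again (assumed API,
    # suggested by the docstring): blacklist.addToDB(block_hash)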
src/onionrstorage/setdata.py (new file, 42 additions)
@@ -0,0 +1,42 @@
import sys, sqlite3
import onionrstorage, onionrexceptions, onionrcrypto as crypto
import filepaths
from onionrblocks import storagecounter
from coredb import dbfiles
from onionrutils import blockmetadata, bytesconverter


def set_data(data) -> str:
    '''
        Set the data associated with a hash
    '''
    storage_counter = storagecounter.StorageCounter()
    dataSize = sys.getsizeof(data)
    nonce_hash = crypto.hashers.sha3_hash(bytesconverter.str_to_bytes(blockmetadata.fromdata.get_block_metadata_from_data(data)[2]))
    nonce_hash = bytesconverter.bytes_to_str(nonce_hash)

    if not type(data) is bytes:
        data = data.encode()

    dataHash = crypto.hashers.sha3_hash(data)

    if type(dataHash) is bytes:
        dataHash = dataHash.decode()
    blockFileName = filepaths.block_data_location + dataHash + '.dat'
    try:
        onionrstorage.getData(dataHash)
    except onionrexceptions.NoDataAvailable:
        if storage_counter.add_bytes(dataSize) != False:
            onionrstorage.store(data, blockHash=dataHash)
            conn = sqlite3.connect(dbfiles.block_meta_db, timeout=30)
            c = conn.cursor()
            c.execute("UPDATE hashes SET dataSaved=1 WHERE hash = ?;", (dataHash,))
            conn.commit()
            conn.close()
            with open(filepaths.data_nonce_file, 'a') as nonceFile:
                nonceFile.write(nonce_hash + '\n')
        else:
            raise onionrexceptions.DiskAllocationReached
    else:
        raise onionrexceptions.DataExists("Data is already set for " + dataHash)

    return dataHash
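
set_data() is the write path callers are expected to use: it hashes the serialized block, refuses duplicates (DataExists), enforces the storage allocation (DiskAllocationReached), stores via store(), marks dataSaved=1 in the hashes table, and appends the block's nonce hash to the nonce file. A hedged usage sketch follows; the error-handling policy and logging are illustrative, and the payload must be a serialized block that get_block_metadata_from_data() can parse:

# Illustrative caller for set_data(); not part of this commit.
import logging

import onionrexceptions
from onionrstorage import setdata


def try_store(raw_block: bytes):
    '''Store a serialized block, returning its hash, or None if it was not saved.'''
    try:
        return setdata.set_data(raw_block)
    except onionrexceptions.DataExists:
        logging.info('Block already stored; nothing to do')
    except onionrexceptions.DiskAllocationReached:
        logging.warning('Storage allocation reached; block was not stored')
    return None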