progress in removing core

master
Kevin Froman 2019-07-24 12:22:19 -05:00
parent 274505a51f
commit 3097407774
11 changed files with 22 additions and 17 deletions

View File

@@ -20,13 +20,13 @@
import onionrexceptions, logger
from onionrutils import validatemetadata, blockmetadata
from coredb import blockmetadb
-import onionrcrypto, onionrblacklist, onionrstorage
+import onionrblacklist, onionrstorage
+import onionrcrypto as crypto
def importBlockFromData(content):
-crypto = onionrcrypto.OnionrCrypto()
blacklist = onionrblacklist.OnionrBlackList()
retData = False
-dataHash = crypto.sha3Hash(content)
+dataHash = crypto.hashers.sha3_hash(content)
if blacklist.inBlacklist(dataHash):
raise onionrexceptions.BlacklistedBlock('%s is a blacklisted block' % (dataHash,))
@@ -39,10 +39,10 @@ def importBlockFromData(content):
metas = blockmetadata.get_block_metadata_from_data(content) # returns tuple(metadata, meta), meta is also in metadata
metadata = metas[0]
if validatemetadata.validate_metadata(metadata, metas[2]): # check if metadata is valid
-if crypto.verifyPow(content): # check if POW is enough/correct
+if crypto.cryptoutils.verify_POW(content): # check if POW is enough/correct
logger.info('Block passed proof, saving.', terminal=True)
try:
-blockHash = onionrstorage.setdata(content)
+blockHash = onionrstorage.set_data(content)
except onionrexceptions.DiskAllocationReached:
pass
else:
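This file drops the per-call OnionrCrypto() instance in favor of the onionrcrypto module itself, so hashing, proof-of-work verification, and storage become plain module-level calls. A minimal sketch of the resulting call path, assuming the Onionr source tree is on the import path (module layout taken from the hunk above, not verified independently):

    import onionrcrypto as crypto          # module-level crypto, no class instance
    import onionrstorage, onionrexceptions

    def import_block(content: bytes):
        data_hash = crypto.hashers.sha3_hash(content)    # hash used for blacklist/lookup checks
        if not crypto.cryptoutils.verify_POW(content):   # reject blocks with insufficient proof of work
            return False
        try:
            return onionrstorage.set_data(content)       # save and return the block hash
        except onionrexceptions.DiskAllocationReached:
            return False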

View File

@@ -21,6 +21,7 @@ import base64
import onionrproofs, logger
from etc import onionrvalues
from onionrutils import basicrequests, bytesconverter
+from utils import gettransports
from communicator import onlinepeers
from coredb import keydb
def announce_node(daemon):
@@ -43,7 +44,10 @@ def announce_node(daemon):
peer = onlinepeers.pick_online_peer(daemon)
for x in range(1):
-ourID = daemon.hsAddress
+try:
+ourID = gettransports.get()[0]
+except IndexError:
+break
url = 'http://' + peer + '/announce'
data = {'node': ourID}
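announce_node now derives the node's own address from gettransports rather than daemon.hsAddress, and gives up quietly when no transport has been published yet. A hedged sketch of that guard, assuming gettransports.get() returns a (possibly empty) list of this node's addresses as the hunk implies:

    from utils import gettransports   # import path as used above

    def pick_own_address():
        # An empty transport list early in startup is normal; treat it as
        # "nothing to announce yet" instead of letting IndexError escape.
        try:
            return gettransports.get()[0]
        except IndexError:
            return None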

View File

@@ -57,7 +57,7 @@ def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
if not config.get('tor.v3onions') and len(address) == 62:
continue
# Don't connect to our own address
-if address == comm_inst.hsAddress:
+if address in transports:
continue
# Don't connect to invalid address or if its already been tried/connected, or if its cooled down
if len(address) == 0 or address in tried or address in comm_inst.onlinePeers or address in comm_inst.cooldownPeer:
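Comparing against a single comm_inst.hsAddress no longer covers nodes with more than one transport, so the self-connection test becomes a membership check against every local address. A small sketch of the candidate filter; the surrounding variable names are assumptions based on this hunk:

    def filter_peer_candidates(candidates, transports, tried, online_peers, cooldown):
        for address in candidates:
            if address in transports:      # skip any of our own addresses, not just one
                continue
            if not address or address in tried or address in online_peers or address in cooldown:
                continue                   # invalid, already tried/connected, or cooling down
            yield address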

View File

@@ -78,7 +78,7 @@ def download_blocks_from_communicator(comm_inst):
if onionrcrypto.cryptoutils.verify_POW(content): # check if POW is enough/correct
logger.info('Attempting to save block %s...' % blockHash[:12])
try:
-onionrstorage.setdata.set_data(content)
+onionrstorage.set_data(content)
except onionrexceptions.DataExists:
logger.warn('Data is already set for %s ' % (blockHash,))
except onionrexceptions.DiskAllocationReached:
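Block saving now goes through the package-level onionrstorage.set_data alias (introduced in the last file of this commit) instead of reaching into the setdata submodule. A sketch of the save path with the two failure cases handled above; exception and logger names are taken from the hunk, the log text is illustrative:

    import onionrstorage, onionrexceptions, logger

    def save_downloaded_block(content, block_hash):
        try:
            onionrstorage.set_data(content)              # package-level alias for setdata.set_data
        except onionrexceptions.DataExists:
            logger.warn('Data is already set for %s' % (block_hash,))
        except onionrexceptions.DiskAllocationReached:
            logger.warn('Skipping %s; disk allocation reached' % (block_hash,))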

View File

@@ -22,7 +22,7 @@ import logger
from onionrusers import onionrusers
from onionrutils import epoch
from coredb import blockmetadb, dbfiles
-from onionrstorage import removeblock, setdata
+from onionrstorage import removeblock
def clean_old_blocks(comm_inst):
'''Delete old blocks if our disk allocation is full/near full, and also expired blocks'''

View File

@@ -21,13 +21,12 @@ import logger
from onionrutils import stringvalidators
from communicator import peeraction, onlinepeers
from utils import gettransports
-transports = gettransports.get()
def lookup_new_peer_transports_with_communicator(comm_inst):
logger.info('Looking up new addresses...')
tryAmount = 1
newPeers = []
-if len(transports) == 0:
-transports = list(gettransports.get())
+transports = gettransports.get()
for i in range(tryAmount):
# Download new peer address list from random online peers
if len(newPeers) > 10000:
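The transport list is now fetched at the top of each lookup instead of once at import time; a module-level snapshot taken before Tor/I2P publish any address would stay empty forever. A self-contained illustration of why the per-call fetch matters (gettransports is Onionr-internal, so a stand-in registry is used here):

    _registry = []                     # stand-in for the node's published transports

    def get():
        return list(_registry)         # stand-in for gettransports.get()

    SNAPSHOT = get()                   # import-time copy: frozen at whatever existed then

    def lookup_with_snapshot():
        return SNAPSHOT                # stale if transports appeared later

    def lookup_fresh():
        return get()                   # re-read on every call, as the new code does

    _registry.append('example.onion')  # transport becomes available after import
    assert lookup_with_snapshot() == []
    assert lookup_fresh() == ['example.onion']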

View File

@@ -45,7 +45,7 @@ def upload_blocks_from_communicator(comm_inst):
data = {'block': block.Block(bl).getRaw()}
proxyType = proxypicker.pick_proxy(peer)
logger.info("Uploading block to " + peer, terminal=True)
-if not basicrequests.do_post_request(url, data=data, proxyType=proxyType) == False:
+if not basicrequests.do_post_request(comm_inst.onionrInst, url, data=data, proxyType=proxyType) == False:
localcommand.local_command('waitforshare/' + bl, post=True)
finishedUploads.append(bl)
for x in finishedUploads:

View File

@@ -21,6 +21,7 @@ import sqlite3
from . import expiredblocks, updateblockinfo, add
from .. import dbfiles
update_block_info = updateblockinfo.update_block_info
add_to_block_DB = add.add_to_block_DB
def get_block_list(dateRec = None, unsaved = False):
'''
Get list of our blocks
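blockmetadb's __init__ binds its submodule functions (update_block_info, add_to_block_DB) at package level, matching the set_data alias added to onionrstorage below, so callers import one namespace instead of drilling into submodules. A caller-side sketch; the argument shape is an assumption, not taken from this diff:

    from coredb import blockmetadb

    # Hypothetical call: the exact parameters of add_to_block_DB are not shown in this commit.
    blockmetadb.add_to_block_DB('exampleblockhashvalue')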

View File

@@ -25,14 +25,12 @@ class PublicAPISecurity:
def __init__(self, public_api):
public_api_security_bp = Blueprint('publicapisecurity', __name__)
self.public_api_security_bp = public_api_security_bp
-transports = gettransports.get()
@public_api_security_bp.before_app_request
def validate_request():
'''Validate request has the correct hostname'''
# If high security level, deny requests to public (HS should be disabled anyway for Tor, but might not be for I2P)
-if len(transports) == 0:
-transports = list(gettransports.get())
+transports = gettransports.get()
if public_api.config.get('general.security_level', default=1) > 0:
abort(403)
if request.host not in transports:
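The public API security hook now resolves the transport list inside validate_request rather than when the Blueprint is built, so the hostname check never runs against a list captured before any transport existed. A standalone Flask sketch of the same hook; everything except Flask's own API (Blueprint.before_app_request, request.host, abort) is a placeholder:

    from flask import Flask, Blueprint, request, abort

    app = Flask(__name__)
    security_bp = Blueprint('publicapisecurity', __name__)

    def get_transports():
        return ['example.onion']       # placeholder for utils.gettransports.get()

    @security_bp.before_app_request
    def validate_request():
        transports = get_transports()  # re-read per request, mirroring the change above
        if request.host not in transports:
            abort(403)                 # reject requests addressed to an unknown hostname

    app.register_blueprint(security_bp)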

View File

@@ -124,7 +124,7 @@ def insert_block(data, header='txt', sign=False, encryptType='', symKey='', asym
payload = onionrproofs.POW(metadata, data).waitForResult()
if payload != False:
try:
-retData = onionrstorage.setdata.set_data(payload)
+retData = onionrstorage.set_data(payload)
except onionrexceptions.DiskAllocationReached:
logger.error(allocationReachedMessage)
retData = False

View File

@@ -22,8 +22,11 @@ from onionrutils import bytesconverter, stringvalidators
from coredb import dbfiles
import filepaths, onionrcrypto, dbcreator, onionrexceptions
+from onionrcrypto import hashers
+from . import setdata
DB_ENTRY_SIZE_LIMIT = 10000 # Will be a config option
+set_data = setdata.set_data
def _dbInsert(blockHash, data):
conn = sqlite3.connect(dbfiles.block_data_db, timeout=10)
c = conn.cursor()