progress in removing core

Kevin Froman 2019-07-18 18:07:18 -05:00
parent 1775b96a04
commit dbbefafd19
22 changed files with 224 additions and 75 deletions

View file

@@ -19,6 +19,7 @@
'''
import secrets
from etc import onionrvalues
+import onionrblocks
def insert_deniable_block(comm_inst):
'''Insert a fake block in order to make it more difficult to track real blocks'''
fakePeer = ''
@@ -27,5 +28,5 @@ def insert_deniable_block(comm_inst):
# This assumes on the libsodium primitives to have key-privacy
fakePeer = onionrvalues.DENIABLE_PEER_ADDRESS
data = secrets.token_hex(secrets.randbelow(1024) + 1)
-comm_inst._core.insertBlock(data, header='pm', encryptType='asym', asymPeer=fakePeer, disableForward=True, meta={'subject': 'foo'})
+onionrblocks.insert(data, header='pm', encryptType='asym', asymPeer=fakePeer, disableForward=True, meta={'subject': 'foo'})
comm_inst.decrementThreadCount('insert_deniable_block')
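The pattern here repeats across the commit: the instance-bound comm_inst._core.insertBlock becomes the module-level onionrblocks.insert, so the caller no longer needs a core object at all. A minimal sketch of the new call, assuming the Onionr source tree is importable and that onionrblocks.insert keeps the keyword arguments shown above:

    # Sketch only: insert a deniable dummy block via the module-level API.
    import secrets
    import onionrblocks
    from etc import onionrvalues

    fake_peer = onionrvalues.DENIABLE_PEER_ADDRESS  # well-known dummy recipient
    data = secrets.token_hex(secrets.randbelow(1024) + 1)  # random hex payload
    onionrblocks.insert(data, header='pm', encryptType='asym',
                        asymPeer=fake_peer, disableForward=True,
                        meta={'subject': 'foo'})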

View file

@@ -21,27 +21,28 @@ import sqlite3
import logger
from onionrusers import onionrusers
from onionrutils import epoch
-from coredb import blockmetadb
+from coredb import blockmetadb, dbfiles
+from onionrstorage import removeblock, setdata
def clean_old_blocks(comm_inst):
'''Delete old blocks if our disk allocation is full/near full, and also expired blocks'''
# Delete expired blocks
for bHash in blockmetadb.get_expired_blocks():
-comm_inst._core._blacklist.addToDB(bHash)
-comm_inst._core.removeBlock(bHash)
+comm_inst.blacklist.addToDB(bHash)
+removeblock.remove_block(bHash)
logger.info('Deleted block: %s' % (bHash,))
while comm_inst._core.storage_counter.isFull():
oldest = blockmetadb.get_block_list()[0]
-comm_inst._core._blacklist.addToDB(oldest)
-comm_inst._core.removeBlock(oldest)
+comm_inst.blacklist.addToDB(oldest)
+removeblock.remove_block(oldest)
logger.info('Deleted block: %s' % (oldest,))
comm_inst.decrementThreadCount('clean_old_blocks')
def clean_keys(comm_inst):
'''Delete expired forward secrecy keys'''
-conn = sqlite3.connect(comm_inst._core.peerDB, timeout=10)
+conn = sqlite3.connect(dbfiles.user_id_info_db, timeout=10)
c = conn.cursor()
time = epoch.get_epoch()
deleteKeys = []
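Block removal and database paths are decoupled the same way: onionrstorage.removeblock replaces _core.removeBlock, and coredb.dbfiles supplies the path that used to live on _core.peerDB. A sketch under those assumptions, using only names imported in this hunk:

    # Sketch only: expired-block sweep and key-db connection without a core object.
    import sqlite3
    from coredb import blockmetadb, dbfiles
    from onionrstorage import removeblock

    for block_hash in blockmetadb.get_expired_blocks():
        removeblock.remove_block(block_hash)  # delete from storage; blacklisting is separate

    conn = sqlite3.connect(dbfiles.user_id_info_db, timeout=10)  # was comm_inst._core.peerDB
    conn.close()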

View file

@@ -20,6 +20,7 @@
import logger
from onionrutils import stringvalidators
from communicator import peeraction, onlinepeers
+from utils import gettransports
def lookup_new_peer_transports_with_communicator(comm_inst):
logger.info('Looking up new addresses...')
tryAmount = 1
@@ -40,7 +41,7 @@ def lookup_new_peer_transports_with_communicator(comm_inst):
invalid = []
for x in newPeers:
x = x.strip()
-if not stringvalidators.validate_transport(x) or x in comm_inst.newPeers or x == comm_inst._core.hsAddress:
+if not stringvalidators.validate_transport(x) or x in comm_inst.newPeers or x == gettransports.transports[0]:
# avoid adding if its our address
invalid.append(x)
for x in invalid:
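The node's own hidden-service address now comes from utils.gettransports instead of _core.hsAddress. A sketch of the self-address filter, assuming gettransports.transports is a populated list as this hunk implies (the helper name is hypothetical):

    # Sketch only: skip peer addresses that are invalid, already known, or our own.
    from utils import gettransports
    from onionrutils import stringvalidators

    def usable_peer(addr, known_peers):
        addr = addr.strip()
        if not stringvalidators.validate_transport(addr):
            return False  # malformed transport string
        if addr in known_peers:
            return False  # already queued
        return addr != gettransports.transports[0]  # never add our own address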

View file

@@ -37,7 +37,7 @@ def lookup_blocks_from_communicator(comm_inst):
if not comm_inst.isOnline:
break
# check if disk allocation is used
-if comm_inst._core.storage_counter.isFull():
+if comm_inst.storage_counter.isFull():
logger.debug('Not looking up new blocks due to maximum amount of allowed disk space used')
break
peer = onlinepeers.pick_online_peer(comm_inst) # select random online peer
@@ -72,7 +72,7 @@ def lookup_blocks_from_communicator(comm_inst):
if not i in existingBlocks:
# if block does not exist on disk and is not already in block queue
if i not in comm_inst.blockQueue:
-if onionrproofs.hashMeetsDifficulty(i) and not comm_inst._core._blacklist.inBlacklist(i):
+if onionrproofs.hashMeetsDifficulty(i) and not comm_inst.blacklist.inBlacklist(i):
if len(comm_inst.blockQueue) <= 1000000:
comm_inst.blockQueue[i] = [peer] # add blocks to download queue
new_block_count += 1
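storage_counter and blacklist are now plain attributes of the communicator daemon rather than of _core. A sketch of the download-queue guard built from the checks in these two hunks (the helper name is hypothetical):

    # Sketch only: decide whether a block hash should enter the download queue.
    import onionrproofs

    def maybe_queue_block(comm_inst, block_hash, peer):
        if comm_inst.storage_counter.isFull():
            return False  # disk allocation used up; stop looking up blocks
        if not onionrproofs.hashMeetsDifficulty(block_hash):
            return False  # proof of work below the required difficulty
        if comm_inst.blacklist.inBlacklist(block_hash):
            return False  # block is blacklisted
        if len(comm_inst.blockQueue) <= 1000000:  # queue cap taken from the diff
            comm_inst.blockQueue[block_hash] = [peer]
            return True
        return False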

View file

@@ -24,15 +24,14 @@ from onionrutils import localcommand, epoch
def net_check(comm_inst):
'''Check if we are connected to the internet or not when we can't connect to any peers'''
rec = False # for detecting if we have received incoming connections recently
-c = comm_inst._core
if len(comm_inst.onlinePeers) == 0:
try:
-if (epoch.get_epoch() - int(localcommand.local_command(c, '/lastconnect'))) <= 60:
+if (epoch.get_epoch() - int(localcommand.local_command('/lastconnect'))) <= 60:
comm_inst.isOnline = True
rec = True
except ValueError:
pass
-if not rec and not netutils.checkNetwork(c, torPort=comm_inst.proxyPort):
+if not rec and not netutils.checkNetwork(torPort=comm_inst.proxyPort):
if not comm_inst.shutdown:
logger.warn('Network check failed, are you connected to the Internet, and is Tor working?')
comm_inst.isOnline = False
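local_command and checkNetwork lose their leading core argument here. A sketch of the recent-connection probe, with the error handling widened slightly since local_command may return a non-numeric reply:

    # Sketch only: was an incoming connection seen in the last 60 seconds?
    from onionrutils import localcommand, epoch

    def recently_connected(window=60):
        try:
            last = int(localcommand.local_command('/lastconnect'))
        except (TypeError, ValueError):
            return False  # daemon unreachable or reply not a number
        return (epoch.get_epoch() - last) <= window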

View file

@@ -27,7 +27,6 @@ class OnionrCommunicatorTimers:
self.requiresPeer = requiresPeer
self.daemonInstance = daemonInstance
self.maxThreads = maxThreads
-self._core = self.daemonInstance._core
self.args = myArgs
self.daemonInstance.timers.append(self)

View file

@@ -22,14 +22,13 @@ from onionrutils import stringvalidators, bytesconverter
from coredb import blockmetadb
def service_creator(daemon):
assert isinstance(daemon, communicator.OnionrCommunicatorDaemon)
-core = daemon._core
# Find socket connection blocks
# TODO cache blocks and only look at recently received ones
con_blocks = blockmetadb.get_blocks_by_type('con')
for b in con_blocks:
if not b in daemon.active_services:
-bl = onionrblockapi.Block(b, core=core, decrypt=True)
+bl = onionrblockapi.Block(b, decrypt=True)
bs = bytesconverter.bytes_to_str(bl.bcontent) + '.onion'
if stringvalidators.validate_pub_key(bl.signer) and stringvalidators.validate_transport(bs):
signer = bytesconverter.bytes_to_str(bl.signer)
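onionrblockapi.Block drops its core= parameter; the hash and the decrypt flag are enough. A sketch of reading a 'con' (connection) block with the new constructor (the helper name is hypothetical):

    # Sketch only: extract the signer and .onion target from a connection block.
    import onionrblockapi
    from onionrutils import bytesconverter, stringvalidators

    def service_target(block_hash):
        bl = onionrblockapi.Block(block_hash, decrypt=True)  # no core= argument
        address = bytesconverter.bytes_to_str(bl.bcontent) + '.onion'
        if stringvalidators.validate_pub_key(bl.signer) and \
                stringvalidators.validate_transport(address):
            return bytesconverter.bytes_to_str(bl.signer), address
        return None  # unsigned or malformed block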

View file

@@ -29,8 +29,7 @@ def upload_blocks_from_communicator(comm_inst):
triedPeers = []
finishedUploads = []
-core = comm_inst._core
-comm_inst.blocksToUpload = core._crypto.randomShuffle(comm_inst.blocksToUpload)
+comm_inst.blocksToUpload = comm_inst.crypto.randomShuffle(comm_inst.blocksToUpload)
if len(comm_inst.blocksToUpload) != 0:
for bl in comm_inst.blocksToUpload:
if not stringvalidators.validate_hash(bl):
@@ -46,8 +45,8 @@ def upload_blocks_from_communicator(comm_inst):
data = {'block': block.Block(bl).getRaw()}
proxyType = proxypicker.pick_proxy(peer)
logger.info("Uploading block to " + peer, terminal=True)
-if not basicrequests.do_post_request(core, url, data=data, proxyType=proxyType) == False:
-localcommand.local_command(core, 'waitforshare/' + bl, post=True)
+if not basicrequests.do_post_request(url, data=data, proxyType=proxyType) == False:
+localcommand.local_command('waitforshare/' + bl, post=True)
finishedUploads.append(bl)
for x in finishedUploads:
try:
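The upload path follows the same rule: do_post_request and local_command are called without a core handle, and the shuffle helper moves from core._crypto to comm_inst.crypto. A sketch of a single upload attempt using the names in this hunk (the wrapper function is hypothetical):

    # Sketch only: POST one block to a peer, then tell the daemon to wait for shares.
    import proxypicker
    from onionrutils import basicrequests, localcommand

    def upload_block(block_hash, peer, url, raw_block):
        proxy_type = proxypicker.pick_proxy(peer)
        data = {'block': raw_block}
        # do_post_request returns False on failure, per the check above
        if basicrequests.do_post_request(url, data=data, proxyType=proxy_type) is not False:
            localcommand.local_command('waitforshare/' + block_hash, post=True)
            return True
        return False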