renamed onionr dir and bugfixes/linting progress

This commit is contained in:
Kevin Froman 2019-11-20 04:52:50 -06:00
parent 2b996da17f
commit 720efe4fca
226 changed files with 179 additions and 142 deletions

src/communicatorutils/README.md Executable file

@@ -0,0 +1,33 @@
# communicatorutils
The files in this submodule handle various subtasks and utilities for the Onionr communicator. A short scheduling sketch follows the file list below.
## Files:
announcenode.py: Uses a communicator instance to announce our transport address to connected nodes.
connectnewpeers.py: Takes a communicator instance and has it connect to as many peers as needed, and/or to a newly specified peer.
cooldownpeer.py: Randomly selects a connected peer in a communicator and disconnects it, for security and network balancing.
daemonqueuehandler.py: Checks for new commands in the daemon queue and processes them accordingly.
deniableinserts.py: Inserts fake blocks with the communicator for plausible deniability.
downloadblocks.py: Iterates a communicator instance's block download queue and attempts to download the blocks from online peers.
housekeeping.py: Cleans old blocks and expired forward secrecy keys.
lookupadders.py: Asks connected peers to share their lists of peer transport addresses.
lookupblocks.py: Looks up new blocks from connected peers using the communicator.
netcheck.py: Checks whether the node is online, based on communicator status and onion server ping results.
onionrcommunicatortimers.py: Creates a timer that launches a function on an interval, controls how many instances of the timer may run the function at once, and controls whether the timer runs the function in a thread.
proxypicker.py: Returns a string name for the appropriate proxy to use with a particular peer transport address.
servicecreator.py: Iterates connection blocks and creates new direct connection servers for them.
uploadblocks.py: Iterates a communicator's upload queue and uploads the blocks to connected peers.
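
For orientation, here is a minimal sketch (not part of this commit) of how one of these subtasks is typically scheduled. It assumes a daemon object that exposes the `timers`, `threadCounts`, `shutdown`, and `onlinePeers` attributes read by onionrcommunicatortimers.py; the import paths, the hourly interval, and the once-per-second tick that drives `processTimer()` are illustrative assumptions.

    from communicatorutils import announcenode
    from communicatorutils.onionrcommunicatortimers import OnionrCommunicatorTimers

    def schedule_announce(daemon):
        # The timer registers itself in daemon.timers and fires once its internal
        # count reaches `frequency`; requires_peer=True skips it while no peer is online.
        OnionrCommunicatorTimers(daemon, announcenode.announce_node, 3600,
                                 requires_peer=True, max_threads=1,
                                 my_args=[daemon])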


@@ -0,0 +1,86 @@
'''
Onionr - Private P2P Communication
Use a communicator instance to announce our transport address to connected nodes
'''
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import base64
import onionrproofs, logger
from etc import onionrvalues
from onionrutils import basicrequests, bytesconverter
from utils import gettransports
from netcontroller import NetController
from communicator import onlinepeers
from coredb import keydb
def announce_node(daemon):
'''Announce our node to our peers'''
ret_data = False
announce_fail = False
# Do not let announceCache get too large
if len(daemon.announceCache) >= 10000:
daemon.announceCache.popitem()
if daemon.config.get('general.security_level', 0) == 0:
# Announce to random online peers
for i in daemon.onlinePeers:
if not i in daemon.announceCache and not i in daemon.announceProgress:
peer = i
break
else:
peer = onlinepeers.pick_online_peer(daemon)
for x in range(1):
try:
ourID = gettransports.get()[0]
except IndexError:
break
url = 'http://' + peer + '/announce'
data = {'node': ourID}
combinedNodes = ourID + peer
if ourID != 1:
existingRand = bytesconverter.bytes_to_str(keydb.transportinfo.get_address_info(peer, 'powValue'))
# Reset existingRand if it no longer meets the minimum POW
if type(existingRand) is type(None) or not existingRand.endswith('0' * onionrvalues.ANNOUNCE_POW):
existingRand = ''
if peer in daemon.announceCache:
data['random'] = daemon.announceCache[peer]
elif len(existingRand) > 0:
data['random'] = existingRand
else:
daemon.announceProgress[peer] = True
proof = onionrproofs.DataPOW(combinedNodes, minDifficulty=onionrvalues.ANNOUNCE_POW)
del daemon.announceProgress[peer]
try:
data['random'] = base64.b64encode(proof.waitForResult()[1])
except TypeError:
# Happens when we failed to produce a proof
logger.error("Failed to produce a pow for announcing to " + peer)
announce_fail = True
else:
daemon.announceCache[peer] = data['random']
if not announce_fail:
logger.info('Announcing node to ' + url)
if basicrequests.do_post_request(url, data, port=daemon.shared_state.get(NetController).socksPort) == 'Success':
logger.info('Successfully introduced node to ' + peer, terminal=True)
ret_data = True
keydb.transportinfo.set_address_info(peer, 'introduced', 1)
keydb.transportinfo.set_address_info(peer, 'powValue', data['random'])
daemon.decrementThreadCount('announce_node')
return ret_data


@@ -0,0 +1,94 @@
'''
Onionr - Private P2P Communication
Connect a new peer to our communicator instance. Does so randomly if no peer is specified
'''
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import time, sys, secrets
import onionrexceptions, logger, onionrpeers
from utils import networkmerger, gettransports
from onionrutils import stringvalidators, epoch
from communicator import peeraction, bootstrappeers
from coredb import keydb
def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
config = comm_inst.config
retData = False
tried = comm_inst.offlinePeers
transports = gettransports.get()
if peer != '':
if stringvalidators.validate_transport(peer):
peerList = [peer]
else:
raise onionrexceptions.InvalidAddress('Will not attempt connection test to invalid address')
else:
peerList = keydb.listkeys.list_adders()
mainPeerList = keydb.listkeys.list_adders()
peerList = onionrpeers.get_score_sorted_peer_list()
# If we don't have enough peers connected or random chance, select new peers to try
if len(peerList) < 8 or secrets.randbelow(4) == 3:
tryingNew = []
for x in comm_inst.newPeers:
if x not in peerList:
peerList.append(x)
tryingNew.append(x)
for i in tryingNew:
comm_inst.newPeers.remove(i)
if len(peerList) == 0 or useBootstrap:
# Avoid duplicating bootstrap addresses in peerList
if config.get('general.use_bootstrap_list', True):
bootstrappeers.add_bootstrap_list_to_peer_list(comm_inst, peerList)
for address in peerList:
address = address.strip()
if not config.get('tor.v3onions') and len(address) == 62:
continue
# Don't connect to our own address
if address in transports:
continue
        # Don't connect to an invalid address, or one that has already been tried/connected or is cooling down
if len(address) == 0 or address in tried or address in comm_inst.onlinePeers or address in comm_inst.cooldownPeer:
continue
if comm_inst.shutdown:
return
# Ping a peer,
ret = peeraction.peer_action(comm_inst, address, 'ping')
if ret == 'pong!':
time.sleep(0.1)
if address not in mainPeerList:
# Add a peer to our list if it isn't already since it successfully connected
networkmerger.mergeAdders(address)
if address not in comm_inst.onlinePeers:
logger.info('Connected to ' + address, terminal=True)
comm_inst.onlinePeers.append(address)
comm_inst.connectTimes[address] = epoch.get_epoch()
retData = address
# add peer to profile list if they're not in it
for profile in comm_inst.peerProfiles:
if profile.address == address:
break
else:
comm_inst.peerProfiles.append(onionrpeers.PeerProfiles(address))
break
else:
# Mark a peer as tried if they failed to respond to ping
tried.append(address)
logger.debug('Failed to connect to %s: %s ' % (address, ret))
return retData


@@ -0,0 +1,54 @@
'''
Onionr - Private P2P Communication
Select a random online peer in a communicator instance and have them "cool down"
'''
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
from onionrutils import epoch
from communicator import onlinepeers
def cooldown_peer(comm_inst):
'''Randomly add an online peer to cooldown, so we can connect a new one'''
config = comm_inst.config
onlinePeerAmount = len(comm_inst.onlinePeers)
minTime = 300
cooldownTime = 600
toCool = ''
tempConnectTimes = dict(comm_inst.connectTimes)
# Remove peers from cooldown that have been there long enough
tempCooldown = dict(comm_inst.cooldownPeer)
for peer in tempCooldown:
if (epoch.get_epoch() - tempCooldown[peer]) >= cooldownTime:
del comm_inst.cooldownPeer[peer]
# Cool down a peer, if we have max connections alive for long enough
if onlinePeerAmount >= config.get('peers.max_connect', 10, save = True):
finding = True
while finding:
try:
toCool = min(tempConnectTimes, key=tempConnectTimes.get)
if (epoch.get_epoch() - tempConnectTimes[toCool]) < minTime:
del tempConnectTimes[toCool]
else:
finding = False
except ValueError:
break
else:
onlinepeers.remove_online_peer(comm_inst, toCool)
comm_inst.cooldownPeer[toCool] = epoch.get_epoch()
comm_inst.decrementThreadCount('cooldown_peer')


@@ -0,0 +1,73 @@
'''
Onionr - P2P Anonymous Storage Network
Handle daemon queue commands in the communicator
'''
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import logger
from onionrplugins import onionrevents as events
from onionrutils import localcommand
from coredb import daemonqueue
import filepaths
from . import restarttor
def handle_daemon_commands(comm_inst):
cmd = daemonqueue.daemon_queue()
response = ''
if cmd is not False:
events.event('daemon_command', data = {'cmd' : cmd})
if cmd[0] == 'shutdown':
comm_inst.shutdown = True
elif cmd[0] == 'runtimeTest':
comm_inst.shared_state.get_by_string("OnionrRunTestManager").run_tests()
elif cmd[0] == 'remove_from_insert_list':
try:
comm_inst.generating_blocks.remove(cmd[1])
except ValueError:
pass
elif cmd[0] == 'announceNode':
if len(comm_inst.onlinePeers) > 0:
comm_inst.announce(cmd[1])
else:
logger.debug("No nodes connected. Will not introduce node.")
elif cmd[0] == 'runCheck': # deprecated
logger.debug('Status check; looks good.')
open(filepaths.run_check_file + '.runcheck', 'w+').close()
elif cmd[0] == 'connectedPeers':
response = '\n'.join(list(comm_inst.onlinePeers)).strip()
if response == '':
response = 'none'
elif cmd[0] == 'localCommand':
response = localcommand.local_command(cmd[1])
elif cmd[0] == 'clearOffline':
comm_inst.offlinePeers = []
elif cmd[0] == 'restartTor':
restarttor.restart(comm_inst)
comm_inst.offlinePeers = []
elif cmd[0] == 'pex':
for i in comm_inst.timers:
if i.timerFunction.__name__ == 'lookupAdders':
i.count = (i.frequency - 1)
elif cmd[0] == 'uploadBlock':
comm_inst.blocksToUpload.append(cmd[1])
else:
logger.debug('Received daemon queue command unable to be handled: %s' % (cmd[0],))
if cmd[0] not in ('', None):
if response != '':
localcommand.local_command('queueResponseAdd/' + cmd[4], post=True, postData={'data': response})
response = ''
comm_inst.decrementThreadCount('handle_daemon_commands')


@@ -0,0 +1,32 @@
'''
Onionr - Private P2P Communication
Use the communicator to insert fake mail messages
'''
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import secrets
from etc import onionrvalues
import onionrblocks
def insert_deniable_block(comm_inst):
'''Insert a fake block in order to make it more difficult to track real blocks'''
fakePeer = ''
chance = 10
if secrets.randbelow(chance) == (chance - 1):
        # This relies on the libsodium primitives providing key privacy
fakePeer = onionrvalues.DENIABLE_PEER_ADDRESS
data = secrets.token_hex(secrets.randbelow(1024) + 1)
onionrblocks.insert(data, header='pm', encryptType='asym', asymPeer=fakePeer, disableForward=True, meta={'subject': 'foo'})
comm_inst.decrementThreadCount('insert_deniable_block')


@@ -0,0 +1,133 @@
'''
Onionr - Private P2P Communication
Download blocks using the communicator instance
'''
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import communicator, onionrexceptions
import logger, onionrpeers
from onionrutils import blockmetadata, stringvalidators, validatemetadata
from coredb import blockmetadb
from . import shoulddownload
from communicator import peeraction, onlinepeers
import onionrcrypto, onionrstorage
from onionrblocks import onionrblacklist, storagecounter
def download_blocks_from_communicator(comm_inst):
'''Use Onionr communicator instance to download blocks in the communicator's queue'''
assert isinstance(comm_inst, communicator.OnionrCommunicatorDaemon)
blacklist = onionrblacklist.OnionrBlackList()
storage_counter = storagecounter.StorageCounter()
LOG_SKIP_COUNT = 50 # for how many iterations we skip logging the counter
count: int = 0
metadata_validation_result: bool = False
# Iterate the block queue in the communicator
for blockHash in list(comm_inst.blockQueue):
count += 1
if len(comm_inst.onlinePeers) == 0:
break
triedQueuePeers = [] # List of peers we've tried for a block
try:
blockPeers = list(comm_inst.blockQueue[blockHash])
except KeyError:
blockPeers = []
removeFromQueue = True
if not shoulddownload.should_download(comm_inst, blockHash):
continue
if comm_inst.shutdown or not comm_inst.isOnline or storage_counter.is_full():
# Exit loop if shutting down or offline, or disk allocation reached
break
# Do not download blocks being downloaded
if blockHash in comm_inst.currentDownloading:
#logger.debug('Already downloading block %s...' % blockHash)
continue
comm_inst.currentDownloading.append(blockHash) # So we can avoid concurrent downloading in other threads of same block
if len(blockPeers) == 0:
peerUsed = onlinepeers.pick_online_peer(comm_inst)
else:
blockPeers = onionrcrypto.cryptoutils.random_shuffle(blockPeers)
peerUsed = blockPeers.pop(0)
if not comm_inst.shutdown and peerUsed.strip() != '':
logger.info("Attempting to download %s from %s..." % (blockHash[:12], peerUsed))
content = peeraction.peer_action(comm_inst, peerUsed, 'getdata/' + blockHash, max_resp_size=3000000) # block content from random peer (includes metadata)
if content != False and len(content) > 0:
try:
content = content.encode()
except AttributeError:
pass
realHash = onionrcrypto.hashers.sha3_hash(content)
try:
realHash = realHash.decode() # bytes on some versions for some reason
except AttributeError:
pass
if realHash == blockHash:
#content = content.decode() # decode here because sha3Hash needs bytes above
metas = blockmetadata.get_block_metadata_from_data(content) # returns tuple(metadata, meta), meta is also in metadata
metadata = metas[0]
try:
metadata_validation_result = validatemetadata.validate_metadata(metadata, metas[2])
except onionrexceptions.DataExists:
metadata_validation_result = False
if metadata_validation_result: # check if metadata is valid, and verify nonce
if onionrcrypto.cryptoutils.verify_POW(content): # check if POW is enough/correct
logger.info('Attempting to save block %s...' % blockHash[:12])
try:
onionrstorage.set_data(content)
except onionrexceptions.DataExists:
logger.warn('Data is already set for %s ' % (blockHash,))
except onionrexceptions.DiskAllocationReached:
logger.error('Reached disk allocation allowance, cannot save block %s.' % (blockHash,))
removeFromQueue = False
else:
blockmetadb.add_to_block_DB(blockHash, dataSaved=True) # add block to meta db
blockmetadata.process_block_metadata(blockHash) # caches block metadata values to block database
else:
logger.warn('POW failed for block %s.' % (blockHash,))
else:
if blacklist.inBlacklist(realHash):
logger.warn('Block %s is blacklisted.' % (realHash,))
else:
logger.warn('Metadata for block %s is invalid.' % (blockHash,))
blacklist.addToDB(blockHash)
else:
# if block didn't meet expected hash
tempHash = onionrcrypto.hashers.sha3_hash(content) # lazy hack, TODO use var
try:
tempHash = tempHash.decode()
except AttributeError:
pass
# Punish peer for sharing invalid block (not always malicious, but is bad regardless)
onionrpeers.PeerProfiles(peerUsed).addScore(-50)
if tempHash != 'ed55e34cb828232d6c14da0479709bfa10a0923dca2b380496e6b2ed4f7a0253':
# Dumb hack for 404 response from peer. Don't log it if 404 since its likely not malicious or a critical error.
logger.warn('Block hash validation failed for ' + blockHash + ' got ' + tempHash)
else:
removeFromQueue = False # Don't remove from queue if 404
if removeFromQueue:
try:
del comm_inst.blockQueue[blockHash] # remove from block queue both if success or false
if count == LOG_SKIP_COUNT:
                    logger.info('%s blocks remaining in queue' % (len(comm_inst.blockQueue),), terminal=True)
count = 0
except KeyError:
pass
comm_inst.currentDownloading.remove(blockHash)
comm_inst.decrementThreadCount('getBlocks')
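
To summarize the flow above, here is a condensed, illustrative sketch (not part of the commit) of the per-block verification path: hash check, metadata validation, proof-of-work check, then storage. It reuses only helpers already imported by this file and omits the queue, logging, and peer-scoring bookkeeping that the real function performs.

import onionrcrypto, onionrstorage
from onionrutils import blockmetadata, validatemetadata
from coredb import blockmetadb

def verify_and_store(block_hash: str, content: bytes) -> bool:
    # Reject content whose hash does not match the hash we requested
    real_hash = onionrcrypto.hashers.sha3_hash(content)
    if isinstance(real_hash, bytes):
        real_hash = real_hash.decode()
    if real_hash != block_hash:
        return False
    # Validate metadata (including the nonce) and the proof of work
    metas = blockmetadata.get_block_metadata_from_data(content)
    if not validatemetadata.validate_metadata(metas[0], metas[2]):
        return False
    if not onionrcrypto.cryptoutils.verify_POW(content):
        return False
    # Persist the block and cache its metadata; set_data may raise
    # DataExists or DiskAllocationReached, which the real code handles
    onionrstorage.set_data(content)
    blockmetadb.add_to_block_DB(block_hash, dataSaved=True)
    blockmetadata.process_block_metadata(block_hash)
    return True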


@@ -0,0 +1,37 @@
'''
Onionr - Private P2P Communication
Check if a block should be downloaded (if we already have it or its blacklisted or not)
'''
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
from coredb import blockmetadb
from onionrblocks import onionrblacklist
def should_download(comm_inst, block_hash):
    blacklist = onionrblacklist.OnionrBlackList()
    ret_data = True
    if block_hash in blockmetadb.get_block_list():  # Don't download a block we already have
        ret_data = False
    else:
        if blacklist.inBlacklist(block_hash):  # Don't download blacklisted blocks
            ret_data = False
    if ret_data is False:
        # Remove the block from the communicator queue if it shouldn't be downloaded
        try:
            del comm_inst.blockQueue[block_hash]
        except KeyError:
            pass
    return ret_data


@@ -0,0 +1,75 @@
'''
Onionr - Private P2P Communication
Cleanup old Onionr blocks and forward secrecy keys using the communicator. Ran from a timer usually
'''
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import sqlite3
import logger
from onionrusers import onionrusers
from onionrutils import epoch
from coredb import blockmetadb, dbfiles
import onionrstorage
from onionrstorage import removeblock
from onionrblocks import onionrblacklist
def __remove_from_upload(comm_inst, block_hash: str):
try:
comm_inst.blocksToUpload.remove(block_hash)
except ValueError:
pass
def clean_old_blocks(comm_inst):
'''Delete old blocks if our disk allocation is full/near full, and also expired blocks'''
blacklist = onionrblacklist.OnionrBlackList()
# Delete expired blocks
for bHash in blockmetadb.expiredblocks.get_expired_blocks():
blacklist.addToDB(bHash)
removeblock.remove_block(bHash)
onionrstorage.deleteBlock(bHash)
__remove_from_upload(comm_inst, bHash)
logger.info('Deleted block: %s' % (bHash,))
while comm_inst.storage_counter.is_full():
oldest = blockmetadb.get_block_list()[0]
blacklist.addToDB(oldest)
removeblock.remove_block(oldest)
onionrstorage.deleteBlock(oldest)
        __remove_from_upload(comm_inst, oldest)
logger.info('Deleted block: %s' % (oldest,))
comm_inst.decrementThreadCount('clean_old_blocks')
def clean_keys(comm_inst):
'''Delete expired forward secrecy keys'''
conn = sqlite3.connect(dbfiles.user_id_info_db, timeout=10)
c = conn.cursor()
time = epoch.get_epoch()
deleteKeys = []
for entry in c.execute("SELECT * FROM forwardKeys WHERE expire <= ?", (time,)):
logger.debug('Forward key: %s' % entry[1])
deleteKeys.append(entry[1])
for key in deleteKeys:
logger.debug('Deleting forward key %s' % key)
c.execute("DELETE from forwardKeys where forwardKey = ?", (key,))
conn.commit()
conn.close()
onionrusers.deleteExpiredKeys()
comm_inst.decrementThreadCount('clean_keys')


@@ -0,0 +1,55 @@
'''
Onionr - Private P2P Communication
Lookup new peer transport addresses using the communicator
'''
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import logger
from onionrutils import stringvalidators
from communicator import peeraction, onlinepeers
from utils import gettransports
def lookup_new_peer_transports_with_communicator(comm_inst):
logger.info('Looking up new addresses...')
tryAmount = 1
newPeers = []
transports = gettransports.get()
for i in range(tryAmount):
# Download new peer address list from random online peers
if len(newPeers) > 10000:
# Don't get new peers if we have too many queued up
break
peer = onlinepeers.pick_online_peer(comm_inst)
newAdders = peeraction.peer_action(comm_inst, peer, action='pex')
try:
newPeers = newAdders.split(',')
except AttributeError:
pass
else:
# Validate new peers are good format and not already in queue
invalid = []
for x in newPeers:
x = x.strip()
if not stringvalidators.validate_transport(x) or x in comm_inst.newPeers or x in transports:
# avoid adding if its our address
invalid.append(x)
for x in invalid:
try:
newPeers.remove(x)
except ValueError:
pass
comm_inst.newPeers.extend(newPeers)
comm_inst.decrementThreadCount('lookup_new_peer_transports_with_communicator')


@@ -0,0 +1,94 @@
'''
Onionr - Private P2P Communication
Lookup new blocks with the communicator using a random connected peer
'''
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import logger, onionrproofs
from onionrutils import stringvalidators, epoch
from communicator import peeraction, onlinepeers
from coredb import blockmetadb
from utils import reconstructhash
from onionrblocks import onionrblacklist
blacklist = onionrblacklist.OnionrBlackList()
def lookup_blocks_from_communicator(comm_inst):
logger.info('Looking up new blocks')
tryAmount = 2
newBlocks = ''
existingBlocks = blockmetadb.get_block_list() # List of existing saved blocks
triedPeers = [] # list of peers we've tried this time around
maxBacklog = 1560 # Max amount of *new* block hashes to have already in queue, to avoid memory exhaustion
lastLookupTime = 0 # Last time we looked up a particular peer's list
new_block_count = 0
for i in range(tryAmount):
listLookupCommand = 'getblocklist' # This is defined here to reset it each time
if len(comm_inst.blockQueue) >= maxBacklog:
break
if not comm_inst.isOnline:
break
# check if disk allocation is used
if comm_inst.storage_counter.is_full():
logger.debug('Not looking up new blocks due to maximum amount of allowed disk space used')
break
peer = onlinepeers.pick_online_peer(comm_inst) # select random online peer
# if we've already tried all the online peers this time around, stop
if peer in triedPeers:
if len(comm_inst.onlinePeers) == len(triedPeers):
break
else:
continue
triedPeers.append(peer)
# Get the last time we looked up a peer's stamp to only fetch blocks since then.
# Saved in memory only for privacy reasons
try:
lastLookupTime = comm_inst.dbTimestamps[peer]
except KeyError:
lastLookupTime = 0
else:
listLookupCommand += '?date=%s' % (lastLookupTime,)
try:
newBlocks = peeraction.peer_action(comm_inst, peer, listLookupCommand) # get list of new block hashes
except Exception as error:
logger.warn('Could not get new blocks from %s.' % peer, error = error)
newBlocks = False
else:
comm_inst.dbTimestamps[peer] = epoch.get_rounded_epoch(roundS=60)
if newBlocks != False:
# if request was a success
for i in newBlocks.split('\n'):
if stringvalidators.validate_hash(i):
i = reconstructhash.reconstruct_hash(i)
                    # if the newline-separated string is a valid hash
if not i in existingBlocks:
# if block does not exist on disk and is not already in block queue
if i not in comm_inst.blockQueue:
if onionrproofs.hashMeetsDifficulty(i) and not blacklist.inBlacklist(i):
if len(comm_inst.blockQueue) <= 1000000:
comm_inst.blockQueue[i] = [peer] # add blocks to download queue
new_block_count += 1
else:
if peer not in comm_inst.blockQueue[i]:
if len(comm_inst.blockQueue[i]) < 10:
comm_inst.blockQueue[i].append(peer)
if new_block_count > 0:
block_string = ""
if new_block_count > 1:
block_string = "s"
logger.info('Discovered %s new block%s' % (new_block_count, block_string), terminal=True)
comm_inst.download_blocks_timer.count = int(comm_inst.download_blocks_timer.frequency * 0.99)
comm_inst.decrementThreadCount('lookup_blocks_from_communicator')
return


@@ -0,0 +1,43 @@
'''
Onionr - Private P2P Communication
Determine if our node is able to use Tor based on the status of a communicator instance
and the result of pinging onion http servers
'''
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import logger
from utils import netutils
from onionrutils import localcommand, epoch
from . import restarttor
def net_check(comm_inst):
'''Check if we are connected to the internet or not when we can't connect to any peers'''
rec = False # for detecting if we have received incoming connections recently
if len(comm_inst.onlinePeers) == 0:
try:
if (epoch.get_epoch() - int(localcommand.local_command('/lastconnect'))) <= 60:
comm_inst.isOnline = True
rec = True
except ValueError:
pass
if not rec and not netutils.checkNetwork(torPort=comm_inst.proxyPort):
if not comm_inst.shutdown:
logger.warn('Network check failed, are you connected to the Internet, and is Tor working?', terminal=True)
restarttor.restart(comm_inst)
comm_inst.offlinePeers = []
comm_inst.isOnline = False
else:
comm_inst.isOnline = True
comm_inst.decrementThreadCount('net_check')


@@ -0,0 +1,79 @@
'''
Onionr - Private P2P Communication
This file contains timer control for the communicator
'''
from __future__ import annotations # thank you python, very cool
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import uuid
import threading
import onionrexceptions, logger
from typing import TYPE_CHECKING
from typing import Callable, NewType, Iterable
if TYPE_CHECKING:
from communicator import OnionrCommunicatorDaemon
CallFreqSeconds = NewType('CallFreqSeconds', int)
class OnionrCommunicatorTimers:
def __init__(self, daemon_inst: OnionrCommunicatorDaemon,
timer_function: Callable, frequency: CallFreqSeconds,
make_thread:bool=True, thread_amount:int=1, max_threads:int=5,
requires_peer:bool=False, my_args:Iterable=[]):
self.timer_function = timer_function
self.frequency = frequency
self.thread_amount = thread_amount
self.make_thread = make_thread
self.requires_peer = requires_peer
self.daemon_inst = daemon_inst
self.max_threads = max_threads
self.args = my_args
self.daemon_inst.timers.append(self)
self.count = 0
def processTimer(self):
# mark how many instances of a thread we have (decremented at thread end)
try:
self.daemon_inst.threadCounts[self.timer_function.__name__]
except KeyError:
self.daemon_inst.threadCounts[self.timer_function.__name__] = 0
# execute thread if it is time, and we are not missing *required* online peer
if self.count == self.frequency and not self.daemon_inst.shutdown:
try:
if self.requires_peer and len(self.daemon_inst.onlinePeers) == 0:
raise onionrexceptions.OnlinePeerNeeded
except onionrexceptions.OnlinePeerNeeded:
return
else:
if self.make_thread:
for i in range(self.thread_amount):
if self.daemon_inst.threadCounts[self.timer_function.__name__] >= self.max_threads:
logger.debug('%s is currently using the maximum number of threads, not starting another.' % self.timer_function.__name__)
else:
self.daemon_inst.threadCounts[self.timer_function.__name__] += 1
newThread = threading.Thread(target=self.timer_function, args=self.args, daemon=True,
name=self.timer_function.__name__ + ' - ' + str(uuid.uuid4()))
newThread.start()
else:
self.timer_function()
self.count = -1 # negative 1 because its incremented at bottom
self.count += 1
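
A hedged sketch (not part of the commit) of the driving loop these timers assume: something calls every registered timer's processTimer() on a fixed tick, so frequency is effectively measured in ticks. The once-per-second tick is an assumption; the attribute names mirror what the class above reads.

import time

def run_timers(daemon_inst):
    # Tick every registered timer once per second until the daemon shuts down
    while not daemon_inst.shutdown:
        for timer in list(daemon_inst.timers):
            timer.processTimer()
        time.sleep(1)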


@@ -0,0 +1,26 @@
'''
Onionr - Private P2P Communication
Just picks a proxy to use based on a peer's address
'''
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
def pick_proxy(peer_address):
    if peer_address.endswith('.onion'):
        return 'tor'
    elif peer_address.endswith('.i2p'):
        return 'i2p'
    raise ValueError(f"Peer address was not a string ending with an acceptable suffix: {peer_address}")
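
A small illustrative check of the mapping above; the import path is an assumption.

from communicatorutils import proxypicker

assert proxypicker.pick_proxy('exampleexampleex.onion') == 'tor'
assert proxypicker.pick_proxy('example.i2p') == 'i2p'
# Any other suffix raises ValueError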


@@ -0,0 +1,5 @@
import netcontroller

def restart(comm_inst):
    net = comm_inst.shared_state.get(netcontroller.NetController)
    net.killTor()
    net.startTor()


@@ -0,0 +1,45 @@
'''
Onionr - Private P2P Communication
Creates an onionr direct connection service by scanning all connection blocks
'''
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import communicator
from onionrblocks import onionrblockapi
import logger
from onionrutils import stringvalidators, bytesconverter
from coredb import blockmetadb
from onionrservices import server_exists
def service_creator(daemon):
assert isinstance(daemon, communicator.OnionrCommunicatorDaemon)
# Find socket connection blocks
# TODO cache blocks and only look at recently received ones
con_blocks = blockmetadb.get_blocks_by_type('con')
for b in con_blocks:
if not b in daemon.active_services:
bl = onionrblockapi.Block(b, decrypt=True)
bs = bytesconverter.bytes_to_str(bl.bcontent) + '.onion'
if server_exists(bl.signer):
continue
if stringvalidators.validate_pub_key(bl.signer) and stringvalidators.validate_transport(bs):
signer = bytesconverter.bytes_to_str(bl.signer)
daemon.active_services.append(b)
daemon.active_services.append(signer)
if not daemon.services.create_server(signer, bs, daemon):
daemon.active_services.remove(b)
daemon.active_services.remove(signer)
daemon.decrementThreadCount('service_creator')


@@ -0,0 +1,90 @@
'''
Onionr - Private P2P Communication
Upload blocks in the upload queue to peers from the communicator
'''
from __future__ import annotations
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
from typing import Union, TYPE_CHECKING
import logger
from communicatorutils import proxypicker
import onionrexceptions
from onionrblocks import onionrblockapi as block
from onionrutils import localcommand, stringvalidators, basicrequests
from communicator import onlinepeers
import onionrcrypto
from . import sessionmanager
if TYPE_CHECKING:
    from communicator import OnionrCommunicatorDaemon
def upload_blocks_from_communicator(comm_inst: OnionrCommunicatorDaemon):
"""Accepts a communicator instance and uploads blocks from its upload queue"""
"""when inserting a block, we try to upload
it to a few peers to add some deniability & increase functionality"""
TIMER_NAME = "upload_blocks_from_communicator"
session_manager: sessionmanager.BlockUploadSessionManager = comm_inst.shared_state.get(sessionmanager.BlockUploadSessionManager)
triedPeers = []
finishedUploads = []
comm_inst.blocksToUpload = onionrcrypto.cryptoutils.random_shuffle(comm_inst.blocksToUpload)
if len(comm_inst.blocksToUpload) != 0:
for bl in comm_inst.blocksToUpload:
if not stringvalidators.validate_hash(bl):
logger.warn('Requested to upload invalid block', terminal=True)
comm_inst.decrementThreadCount(TIMER_NAME)
return
session = session_manager.add_session(bl)
for i in range(min(len(comm_inst.onlinePeers), 6)):
peer = onlinepeers.pick_online_peer(comm_inst)
try:
session.peer_exists[peer]
continue
except KeyError:
pass
try:
if session.peer_fails[peer] > 3: continue
except KeyError:
pass
if peer in triedPeers: continue
triedPeers.append(peer)
url = f'http://{peer}/upload'
try:
data = block.Block(bl).getRaw()
except onionrexceptions.NoDataAvailable:
finishedUploads.append(bl)
break
proxyType = proxypicker.pick_proxy(peer)
logger.info(f"Uploading block {bl[:8]} to {peer}", terminal=True)
resp = basicrequests.do_post_request(url, data=data, proxyType=proxyType, content_type='application/octet-stream')
if not resp == False:
if resp == 'success':
session.success()
session.peer_exists[peer] = True
elif resp == 'exists':
session.success()
session.peer_exists[peer] = True
else:
session.fail()
session.fail_peer(peer)
comm_inst.getPeerProfileInstance(peer).addScore(-5)
logger.warn(f'Failed to upload {bl[:8]}, reason: {resp[:15]}', terminal=True)
else:
session.fail()
session_manager.clean_session()
for x in finishedUploads:
try:
comm_inst.blocksToUpload.remove(x)
except ValueError:
pass
comm_inst.decrementThreadCount(TIMER_NAME)


@@ -0,0 +1,54 @@
"""
Onionr - Private P2P Communication
Virtual upload "sessions" for blocks
"""
from __future__ import annotations
"""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
from typing import Union
from onionrutils import stringvalidators
from onionrutils import bytesconverter
from onionrutils import epoch
from utils import reconstructhash
class UploadSession:
"""Manages statistics for an Onionr block upload session
accepting a block hash (incl. unpadded) as an argument"""
def __init__(self, block_hash: Union[str, bytes]):
block_hash = bytesconverter.bytes_to_str(block_hash)
block_hash = reconstructhash.reconstruct_hash(block_hash)
if not stringvalidators.validate_hash(block_hash): raise ValueError
self.start_time = epoch.get_epoch()
self.block_hash = reconstructhash.deconstruct_hash(block_hash)
self.total_fail_count: int = 0
self.total_success_count: int = 0
self.peer_fails = {}
self.peer_exists = {}
def fail_peer(self, peer):
try:
self.peer_fails[peer] += 1
except KeyError:
self.peer_fails[peer] = 0
def fail(self):
self.total_fail_count += 1
def success(self):
self.total_success_count += 1
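
A brief, hedged usage sketch (not part of the commit) of how the upload code records results on a session; the hash and peer name are placeholders and the import path is an assumption.

from communicatorutils.uploadblocks.session import UploadSession

block_hash = 'ab' * 32               # placeholder 64-character hex hash
sess = UploadSession(block_hash)
sess.fail_peer('examplepeer.onion')  # per-peer failure tally
sess.fail()                          # session-wide failure counter
sess.success()                       # session-wide success counter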


@@ -0,0 +1,95 @@
"""
Onionr - Private P2P Communication
Manager for upload 'sessions'
"""
from __future__ import annotations
"""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
from typing import Iterable, Union
from onionrutils import bytesconverter
from onionrutils import localcommand
from etc import onionrvalues
from etc import waitforsetvar
from utils import reconstructhash
from . import session
class BlockUploadSessionManager:
"""Holds block UploadSession instances. Optionally accepts iterable of sessions to added on init
Arguments: old_session: iterable of old UploadSession objects"""
def __init__(self, old_sessions:Iterable=None):
#self._too_many: TooMany = None
if old_sessions is None:
self.sessions = []
else:
            self.sessions = old_sessions
    def add_session(self, session_or_block: Union[str, bytes, session.UploadSession]) -> session.UploadSession:
        """Create (or return an existing) upload session from a str/bytes block hex hash or an existing UploadSession."""
if isinstance(session_or_block, session.UploadSession):
if not session_or_block in self.sessions:
self.sessions.append(session_or_block)
return session_or_block
try:
return self.get_session(session_or_block)
except KeyError:
pass
# convert bytes hash to str
if isinstance(session_or_block, bytes): session_or_block = bytesconverter.bytes_to_str(session_or_block)
# intentionally not elif
if isinstance(session_or_block, str):
new_session = session.UploadSession(session_or_block)
self.sessions.append(new_session)
return new_session
    def get_session(self, block_hash: Union[str, bytes]) -> session.UploadSession:
block_hash = reconstructhash.deconstruct_hash(bytesconverter.bytes_to_str(block_hash))
for session in self.sessions:
if session.block_hash == block_hash: return session
raise KeyError
    def clean_session(self, specific_session: Union[str, session.UploadSession] = None):
comm_inst: OnionrCommunicatorDaemon = self._too_many.get_by_string("OnionrCommunicatorDaemon")
sessions_to_delete = []
if comm_inst.getUptime() < 120: return
onlinePeerCount = len(comm_inst.onlinePeers)
# If we have no online peers right now,
if onlinePeerCount == 0: return
for session in self.sessions:
# if over 50% of peers that were online for a session have become unavailable, don't kill sessions
if session.total_success_count > onlinePeerCount:
if onlinePeerCount / session.total_success_count >= 0.5: return
# Clean sessions if they have uploaded to enough online peers
if session.total_success_count <= 0: continue
if (session.total_success_count / onlinePeerCount) >= onionrvalues.MIN_BLOCK_UPLOAD_PEER_PERCENT:
sessions_to_delete.append(session)
for session in sessions_to_delete:
self.sessions.remove(session)
# TODO cleanup to one round of search
# Remove the blocks from the sessions, upload list, and waitforshare list
try:
comm_inst.blocksToUpload.remove(reconstructhash.reconstruct_hash(session.block_hash))
except ValueError:
pass
try:
comm_inst.blocksToUpload.remove(session.block_hash)
except ValueError:
pass
            localcommand.local_command(f'waitforshare/{session.block_hash}')