progress in removing core

Branch: master
Kevin Froman 2019-07-18 12:40:48 -05:00
parent e69c8dbb60
commit 1775b96a04
24 changed files with 187 additions and 155 deletions

View File

@@ -20,9 +20,8 @@
 import base64, os
 import flask
 from gevent.pywsgi import WSGIServer
-import logger
 from onionrutils import epoch
-import httpapi
+import httpapi, filepaths, logger
 from . import register_private_blueprints
 class PrivateAPI:
     '''
@@ -41,9 +40,8 @@ class PrivateAPI:
         config = onionrInst.config
         self.config = config
         self.debug = debug
-        self._core = onionrInst.onionrCore
         self.startTime = epoch.get_epoch()
-        self._crypto = self._core._crypto
+        self._crypto = onionrInst.onionrCrypto
         app = flask.Flask(__name__)
         bindPort = int(config.get('client.client.port', 59496))
         self.bindPort = bindPort
@@ -53,7 +51,7 @@ class PrivateAPI:
         self.publicAPI = None # gets set when the thread calls our setter... bad hack but kinda necessary with flask
         #threading.Thread(target=PublicAPI, args=(self,)).start()
-        self.host = httpapi.apiutils.setbindip.set_bind_IP(self._core.privateApiHostFile, self._core)
+        self.host = httpapi.apiutils.setbindip.set_bind_IP(filepaths.private_API_host_file)
         logger.info('Running api on %s:%s' % (self.host, self.bindPort))
         self.httpServer = ''
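
This first file sets the pattern the whole commit follows: state that used to be reached through the shared Core object (self._core.privateApiHostFile, self._core._crypto) becomes either a plain module-level constant or a direct attribute of the Onionr instance. A minimal sketch of the idea, with illustrative values (the real module appears later in this commit as filepaths):

    # Sketch of a filepaths-style module: paths are computed once at import
    # time and read as plain attributes, so no Core instance needs to be
    # threaded through every constructor. Values here are illustrative.
    from utils import identifyhome

    home = identifyhome.identify_home()  # e.g. '/home/user/.onionr/'
    if not home.endswith('/'): home += '/'
    private_API_host_file = home + 'private-host.txt'

    # Callers then drop the Core parameter entirely:
    #     set_bind_IP(filepaths.private_API_host_file)
    # instead of:
    #     set_bind_IP(core.privateApiHostFile, core)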

View File

@@ -21,7 +21,8 @@ import time
 import flask
 from gevent.pywsgi import WSGIServer
 from httpapi import apiutils, security, fdsafehandler, miscpublicapi
-import logger, onionr
+import logger, onionr, filepaths
+from utils import gettransports
 class PublicAPI:
     '''
     The new client api server, isolated from the public api
@@ -32,9 +33,8 @@ class PublicAPI:
         app.config['MAX_CONTENT_LENGTH'] = 5 * 1024 * 1024
         self.i2pEnabled = config.get('i2p.host', False)
         self.hideBlocks = [] # Blocks to be denied sharing
-        self.host = apiutils.setbindip.set_bind_IP(clientAPI._core.publicApiHostFile, clientAPI._core)
-        self.torAdder = clientAPI._core.hsAddress
-        self.i2pAdder = clientAPI._core.i2pAddress
+        self.host = apiutils.setbindip.set_bind_IP(filepaths.public_API_host_file)
+        self.torAdder = gettransports.get_transports()[0]
         self.bindPort = config.get('client.public.port')
         self.lastRequest = 0
         self.hitCount = 0 # total rec requests to public api since server started
@@ -45,10 +45,6 @@ class PublicAPI:
         # Set instances, then startup our public api server
         clientAPI.setPublicAPIInstance(self)
-        while self.torAdder == '':
-            clientAPI._core.refreshFirstStartVars()
-            self.torAdder = clientAPI._core.hsAddress
-            time.sleep(0.1)
         app.register_blueprint(security.public.PublicAPISecurity(self).public_api_security_bp)
         app.register_blueprint(miscpublicapi.endpoints.PublicEndpoints(self).public_endpoints_bp)

View File

@@ -19,7 +19,7 @@
 along with this program. If not, see <https://www.gnu.org/licenses/>.
 '''
 import sys, os, time
-import core, config, logger, onionr
+import config, logger, onionr
 import onionrexceptions, onionrpeers, onionrevents as events, onionrplugins as plugins, onionrblockapi as block
 from . import onlinepeers
 from communicatorutils import servicecreator, onionrcommunicatortimers
@@ -29,8 +29,8 @@ from communicatorutils import daemonqueuehandler, announcenode, deniableinserts
 from communicatorutils import cooldownpeer, housekeeping, netcheck
 from onionrutils import localcommand, epoch
 from etc import humanreadabletime
-import onionrservices, onionr, onionrproofs
-from coredb import daemonqueue
+import onionrservices, onionr, filepaths
+from coredb import daemonqueue, dbfiles
 OnionrCommunicatorTimers = onionrcommunicatortimers.OnionrCommunicatorTimers
 config.reload()
@@ -48,7 +48,6 @@ class OnionrCommunicatorDaemon:
         # initialize core with Tor socks port being 3rd argument
         self.proxyPort = proxyPort
-        self._core = onionrInst.onionrCore
         self.blocksToUpload = []
@@ -84,7 +83,7 @@ class OnionrCommunicatorDaemon:
         self.dbTimestamps = {}
         # Clear the daemon queue for any dead messages
-        if os.path.exists(self._core.queueDB):
+        if os.path.exists(dbfiles.daemon_queue_db):
             daemonqueue.clear_daemon_queue()
         # Loads in and starts the enabled plugins
@@ -102,8 +101,8 @@ class OnionrCommunicatorDaemon:
         OnionrCommunicatorTimers(self, self.runCheck, 2, maxThreads=1)
         # Timers to periodically lookup new blocks and download them
-        OnionrCommunicatorTimers(self, self.lookupBlocks, self._core.config.get('timers.lookupBlocks', 25), requiresPeer=True, maxThreads=1)
-        OnionrCommunicatorTimers(self, self.getBlocks, self._core.config.get('timers.getBlocks', 30), requiresPeer=True, maxThreads=2)
+        OnionrCommunicatorTimers(self, self.lookupBlocks, config.get('timers.lookupBlocks', 25), requiresPeer=True, maxThreads=1)
+        OnionrCommunicatorTimers(self, self.getBlocks, config.get('timers.getBlocks', 30), requiresPeer=True, maxThreads=2)
         # Timer to reset the longest offline peer so contact can be attempted again
         OnionrCommunicatorTimers(self, onlinepeers.clear_offline_peer, 58, myArgs=[self])
@@ -125,7 +124,7 @@ class OnionrCommunicatorDaemon:
         # Setup direct connections
         if config.get('general.socket_servers', False):
-            self.services = onionrservices.OnionrServices(self._core)
+            self.services = onionrservices.OnionrServices()
             self.active_services = []
             self.service_greenlets = []
             OnionrCommunicatorTimers(self, servicecreator.service_creator, 5, maxThreads=50, myArgs=[self])
@@ -182,7 +181,7 @@ class OnionrCommunicatorDaemon:
         else:
             for server in self.service_greenlets:
                 server.stop()
-            localcommand.local_command(self._core, 'shutdown') # shutdown the api
+            localcommand.local_command('shutdown') # shutdown the api
             time.sleep(0.5)
     def lookupAdders(self):
@@ -211,7 +210,7 @@ class OnionrCommunicatorDaemon:
     def peerCleanup(self):
        '''This just calls onionrpeers.cleanupPeers, which removes dead or bad peers (offline too long, too slow)'''
-        onionrpeers.peer_cleanup(self._core)
+        onionrpeers.peer_cleanup()
         self.decrementThreadCount('peerCleanup')
     def getPeerProfileInstance(self, peer):
@@ -223,7 +222,7 @@ class OnionrCommunicatorDaemon:
                 break
         else:
             # if the peer's profile is not loaded, return a new one. connectNewPeer adds it to the list on connect
-            retData = onionrpeers.PeerProfiles(peer, self._core)
+            retData = onionrpeers.PeerProfiles(peer)
         return retData
     def getUptime(self):
@@ -249,7 +248,7 @@ def startCommunicator(onionrInst, proxyPort):
     OnionrCommunicatorDaemon(onionrInst, proxyPort)
 def run_file_exists(daemon):
-    if os.path.isfile(daemon._core.dataDir + '.runcheck'):
-        os.remove(daemon._core.dataDir + '.runcheck')
+    if os.path.isfile(filepaths.run_check_file):
+        os.remove(filepaths.run_check_file)
         return True
     return False
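
The timer changes above only swap self._core.config for the config module, but the call sites document the registration API in full. A sketch of what the arguments mean, inferred from these calls (the OnionrCommunicatorTimers class itself is unchanged by this commit):

    # Meanings inferred from the call sites above, not from the class body.
    OnionrCommunicatorTimers(
        self,                    # the communicator daemon instance
        self.lookupBlocks,       # callable to run on each tick
        config.get('timers.lookupBlocks', 25),  # interval in seconds
        requiresPeer=True,       # skip ticks while no peers are connected
        maxThreads=1)            # cap on concurrent runs of this callable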

View File

@@ -17,11 +17,14 @@
 You should have received a copy of the GNU General Public License
 along with this program. If not, see <https://www.gnu.org/licenses/>.
 '''
+from utils import readstatic, gettransports
+from coredb import keydb
+bootstrap_peers = readstatic.read_static('bootstrap-nodes.txt').split(',')
 def add_bootstrap_list_to_peer_list(comm_inst, peerList):
     '''
     Add the bootstrap list to the peer list (no duplicates)
     '''
-    for i in comm_inst._core.bootstrapList:
-        if i not in peerList and i not in comm_inst.offlinePeers and i != comm_inst._core.hsAddress and len(str(i).strip()) > 0:
+    for i in bootstrap_peers:
+        if i not in peerList and i not in comm_inst.offlinePeers and i != gettransports.get_transports()[0] and len(str(i).strip()) > 0:
             peerList.append(i)
-            comm_inst._core.addAddress(i)
+            keydb.addkeys.add_address(i)

View File

@@ -24,7 +24,7 @@ def get_online_peers(comm_inst):
     '''
     Manages the comm_inst.onlinePeers attribute list, connects to more peers if we have none connected
     '''
-    config = comm_inst._core.config
+    config = comm_inst.config
     logger.debug('Refreshing peer pool...')
     maxPeers = int(config.get('peers.max_connect', 10))
     needed = maxPeers - len(comm_inst.onlinePeers)

View File

@@ -17,6 +17,7 @@
 You should have received a copy of the GNU General Public License
 along with this program. If not, see <https://www.gnu.org/licenses/>.
 '''
+import secrets
 def pick_online_peer(comm_inst):
     '''randomly picks peer from pool without bias (using secrets module)'''
     retData = ''
@@ -26,7 +27,7 @@ def pick_online_peer(comm_inst):
             break
         try:
             # get a random online peer, securely. May get stuck in loop if network is lost or if all peers in pool magically disconnect at once
-            retData = comm_inst.onlinePeers[comm_inst._core._crypto.secrets.randbelow(peerLength)]
+            retData = comm_inst.onlinePeers[secrets.randbelow(peerLength)]
         except IndexError:
             pass
         else:
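
Dropping the _crypto wrapper loses nothing here: the standard library's secrets module draws from the OS CSPRNG, so the pick stays uniform and unpredictable. A quick standalone illustration:

    import secrets

    peers = ['a.onion', 'b.onion', 'c.onion']
    # randbelow(n) returns a uniform int in [0, n);
    # secrets.choice(peers) would be an equivalent one-liner.
    chosen = peers[secrets.randbelow(len(peers))]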

View File

@@ -21,6 +21,7 @@ import streamedrequests
 import logger
 from onionrutils import epoch, basicrequests
 from . import onlinepeers
+from coredb import keydb
 def peer_action(comm_inst, peer, action, data='', returnHeaders=False, max_resp_size=5242880):
     '''Perform a get request to a peer'''
     penalty_score = -10
@@ -30,9 +31,9 @@ def peer_action(comm_inst, peer, action, data='', returnHeaders=False, max_resp_
     if len(data) > 0:
         url += '&data=' + data
-    comm_inst._core.setAddressInfo(peer, 'lastConnectAttempt', epoch.get_epoch()) # mark the time we're trying to request this peer
+    keydb.transportinfo.set_address_info(peer, 'lastConnectAttempt', epoch.get_epoch()) # mark the time we're trying to request this peer
     try:
-        retData = basicrequests.do_get_request(comm_inst._core, url, port=comm_inst.proxyPort, max_size=max_resp_size)
+        retData = basicrequests.do_get_request(url, port=comm_inst.proxyPort, max_size=max_resp_size)
     except streamedrequests.exceptions.ResponseLimitReached:
         logger.warn('Request failed due to max response size being overflowed', terminal=True)
         retData = False
@@ -48,6 +49,6 @@ def peer_action(comm_inst, peer, action, data='', returnHeaders=False, max_resp_
     except ValueError:
         pass
     else:
-        comm_inst._core.setAddressInfo(peer, 'lastConnect', epoch.get_epoch())
+        keydb.transportinfo.set_address_info(peer, 'lastConnect', epoch.get_epoch())
         comm_inst.getPeerProfileInstance(peer).addScore(1)
     return retData # If returnHeaders, returns tuple of data, headers. if not, just data string
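
For context, a hypothetical call to the function above (the action name is illustrative, not from this diff): ask a connected peer for one of its public API endpoints over the daemon's Tor SOCKS port, with score bookkeeping handled inside peer_action:

    # Hypothetical usage sketch of peer_action.
    block_list = peer_action(comm_inst, 'example.onion', 'getblocklist')
    if block_list is False:
        # request failed or exceeded max_resp_size; the peer's score
        # was already penalized inside peer_action
        pass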

View File

@@ -22,7 +22,7 @@ import onionrproofs, logger
 from etc import onionrvalues
 from onionrutils import basicrequests, bytesconverter
 from communicator import onlinepeers
+from coredb import keydb
 def announce_node(daemon):
     '''Announce our node to our peers'''
     ov = onionrvalues.OnionrValues()
@@ -33,7 +33,7 @@ def announce_node(daemon):
     if len(daemon.announceCache) >= 10000:
         daemon.announceCache.popitem()
-    if daemon._core.config.get('general.security_level', 0) == 0:
+    if daemon.config.get('general.security_level', 0) == 0:
         # Announce to random online peers
         for i in daemon.onlinePeers:
             if not i in daemon.announceCache and not i in daemon.announceProgress:
@@ -43,18 +43,14 @@ def announce_node(daemon):
         peer = onlinepeers.pick_online_peer(daemon)
     for x in range(1):
-        if x == 1 and daemon._core.config.get('i2p.host'):
-            ourID = daemon._core.config.get('i2p.own_addr').strip()
-        else:
-            ourID = daemon._core.hsAddress.strip()
+        ourID = daemon.hsAddress
         url = 'http://' + peer + '/announce'
         data = {'node': ourID}
         combinedNodes = ourID + peer
         if ourID != 1:
-            #TODO: Extend existingRand for i2p
-            existingRand = bytesconverter.bytes_to_str(daemon._core.getAddressInfo(peer, 'powValue'))
+            existingRand = bytesconverter.bytes_to_str(keydb.addressinfo.get_address_info(peer, 'powValue'))
             # Reset existingRand if it no longer meets the minimum POW
             if type(existingRand) is type(None) or not existingRand.endswith('0' * ov.announce_pow):
                 existingRand = ''
@@ -77,10 +73,10 @@ def announce_node(daemon):
             daemon.announceCache[peer] = data['random']
             if not announceFail:
                 logger.info('Announcing node to ' + url)
-                if basicrequests.do_post_request(daemon._core, url, data) == 'Success':
+                if basicrequests.do_post_request(url, data) == 'Success':
                     logger.info('Successfully introduced node to ' + peer, terminal=True)
                     retData = True
-                    daemon._core.setAddressInfo(peer, 'introduced', 1)
-                    daemon._core.setAddressInfo(peer, 'powValue', data['random'])
+                    keydb.addressinfo.set_address_info(peer, 'introduced', 1)
+                    keydb.addressinfo.set_address_info(peer, 'powValue', data['random'])
     daemon.decrementThreadCount('announce_node')
     return retData

View File

@@ -22,9 +22,9 @@ import onionrexceptions, logger, onionrpeers
 from utils import networkmerger
 from onionrutils import stringvalidators, epoch
 from communicator import peeraction, bootstrappeers
+from coredb import keydb
 def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
-    config = comm_inst._core.config
+    config = comm_inst.config
     retData = False
     tried = comm_inst.offlinePeers
     if peer != '':
@@ -33,10 +33,10 @@ def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
         else:
             raise onionrexceptions.InvalidAddress('Will not attempt connection test to invalid address')
     else:
-        peerList = comm_inst._core.listAdders()
-        mainPeerList = comm_inst._core.listAdders()
-        peerList = onionrpeers.get_score_sorted_peer_list(comm_inst._core)
+        peerList = keydb.listkeys.list_adders()
+        mainPeerList = keydb.listkeys.list_adders()
+        peerList = onionrpeers.get_score_sorted_peer_list()
     # If we don't have enough peers connected or random chance, select new peers to try
     if len(peerList) < 8 or secrets.randbelow(4) == 3:
@@ -56,7 +56,7 @@ def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
         if not config.get('tor.v3onions') and len(address) == 62:
             continue
         # Don't connect to our own address
-        if address == comm_inst._core.hsAddress:
+        if address == comm_inst.hsAddress:
             continue
         # Don't connect to invalid address or if its already been tried/connected, or if its cooled down
         if len(address) == 0 or address in tried or address in comm_inst.onlinePeers or address in comm_inst.cooldownPeer:
@@ -68,7 +68,7 @@ def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
             time.sleep(0.1)
             if address not in mainPeerList:
                 # Add a peer to our list if it isn't already since it successfully connected
-                networkmerger.mergeAdders(address, comm_inst._core)
+                networkmerger.mergeAdders(address)
             if address not in comm_inst.onlinePeers:
                 logger.info('Connected to ' + address, terminal=True)
                 comm_inst.onlinePeers.append(address)
@@ -80,7 +80,7 @@ def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
                 if profile.address == address:
                     break
             else:
-                comm_inst.peerProfiles.append(onionrpeers.PeerProfiles(address, comm_inst._core))
+                comm_inst.peerProfiles.append(onionrpeers.PeerProfiles(address))
             break
         else:
             # Mark a peer as tried if they failed to respond to ping

View File

@@ -21,6 +21,7 @@ from onionrutils import epoch
 from communicator import onlinepeers
 def cooldown_peer(comm_inst):
     '''Randomly add an online peer to cooldown, so we can connect a new one'''
+    config = comm_inst.config
     onlinePeerAmount = len(comm_inst.onlinePeers)
     minTime = 300
     cooldownTime = 600
@@ -34,7 +35,7 @@ def cooldown_peer(comm_inst):
             del comm_inst.cooldownPeer[peer]
     # Cool down a peer, if we have max connections alive for long enough
-    if onlinePeerAmount >= comm_inst._core.config.get('peers.max_connect', 10, save = True):
+    if onlinePeerAmount >= config.get('peers.max_connect', 10, save = True):
         finding = True
         while finding:

View File

@@ -21,11 +21,12 @@ import logger
 import onionrevents as events
 from onionrutils import localcommand
 from coredb import daemonqueue
+import filepaths
 def handle_daemon_commands(comm_inst):
     cmd = daemonqueue.daemon_queue()
     response = ''
     if cmd is not False:
-        events.event('daemon_command', onionr = comm_inst._core.onionrInst, data = {'cmd' : cmd})
+        events.event('daemon_command', onionr = comm_inst.onionrInst, data = {'cmd' : cmd})
         if cmd[0] == 'shutdown':
             comm_inst.shutdown = True
         elif cmd[0] == 'announceNode':
@@ -35,13 +36,13 @@ def handle_daemon_commands(comm_inst):
                 logger.debug("No nodes connected. Will not introduce node.")
         elif cmd[0] == 'runCheck': # deprecated
             logger.debug('Status check; looks good.')
-            open(comm_inst._core.dataDir + '.runcheck', 'w+').close()
+            open(filepaths.run_check_file, 'w+').close() # run_check_file already ends in '.runcheck'
         elif cmd[0] == 'connectedPeers':
             response = '\n'.join(list(comm_inst.onlinePeers)).strip()
             if response == '':
                 response = 'none'
         elif cmd[0] == 'localCommand':
-            response = localcommand.local_command(comm_inst._core, cmd[1])
+            response = localcommand.local_command(cmd[1])
         elif cmd[0] == 'pex':
             for i in comm_inst.timers:
                 if i.timerFunction.__name__ == 'lookupAdders':
@@ -51,7 +52,7 @@ def handle_daemon_commands(comm_inst):
     if cmd[0] not in ('', None):
         if response != '':
-            localcommand.local_command(comm_inst._core, 'queueResponseAdd/' + cmd[4], post=True, postData={'data': response})
+            localcommand.local_command('queueResponseAdd/' + cmd[4], post=True, postData={'data': response})
     response = ''
     comm_inst.decrementThreadCount('handle_daemon_commands')

View File

@@ -22,9 +22,13 @@ import logger, onionrpeers
 from onionrutils import blockmetadata, stringvalidators, validatemetadata
 from . import shoulddownload
 from communicator import peeraction, onlinepeers
+import onionrcrypto, onionrstorage, onionrblacklist, storagecounter
 def download_blocks_from_communicator(comm_inst):
     assert isinstance(comm_inst, communicator.OnionrCommunicatorDaemon)
+    crypto = onionrcrypto.OnionrCrypto()
+    blacklist = onionrblacklist.OnionrBlackList()
+    storage_counter = storagecounter.StorageCounter()
     for blockHash in list(comm_inst.blockQueue):
         if len(comm_inst.onlinePeers) == 0:
             break
@@ -38,7 +42,7 @@ def download_blocks_from_communicator(comm_inst):
         if not shoulddownload.should_download(comm_inst, blockHash):
             continue
-        if comm_inst.shutdown or not comm_inst.isOnline or comm_inst._core.storage_counter.isFull():
+        if comm_inst.shutdown or not comm_inst.isOnline or storage_counter.isFull():
             # Exit loop if shutting down or offline, or disk allocation reached
             break
         # Do not download blocks being downloaded
@@ -50,7 +54,7 @@ def download_blocks_from_communicator(comm_inst):
         if len(blockPeers) == 0:
             peerUsed = onlinepeers.pick_online_peer(comm_inst)
         else:
-            blockPeers = comm_inst._core._crypto.randomShuffle(blockPeers)
+            blockPeers = crypto.randomShuffle(blockPeers)
             peerUsed = blockPeers.pop(0)
         if not comm_inst.shutdown and peerUsed.strip() != '':
@@ -62,7 +66,7 @@ def download_blocks_from_communicator(comm_inst):
             except AttributeError:
                 pass
-            realHash = comm_inst._core._crypto.sha3Hash(content)
+            realHash = crypto.sha3Hash(content)
             try:
                 realHash = realHash.decode() # bytes on some versions for some reason
             except AttributeError:
@@ -71,11 +75,11 @@ def download_blocks_from_communicator(comm_inst):
             content = content.decode() # decode here because sha3Hash needs bytes above
             metas = blockmetadata.get_block_metadata_from_data(content) # returns tuple(metadata, meta), meta is also in metadata
             metadata = metas[0]
-            if validatemetadata.validate_metadata(comm_inst._core, metadata, metas[2]): # check if metadata is valid, and verify nonce
-                if comm_inst._core._crypto.verifyPow(content): # check if POW is enough/correct
+            if validatemetadata.validate_metadata(metadata, metas[2]): # check if metadata is valid, and verify nonce
+                if crypto.verifyPow(content): # check if POW is enough/correct
                     logger.info('Attempting to save block %s...' % blockHash[:12])
                     try:
-                        comm_inst._core.setData(content)
+                        onionrstorage.setdata.set_data(content)
                     except onionrexceptions.DataExists:
                         logger.warn('Data is already set for %s ' % (blockHash,))
                     except onionrexceptions.DiskAllocationReached:
@@ -83,24 +87,24 @@ def download_blocks_from_communicator(comm_inst):
                         removeFromQueue = False
                     else:
                         blockmetadb.add_to_block_DB(blockHash, dataSaved=True) # add block to meta db
-                        blockmetadata.process_block_metadata(comm_inst._core, blockHash) # caches block metadata values to block database
+                        blockmetadata.process_block_metadata(blockHash) # caches block metadata values to block database
                 else:
                     logger.warn('POW failed for block %s.' % (blockHash,))
             else:
-                if comm_inst._core._blacklist.inBlacklist(realHash):
+                if blacklist.inBlacklist(realHash):
                     logger.warn('Block %s is blacklisted.' % (realHash,))
                 else:
                     logger.warn('Metadata for block %s is invalid.' % (blockHash,))
-                    comm_inst._core._blacklist.addToDB(blockHash)
+                    blacklist.addToDB(blockHash)
         else:
             # if block didn't meet expected hash
-            tempHash = comm_inst._core._crypto.sha3Hash(content) # lazy hack, TODO use var
+            tempHash = crypto.sha3Hash(content) # lazy hack, TODO use var
             try:
                 tempHash = tempHash.decode()
             except AttributeError:
                 pass
             # Punish peer for sharing invalid block (not always malicious, but is bad regardless)
-            onionrpeers.PeerProfiles(peerUsed, comm_inst._core).addScore(-50)
+            onionrpeers.PeerProfiles(peerUsed).addScore(-50)
             if tempHash != 'ed55e34cb828232d6c14da0479709bfa10a0923dca2b380496e6b2ed4f7a0253':
                 # Dumb hack for 404 response from peer. Don't log it if 404 since its likely not malicious or a critical error.
                 logger.warn('Block hash validation failed for ' + blockHash + ' got ' + tempHash)

View File

@@ -23,7 +23,7 @@ def should_download(comm_inst, block_hash):
     if block_hash in blockmetadb.get_block_list(): # Dont download block we have
         ret_data = False
     else:
-        if comm_inst._core._blacklist.inBlacklist(block_hash): # Dont download blacklisted block
+        if comm_inst.blacklist.inBlacklist(block_hash): # Dont download blacklisted block
             ret_data = False
     if ret_data is False:
         # Remove block from communicator queue if it shouldnt be downloaded

View File

@@ -41,17 +41,11 @@ class Core:
         # set data dir
         self.dataDir = identifyhome.identify_home()
-        try:
         self.usageFile = self.dataDir + 'disk-usage.txt'
         self.config = config
         self.maxBlockSize = 10000000 # max block size in bytes
         self.onionrInst = None
-        self.blockDataLocation = self.dataDir + 'blocks/'
-        self.blockDataDB = self.blockDataLocation + 'block-data.db'
-        self.publicApiHostFile = self.dataDir + 'public-host.txt'
-        self.privateApiHostFile = self.dataDir + 'private-host.txt'
-        self.addressDB = self.dataDir + 'address.db'
         self.hsAddress = ''
         self.i2pAddress = config.get('i2p.own_addr', None)
         self.bootstrapFileLocation = 'static-data/bootstrap-nodes.txt'
@@ -70,19 +64,6 @@ class Core:
         self.socketReasons = {}
         self.socketServerResponseData = {}
-        if not os.path.exists(self.dataDir):
-            os.mkdir(self.dataDir)
-        if not os.path.exists(self.dataDir + 'blocks/'):
-            os.mkdir(self.dataDir + 'blocks/')
-        if not os.path.exists(self.blockDB):
-            self.createBlockDB()
-        if not os.path.exists(self.forwardKeysFile):
-            dbcreator.createForwardKeyDB()
-        if not os.path.exists(self.peerDB):
-            self.createPeerDB()
-        if not os.path.exists(self.addressDB):
-            self.createAddressDB()
         if os.path.exists(self.dataDir + '/hs/hostname'):
             with open(self.dataDir + '/hs/hostname', 'r') as hs:
                 self.hsAddress = hs.read().strip()
@@ -102,20 +83,6 @@ class Core:
         self._blacklist = onionrblacklist.OnionrBlackList(self)
         self.serializer = serializeddata.SerializedData(self)
-        except Exception as error:
-            logger.error('Failed to initialize core Onionr library.', error=error, terminal=True)
-            logger.fatal('Cannot recover from error.', terminal=True)
-            sys.exit(1)
-        return
-    def refreshFirstStartVars(self):
-        '''
-        Hack to refresh some vars which may not be set on first start
-        '''
-        if os.path.exists(self.dataDir + '/hs/hostname'):
-            with open(self.dataDir + '/hs/hostname', 'r') as hs:
-                self.hsAddress = hs.read().strip()
     def addPeer(self, peerID, name=''):
         '''
         Adds a public key to the key database (misleading function name)

View File

@@ -29,6 +29,8 @@ def createAddressDB():
         2: Tor v2 (like facebookcorewwwi.onion)
         3: Tor v3
     '''
+    if os.path.exists(dbfiles.address_info_db):
+        raise FileExistsError("Address database already exists")
     conn = sqlite3.connect(dbfiles.address_info_db)
     c = conn.cursor()
     c.execute('''CREATE TABLE adders(
@@ -52,6 +54,8 @@ def createPeerDB():
     '''
     Generate the peer sqlite3 database and populate it with the peers table.
     '''
+    if os.path.exists(dbfiles.user_id_info_db):
+        raise FileExistsError("User database already exists")
     # generate the peer database
     conn = sqlite3.connect(dbfiles.user_id_info_db)
     c = conn.cursor()
@@ -146,9 +150,14 @@ def createDaemonDB():
     '''
     Create the daemon queue database
     '''
+    if os.path.exists(dbfiles.daemon_queue_db):
+        raise FileExistsError("Daemon queue db already exists")
     conn = sqlite3.connect(dbfiles.daemon_queue_db, timeout=10)
     c = conn.cursor()
     # Create table
     c.execute('''CREATE TABLE commands (id integer primary key autoincrement, command text, data text, date text, responseID text)''')
     conn.commit()
     conn.close()
+create_funcs = [createAddressDB, createPeerDB, createBlockDB, createBlockDataDB, createForwardKeyDB, createDaemonDB]

View File

@@ -1,6 +1,6 @@
 from utils import identifyhome
 home = identifyhome.identify_home()
-if not home.endswith('/') home += '/'
+if not home.endswith('/'): home += '/'
 usage_file = home + 'disk-usage.txt'
 block_data_location = home + 'blocks/'
@@ -9,3 +9,7 @@ private_API_host_file = home + 'private-host.txt'
 bootstrap_file_location = 'static-data/bootstrap-nodes.txt'
 data_nonce_file = home + 'block-nonces.dat'
 forward_keys_file = home + 'forward-keys.db'
+tor_hs_address_file = home + 'hs/hostname'
+run_check_file = home + '.runcheck'

View File

@@ -1,9 +1,7 @@
 import random, socket
 import config, logger
-def set_bind_IP(filePath='', core_inst=None):
+def set_bind_IP(filePath=''):
     '''Set a random localhost IP to a specified file (intended for private or public API localhost IPs)'''
-    if not core_inst is None:
-        config = core_inst.config
     if config.get('general.random_bind_ip', True):
         hostOctets = [str(127), str(random.randint(0x02, 0xFF)), str(random.randint(0x02, 0xFF)), str(random.randint(0x02, 0xFF))]

View File

@@ -57,7 +57,7 @@ def error(data, error = None, timestamp = True, prompt = True, terminal = False,
 def fatal(data, error = None, timestamp=True, prompt = True, terminal = False, level = settings.LEVEL_FATAL):
     if not error is None:
         debug('Error: ' + str(error) + parse_error(), terminal = terminal)
-    if get_level() <= level:
+    if settings.get_level() <= level:
         log('#', data, colors.bg.red + colors.fg.green + colors.bold, timestamp = timestamp, fd = sys.stderr, prompt = prompt, terminal = terminal)
 # returns a formatted error message

View File

@@ -18,13 +18,20 @@
 along with this program. If not, see <https://www.gnu.org/licenses/>.
 '''
 import filepaths
+import config
 def get_client_API_server():
+    config.reload()
     retData = ''
+    getconf = lambda: config.get('client.client.port')
+    port = getconf()
+    if port is None:
+        config.reload()
+        port = getconf()
     try:
         with open(filepaths.private_API_host_file, 'r') as host:
             hostname = host.read()
     except FileNotFoundError:
         raise FileNotFoundError
     else:
-        retData += '%s:%s' % (hostname, core_inst.config.get('client.client.port'))
+        retData += '%s:%s' % (hostname, port)
     return retData

View File

@@ -18,38 +18,40 @@
 along with this program. If not, see <https://www.gnu.org/licenses/>.
 '''
 import urllib, requests, time
-import logger
+import logger, config
 from . import getclientapiserver
 hostname = ''
 waited = 0
 maxWait = 3
+config.reload()
 def get_hostname():
+    global hostname, waited # module-level state is assigned below
     while hostname == '':
         try:
-            hostname = getclientapiserver.get_client_API_server(core_inst)
+            hostname = getclientapiserver.get_client_API_server()
         except FileNotFoundError:
             time.sleep(1)
             waited += 1
             if waited == maxWait:
                 return False
     return hostname
+hostname = get_hostname()
-def local_command(core_inst, command, data='', silent = True, post=False, postData = {}, maxWait=20):
+def local_command(command, data='', silent = True, post=False, postData = {}, maxWait=20):
     '''
     Send a command to the local http API server, securely. Intended for local clients, DO NOT USE for remote peers.
     '''
     # TODO: URL encode parameters, just as an extra measure. May not be needed, but should be added regardless.
+    global hostname # reassigned below if it was never resolved
-    if hostname == False:
+    if not hostname: # '' if unset, False if get_hostname() timed out
         hostname = get_hostname()
     if data != '':
         data = '&data=' + urllib.parse.quote_plus(data)
     payload = 'http://%s/%s%s' % (hostname, command, data)
     try:
         if post:
-            retData = requests.post(payload, data=postData, headers={'token': core_inst.config.get('client.webpassword'), 'Connection':'close'}, timeout=(maxWait, maxWait)).text
+            retData = requests.post(payload, data=postData, headers={'token': config.get('client.webpassword'), 'Connection':'close'}, timeout=(maxWait, maxWait)).text
         else:
-            retData = requests.get(payload, headers={'token': core_inst.config.get('client.webpassword'), 'Connection':'close'}, timeout=(maxWait, maxWait)).text
+            retData = requests.get(payload, headers={'token': config.get('client.webpassword'), 'Connection':'close'}, timeout=(maxWait, maxWait)).text
     except Exception as error:
         if not silent:
             logger.error('Failed to make local request (command: %s):%s' % (command, error), terminal=True)

View File

@@ -0,0 +1,17 @@
+from onionrutils import localcommand
+import config
+config.reload()
+running_detected = False # if we know the api server is running
+first_get = True
+def config_get(key):
+    global running_detected, first_get # both flags are reassigned below
+    ret_data = False
+    if running_detected or first_get:
+        first_get = False
+        ret_data = localcommand.local_command('/config/get/' + key)
+    if ret_data == False:
+        running_detected = False
+        ret_data = config.get(key)
+    else:
+        running_detected = True # the api server answered, so it is running
+    return ret_data
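
The helper prefers the running daemon's live view of the configuration (queried over the private API) and falls back to the on-disk copy when no server answers, remembering the failure so later calls skip the HTTP round trip. A hypothetical use (the key name is illustrative):

    # Returns the daemon's value if it is up, else the value read from disk.
    web_password = config_get('client.webpassword')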

View File

@@ -0,0 +1,13 @@
+import os
+from . import identifyhome
+import dbcreator, filepaths
+home = identifyhome.identify_home()
+if not os.path.exists(home):
+    os.mkdir(home)
+if not os.path.exists(filepaths.block_data_location):
+    os.mkdir(filepaths.block_data_location)
+for db in dbcreator.create_funcs:
+    try:
+        db()
+    except FileExistsError:
+        pass

View File

@@ -0,0 +1,9 @@
+import filepaths
+files = [filepaths.tor_hs_address_file]
+def get_transports():
+    '''Read our transport addresses (currently just the Tor hidden service hostname)'''
+    transports = []
+    for file in files:
+        with open(file, 'r') as transport_file:
+            transports.append(transport_file.read().strip())
+    return transports
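
Callers index the result for the Tor address, matching the public API server and the bootstrap filter earlier in this commit:

    from utils import gettransports

    tor_adder = gettransports.get_transports()[0]  # our hidden service hostname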

View File

@@ -0,0 +1,10 @@
+import os
+def read_static(file, ret_bin=False):
+    static_file = os.path.dirname(os.path.realpath(__file__)) + '/../static-data/' + file
+    if ret_bin:
+        mode = 'rb'
+    else:
+        mode = 'r'
+    with open(static_file, mode) as f:
+        return f.read()
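
bootstrappeers.py earlier in this commit shows the intended use: the bootstrap node list ships as a comma-separated static file.

    from utils import readstatic

    bootstrap_peers = readstatic.read_static('bootstrap-nodes.txt').split(',')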