progress in removing core
parent e69c8dbb60
commit 1775b96a04
24 changed files with 187 additions and 155 deletions
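Every hunk below follows one pattern: a call that goes through the shared core object (self._core, comm_inst._core, daemon._core) is replaced by a module-level helper (config, coredb.dbfiles, coredb.keydb, filepaths, or the stdlib secrets module). A minimal, self-contained Python sketch of that before/after shape, using made-up stand-ins (Core, config_get, DAEMON_QUEUE_DB) rather than the real Onionr APIs:

# Illustrative sketch only: toy stand-ins for the pattern in this commit,
# not the real Onionr modules or APIs.

# Before: a shared "core" object is threaded through every call site.
class Core:
    def __init__(self):
        self.dataDir = '/tmp/onionr-demo/'
        self.queueDB = self.dataDir + 'daemon-queue.db'
        self._config = {'timers.lookupBlocks': 25}

    def config_get(self, key, default=None):
        return self._config.get(key, default)

def lookup_interval_old(core):
    # every caller needs a core instance just to read one setting
    return core.config_get('timers.lookupBlocks', 25)

# After: small modules own their own state; callers import what they need.
_config = {'timers.lookupBlocks': 25}

def config_get(key, default=None):
    # stand-in for a module-level config.get()
    return _config.get(key, default)

DATA_DIR = '/tmp/onionr-demo/'
DAEMON_QUEUE_DB = DATA_DIR + 'daemon-queue.db'  # stand-in for dbfiles.daemon_queue_db

def lookup_interval_new():
    # no core parameter; the setting comes straight from the config module
    return config_get('timers.lookupBlocks', 25)

if __name__ == '__main__':
    assert lookup_interval_old(Core()) == lookup_interval_new() == 25

The call sites in the diff shrink the same way: the extra core argument disappears and each helper module owns its own state.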
@@ -19,7 +19,7 @@
 along with this program. If not, see <https://www.gnu.org/licenses/>.
 '''
 import sys, os, time
-import core, config, logger, onionr
+import config, logger, onionr
 import onionrexceptions, onionrpeers, onionrevents as events, onionrplugins as plugins, onionrblockapi as block
 from . import onlinepeers
 from communicatorutils import servicecreator, onionrcommunicatortimers
@@ -29,8 +29,8 @@ from communicatorutils import daemonqueuehandler, announcenode, deniableinserts
 from communicatorutils import cooldownpeer, housekeeping, netcheck
 from onionrutils import localcommand, epoch
 from etc import humanreadabletime
-import onionrservices, onionr, onionrproofs
-from coredb import daemonqueue
+import onionrservices, onionr, filepaths
+from coredb import daemonqueue, dbfiles
 OnionrCommunicatorTimers = onionrcommunicatortimers.OnionrCommunicatorTimers

 config.reload()
@@ -48,7 +48,6 @@ class OnionrCommunicatorDaemon:

 # initialize core with Tor socks port being 3rd argument
 self.proxyPort = proxyPort
-self._core = onionrInst.onionrCore

 self.blocksToUpload = []

@@ -84,7 +83,7 @@ class OnionrCommunicatorDaemon:
 self.dbTimestamps = {}

 # Clear the daemon queue for any dead messages
-if os.path.exists(self._core.queueDB):
+if os.path.exists(dbfiles.daemon_queue_db):
 daemonqueue.clear_daemon_queue()

 # Loads in and starts the enabled plugins
@@ -102,8 +101,8 @@ class OnionrCommunicatorDaemon:
 OnionrCommunicatorTimers(self, self.runCheck, 2, maxThreads=1)

 # Timers to periodically lookup new blocks and download them
-OnionrCommunicatorTimers(self, self.lookupBlocks, self._core.config.get('timers.lookupBlocks', 25), requiresPeer=True, maxThreads=1)
-OnionrCommunicatorTimers(self, self.getBlocks, self._core.config.get('timers.getBlocks', 30), requiresPeer=True, maxThreads=2)
+OnionrCommunicatorTimers(self, self.lookupBlocks, config.get('timers.lookupBlocks', 25), requiresPeer=True, maxThreads=1)
+OnionrCommunicatorTimers(self, self.getBlocks, config.get('timers.getBlocks', 30), requiresPeer=True, maxThreads=2)

 # Timer to reset the longest offline peer so contact can be attempted again
 OnionrCommunicatorTimers(self, onlinepeers.clear_offline_peer, 58, myArgs=[self])
@@ -125,7 +124,7 @@ class OnionrCommunicatorDaemon:

 # Setup direct connections
 if config.get('general.socket_servers', False):
-self.services = onionrservices.OnionrServices(self._core)
+self.services = onionrservices.OnionrServices()
 self.active_services = []
 self.service_greenlets = []
 OnionrCommunicatorTimers(self, servicecreator.service_creator, 5, maxThreads=50, myArgs=[self])
@@ -182,7 +181,7 @@ class OnionrCommunicatorDaemon:
 else:
 for server in self.service_greenlets:
 server.stop()
-localcommand.local_command(self._core, 'shutdown') # shutdown the api
+localcommand.local_command('shutdown') # shutdown the api
 time.sleep(0.5)

 def lookupAdders(self):
@@ -211,7 +210,7 @@ class OnionrCommunicatorDaemon:

 def peerCleanup(self):
 '''This just calls onionrpeers.cleanupPeers, which removes dead or bad peers (offline too long, too slow)'''
-onionrpeers.peer_cleanup(self._core)
+onionrpeers.peer_cleanup()
 self.decrementThreadCount('peerCleanup')

 def getPeerProfileInstance(self, peer):
@@ -223,7 +222,7 @@ class OnionrCommunicatorDaemon:
 break
 else:
 # if the peer's profile is not loaded, return a new one. connectNewPeer adds it the list on connect
-retData = onionrpeers.PeerProfiles(peer, self._core)
+retData = onionrpeers.PeerProfiles(peer)
 return retData

 def getUptime(self):
@@ -249,7 +248,7 @@ def startCommunicator(onionrInst, proxyPort):
 OnionrCommunicatorDaemon(onionrInst, proxyPort)

 def run_file_exists(daemon):
-if os.path.isfile(daemon._core.dataDir + '.runcheck'):
-os.remove(daemon._core.dataDir + '.runcheck')
+if os.path.isfile(filepaths.run_check_file):
+os.remove(filepaths.run_check_file)
 return True
 return False
@@ -17,11 +17,14 @@
 You should have received a copy of the GNU General Public License
 along with this program. If not, see <https://www.gnu.org/licenses/>.
 '''
+from utils import readstatic, gettransports
+from coredb import keydb
+bootstrap_peers = readstatic.read_static('bootstrap-nodes.txt').split(',')
 def add_bootstrap_list_to_peer_list(comm_inst, peerList):
 '''
 Add the bootstrap list to the peer list (no duplicates)
 '''
-for i in comm_inst._core.bootstrapList:
-if i not in peerList and i not in comm_inst.offlinePeers and i != comm_inst._core.hsAddress and len(str(i).strip()) > 0:
+for i in bootstrap_peers:
+if i not in peerList and i not in comm_inst.offlinePeers and i != gettransports.get_transports()[0] and len(str(i).strip()) > 0:
 peerList.append(i)
-comm_inst._core.addAddress(i)
+keydb.addkeys.add_address(i)
@@ -24,7 +24,7 @@ def get_online_peers(comm_inst):
 '''
 Manages the comm_inst.onlinePeers attribute list, connects to more peers if we have none connected
 '''
-config = comm_inst._core.config
+config = comm_inst.config
 logger.debug('Refreshing peer pool...')
 maxPeers = int(config.get('peers.max_connect', 10))
 needed = maxPeers - len(comm_inst.onlinePeers)
@@ -17,6 +17,7 @@
 You should have received a copy of the GNU General Public License
 along with this program. If not, see <https://www.gnu.org/licenses/>.
 '''
+import secrets
 def pick_online_peer(comm_inst):
 '''randomly picks peer from pool without bias (using secrets module)'''
 retData = ''
@@ -26,7 +27,7 @@ def pick_online_peer(comm_inst):
 break
 try:
 # get a random online peer, securely. May get stuck in loop if network is lost or if all peers in pool magically disconnect at once
-retData = comm_inst.onlinePeers[comm_inst._core._crypto.secrets.randbelow(peerLength)]
+retData = comm_inst.onlinePeers[secrets.randbelow(peerLength)]
 except IndexError:
 pass
 else:
@@ -21,6 +21,7 @@ import streamedrequests
 import logger
 from onionrutils import epoch, basicrequests
 from . import onlinepeers
+from coredb import keydb
 def peer_action(comm_inst, peer, action, data='', returnHeaders=False, max_resp_size=5242880):
 '''Perform a get request to a peer'''
 penalty_score = -10
@@ -30,9 +31,9 @@ def peer_action(comm_inst, peer, action, data='', returnHeaders=False, max_resp_
 if len(data) > 0:
 url += '&data=' + data

-comm_inst._core.setAddressInfo(peer, 'lastConnectAttempt', epoch.get_epoch()) # mark the time we're trying to request this peer
+keydb.transportinfo.set_address_info(peer, 'lastConnectAttempt', epoch.get_epoch()) # mark the time we're trying to request this peer
 try:
-retData = basicrequests.do_get_request(comm_inst._core, url, port=comm_inst.proxyPort, max_size=max_resp_size)
+retData = basicrequests.do_get_request(url, port=comm_inst.proxyPort, max_size=max_resp_size)
 except streamedrequests.exceptions.ResponseLimitReached:
 logger.warn('Request failed due to max response size being overflowed', terminal=True)
 retData = False
@@ -48,6 +49,6 @@ def peer_action(comm_inst, peer, action, data='', returnHeaders=False, max_resp_
 except ValueError:
 pass
 else:
-comm_inst._core.setAddressInfo(peer, 'lastConnect', epoch.get_epoch())
+keydb.transportinfo.set_address_info(peer, 'lastConnect', epoch.get_epoch())
 comm_inst.getPeerProfileInstance(peer).addScore(1)
 return retData # If returnHeaders, returns tuple of data, headers. if not, just data string