progress in removing core

parent e69c8dbb60
commit 1775b96a04

24 changed files with 187 additions and 155 deletions
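The hunks below all follow one pattern: state and helpers that were previously reached through the shared Core object (daemon._core / comm_inst._core) are replaced with plain instance attributes or with functions imported directly from the modules that now own them (coredb.keydb, onionrutils.basicrequests, onionrstorage, onionrcrypto, and so on). A minimal sketch of the pattern, using only names that appear in the diff; the wrapper function itself is illustrative and not repo code:

def security_level(daemon) -> int:
    # Before this commit the value was reached through the core object:
    #     daemon._core.config.get('general.security_level', 0)
    # After it, config hangs directly off the daemon/communicator instance.
    return daemon.config.get('general.security_level', 0)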
@@ -22,7 +22,7 @@ import onionrproofs, logger
 from etc import onionrvalues
 from onionrutils import basicrequests, bytesconverter
 from communicator import onlinepeers
+from coredb import keydb
 def announce_node(daemon):
     '''Announce our node to our peers'''
     ov = onionrvalues.OnionrValues()
@@ -33,7 +33,7 @@ def announce_node(daemon):
     if len(daemon.announceCache) >= 10000:
         daemon.announceCache.popitem()

-    if daemon._core.config.get('general.security_level', 0) == 0:
+    if daemon.config.get('general.security_level', 0) == 0:
         # Announce to random online peers
         for i in daemon.onlinePeers:
             if not i in daemon.announceCache and not i in daemon.announceProgress:
@@ -43,18 +43,14 @@ def announce_node(daemon):
             peer = onlinepeers.pick_online_peer(daemon)

         for x in range(1):
-            if x == 1 and daemon._core.config.get('i2p.host'):
-                ourID = daemon._core.config.get('i2p.own_addr').strip()
-            else:
-                ourID = daemon._core.hsAddress.strip()
+            ourID = daemon.hsAddress

             url = 'http://' + peer + '/announce'
             data = {'node': ourID}

             combinedNodes = ourID + peer
             if ourID != 1:
                 #TODO: Extend existingRand for i2p
-                existingRand = bytesconverter.bytes_to_str(daemon._core.getAddressInfo(peer, 'powValue'))
+                existingRand = bytesconverter.bytes_to_str(keydb.addressinfo.get_address_info(peer, 'powValue'))
                 # Reset existingRand if it no longer meets the minimum POW
                 if type(existingRand) is type(None) or not existingRand.endswith('0' * ov.announce_pow):
                     existingRand = ''
@@ -77,10 +73,10 @@ def announce_node(daemon):
             daemon.announceCache[peer] = data['random']
             if not announceFail:
                 logger.info('Announcing node to ' + url)
-                if basicrequests.do_post_request(daemon._core, url, data) == 'Success':
+                if basicrequests.do_post_request(url, data) == 'Success':
                     logger.info('Successfully introduced node to ' + peer, terminal=True)
                     retData = True
-                    daemon._core.setAddressInfo(peer, 'introduced', 1)
-                    daemon._core.setAddressInfo(peer, 'powValue', data['random'])
+                    keydb.addressinfo.set_address_info(peer, 'introduced', 1)
+                    keydb.addressinfo.set_address_info(peer, 'powValue', data['random'])
     daemon.decrementThreadCount('announce_node')
     return retData
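In announce_node, address metadata now goes through coredb.keydb.addressinfo instead of daemon._core.getAddressInfo / setAddressInfo, and basicrequests.do_post_request no longer takes a core instance. A hedged sketch of the resulting success path, condensed from the hunk above; the function name introduce and its parameters are illustrative rather than repo identifiers, and it assumes do_post_request returns the string 'Success' on success, as the diff implies:

from coredb import keydb
from onionrutils import basicrequests

def introduce(peer, our_id, pow_token):
    # Illustrative condensation of announce_node's success path.
    data = {'node': our_id, 'random': pow_token}
    if basicrequests.do_post_request('http://' + peer + '/announce', data) == 'Success':
        keydb.addressinfo.set_address_info(peer, 'introduced', 1)
        keydb.addressinfo.set_address_info(peer, 'powValue', pow_token)
        return True
    return False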

@@ -22,9 +22,9 @@ import onionrexceptions, logger, onionrpeers
 from utils import networkmerger
 from onionrutils import stringvalidators, epoch
 from communicator import peeraction, bootstrappeers
+from coredb import keydb
 def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
-    config = comm_inst._core.config
+    config = comm_inst.config
     retData = False
     tried = comm_inst.offlinePeers
     if peer != '':
@@ -33,10 +33,10 @@ def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
         else:
             raise onionrexceptions.InvalidAddress('Will not attempt connection test to invalid address')
     else:
-        peerList = comm_inst._core.listAdders()
+        peerList = keydb.listkeys.list_adders()

-    mainPeerList = comm_inst._core.listAdders()
-    peerList = onionrpeers.get_score_sorted_peer_list(comm_inst._core)
+    mainPeerList = keydb.listkeys.list_adders()
+    peerList = onionrpeers.get_score_sorted_peer_list()

     # If we don't have enough peers connected or random chance, select new peers to try
     if len(peerList) < 8 or secrets.randbelow(4) == 3:
@@ -56,7 +56,7 @@ def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
         if not config.get('tor.v3onions') and len(address) == 62:
             continue
         # Don't connect to our own address
-        if address == comm_inst._core.hsAddress:
+        if address == comm_inst.hsAddress:
             continue
         # Don't connect to invalid address or if its already been tried/connected, or if its cooled down
         if len(address) == 0 or address in tried or address in comm_inst.onlinePeers or address in comm_inst.cooldownPeer:
@@ -68,7 +68,7 @@ def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
             time.sleep(0.1)
             if address not in mainPeerList:
                 # Add a peer to our list if it isn't already since it successfully connected
-                networkmerger.mergeAdders(address, comm_inst._core)
+                networkmerger.mergeAdders(address)
             if address not in comm_inst.onlinePeers:
                 logger.info('Connected to ' + address, terminal=True)
                 comm_inst.onlinePeers.append(address)
@@ -80,7 +80,7 @@ def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
                 if profile.address == address:
                     break
             else:
-                comm_inst.peerProfiles.append(onionrpeers.PeerProfiles(address, comm_inst._core))
+                comm_inst.peerProfiles.append(onionrpeers.PeerProfiles(address))
             break
         else:
             # Mark a peer as tried if they failed to respond to ping

@@ -21,6 +21,7 @@ from onionrutils import epoch
 from communicator import onlinepeers
 def cooldown_peer(comm_inst):
     '''Randomly add an online peer to cooldown, so we can connect a new one'''
+    config = comm_inst.config
     onlinePeerAmount = len(comm_inst.onlinePeers)
     minTime = 300
     cooldownTime = 600
@@ -34,7 +35,7 @@ def cooldown_peer(comm_inst):
             del comm_inst.cooldownPeer[peer]

     # Cool down a peer, if we have max connections alive for long enough
-    if onlinePeerAmount >= comm_inst._core.config.get('peers.max_connect', 10, save = True):
+    if onlinePeerAmount >= config.get('peers.max_connect', 10, save = True):
         finding = True

         while finding:

@@ -21,11 +21,12 @@ import logger
 import onionrevents as events
 from onionrutils import localcommand
+from coredb import daemonqueue
+import filepaths
 def handle_daemon_commands(comm_inst):
     cmd = daemonqueue.daemon_queue()
     response = ''
     if cmd is not False:
-        events.event('daemon_command', onionr = comm_inst._core.onionrInst, data = {'cmd' : cmd})
+        events.event('daemon_command', onionr = comm_inst.onionrInst, data = {'cmd' : cmd})
         if cmd[0] == 'shutdown':
             comm_inst.shutdown = True
         elif cmd[0] == 'announceNode':
@@ -35,13 +36,13 @@ def handle_daemon_commands(comm_inst):
                 logger.debug("No nodes connected. Will not introduce node.")
         elif cmd[0] == 'runCheck': # deprecated
             logger.debug('Status check; looks good.')
-            open(comm_inst._core.dataDir + '.runcheck', 'w+').close()
+            open(filepaths.run_check_file + '.runcheck', 'w+').close()
         elif cmd[0] == 'connectedPeers':
             response = '\n'.join(list(comm_inst.onlinePeers)).strip()
             if response == '':
                 response = 'none'
         elif cmd[0] == 'localCommand':
-            response = localcommand.local_command(comm_inst._core, cmd[1])
+            response = localcommand.local_command(cmd[1])
         elif cmd[0] == 'pex':
             for i in comm_inst.timers:
                 if i.timerFunction.__name__ == 'lookupAdders':
@@ -51,7 +52,7 @@ def handle_daemon_commands(comm_inst):

         if cmd[0] not in ('', None):
             if response != '':
-                localcommand.local_command(comm_inst._core, 'queueResponseAdd/' + cmd[4], post=True, postData={'data': response})
+                localcommand.local_command('queueResponseAdd/' + cmd[4], post=True, postData={'data': response})
                 response = ''

     comm_inst.decrementThreadCount('handle_daemon_commands')

@@ -22,9 +22,13 @@ import logger, onionrpeers
 from onionrutils import blockmetadata, stringvalidators, validatemetadata
 from . import shoulddownload
 from communicator import peeraction, onlinepeers
+import onionrcrypto, onionrstorage, onionrblacklist, storagecounter

 def download_blocks_from_communicator(comm_inst):
     assert isinstance(comm_inst, communicator.OnionrCommunicatorDaemon)
+    crypto = onionrcrypto.OnionrCrypto()
+    blacklist = onionrblacklist.OnionrBlackList()
+    storage_counter = storagecounter.StorageCounter()
     for blockHash in list(comm_inst.blockQueue):
         if len(comm_inst.onlinePeers) == 0:
             break
@@ -38,7 +42,7 @@ def download_blocks_from_communicator(comm_inst):
         if not shoulddownload.should_download(comm_inst, blockHash):
             continue

-        if comm_inst.shutdown or not comm_inst.isOnline or comm_inst._core.storage_counter.isFull():
+        if comm_inst.shutdown or not comm_inst.isOnline or storage_counter.isFull():
             # Exit loop if shutting down or offline, or disk allocation reached
             break
         # Do not download blocks being downloaded
@@ -50,7 +54,7 @@ def download_blocks_from_communicator(comm_inst):
         if len(blockPeers) == 0:
             peerUsed = onlinepeers.pick_online_peer(comm_inst)
         else:
-            blockPeers = comm_inst._core._crypto.randomShuffle(blockPeers)
+            blockPeers = crypto.randomShuffle(blockPeers)
             peerUsed = blockPeers.pop(0)

         if not comm_inst.shutdown and peerUsed.strip() != '':
@@ -62,7 +66,7 @@ def download_blocks_from_communicator(comm_inst):
             except AttributeError:
                 pass

-            realHash = comm_inst._core._crypto.sha3Hash(content)
+            realHash = ccrypto.sha3Hash(content)
             try:
                 realHash = realHash.decode() # bytes on some versions for some reason
             except AttributeError:
@@ -71,11 +75,11 @@ def download_blocks_from_communicator(comm_inst):
                 content = content.decode() # decode here because sha3Hash needs bytes above
                 metas = blockmetadata.get_block_metadata_from_data(content) # returns tuple(metadata, meta), meta is also in metadata
                 metadata = metas[0]
-                if validatemetadata.validate_metadata(comm_inst._core, metadata, metas[2]): # check if metadata is valid, and verify nonce
-                    if comm_inst._core._crypto.verifyPow(content): # check if POW is enough/correct
+                if validatemetadata.validate_metadata(metadata, metas[2]): # check if metadata is valid, and verify nonce
+                    if crypto.verifyPow(content): # check if POW is enough/correct
                         logger.info('Attempting to save block %s...' % blockHash[:12])
                         try:
-                            comm_inst._core.setData(content)
+                            onionrstorage.setdata.set_data(content)
                         except onionrexceptions.DataExists:
                             logger.warn('Data is already set for %s ' % (blockHash,))
                         except onionrexceptions.DiskAllocationReached:
@@ -83,24 +87,24 @@ def download_blocks_from_communicator(comm_inst):
                             removeFromQueue = False
                         else:
                             blockmetadb.add_to_block_DB(blockHash, dataSaved=True) # add block to meta db
-                            blockmetadata.process_block_metadata(comm_inst._core, blockHash) # caches block metadata values to block database
+                            blockmetadata.process_block_metadata(blockHash) # caches block metadata values to block database
                     else:
                         logger.warn('POW failed for block %s.' % (blockHash,))
                 else:
-                    if comm_inst._core._blacklist.inBlacklist(realHash):
+                    if blacklist.inBlacklist(realHash):
                         logger.warn('Block %s is blacklisted.' % (realHash,))
                     else:
                         logger.warn('Metadata for block %s is invalid.' % (blockHash,))
-                        comm_inst._core._blacklist.addToDB(blockHash)
+                        blacklist.addToDB(blockHash)
             else:
                 # if block didn't meet expected hash
-                tempHash = comm_inst._core._crypto.sha3Hash(content) # lazy hack, TODO use var
+                tempHash = crypto.sha3Hash(content) # lazy hack, TODO use var
                 try:
                     tempHash = tempHash.decode()
                 except AttributeError:
                     pass
                 # Punish peer for sharing invalid block (not always malicious, but is bad regardless)
-                onionrpeers.PeerProfiles(peerUsed, comm_inst._core).addScore(-50)
+                onionrpeers.PeerProfiles(peerUsed).addScore(-50)
                 if tempHash != 'ed55e34cb828232d6c14da0479709bfa10a0923dca2b380496e6b2ed4f7a0253':
                     # Dumb hack for 404 response from peer. Don't log it if 404 since its likely not malicious or a critical error.
                     logger.warn('Block hash validation failed for ' + blockHash + ' got ' + tempHash)
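Rather than reaching through comm_inst._core for the crypto, blacklist, and storage-counter singletons, the block download code now constructs them directly and calls onionrstorage.setdata.set_data to persist block content. (One added line calls ccrypto.sha3Hash, which reads like a typo for the local crypto instance, though that is only a guess.) A sketch of the new setup, under the assumption, taken from the added lines, that these constructors take no arguments; the can_accept helper is illustrative and not repo code:

import onionrcrypto, onionrblacklist, storagecounter

crypto = onionrcrypto.OnionrCrypto()               # hashing and PoW checks
blacklist = onionrblacklist.OnionrBlackList()      # known-bad block hashes
storage_counter = storagecounter.StorageCounter()  # tracks disk allocation use

def can_accept(content: bytes) -> bool:
    # Illustrative helper: accept a block only if disk space remains and its
    # proof of work verifies, mirroring the checks in the download loop above.
    return not storage_counter.isFull() and bool(crypto.verifyPow(content))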

@@ -23,7 +23,7 @@ def should_download(comm_inst, block_hash):
     if block_hash in blockmetadb.get_block_list(): # Dont download block we have
         ret_data = False
     else:
-        if comm_inst._core._blacklist.inBlacklist(block_hash): # Dont download blacklisted block
+        if comm_inst.blacklist.inBlacklist(block_hash): # Dont download blacklisted block
             ret_data = False
     if ret_data is False:
         # Remove block from communicator queue if it shouldnt be downloaded