refactoring communicator into module

Kevin Froman 2019-07-10 17:38:20 -05:00
parent be318f2403
commit 9bf6c76557
12 changed files with 205 additions and 102 deletions
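
Every file below follows the same pattern: helpers that used to be methods on the communicator daemon object (pickOnlinePeer, peerAction) become plain functions in submodules of a communicator package and take the daemon instance as their first argument. A rough sketch of the layout this implies; only onlinepeers, peeraction, and OnionrCommunicatorDaemon are confirmed by the diffs, the rest is assumption:

# Assumed layout of the communicator package after this refactor (hypothetical):
#
#   communicator/
#       __init__.py      # OnionrCommunicatorDaemon and its shared state
#       onlinepeers.py   # pick_online_peer(comm_inst) and related helpers
#       peeraction.py    # peer_action(comm_inst, peer, action, ...)
#
# Call sites import the helpers directly instead of calling daemon methods:
from communicator import onlinepeers, peeraction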

View file

@@ -21,6 +21,7 @@ import base64
import onionrproofs, logger
from etc import onionrvalues
from onionrutils import basicrequests, bytesconverter
+from communicator import onlinepeers
def announce_node(daemon):
'''Announce our node to our peers'''
@@ -39,7 +40,7 @@ def announce_node(daemon):
peer = i
break
else:
-peer = daemon.pickOnlinePeer()
+peer = onlinepeers.pick_online_peer(daemon)
for x in range(1):
if x == 1 and daemon._core.config.get('i2p.host'):
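
The announce path above is the first call site switched from daemon.pickOnlinePeer() to the module-level helper. A minimal sketch of what onlinepeers.pick_online_peer presumably does, assuming the daemon keeps the onlinePeers list the surrounding code already references; the body is illustrative, not the committed implementation:

import secrets

def pick_online_peer(comm_inst):
    # Illustrative sketch: pick a random connected peer from the daemon's pool,
    # or return an empty string when no peers are online yet.
    online_peers = comm_inst.onlinePeers
    if len(online_peers) == 0:
        return ''
    return online_peers[secrets.randbelow(len(online_peers))]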

View file

@@ -17,15 +17,12 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
-import time, sys
+import time, sys, secrets
import onionrexceptions, logger, onionrpeers
from utils import networkmerger
from onionrutils import stringvalidators, epoch
-# secrets module was added into standard lib in 3.6+
-if sys.version_info[0] == 3 and sys.version_info[1] < 6:
-    from dependencies import secrets
-elif sys.version_info[0] == 3 and sys.version_info[1] >= 6:
-    import secrets
+from communicator import peeraction
def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
config = comm_inst._core.config
retData = False
@@ -67,7 +64,7 @@ def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
if comm_inst.shutdown:
return
# Ping a peer,
-if comm_inst.peerAction(address, 'ping') == 'pong!':
+if peeraction.peer_action(comm_inst, address, 'ping') == 'pong!':
time.sleep(0.1)
if address not in mainPeerList:
# Add a peer to our list if it isn't already since it successfully connected
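
Besides routing the ping through peeraction.peer_action, this file drops the pre-3.6 fallback import of the vendored dependencies.secrets, so the standard-library secrets module is now assumed. A rough sketch of the peer_action signature the new call sites imply; the basicrequests call, its keyword names, and the proxyPort attribute are guesses, not taken from this commit:

from onionrutils import basicrequests

def peer_action(comm_inst, peer, action, max_resp_size=5242880):
    # Illustrative sketch: what used to be comm_inst.peerAction(peer, action)
    # now receives the communicator daemon explicitly as its first argument.
    if len(peer) == 0:
        return False
    url = 'http://%s/%s' % (peer, action)
    # Fetch over the daemon's socks proxy; the helper and its signature are assumed.
    return basicrequests.do_get_request(comm_inst._core, url,
                                        port=comm_inst.proxyPort,
                                        max_size=max_resp_size)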

View file

@@ -21,6 +21,7 @@ import communicator, onionrexceptions
import logger, onionrpeers
from onionrutils import blockmetadata, stringvalidators, validatemetadata
from . import shoulddownload
+from communicator import peeraction, onlinepeers
def download_blocks_from_communicator(comm_inst):
assert isinstance(comm_inst, communicator.OnionrCommunicatorDaemon)
@@ -47,14 +48,14 @@ def download_blocks_from_communicator(comm_inst):
comm_inst.currentDownloading.append(blockHash) # So we can avoid concurrent downloading in other threads of same block
if len(blockPeers) == 0:
-peerUsed = comm_inst.pickOnlinePeer()
+peerUsed = onlinepeers.pick_online_peer(comm_inst)
else:
blockPeers = comm_inst._core._crypto.randomShuffle(blockPeers)
peerUsed = blockPeers.pop(0)
if not comm_inst.shutdown and peerUsed.strip() != '':
logger.info("Attempting to download %s from %s..." % (blockHash[:12], peerUsed))
-content = comm_inst.peerAction(peerUsed, 'getdata/' + blockHash, max_resp_size=3000000) # block content from random peer (includes metadata)
+content = peeraction.peer_action(comm_inst, peerUsed, 'getdata/' + blockHash, max_resp_size=3000000) # block content from random peer (includes metadata)
if content != False and len(content) > 0:
try:
content = content.encode()

View file

@@ -19,7 +19,7 @@
'''
import logger
from onionrutils import stringvalidators
+from communicator import peeraction, onlinepeers
def lookup_new_peer_transports_with_communicator(comm_inst):
logger.info('Looking up new addresses...')
tryAmount = 1
@@ -29,8 +29,8 @@ def lookup_new_peer_transports_with_communicator(comm_inst):
if len(newPeers) > 10000:
# Don't get new peers if we have too many queued up
break
-peer = comm_inst.pickOnlinePeer()
-newAdders = comm_inst.peerAction(peer, action='pex')
+peer = onlinepeers.pick_online_peer(comm_inst)
+newAdders = peeraction.peer_action(comm_inst, peer, action='pex')
try:
newPeers = newAdders.split(',')
except AttributeError:

View file

@@ -19,6 +19,7 @@
'''
import logger, onionrproofs
from onionrutils import stringvalidators, epoch
+from communicator import peeraction, onlinepeers
def lookup_blocks_from_communicator(comm_inst):
logger.info('Looking up new blocks...')
@@ -39,7 +40,7 @@ def lookup_blocks_from_communicator(comm_inst):
if comm_inst._core.storage_counter.isFull():
logger.debug('Not looking up new blocks due to maximum amount of allowed disk space used')
break
-peer = comm_inst.pickOnlinePeer() # select random online peer
+peer = onlinepeers.pick_online_peer(comm_inst) # select random online peer
# if we've already tried all the online peers this time around, stop
if peer in triedPeers:
if len(comm_inst.onlinePeers) == len(triedPeers):
@@ -57,7 +58,7 @@
else:
listLookupCommand += '?date=%s' % (lastLookupTime,)
try:
-newBlocks = comm_inst.peerAction(peer, listLookupCommand) # get list of new block hashes
+newBlocks = peeraction.peer_action(comm_inst, peer, listLookupCommand) # get list of new block hashes
except Exception as error:
logger.warn('Could not get new blocks from %s.' % peer, error = error)
newBlocks = False

View file

@@ -21,6 +21,7 @@ import logger
from communicatorutils import proxypicker
import onionrblockapi as block
from onionrutils import localcommand, stringvalidators, basicrequests
+from communicator import onlinepeers
def upload_blocks_from_communicator(comm_inst):
# when inserting a block, we try to upload it to a few peers to add some deniability
@@ -35,7 +36,7 @@ def upload_blocks_from_communicator(comm_inst):
comm_inst.decrementThreadCount('uploadBlock')
return
for i in range(min(len(comm_inst.onlinePeers), 6)):
-peer = comm_inst.pickOnlinePeer()
+peer = onlinepeers.pick_online_peer(comm_inst)
if peer in triedPeers:
continue
triedPeers.append(peer)