added readme for communicator
parent
ae435bd5f9
commit
636cf3a8d1
|
@ -0,0 +1,15 @@
|
||||||
|
# Onionr Communicator
|
||||||
|
|
||||||
|
Onionr communicator is the Onionr client. It "connects" to remote Onionr peers and does things such as:
|
||||||
|
|
||||||
|
* Finding new peers
|
||||||
|
* Uploading blocks
|
||||||
|
* Downloading blocks
|
||||||
|
* Daemon maintenance/housekeeping
|
||||||
|
|
||||||
|
## Files
|
||||||
|
|
||||||
|
* \_\_init\_\_.py: Contains the main communicator code. Inits and launches the communicator and sets up the timers
|
||||||
|
* peeraction.py: Contains a function to send commands to remote peers
|
||||||
|
* bootstrappers.py: Adds peers from the bootstrap list to the communicator to try to connect to them
|
||||||
|
* onlinepers: Management of the online peer pool for the communicator
|
|
@ -20,26 +20,27 @@
|
||||||
import streamedrequests
|
import streamedrequests
|
||||||
import logger
|
import logger
|
||||||
from onionrutils import epoch, basicrequests
|
from onionrutils import epoch, basicrequests
|
||||||
from . import onlinepeers
|
|
||||||
from coredb import keydb
|
from coredb import keydb
|
||||||
def peer_action(comm_inst, peer, action, data='', returnHeaders=False, max_resp_size=5242880):
|
from . import onlinepeers
|
||||||
|
def peer_action(comm_inst, peer, action, returnHeaders=False, max_resp_size=5242880):
|
||||||
'''Perform a get request to a peer'''
|
'''Perform a get request to a peer'''
|
||||||
penalty_score = -10
|
penalty_score = -10
|
||||||
if len(peer) == 0:
|
if not peer:
|
||||||
return False
|
return False
|
||||||
url = 'http://%s/%s' % (peer, action)
|
url = 'http://%s/%s' % (peer, action)
|
||||||
if len(data) > 0:
|
|
||||||
url += '&data=' + data
|
|
||||||
|
|
||||||
keydb.transportinfo.set_address_info(peer, 'lastConnectAttempt', epoch.get_epoch()) # mark the time we're trying to request this peer
|
# mark the time we're trying to request this peer
|
||||||
|
keydb.transportinfo.set_address_info(peer, 'lastConnectAttempt', epoch.get_epoch())
|
||||||
|
|
||||||
try:
|
try:
|
||||||
retData = basicrequests.do_get_request( url, port=comm_inst.proxyPort, max_size=max_resp_size)
|
ret_data = basicrequests.do_get_request(url, port=comm_inst.proxyPort,
|
||||||
|
max_size=max_resp_size)
|
||||||
except streamedrequests.exceptions.ResponseLimitReached:
|
except streamedrequests.exceptions.ResponseLimitReached:
|
||||||
logger.warn('Request failed due to max response size being overflowed', terminal=True)
|
logger.warn('Request failed due to max response size being overflowed', terminal=True)
|
||||||
retData = False
|
ret_data = False
|
||||||
penalty_score = -100
|
penalty_score = -100
|
||||||
# if request failed, (error), mark peer offline
|
# if request failed, (error), mark peer offline
|
||||||
if retData == False:
|
if not ret_data:
|
||||||
try:
|
try:
|
||||||
comm_inst.getPeerProfileInstance(peer).addScore(penalty_score)
|
comm_inst.getPeerProfileInstance(peer).addScore(penalty_score)
|
||||||
onlinepeers.remove_online_peer(comm_inst, peer)
|
onlinepeers.remove_online_peer(comm_inst, peer)
|
||||||
|
@ -52,4 +53,4 @@ def peer_action(comm_inst, peer, action, data='', returnHeaders=False, max_resp_
|
||||||
peer_profile = comm_inst.getPeerProfileInstance(peer)
|
peer_profile = comm_inst.getPeerProfileInstance(peer)
|
||||||
peer_profile.update_connect_time()
|
peer_profile.update_connect_time()
|
||||||
peer_profile.addScore(1)
|
peer_profile.addScore(1)
|
||||||
return retData # If returnHeaders, returns tuple of data, headers. if not, just data string
|
return ret_data # If returnHeaders, returns tuple of data, headers. if not, just data string
|
||||||
|
|
Loading…
Reference in New Issue