OnionrUtils fully removed (but not yet fully bug-free)
flow now uses a daemon thread for displaying output
This commit is contained in:
parent 909c002dc4
commit c7e06205b7
50 changed files with 280 additions and 330 deletions
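The pattern repeated across the hunks below: methods that used to hang off the OnionrUtils instance (comm_inst._core._utils.getEpoch(), .validateHash(), .doPostRequest(), ...) become plain module-level functions in the onionrutils package. As a rough illustration, here is a minimal sketch of what the new onionrutils/epoch.py helpers could look like, inferred only from the get_epoch()/get_rounded_epoch(roundS=60) call sites in this diff; the real module may differ:

    import math
    import time

    def get_epoch():
        '''Return the current unix time in whole seconds'''
        return math.floor(time.time())

    def get_rounded_epoch(roundS=60):
        '''Return the current epoch, rounded down to the nearest roundS seconds'''
        epoch = get_epoch()
        return epoch - (epoch % roundS)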
@@ -20,6 +20,7 @@
 import base64
 import onionrproofs, logger
 from etc import onionrvalues
+from onionrutils import basicrequests

 def announce_node(daemon):
     '''Announce our node to our peers'''
@@ -75,8 +76,8 @@ def announce_node(daemon):
         daemon.announceCache[peer] = data['random']
         if not announceFail:
             logger.info('Announcing node to ' + url)
-            if daemon._core._utils.doPostRequest(url, data) == 'Success':
-                logger.info('Successfully introduced node to ' + peer)
+            if basicrequests.do_post_request(daemon._core, url, data) == 'Success':
+                logger.info('Successfully introduced node to ' + peer, terminal=True)
                 retData = True
                 daemon._core.setAddressInfo(peer, 'introduced', 1)
                 daemon._core.setAddressInfo(peer, 'powValue', data['random'])

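Note the signature change above: the old doPostRequest was a method with implicit access to the core through self, so the new free function takes the core object explicitly as its first argument. A hedged usage sketch (the return behavior is inferred only from the == 'Success' and == False checks in this diff):

    from onionrutils import basicrequests

    def introduce(daemon, url, data):
        # Old (removed): daemon._core._utils.doPostRequest(url, data)
        # New: pass the core explicitly; the call sites suggest it returns
        # the response text on success, or False on failure.
        return basicrequests.do_post_request(daemon._core, url, data) == 'Success'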
@@ -20,7 +20,7 @@
 import time, sys
 import onionrexceptions, logger, onionrpeers
 from utils import networkmerger
-from onionrutils import stringvalidators
+from onionrutils import stringvalidators, epoch
 # secrets module was added into standard lib in 3.6+
 if sys.version_info[0] == 3 and sys.version_info[1] < 6:
     from dependencies import secrets
@@ -75,7 +75,7 @@ def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
             if address not in comm_inst.onlinePeers:
                 logger.info('Connected to ' + address, terminal=True)
                 comm_inst.onlinePeers.append(address)
-                comm_inst.connectTimes[address] = comm_inst._core._utils.getEpoch()
+                comm_inst.connectTimes[address] = epoch.get_epoch()
                 retData = address

                 # add peer to profile list if they're not in it

@@ -17,6 +17,7 @@
     You should have received a copy of the GNU General Public License
     along with this program. If not, see <https://www.gnu.org/licenses/>.
 '''
+from onionrutils import epoch
 def cooldown_peer(comm_inst):
     '''Randomly add an online peer to cooldown, so we can connect a new one'''
     onlinePeerAmount = len(comm_inst.onlinePeers)
@@ -28,7 +29,7 @@ def cooldown_peer(comm_inst):
     # Remove peers from cooldown that have been there long enough
     tempCooldown = dict(comm_inst.cooldownPeer)
     for peer in tempCooldown:
-        if (comm_inst._core._utils.getEpoch() - tempCooldown[peer]) >= cooldownTime:
+        if (epoch.get_epoch() - tempCooldown[peer]) >= cooldownTime:
             del comm_inst.cooldownPeer[peer]

     # Cool down a peer, if we have max connections alive for long enough
@@ -38,7 +39,7 @@ def cooldown_peer(comm_inst):
         while finding:
             try:
                 toCool = min(tempConnectTimes, key=tempConnectTimes.get)
-                if (comm_inst._core._utils.getEpoch() - tempConnectTimes[toCool]) < minTime:
+                if (epoch.get_epoch() - tempConnectTimes[toCool]) < minTime:
                     del tempConnectTimes[toCool]
                 else:
                     finding = False
@@ -46,6 +47,6 @@ def cooldown_peer(comm_inst):
                     break
             else:
                 comm_inst.removeOnlinePeer(toCool)
-                comm_inst.cooldownPeer[toCool] = comm_inst._core._utils.getEpoch()
+                comm_inst.cooldownPeer[toCool] = epoch.get_epoch()

     comm_inst.decrementThreadCount('cooldown_peer')

@@ -19,7 +19,7 @@
 '''
 import communicator, onionrexceptions
 import logger, onionrpeers
-from onionrutils import blockmetadata
+from onionrutils import blockmetadata, stringvalidators, validatemetadata

 def download_blocks_from_communicator(comm_inst):
     assert isinstance(comm_inst, communicator.OnionrCommunicatorDaemon)
@@ -48,7 +48,7 @@ def download_blocks_from_communicator(comm_inst):
             continue
         if comm_inst._core._blacklist.inBlacklist(blockHash):
             continue
-        if comm_inst._core._utils.storageCounter.isFull():
+        if comm_inst._core.storage_counter.isFull():
             break
         comm_inst.currentDownloading.append(blockHash) # So we can avoid concurrent downloading in other threads of same block
         if len(blockPeers) == 0:
@@ -75,7 +75,7 @@ def download_blocks_from_communicator(comm_inst):
                     content = content.decode() # decode here because sha3Hash needs bytes above
                     metas = blockmetadata.get_block_metadata_from_data(content) # returns tuple(metadata, meta), meta is also in metadata
                     metadata = metas[0]
-                    if comm_inst._core._utils.validateMetadata(metadata, metas[2]): # check if metadata is valid, and verify nonce
+                    if validatemetadata.validate_metadata(comm_inst._core, metadata, metas[2]): # check if metadata is valid, and verify nonce
                         if comm_inst._core._crypto.verifyPow(content): # check if POW is enough/correct
                             logger.info('Attempting to save block %s...' % blockHash[:12])
                             try:

@@ -20,6 +20,7 @@
 import sqlite3
 import logger
 from onionrusers import onionrusers
+from onionrutils import epoch
 def clean_old_blocks(comm_inst):
     '''Delete old blocks if our disk allocation is full/near full, and also expired blocks'''

@@ -29,7 +30,7 @@ def clean_old_blocks(comm_inst):
         comm_inst._core.removeBlock(bHash)
         logger.info('Deleted block: %s' % (bHash,))

-    while comm_inst._core._utils.storageCounter.isFull():
+    while comm_inst._core.storage_counter.isFull():
         oldest = comm_inst._core.getBlockList()[0]
         comm_inst._core._blacklist.addToDB(oldest)
         comm_inst._core.removeBlock(oldest)
@@ -41,7 +42,7 @@ def clean_keys(comm_inst):
     '''Delete expired forward secrecy keys'''
     conn = sqlite3.connect(comm_inst._core.peerDB, timeout=10)
     c = conn.cursor()
-    time = comm_inst._core._utils.getEpoch()
+    time = epoch.get_epoch()
     deleteKeys = []

     for entry in c.execute("SELECT * FROM forwardKeys WHERE expire <= ?", (time,)):

@@ -18,6 +18,8 @@
     along with this program. If not, see <https://www.gnu.org/licenses/>.
 '''
 import logger, onionrproofs
+from onionrutils import stringvalidators, epoch
+
 def lookup_blocks_from_communicator(comm_inst):
     logger.info('Looking up new blocks...')
     tryAmount = 2
@@ -34,7 +36,7 @@ def lookup_blocks_from_communicator(comm_inst):
         if not comm_inst.isOnline:
             break
         # check if disk allocation is used
-        if comm_inst._core._utils.storageCounter.isFull():
+        if comm_inst._core.storage_counter.isFull():
             logger.debug('Not looking up new blocks due to maximum amount of allowed disk space used')
             break
         peer = comm_inst.pickOnlinePeer() # select random online peer
@@ -60,11 +62,11 @@ def lookup_blocks_from_communicator(comm_inst):
                 logger.warn('Could not get new blocks from %s.' % peer, error = error)
                 newBlocks = False
             else:
-                comm_inst.dbTimestamps[peer] = comm_inst._core._utils.getRoundedEpoch(roundS=60)
+                comm_inst.dbTimestamps[peer] = epoch.get_rounded_epoch(roundS=60)
             if newBlocks != False:
                 # if request was a success
                 for i in newBlocks.split('\n'):
-                    if comm_inst._core._utils.validateHash(i):
+                    if stringvalidators.validate_hash(i):
                         # if newline separated string is valid hash
                         if not i in existingBlocks:
                             # if block does not exist on disk and is not already in block queue

@@ -20,14 +20,14 @@
 '''
 import logger
 from utils import netutils
-from onionrutils import localcommand
+from onionrutils import localcommand, epoch
 def net_check(comm_inst):
     '''Check if we are connected to the internet or not when we can't connect to any peers'''
     rec = False # for detecting if we have received incoming connections recently
     c = comm_inst._core
     if len(comm_inst.onlinePeers) == 0:
         try:
-            if (c._utils.getEpoch() - int(localcommand.local_command(c, '/lastconnect'))) <= 60:
+            if (epoch.get_epoch() - int(localcommand.local_command(c, '/lastconnect'))) <= 60:
                 comm_inst.isOnline = True
                 rec = True
         except ValueError:

@@ -20,16 +20,17 @@
 import logger
 from communicatorutils import proxypicker
 import onionrblockapi as block
-from onionrutils import localcommand
+from onionrutils import localcommand, stringvalidators, basicrequests

 def upload_blocks_from_communicator(comm_inst):
     # when inserting a block, we try to upload it to a few peers to add some deniability
     triedPeers = []
     finishedUploads = []
-    comm_inst.blocksToUpload = comm_inst._core._crypto.randomShuffle(comm_inst.blocksToUpload)
+    core = comm_inst._core
+    comm_inst.blocksToUpload = core._crypto.randomShuffle(comm_inst.blocksToUpload)
     if len(comm_inst.blocksToUpload) != 0:
         for bl in comm_inst.blocksToUpload:
-            if not comm_inst._core._utils.validateHash(bl):
+            if not stringvalidators.validate_hash(bl):
                 logger.warn('Requested to upload invalid block')
                 comm_inst.decrementThreadCount('uploadBlock')
                 return
@@ -42,8 +43,8 @@ def upload_blocks_from_communicator(comm_inst):
                 data = {'block': block.Block(bl).getRaw()}
                 proxyType = proxypicker.pick_proxy(peer)
                 logger.info("Uploading block to " + peer)
-                if not comm_inst._core._utils.doPostRequest(url, data=data, proxyType=proxyType) == False:
-                    localcommand.local_command(comm_inst._core, 'waitforshare/' + bl, post=True)
+                if not basicrequests.do_post_request(core, url, data=data, proxyType=proxyType) == False:
+                    localcommand.local_command(core, 'waitforshare/' + bl, post=True)
                     finishedUploads.append(bl)
     for x in finishedUploads:
         try:
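On the second half of the commit message: no hunk for the flow plugin appears above, so the following is only a hedged sketch of the stated change, moving output display onto a daemon thread so it cannot keep the process alive on exit. All names here are illustrative, not taken from the actual plugin code:

    import threading

    def show_flow_output():
        '''Poll for and display incoming flow messages (illustrative placeholder)'''
        ...

    display_thread = threading.Thread(target=show_flow_output)
    display_thread.daemon = True  # a daemon thread dies with the main thread
    display_thread.start()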