+added python info to version command

+uploads now have sessions
master
Kevin Froman 2019-09-16 20:16:06 -05:00
parent 4b8fe7eeb3
commit 0405eda30e
14 changed files with 210 additions and 29 deletions

View File

@@ -43,7 +43,7 @@ class OnionrCommunicatorDaemon:
         self.config = config
         self.storage_counter = storagecounter.StorageCounter()
         self.isOnline = True # Assume we're connected to the internet
-        self.shared_state = shared_state
+        self.shared_state = shared_state # TooManyObjects module
         # list of timer instances
         self.timers = []
@@ -53,6 +53,8 @@ class OnionrCommunicatorDaemon:
         # Upload information, list of blocks to upload
         self.blocksToUpload = []
+        self.upload_session_manager = self.shared_state.get(uploadblocks.sessionmanager.BlockUploadSessionManager)
+        self.shared_state.share_object()
         # loop time.sleep delay in seconds
         self.delay = 1
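shared_state appears to act as a type-keyed object registry: get() hands back the single shared instance of a class (creating it on first request) and share_object() then exposes it to the rest of the daemon. A minimal sketch of that idea, using a hypothetical registry class rather than the real TooManyObjects module:

    # Hypothetical stand-in for the shared-state registry pattern used above (not the real TooManyObjects API)
    class SharedState:
        def __init__(self):
            self._objects = {}

        def get(self, cls):
            """Return the one shared instance of cls, creating it on first request."""
            if cls not in self._objects:
                self._objects[cls] = cls()
            return self._objects[cls]


    class BlockUploadSessionManagerStub:
        """Stand-in for the real session manager class."""


    shared_state = SharedState()
    a = shared_state.get(BlockUploadSessionManagerStub)  # created here
    b = shared_state.get(BlockUploadSessionManagerStub)  # same instance returned later
    assert a is b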

View File

@@ -44,7 +44,7 @@ def peer_action(comm_inst, peer, action, returnHeaders=False, max_resp_size=5242
         onlinepeers.remove_online_peer(comm_inst, peer)
         keydb.transportinfo.set_address_info(peer, 'lastConnectAttempt', epoch.get_epoch())
         if action != 'ping' and not comm_inst.shutdown:
-            logger.warn('Lost connection to ' + peer, terminal=True)
+            logger.warn(f'Lost connection to {peer}', terminal=True)
             onlinepeers.get_online_peers(comm_inst) # Will only add a new peer to pool if needed
     except ValueError:
         pass

View File

@@ -30,7 +30,8 @@ def download_blocks_from_communicator(comm_inst):
     blacklist = onionrblacklist.OnionrBlackList()
     storage_counter = storagecounter.StorageCounter()
     LOG_SKIP_COUNT = 50 # for how many iterations we skip logging the counter
-    count = 0
+    count: int = 0
+    metadata_validation_result: bool = False
     # Iterate the block queue in the communicator
     for blockHash in list(comm_inst.blockQueue):
         count += 1
@@ -80,7 +81,11 @@ def download_blocks_from_communicator(comm_inst):
             #content = content.decode() # decode here because sha3Hash needs bytes above
             metas = blockmetadata.get_block_metadata_from_data(content) # returns tuple(metadata, meta), meta is also in metadata
             metadata = metas[0]
-            if validatemetadata.validate_metadata(metadata, metas[2]): # check if metadata is valid, and verify nonce
+            try:
+                metadata_validation_result = validatemetadata.validate_metadata(metadata, metas[2])
+            except onionrexceptions.DataExists:
+                metadata_validation_result = False
+            if metadata_validation_result: # check if metadata is valid, and verify nonce
                 if onionrcrypto.cryptoutils.verify_POW(content): # check if POW is enough/correct
                     logger.info('Attempting to save block %s...' % blockHash[:12])
                     try:
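The try/except wrapper added here turns the DataExists exception (raised later in this commit by validate_metadata when a nonce has been seen before) into a plain False, so the download loop can skip the block instead of letting the exception escape. A self-contained sketch of that exception-to-boolean pattern, with hypothetical names:

    # Sketch of converting an exception into a boolean result (hypothetical names)
    class DataExists(Exception):
        """Raised when a nonce/block has been seen before."""

    def validate(metadata: dict) -> bool:
        if metadata.get("nonce") == "seen-before":
            raise DataExists
        return bool(metadata)

    def is_valid(metadata: dict) -> bool:
        try:
            return validate(metadata)
        except DataExists:
            return False  # already have it, so do not save it again

    assert is_valid({"nonce": "fresh"}) is True
    assert is_valid({"nonce": "seen-before"}) is False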

View File

@@ -3,6 +3,7 @@
     Upload blocks in the upload queue to peers from the communicator
 '''
+from __future__ import annotations
 '''
     This program is free software: you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
@@ -17,6 +18,9 @@
     You should have received a copy of the GNU General Public License
     along with this program.  If not, see <https://www.gnu.org/licenses/>.
 '''
+from typing import Union, TYPE_CHECKING
+import threading
 import logger
 from communicatorutils import proxypicker
 import onionrexceptions
@@ -24,13 +28,15 @@ import onionrblockapi as block
 from onionrutils import localcommand, stringvalidators, basicrequests
 from communicator import onlinepeers
 import onionrcrypto
-from . import session
+from . import sessionmanager

-def upload_blocks_from_communicator(comm_inst):
-    # when inserting a block, we try to upload it to a few peers to add some deniability
+def upload_blocks_from_communicator(comm_inst: OnionrCommunicatorDaemon):
+    """Accepts a communicator instance and uploads blocks from its upload queue"""
+    """when inserting a block, we try to upload
+    it to a few peers to add some deniability & increase functionality"""
     TIMER_NAME = "upload_blocks_from_communicator"
+    session_manager: sessionmanager.BlockUploadSessionManager = comm_inst.shared_state.get(sessionmanager.BlockUploadSessionManager)
     triedPeers = []
     finishedUploads = []
     comm_inst.blocksToUpload = onionrcrypto.cryptoutils.random_shuffle(comm_inst.blocksToUpload)
@@ -40,29 +46,44 @@ def upload_blocks_from_communicator(comm_inst):
             logger.warn('Requested to upload invalid block', terminal=True)
             comm_inst.decrementThreadCount(TIMER_NAME)
             return
+        session = session_manager.add_session(bl)
         for i in range(min(len(comm_inst.onlinePeers), 6)):
             peer = onlinepeers.pick_online_peer(comm_inst)
-            if peer in triedPeers:
-                continue
-            triedPeers.append(peer)
-            url = 'http://%s/upload' % (peer,)
             try:
-                #data = {'block': block.Block(bl).getRaw()}
+                session.peer_exists[peer]
+                continue
+            except KeyError:
+                pass
+            try:
+                if session.peer_fails[peer] > 3: continue
+            except KeyError:
+                pass
+            if peer in triedPeers: continue
+            triedPeers.append(peer)
+            url = f'http://{peer}/upload'
+            try:
                 data = block.Block(bl).getRaw()
             except onionrexceptions.NoDataAvailable:
                 finishedUploads.append(bl)
                 break
             proxyType = proxypicker.pick_proxy(peer)
-            logger.info("Uploading block %s to %s" % (bl[:8], peer), terminal=True)
+            logger.info(f"Uploading block {bl[:8]} to {peer}", terminal=True)
             resp = basicrequests.do_post_request(url, data=data, proxyType=proxyType, content_type='application/octet-stream')
             if not resp == False:
                 if resp == 'success':
-                    localcommand.local_command('waitforshare/' + bl, post=True)
-                    finishedUploads.append(bl)
+                    session.success()
+                    session.peer_exists[peer] = True
                 elif resp == 'exists':
-                    finishedUploads.append(bl)
+                    session.success()
+                    session.peer_exists[peer] = True
                 else:
-                    logger.warn('Failed to upload %s, reason: %s' % (bl[:8], resp[:150]), terminal=True)
+                    session.fail()
+                    session.fail_peer(peer)
+                    comm_inst.getPeerProfileInstance(peer).addScore(-5)
+                    logger.warn(f'Failed to upload {bl[:8]}, reason: {resp[:15]}', terminal=True)
+            else:
+                session.fail()
+    session_manager.clean_session()
     for x in finishedUploads:
         try:
             comm_inst.blocksToUpload.remove(x)
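The peer bookkeeping above probes session.peer_exists and session.peer_fails with try/except KeyError before falling back to the triedPeers list. The same skip conditions could be written with dict.get; a sketch for comparison (not what the commit uses):

    # Equivalent skip logic using dict.get instead of try/except KeyError (illustrative only)
    def should_skip(peer: str, peer_exists: dict, peer_fails: dict, tried: list) -> bool:
        if peer_exists.get(peer):          # peer already has this block
            return True
        if peer_fails.get(peer, 0) > 3:    # peer failed this block too many times
            return True
        if peer in tried:                  # already attempted this round
            return True
        return False

    assert should_skip("a.onion", {"a.onion": True}, {}, []) is True
    assert should_skip("b.onion", {}, {"b.onion": 4}, []) is True
    assert should_skip("c.onion", {}, {}, []) is False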

View File

@@ -3,6 +3,7 @@
     Virtual upload "sessions" for blocks
 """
+from __future__ import annotations
 """
     This program is free software: you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
@@ -33,10 +34,12 @@ class UploadSession:
         block_hash = reconstructhash.reconstruct_hash(block_hash)
         if not stringvalidators.validate_hash(block_hash): raise ValueError
+        self.start_time = epoch.get_epoch()
         self.block_hash = reconstructhash.deconstruct_hash(block_hash)
         self.total_fail_count: int = 0
         self.total_success_count: int = 0
         self.peer_fails = {}
+        self.peer_exists = {}

     def fail_peer(self, peer):
         try:
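Only the first line of fail_peer() is visible in this hunk; a simplified, hypothetical reading of how the session counters presumably fit together (not the file's actual method bodies):

    # Hypothetical, simplified version of the UploadSession bookkeeping shown above
    class UploadSessionSketch:
        def __init__(self, block_hash: str):
            self.block_hash = block_hash
            self.total_fail_count: int = 0
            self.total_success_count: int = 0
            self.peer_fails: dict = {}
            self.peer_exists: dict = {}

        def fail_peer(self, peer: str):
            # count failures per peer so the uploader can stop retrying it
            self.peer_fails[peer] = self.peer_fails.get(peer, 0) + 1

        def fail(self):
            self.total_fail_count += 1

        def success(self):
            self.total_success_count += 1

    s = UploadSessionSketch("abc123")
    s.fail_peer("peer1.onion")
    s.success()
    assert s.peer_fails["peer1.onion"] == 1 and s.total_success_count == 1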

View File

@@ -0,0 +1,85 @@
"""
    Onionr - Private P2P Communication

    Manager for upload 'sessions'
"""
from __future__ import annotations
"""
    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <https://www.gnu.org/licenses/>.
"""
from typing import Iterable, Union

from onionrutils import bytesconverter
from onionrutils import localcommand
from etc import onionrvalues
from etc import waitforsetvar
from utils import reconstructhash

from . import session


class BlockUploadSessionManager:
    """Holds block UploadSession instances. Optionally accepts an iterable of sessions to be added on init.

    Arguments: old_sessions: iterable of old UploadSession objects"""
    def __init__(self, old_sessions: Iterable = None):
        #self._too_many: TooMany = None
        if old_sessions is None:
            self.sessions = []
        else:
            self.sessions = old_sessions

    def add_session(self, session_or_block: Union[str, bytes, session.UploadSession]) -> session.UploadSession:
        """Create (or add existing) block upload session from a str/bytes block hex hash or an existing UploadSession"""
        if isinstance(session_or_block, session.UploadSession):
            if not session_or_block in self.sessions:
                self.sessions.append(session_or_block)
            return session_or_block
        try:
            return self.get_session(session_or_block)
        except KeyError:
            pass
        # convert bytes hash to str
        if isinstance(session_or_block, bytes): session_or_block = bytesconverter.bytes_to_str(session_or_block)
        # intentionally not elif
        if isinstance(session_or_block, str):
            new_session = session.UploadSession(session_or_block)
            self.sessions.append(new_session)
            return new_session

    def get_session(self, block_hash: Union[str, bytes]) -> session.UploadSession:
        block_hash = reconstructhash.deconstruct_hash(bytesconverter.bytes_to_str(block_hash))
        for session in self.sessions:
            if session.block_hash == block_hash: return session
        raise KeyError

    def clean_session(self, specific_session: Union[str, UploadSession] = None):
        comm_inst: OnionrCommunicatorDaemon = self._too_many.get_by_string("OnionrCommunicatorDaemon")
        sessions_to_delete = []
        if comm_inst.getUptime() < 120: return
        for session in self.sessions:
            if (session.total_success_count / len(comm_inst.onlinePeers)) >= onionrvalues.MIN_BLOCK_UPLOAD_PEER_PERCENT:
                sessions_to_delete.append(session)
        for session in sessions_to_delete:
            self.sessions.remove(session)
            # TODO cleanup to one round of search
            # Remove the blocks from the sessions, upload list, and waitforshare list
            try:
                comm_inst.blocksToUpload.remove(reconstructhash.reconstruct_hash(session.block_hash))
            except ValueError:
                pass
            try:
                comm_inst.blocksToUpload.remove(session.block_hash)
            except ValueError:
                pass
            localcommand.local_command(f'waitforshare/{session.block_hash}')
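Read together, add_session() accepts a hash as str or bytes, or an existing UploadSession, and always returns the single session tracked for that block, while get_session() raises KeyError when nothing matches. A rough usage sketch under those assumptions (the repo modules above must be importable, and the example hash is made up):

    # Rough usage sketch of the new session manager
    manager = BlockUploadSessionManager()

    ses = manager.add_session('aa11' * 16)    # str hex hash -> creates an UploadSession
    same = manager.add_session(b'aa11' * 16)  # bytes form of the same hash -> same session
    assert ses is same

    try:
        manager.get_session('ff00' * 16)      # unknown hash
    except KeyError:
        print('no session for that block yet')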

View File

@@ -31,6 +31,9 @@ MAX_BLOCK_CLOCK_SKEW = 120
 MAIN_PUBLIC_KEY_SIZE = 32
 ORIG_RUN_DIR_ENV_VAR = 'ORIG_ONIONR_RUN_DIR'

+# Block creation anonymization requirements
+MIN_BLOCK_UPLOAD_PEER_PERCENT = 0.1
+
 # Begin OnionrValues migrated values
 ANNOUNCE_POW = 5
 DEFAULT_EXPIRE = 2592000
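clean_session() above compares a session's success count against this fraction of the currently online peers (and only runs once the daemon has been up for 120 seconds), so with the default of 0.1 a block counts as shared once roughly one peer in ten has accepted it. A worked example:

    # How the threshold is applied in clean_session()
    MIN_BLOCK_UPLOAD_PEER_PERCENT = 0.1
    online_peers = 20
    total_success_count = 2

    done = (total_success_count / online_peers) >= MIN_BLOCK_UPLOAD_PEER_PERCENT
    assert done  # 2/20 == 0.1, so the session would be cleaned up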

View File

@ -1,4 +1,8 @@
def wait_for_set_var(obj, attribute): from __future__ import annotations
while True: from typing import Union, Generic
if hasattr(obj, attribute): from gevent import sleep
break def wait_for_set_var(obj, attribute, sleep_seconds: Union[int, float]=0):
"""Wait for an object to get an attribute with an optional sleep time"""
while not hasattr(obj, attribute):
if hasattr(obj, attribute): break
if sleep_seconds > 0: sleep(sleep_seconds)
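With sleep_seconds left at its default of 0 the rewritten helper busy-waits exactly like the old version; a positive value makes it yield via gevent between checks. A small usage sketch, assuming the module path used elsewhere in this commit:

    # Usage sketch: poll every 0.1s instead of spinning
    import threading, time
    from etc import waitforsetvar

    class Holder: pass
    holder = Holder()

    def set_later():
        time.sleep(0.2)
        holder.ready = True

    threading.Thread(target=set_later).start()
    waitforsetvar.wait_for_set_var(holder, 'ready', sleep_seconds=0.1)
    assert holder.ready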

View File

@@ -26,6 +26,7 @@ from netcontroller import NetController
 from serializeddata import SerializedData
 from onionrutils import mnemonickeys
 from onionrutils import bytesconverter
+from utils import reconstructhash

 pub_key = onionrcrypto.pub_key.replace('=', '')
@@ -78,6 +79,7 @@ class PrivateEndpoints:
         def waitforshare(name):
             '''Used to prevent the **public** api from sharing blocks we just created'''
             if not name.isalnum(): raise ValueError('block hash needs to be alpha numeric')
+            name = reconstructhash.reconstruct_hash(name)
             if name in client_api.publicAPI.hideBlocks:
                 client_api.publicAPI.hideBlocks.remove(name)
                 return Response("removed")
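reconstruct_hash() is applied so the value checked against hideBlocks matches the full-length hash the public API stores; the deconstruct/reconstruct pair presumably just strips and re-pads the leading zeros a proof-of-work block hash tends to start with. A hypothetical sketch of that assumed behavior (not the real utils module):

    # Assumed behavior of the deconstruct/reconstruct helpers (illustrative only)
    def deconstruct_hash(block_hash: str) -> str:
        return block_hash.lstrip('0')       # drop leading zeros to save space

    def reconstruct_hash(block_hash: str, length: int = 64) -> str:
        return block_hash.zfill(length)     # pad back to the full hex length

    h = '0000f00d' + 'a' * 56
    assert reconstruct_hash(deconstruct_hash(h)) == h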

View File

@@ -11,5 +11,11 @@ def version(verbosity = 5, function = logger.info):
     if verbosity >= 1:
         function(onionrvalues.ONIONR_TAGLINE, terminal=True)
     if verbosity >= 2:
-        function('Running on %s %s' % (platform.platform(), platform.release()), terminal=True)
+        pf = platform.platform()
+        release = platform.release()
+        python_imp = platform.python_implementation()
+        python_version = platform.python_version()
+        function(f'{python_imp} {python_version} on {pf} {release}', terminal=True)
     function('Onionr data dir: %s' % identifyhome.identify_home(), terminal=True)
+
+version.onionr_help = 'Shows environment details including Onionr version & data directory, OS and Python version'
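The new message prints the interpreter before the OS details; roughly what the f-string produces (values vary by machine):

    # Roughly what the new version output looks like
    import platform
    print(f'{platform.python_implementation()} {platform.python_version()} on '
          f'{platform.platform()} {platform.release()}')
    # e.g. "CPython 3.7.4 on Linux-5.2.0-x86_64-with-glibc2.29 5.2.0"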

View File

@@ -93,13 +93,14 @@ def validate_metadata(metadata, block_data) -> bool:
             try:
                 with open(filepaths.data_nonce_file, 'r') as nonceFile:
                     if nonce in nonceFile.read():
-                        ret_data = False # we've seen that nonce before, so we can't pass metadata
+                        # we've seen that nonce before, so we can't pass metadata
                         raise onionrexceptions.DataExists
             except FileNotFoundError:
                 ret_data = True
             except onionrexceptions.DataExists:
-                # do not set ret_data to True, because nonce has been seen before
-                pass
+                # do not set ret_data to True, because data has been seen before
+                logger.warn(f'{nonce} seen before')
+                raise onionrexceptions.DataExists
             else:
                 ret_data = True
         else:

View File

@@ -49,6 +49,8 @@ saveBtn.onclick = function(){
     }})
     .then((resp) => resp.text()) // Transform the data into text
     .then(function(data) {
-        alert('Config saved')
+        PNotify.success({
+            text: 'Config saved'
+        })
     })
 }

View File

@@ -13,6 +13,8 @@ document.getElementById('openSite').onclick = function(){
         window.location.href = '/site/' + hash
     }
     else{
-        alert('Invalid site hash')
+        PNotify.notice({
+            text: 'Invalid site hash'
+        })
     }
 }

View File

@@ -0,0 +1,45 @@
import sys, os
sys.path.append(".")
sys.path.append("onionr/")
import unittest, uuid, time, threading

TEST_DIR = 'testdata/%s-%s' % (uuid.uuid4(), os.path.basename(__file__)) + '/'
os.environ["ONIONR_HOME"] = TEST_DIR

from etc import waitforsetvar


def set_test_var_delay(obj, delay=0):
    if delay > 0: time.sleep(delay)
    obj.test_var = True


class TestWaitForSetVar(unittest.TestCase):
    def test_no_wait(self):
        test_object = lambda: None
        threading.Thread(target=set_test_var_delay, args=[test_object]).start()
        waitforsetvar.wait_for_set_var(test_object, 'test_var')
        self.assertTrue(test_object.test_var)

    def test_negative_wait(self):
        test_object = lambda: None
        threading.Thread(target=set_test_var_delay, args=[test_object, -1]).start()
        waitforsetvar.wait_for_set_var(test_object, 'test_var')
        self.assertTrue(test_object.test_var)

    def test_zero_wait(self):
        test_object = lambda: None
        threading.Thread(target=set_test_var_delay, args=[test_object, 0]).start()
        waitforsetvar.wait_for_set_var(test_object, 'test_var')
        self.assertTrue(test_object.test_var)

    def test_one_wait(self):
        test_object = lambda: None
        threading.Thread(target=set_test_var_delay, args=[test_object, 1]).start()
        waitforsetvar.wait_for_set_var(test_object, 'test_var')
        self.assertTrue(test_object.test_var)

    def test_three_wait(self):
        test_object = lambda: None
        threading.Thread(target=set_test_var_delay, args=[test_object, 3]).start()
        waitforsetvar.wait_for_set_var(test_object, 'test_var')
        self.assertTrue(test_object.test_var)


unittest.main()