progress in removing core

master
Kevin Froman 2019-07-22 00:24:42 -05:00
parent 26c3b519c7
commit a74f2c5051
18 changed files with 83 additions and 38 deletions

View File

@@ -214,7 +214,7 @@ class OnionrCommunicatorDaemon:
     def peerCleanup(self):
         '''This just calls onionrpeers.cleanupPeers, which removes dead or bad peers (offline too long, too slow)'''
-        onionrpeers.peer_cleanup()
+        onionrpeers.peer_cleanup(self.onionrInst)
         self.decrementThreadCount('peerCleanup')

     def getPeerProfileInstance(self, peer):

View File

@@ -50,7 +50,7 @@ def announce_node(daemon):
             combinedNodes = ourID + peer
             if ourID != 1:
-                existingRand = bytesconverter.bytes_to_str(keydb.addressinfo.get_address_info(peer, 'powValue'))
+                existingRand = bytesconverter.bytes_to_str(keydb.transportinfo.get_address_info(peer, 'powValue'))
                 # Reset existingRand if it no longer meets the minimum POW
                 if type(existingRand) is type(None) or not existingRand.endswith('0' * ov.announce_pow):
                     existingRand = ''
@@ -76,7 +76,7 @@ def announce_node(daemon):
         if basicrequests.do_post_request(url, data) == 'Success':
             logger.info('Successfully introduced node to ' + peer, terminal=True)
             retData = True
-            keydb.addressinfo.set_address_info(peer, 'introduced', 1)
-            keydb.addressinfo.set_address_info(peer, 'powValue', data['random'])
+            keydb.transportinfo.set_address_info(peer, 'introduced', 1)
+            keydb.transportinfo.set_address_info(peer, 'powValue', data['random'])
     daemon.decrementThreadCount('announce_node')
     return retData
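
A note on the PoW check kept as context above: a stored announce token is reused only while it still ends in enough zero characters for the current difficulty. A minimal standalone sketch of that rule, where the announce_pow parameter stands in for ov.announce_pow:

def token_meets_announce_pow(token: str, announce_pow: int = 5) -> bool:
    # A cached token stays valid only while it still ends in enough
    # zero characters to satisfy the current announce difficulty.
    return token.endswith('0' * announce_pow)

print(token_meets_announce_pow('4f2a00000'))  # True at difficulty 5
print(token_meets_announce_pow('4f2a000'))    # False, only three trailing zeros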

View File

@@ -64,7 +64,8 @@ def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
         if comm_inst.shutdown:
             return
         # Ping a peer,
-        if peeraction.peer_action(comm_inst, address, 'ping') == 'pong!':
+        ret = peeraction.peer_action(comm_inst, address, 'ping')
+        if ret == 'pong!':
             time.sleep(0.1)
             if address not in mainPeerList:
                 # Add a peer to our list if it isn't already since it successfully connected
@@ -85,5 +86,5 @@ def connect_new_peer_to_communicator(comm_inst, peer='', useBootstrap=False):
         else:
             # Mark a peer as tried if they failed to respond to ping
             tried.append(address)
-            logger.debug('Failed to connect to ' + address)
+            logger.debug('Failed to connect to %s: %s ' % (address, ret))
     return retData
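
The point of the two-line change above is that the ping response is kept in ret, so the failure branch can log what the peer actually returned. The same pattern in a reduced, runnable form, with the standard logging module standing in for the project's logger:

import logging
logging.basicConfig(level=logging.DEBUG)

def peer_action(address, action):
    return ''  # stand-in for peeraction.peer_action; simulates a dead peer

address = 'example.onion'
ret = peer_action(address, 'ping')  # capture the response instead of discarding it
if ret == 'pong!':
    logging.debug('%s is reachable', address)
else:
    # the captured value makes the failure log actionable
    logging.debug('Failed to connect to %s: %s', address, ret)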

View File

@@ -20,13 +20,12 @@
 import communicator, onionrexceptions
 import logger, onionrpeers
 from onionrutils import blockmetadata, stringvalidators, validatemetadata
-from coredb import blockmetadb
 from . import shoulddownload
 from communicator import peeraction, onlinepeers
 import onionrcrypto, onionrstorage, onionrblacklist, storagecounter

 def download_blocks_from_communicator(comm_inst):
     assert isinstance(comm_inst, communicator.OnionrCommunicatorDaemon)
-    crypto = onionrcrypto.OnionrCrypto()
     blacklist = onionrblacklist.OnionrBlackList()
     storage_counter = storagecounter.StorageCounter()
     for blockHash in list(comm_inst.blockQueue):
@@ -54,7 +53,7 @@ def download_blocks_from_communicator(comm_inst):
         if len(blockPeers) == 0:
             peerUsed = onlinepeers.pick_online_peer(comm_inst)
         else:
-            blockPeers = crypto.randomShuffle(blockPeers)
+            blockPeers = onionrcrypto.cryptoutils.random_shuffle(blockPeers)
             peerUsed = blockPeers.pop(0)

         if not comm_inst.shutdown and peerUsed.strip() != '':
@@ -66,7 +65,7 @@ def download_blocks_from_communicator(comm_inst):
             except AttributeError:
                 pass
-            realHash = ccrypto.sha3Hash(content)
+            realHash = onionrcrypto.hashers.sha3_hash(content)
             try:
                 realHash = realHash.decode() # bytes on some versions for some reason
             except AttributeError:
@@ -76,7 +75,7 @@ def download_blocks_from_communicator(comm_inst):
             metas = blockmetadata.get_block_metadata_from_data(content) # returns tuple(metadata, meta), meta is also in metadata
             metadata = metas[0]
             if validatemetadata.validate_metadata(metadata, metas[2]): # check if metadata is valid, and verify nonce
-                if crypto.verifyPow(content): # check if POW is enough/correct
+                if onionrcrypto.cryptoutils.verify_POW(content): # check if POW is enough/correct
                     logger.info('Attempting to save block %s...' % blockHash[:12])
                     try:
                         onionrstorage.setdata.set_data(content)
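
For context on the realHash lines above: a downloaded block is only trusted if hashing the received bytes reproduces the hash it was requested under. A self-contained sketch of that check, with hashlib standing in for onionrcrypto.hashers:

import hashlib

def sha3_hash(data: bytes) -> str:
    return hashlib.sha3_256(data).hexdigest()

requested = sha3_hash(b'expected block bytes')  # hash we asked a peer for
received = b'expected block bytes'              # what the peer actually sent back
if sha3_hash(received) == requested:
    print('content matches the requested hash; proceed to metadata/PoW checks')
else:
    print('peer returned wrong data for this hash; discard it')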

View File

@@ -18,12 +18,14 @@
     along with this program. If not, see <https://www.gnu.org/licenses/>.
 '''
 from coredb import blockmetadb
+import onionrblacklist

 def should_download(comm_inst, block_hash):
+    blacklist = onionrblacklist.OnionrBlackList()
     ret_data = True
     if block_hash in blockmetadb.get_block_list(): # Dont download block we have
         ret_data = False
     else:
-        if comm_inst.blacklist.inBlacklist(block_hash): # Dont download blacklisted block
+        if blacklist.inBlacklist(block_hash): # Dont download blacklisted block
             ret_data = False
     if ret_data is False:
         # Remove block from communicator queue if it shouldnt be downloaded

View File

@@ -21,6 +21,7 @@ import logger
 from onionrutils import stringvalidators
 from communicator import peeraction, onlinepeers
 from utils import gettransports
+transports = gettransports.get()

 def lookup_new_peer_transports_with_communicator(comm_inst):
     logger.info('Looking up new addresses...')
     tryAmount = 1
@@ -41,7 +42,7 @@ def lookup_new_peer_transports_with_communicator(comm_inst):
         invalid = []
         for x in newPeers:
             x = x.strip()
-            if not stringvalidators.validate_transport(x) or x in comm_inst.newPeers or x == gettransports.transports[0]:
+            if not stringvalidators.validate_transport(x) or x in comm_inst.newPeers or x in transports:
                 # avoid adding if its our address
                 invalid.append(x)
         for x in invalid:
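
A reduced illustration of the filter above, with placeholder addresses: a candidate transport is rejected if it is already known or is one of our own addresses, now checked against the whole transports list rather than only its first entry (the real code also runs a format check via stringvalidators.validate_transport, omitted here):

transports = ['ourservice.onion']   # stands in for gettransports.get()
known = ['peer2.onion']             # stands in for comm_inst.newPeers
candidates = ['peer1.onion', 'ourservice.onion', 'peer2.onion']

new_peers = [x.strip() for x in candidates
             if x.strip() not in transports and x.strip() not in known]
print(new_peers)  # ['peer1.onion']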

View File

@@ -21,6 +21,8 @@ import logger, onionrproofs
 from onionrutils import stringvalidators, epoch
 from communicator import peeraction, onlinepeers
 from coredb import blockmetadb
+import onionrblacklist
+blacklist = onionrblacklist.OnionrBlackList()

 def lookup_blocks_from_communicator(comm_inst):
     logger.info('Looking up new blocks...')
     tryAmount = 2
@@ -72,7 +74,7 @@ def lookup_blocks_from_communicator(comm_inst):
             if not i in existingBlocks:
                 # if block does not exist on disk and is not already in block queue
                 if i not in comm_inst.blockQueue:
-                    if onionrproofs.hashMeetsDifficulty(i) and not comm_inst.blacklist.inBlacklist(i):
+                    if onionrproofs.hashMeetsDifficulty(i) and not blacklist.inBlacklist(i):
                         if len(comm_inst.blockQueue) <= 1000000:
                             comm_inst.blockQueue[i] = [peer] # add blocks to download queue
                             new_block_count += 1

View File

@@ -22,14 +22,14 @@ from communicatorutils import proxypicker
 import onionrblockapi as block
 from onionrutils import localcommand, stringvalidators, basicrequests
 from communicator import onlinepeers
+import onionrcrypto

 def upload_blocks_from_communicator(comm_inst):
     # when inserting a block, we try to upload it to a few peers to add some deniability
     TIMER_NAME = "upload_blocks_from_communicator"
     triedPeers = []
     finishedUploads = []
-    comm_inst.blocksToUpload = comm_inst.crypto.randomShuffle(comm_inst.blocksToUpload)
+    comm_inst.blocksToUpload = onionrcrypto.cryptoutils.random_shuffle(comm_inst.blocksToUpload)
     if len(comm_inst.blocksToUpload) != 0:
         for bl in comm_inst.blocksToUpload:
             if not stringvalidators.validate_hash(bl):

View File

@@ -75,7 +75,7 @@ def daemon_queue_get_response(responseID=''):
     '''
         Get a response sent by communicator to the API, by requesting to the API
     '''
-    if len(responseID) > 0: raise ValueError('ResponseID should not be empty')
+    if len(responseID) == 0: raise ValueError('ResponseID should not be empty')
     resp = localcommand.local_command(dbfiles.daemon_queue_db, 'queueResponse/' + responseID)
     return resp
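
This one-comparison change is a straight bug fix: the old guard raised whenever a responseID was supplied, so every legitimate call failed. A toy guard clause with the corrected polarity, for illustration only:

def get_response(response_id=''):
    if len(response_id) == 0:  # reject only the missing-ID case
        raise ValueError('ResponseID should not be empty')
    return 'queued response for ' + response_id  # stand-in for the local_command call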

View File

@@ -21,7 +21,7 @@ import sqlite3
 import logger
 from onionrutils import epoch
 from .. import dbfiles
-from . import userinfo
+from . import userinfo, transportinfo

 def list_peers(randomOrder=True, getPow=False, trust=0):
     '''
         Return a list of public keys (misleading function name)
@@ -78,7 +78,7 @@ def list_adders(randomOrder=True, i2p=True, recent=0):
     testList = list(addressList) # create new list to iterate
     for address in testList:
         try:
-            if recent > 0 and (epoch.get_epoch() - userinfo.get_user_info(address, 'lastConnect')) > recent:
+            if recent > 0 and (epoch.get_epoch() - transportinfo.get_address_info(address, 'lastConnect')) > recent:
                 raise TypeError # If there is no last-connected date or it was too long ago, don't add peer to list if recent is not 0
         except TypeError:
             addressList.remove(address)

View File

@@ -20,11 +20,12 @@
 from flask import Blueprint, request, abort
 from onionrservices import httpheaders
 from onionrutils import epoch
+from utils import gettransports

 class PublicAPISecurity:
     def __init__(self, public_api):
         public_api_security_bp = Blueprint('publicapisecurity', __name__)
         self.public_api_security_bp = public_api_security_bp
+        transports = gettransports.get()

         @public_api_security_bp.before_app_request
         def validate_request():
@@ -32,10 +33,7 @@ class PublicAPISecurity:
             # If high security level, deny requests to public (HS should be disabled anyway for Tor, but might not be for I2P)
             if public_api.config.get('general.security_level', default=1) > 0:
                 abort(403)
-            if type(public_api.torAdder) is None and type(public_api.i2pAdder) is None:
-                # abort if our hs addresses are not known
-                abort(403)
-            if request.host not in (public_api.i2pAdder, public_api.torAdder):
+            if request.host not in transports:
                 # Disallow connection if wrong HTTP hostname, in order to prevent DNS rebinding attacks
                 abort(403)
             public_api.hitCount += 1 # raise hit count for valid requests
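
The rewritten check pins the HTTP Host header to our own transport addresses, which is what defeats DNS rebinding: an attacker's page can point a victim's browser at our port, but only under a hostname we refuse. A minimal sketch of the same idea assuming Flask, with a placeholder allowlist:

from flask import Flask, request, abort

app = Flask(__name__)
allowed_hosts = ['myhiddenservice.onion']  # e.g. the gettransports.get() list

@app.before_request
def block_dns_rebinding():
    # Reject any request whose Host header is not one of our own
    # transport addresses.
    if request.host not in allowed_hosts:
        abort(403)

@app.route('/')
def index():
    return 'hello'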

View File

@@ -1,5 +1,6 @@
-from . import safecompare, replayvalidation, randomshuffle
+from . import safecompare, replayvalidation, randomshuffle, verifypow

 replay_validator = replayvalidation.replay_timestamp_validation
 random_shuffle = randomshuffle.random_shuffle
 safe_compare = safecompare.safe_compare
+verify_POW = verifypow.verify_POW

View File

@@ -0,0 +1,36 @@
+from .. import hashers
+import config, onionrproofs, logger
+
+def verify_POW(blockContent):
+    '''
+        Verifies the proof of work associated with a block
+    '''
+    retData = False
+    dataLen = len(blockContent)
+    try:
+        blockContent = blockContent.encode()
+    except AttributeError:
+        pass
+    blockHash = hashers.sha3_hash(blockContent)
+    try:
+        blockHash = blockHash.decode() # bytes on some versions for some reason
+    except AttributeError:
+        pass
+    difficulty = onionrproofs.getDifficultyForNewBlock(blockContent, ourBlock=False)
+    if difficulty < int(config.get('general.minimum_block_pow')):
+        difficulty = int(config.get('general.minimum_block_pow'))
+    mainHash = '0000000000000000000000000000000000000000000000000000000000000000' #nacl.hash.blake2b(nacl.utils.random()).decode()
+    puzzle = mainHash[:difficulty]
+    if blockHash[:difficulty] == puzzle:
+        # logger.debug('Validated block pow')
+        retData = True
+    else:
+        logger.debug("Invalid token, bad proof")
+    return retData
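
The puzzle comparison in this new file treats difficulty as a count of leading zero hex characters in the block hash. A standalone sketch of the same test, using hashlib in place of the project's hasher:

import hashlib

def meets_difficulty(block: bytes, difficulty: int) -> bool:
    digest = hashlib.sha3_256(block).hexdigest()
    # equivalent to comparing against mainHash[:difficulty] above
    return digest.startswith('0' * difficulty)

# each additional required zero multiplies the expected work by 16
print(meets_difficulty(b'example block', 0))  # True, trivially
print(meets_difficulty(b'example block', 4))  # almost certainly False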

View File

@@ -21,7 +21,7 @@ import sqlite3
 import logger
 from onionrutils import epoch
 from . import scoresortedpeerlist, peerprofiles
-import onionrblacklist
+import onionrblacklist, config
 from coredb import keydb

 def peer_cleanup(onionr_inst):
     '''Removes peers who have been offline too long or score too low'''
@@ -40,7 +40,7 @@ def peer_cleanup(onionr_inst):
             if peerprofiles.PeerProfiles(address).score < min_score:
                 keydb.removekeys.remove_address(address)
                 try:
-                    if (int(epoch.get_epoch()) - int(keydb.transportinfo.get_address_info(address, 'dateSeen'))) >= 600:
+                    if (int(epoch.get_epoch()) - int(keydb.transportinfo.get_address_info(address, 'lastConnect'))) >= 600:
                         expireTime = 600
                     else:
                         expireTime = 86400

View File

@@ -94,6 +94,7 @@ class DataPOW:
         self.data = data
         self.threadCount = threadCount
         self.rounds = 0
+        self.hashing = False

         if forceDifficulty == 0:
             dataLen = sys.getsizeof(data)
@@ -189,6 +190,7 @@ class POW:
         self.data = data
         self.metadata = metadata
         self.threadCount = threadCount
+        self.hashing = False

         json_metadata = json.dumps(metadata).encode()
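
The new self.hashing attribute reads as a cooperative stop flag for the PoW worker threads. The general pattern, reduced to a runnable toy (names are illustrative, not Onionr's):

import threading, time

class Miner:
    def __init__(self):
        self.hashing = False

    def run(self):
        self.hashing = True
        while self.hashing:       # each worker polls the flag per round
            time.sleep(0.01)      # stand-in for one hashing attempt

    def stop(self):
        self.hashing = False      # workers exit on their next check

m = Miner()
t = threading.Thread(target=m.run)
t.start()
time.sleep(0.05)
m.stop()
t.join()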

View File

@@ -25,7 +25,6 @@ import config, filepaths, onionrcrypto
 def validate_metadata(metadata, blockData):
     '''Validate metadata meets onionr spec (does not validate proof value computation), take in either dictionary or json string'''
     # TODO, make this check sane sizes
-    crypto = onionrcrypto.OnionrCrypto()
     requirements = onionrvalues.OnionrValues()
     retData = False
     maxClockDifference = 120
@@ -81,7 +80,7 @@ def validate_metadata(metadata, blockData):
     else:
         # if metadata loop gets no errors, it does not break, therefore metadata is valid
         # make sure we do not have another block with the same data content (prevent data duplication and replay attacks)
-        nonce = bytesconverter.bytes_to_str(crypto.sha3Hash(blockData))
+        nonce = bytesconverter.bytes_to_str(onionrcrypto.hashers.sha3_hash(blockData))
         try:
             with open(filepaths.data_nonce_file, 'r') as nonceFile:
                 if nonce in nonceFile.read():
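
The nonce logic above hashes the block body and checks it against a file of previously seen digests, so an identical payload cannot be accepted twice. The same idea as a standalone sketch (the file path is a placeholder for filepaths.data_nonce_file):

import hashlib, os

NONCE_FILE = 'seen_nonces.txt'

def is_replay(block_data: bytes) -> bool:
    nonce = hashlib.sha3_256(block_data).hexdigest()
    if os.path.exists(NONCE_FILE):
        with open(NONCE_FILE) as f:
            if nonce in f.read():
                return True        # exact payload was accepted before
    with open(NONCE_FILE, 'a') as f:
        f.write(nonce + '\n')      # record it for future checks
    return False

print(is_replay(b'hello'))  # False the first time
print(is_replay(b'hello'))  # True on the replay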

onionr/utils/__init__.py (new empty file)

View File

@@ -1,14 +1,18 @@
-import filepaths
+import filepaths, time

 files = [filepaths.tor_hs_address_file]

 def get():
     transports = []
+    while len(transports) == 0:
         for file in files:
             try:
                 with open(file, 'r') as transport_file:
-                    transports.append(transport_file.read())
+                    transports.append(transport_file.read().strip())
             except FileNotFoundError:
                 transports.append('')
-                pass
+            else:
+                break
+        else:
+            time.sleep(1)
     return list(transports)
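
The reworked get() strips whitespace from the address it reads and adds a retry loop around the file reads. A reduced wait-for-file sketch in the same spirit, not a copy of the committed logic (the path is a placeholder for filepaths.tor_hs_address_file):

import time

def wait_for_transport(path: str, poll_seconds: float = 1.0) -> str:
    # Poll until the hidden service address file appears, then return
    # its contents with surrounding whitespace stripped.
    while True:
        try:
            with open(path) as f:
                return f.read().strip()
        except FileNotFoundError:
            time.sleep(poll_seconds)  # address not published by Tor yet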