More decoupling, removed unnecessary announceCache
parent fad5e8547e
commit 080f33bf1f
@@ -71,6 +71,7 @@ class OnionrCommunicatorDaemon:
         self.kv.put('dbTimestamps', {})
         self.kv.put('blocksToUpload', [])
         self.kv.put('cooldownPeer', {})
+        self.kv.put('generating_blocks', [])
 
         if config.get('general.offline_mode', False):
             self.isOnline = False
@@ -93,10 +94,6 @@ class OnionrCommunicatorDaemon:
         # list of peer's profiles (onionrpeers.PeerProfile instances)
         self.peerProfiles = []
 
-        self.announceProgress = {}
-
-        self.generating_blocks = []
-
         # amount of threads running by name, used to prevent too many
         self.threadCounts = {}
 
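Taken together, the two hunks above replace per-daemon attributes (self.announceProgress, self.generating_blocks) with entries registered in the shared DeadSimpleKV store at startup. A minimal sketch of that pattern, using a hypothetical in-memory stand-in rather than the real DeadSimpleKV class (only the put/get calls visible in this commit are assumed):

    # Hypothetical stand-in for the shared key/value store used above.
    class SharedKV:
        def __init__(self):
            self._data = {}

        def put(self, key, value):
            self._data[key] = value
            return value

        def get(self, key):
            return self._data.get(key)


    kv = SharedKV()
    # Was: self.generating_blocks = [] on OnionrCommunicatorDaemon.
    kv.put('generating_blocks', [])
    # Any component holding kv can now read or mutate the same list by name.
    kv.get('generating_blocks').append('example-hash')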
@@ -4,6 +4,7 @@ Remove block hash from daemon's upload list.
 """
 from typing import TYPE_CHECKING
 if TYPE_CHECKING:
+    from deadsimplekv import DeadSimpleKV
     from communicator import OnionrCommunicatorDaemon
     from onionrtypes import BlockHash
 """
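The DeadSimpleKV import is added under the TYPE_CHECKING guard, so it is only evaluated by static type checkers; at runtime the name exists solely inside string annotations. An illustrative sketch of that pattern (the use_kv function is hypothetical):

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Imported for annotations only; never executed at runtime.
        from deadsimplekv import DeadSimpleKV


    def use_kv(kv: "DeadSimpleKV") -> None:
        # The quoted annotation stays valid even though DeadSimpleKV
        # was never actually imported at runtime.
        kv.put('example_key', [])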
@@ -25,7 +26,8 @@ if TYPE_CHECKING:
 def remove_from_insert_queue(comm_inst: "OnionrCommunicatorDaemon",
                              b_hash: "BlockHash"):
     """Remove block hash from daemon's upload list."""
+    kv: "DeadSimpleKV" = comm_inst.shared_state.get_by_string("DeadSimpleKV")
     try:
-        comm_inst.generating_blocks.remove(b_hash)
+        kv.get('generating_blocks').remove(b_hash)
     except ValueError:
         pass
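The try/except kept around the new kv.get('generating_blocks').remove(b_hash) call matters because list.remove raises ValueError when the hash is absent; swallowing it keeps repeated or out-of-order removals harmless. A small illustration with a plain list, no Onionr objects:

    queue = ['hash-a', 'hash-b']
    queue.remove('hash-a')      # normal removal
    try:
        queue.remove('hash-a')  # already gone: list.remove raises ValueError
    except ValueError:
        pass                    # nothing to do, mirroring remove_from_insert_queue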
@@ -25,8 +25,9 @@ if TYPE_CHECKING:
 
 def clear_offline_peer(comm_inst: 'OnionrCommunicatorDaemon'):
     """Remove the longest offline peer to retry later."""
+    kv: "DeadSimpleKV" = comm_inst.shared_state.get_by_string("DeadSimpleKV")
     try:
-        removed = comm_inst.offlinePeers.pop(0)
+        removed = kv.get('offlinePeers').pop(0)
     except IndexError:
         pass
     else:
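The try/except/else shape is preserved: the else branch runs only when pop(0) actually returned a peer. The else body is truncated in this hunk, so the final line below is a hypothetical placeholder:

    offline_peers = []          # stand-in for kv.get('offlinePeers')
    try:
        removed = offline_peers.pop(0)
    except IndexError:
        pass                    # list was empty; nothing to clear
    else:
        print('cleared', removed)  # hypothetical; the real else body is not shown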
@@ -44,8 +44,7 @@ def announce_node(daemon):
     if daemon.config.get('general.security_level', 0) == 0:
         # Announce to random online peers
         for i in kv.get('onlinePeers'):
-            if i not in kv.get('announceCache') and\
-                    i not in daemon.announceProgress:
+            if i not in kv.get('announceCache'):
                 peer = i
                 break
         else:
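Worth noting for the hunk above: the trailing else belongs to the for loop, so it runs only when no peer passed the announceCache check and the loop ended without break. A small illustration with made-up peer names (the else body in the real code is not shown in this diff):

    online_peers = ['peer-a', 'peer-b']    # stand-in for kv.get('onlinePeers')
    announce_cache = {'peer-a', 'peer-b'}  # stand-in for kv.get('announceCache')
    peer = None
    for i in online_peers:
        if i not in announce_cache:
            peer = i
            break
    else:
        # Runs only when the loop finished without break, i.e. every online
        # peer was already in the announce cache.
        pass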
@@ -35,18 +35,17 @@ def client_api_insert_block():
     insert_data: JSONSerializable = request.get_json(force=True)
     message = insert_data['message']
     message_hash = bytesconverter.bytes_to_str(hashers.sha3_hash(message))
+    kv: 'DeadSimpleKV' = g.too_many.get_by_string('DeadSimpleKV')
 
     # Detect if message (block body) is not specified
     if type(message) is None:
         return 'failure due to unspecified message', 400
 
     # Detect if block with same message is already being inserted
-    if message_hash in g.too_many.get_by_string(
-            "OnionrCommunicatorDaemon").generating_blocks:
+    if message_hash in kv.get('generating_blocks'):
         return 'failure due to duplicate insert', 400
     else:
-        g.too_many.get_by_string(
-            "OnionrCommunicatorDaemon").generating_blocks.append(message_hash)
+        kv.get('generating_blocks').append(message_hash)
 
     encrypt_type = ''
     sign = True
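Stripped of the Flask plumbing, the last hunk reduces the duplicate-insert check to a membership test plus append on the shared 'generating_blocks' list. A minimal sketch, assuming kv.get returns the stored list itself (which the code above relies on) and using a hypothetical helper name:

    def try_queue_block(kv, message_hash: str) -> bool:
        """Hypothetical helper: queue a block hash unless it is already generating."""
        generating = kv.get('generating_blocks')
        if message_hash in generating:
            return False   # maps to 'failure due to duplicate insert', 400
        generating.append(message_hash)
        return True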