def masternode_validate_registration_ticket(data, *args, **kwargs):
    """Validate an artist-signed registration ticket and hand back an upload code.

    ``data`` is a pair of (serialized regticket, serialized artist signature).
    The signature must belong to the ticket's declared author and verify
    against the ticket, and the ticket itself must pass its own validation.
    On success a Regticket row is stored in the local DB and a fresh upload
    code (raw UUID bytes) is returned for the subsequent image upload.
    """
    sender_pk = kwargs.get('sender_id')
    mn_ticket_logger.info(
        'Masternode validate regticket, data: {}'.format(data))
    serialized_ticket, serialized_signature = data

    ticket = RegistrationTicket(serialized=serialized_ticket)
    signature = Signature(serialized=serialized_signature)

    # the signature must come from the ticket's declared author and verify
    require_true(signature.pastelid == ticket.author)
    signature.validate(ticket)

    # the ticket contents themselves must be well-formed
    ticket.validate()

    upload_code = uuid.uuid4().bytes
    # TODO: clean upload code and regticket from local db when ticket was placed on the blockchain
    # TODO: clean upload code and regticket from local db if they're old enough
    MASTERNODE_DB.connect(reuse_if_open=True)
    Regticket.create(
        regticket=serialized_ticket,
        upload_code=upload_code,
        created=datetime.now(),
        artists_signature_ticket=serialized_signature,
        artist_pk=sender_pk,
        image_hash=ticket.imagedata_hash)
    return upload_code
def masternode_image_upload_request_mn0(data, *args, **kwargs):
    """Accept an image upload against a previously issued upload code (MN0 role).

    Verifies that the upload code exists and that the caller (``sender_id``)
    is the author of the corresponding regticket, then stores the image data
    together with this node's local fee and returns that fee.
    """
    upload_code = data['upload_code']
    image_data = data['image_data']
    mn_ticket_logger.info(
        'Masternode image upload received, upload_code: {}'.format(
            upload_code))
    sender_id = kwargs.get('sender_id')
    MASTERNODE_DB.connect(reuse_if_open=True)
    try:
        ticket_record = Regticket.get(upload_code=upload_code)
        ticket = RegistrationTicket(serialized=ticket_record.regticket)
        # the upload code is only usable by whoever created the regticket
        if ticket.author != sender_id:
            raise Exception(
                'Given upload code was created by other public key')
        mn_ticket_logger.info(
            'Given upload code exists with required public key')
    except DoesNotExist:
        mn_ticket_logger.exception(
            'Given upload code DOES NOT exists with required public key')
        raise
    fee = get_blockchain_connection().getlocalfee()['localfee']
    ticket_record.image_data = image_data
    ticket_record.localfee = fee
    ticket_record.save()
    return fee
def recalculate_mn_chunk_ranking_table():
    """Recompute, for every chunk, the distance ranking of masternodes.

    This method recalculates all ranks of masternodes for each chunk.
    It tends to be slow (when the number of chunks and masternodes grows),
    so it should only be called when a new masternode is added; callers may
    want to limit call frequency (say, no more than once a minute or so).
    """
    tasks_logger.info(
        'ChunkMnRanked table has {} record. Recalculating...'.format(
            ChunkMnRanked.select().count()))

    # rank every masternode per chunk by distance (1 = closest)
    ranked_rows_sql = '''
    select chunk_id, masternode_id, row_number() over (partition by chunk_id order by distance asc) as r
    from chunkmndistance
    '''
    # keep only the top `Settings.REPLICATION_FACTOR` masternodes per chunk
    # (these are considered the chunk owners)
    owners_sql = '''select chunk_id, masternode_id, r from ({}) as t where t.r<={}'''.format(
        ranked_rows_sql, Settings.REPLICATION_FACTOR)

    # delete old rows before re-inserting the fresh ranking
    ChunkMnRanked.delete().execute()

    # insert (chunk, masternode, rank) for all chunk-owners in a separate
    # table for convenience
    populate_sql = '''insert into chunkmnranked (chunk_id, masternode_id, rank) {}'''.format(
        owners_sql)
    MASTERNODE_DB.execute_sql(populate_sql)

    tasks_logger.info('...Done. Now here are {} records'.format(
        ChunkMnRanked.select().count()))
def regticket_status(data, *args, **kwargs):
    """Report stored status/error for an upload code, only to its creator.

    The lookup keys on both the caller's public key and the upload code, so
    a regticket is never revealed to anyone but the artist who created it.
    """
    sender_id = kwargs.get('sender_id')
    upload_code = data.get('upload_code')
    MASTERNODE_DB.connect(reuse_if_open=True)
    try:
        record = Regticket.get(
            artist_pk=sender_id, upload_code=upload_code)
    except DoesNotExist:
        raise Exception(
            'Given upload code DOES NOT exists with required public key')
    return {'status': record.status, 'error': record.error}
def masternode_mn1_confirm(data, *args, **kwargs):
    """Record a confirmation from another masternode for a registration ticket.

    ``data`` is (artist_pk, image_hash, serialized_signature); ``sender_id``
    identifies the confirming masternode.  The first confirmation is stored
    as MN1; the second one (which must come from a different masternode) is
    stored as MN2, after which the image is placed into chunk storage and the
    ticket is written to the blockchain (its txid is returned).  Any further
    confirmation is rejected.

    Raises:
        Exception: if no matching regticket exists, the same masternode
            confirms twice, or both confirmations were already received.
    """
    artist_pk, image_hash, serialized_signature = data
    sender_id = kwargs.get('sender_id')
    MASTERNODE_DB.connect(reuse_if_open=True)
    mn_ticket_logger.info(
        'masternode_mn1_confirm: received confirmation from {}'.format(
            sender_id))
    regticket_db_set = Regticket.select().where(
        Regticket.artist_pk == artist_pk,
        Regticket.image_hash == image_hash)
    if len(regticket_db_set) == 0:
        raise Exception(
            'Regticket not found for given artist ID and image hash')
    if len(regticket_db_set) > 1:
        # Deduplicate: keep only the newest record.
        # FIX: the condition was `> 2`, which skipped deduplication when
        # exactly two duplicates existed (and then used the *oldest* record);
        # also the delete query was built but never run — peewee queries are
        # lazy and need an explicit .execute().
        regticket_db = regticket_db_set[-1]
        Regticket.delete().where(Regticket.id < regticket_db.id).execute()
    else:
        regticket_db = regticket_db_set[0]
    if regticket_db.is_valid_mn1 is None:
        # first confirmation has came
        regticket_db.is_valid_mn1 = True
        regticket_db.mn1_pk = sender_id
        regticket_db.mn1_serialized_signature = serialized_signature
        regticket_db.save()
    else:
        if regticket_db.is_valid_mn2 is None:
            if regticket_db.mn1_pk == sender_id:
                raise Exception(
                    'I already have confirmation from this masternode')
            # second confirmation has came
            regticket_db.is_valid_mn2 = True
            regticket_db.mn2_pk = sender_id
            regticket_db.mn2_serialized_signature = serialized_signature
            regticket_db.save()
            regticket = RegistrationTicket(serialized=regticket_db.regticket)
            # store image and thumbnail in chunkstorage
            masternode_place_image_data_in_chunkstorage(
                regticket, regticket_db.image_data)
            txid = regticket_db.write_to_blockchain()
            return txid
        else:
            raise Exception('All 2 confirmations received for a given ticket')
    mn_ticket_logger.info('Confirmation from MN received')
    return 'Validation passed'
def __receive_rpc_sql(self, sql, *args, **kwargs):
    """Execute a raw SQL string against the masternode DB and return rows.

    Returns ``{"result": [...]}`` where each row is a dict mapping column
    name -> value, derived from the cursor description.

    Raises:
        TypeError: if ``sql`` is not a string.

    SECURITY WARNING: this executes arbitrary SQL received over RPC with no
    sanitization or access control visible here — confirm callers are
    trusted/authenticated before exposing this endpoint.
    """
    self.__logger.info('SQL request received')
    if not isinstance(sql, str):
        raise TypeError("SQL must be a string!")
    from core_modules.database import MASTERNODE_DB
    cursor = MASTERNODE_DB.execute_sql(sql)
    # column names come from the cursor description (first element per column)
    fields = [column[0] for column in cursor.description]
    # FIX(idiom): replaced a manual index loop building each record dict
    # with dict(zip(...)) — identical output, clearer and faster
    result = [dict(zip(fields, row)) for row in cursor.fetchall()]
    return {"result": result}
def setUp(self):
    """Create an in-memory DB and register two masternodes with fixed PastelIDs."""
    MASTERNODE_DB.init(':memory:')
    MASTERNODE_DB.connect(reuse_if_open=True)
    MASTERNODE_DB.create_tables(DB_MODELS)
    # fixture data: (ext_address, pastel_id) pairs
    fixtures = (
        ('127.0.0.1:444',
         'jXZVtBmehoxYPotVrLdByFNNcB8jsryXhFPgqRa95i2x1mknbzSef1oGjnzfiwRtzReimfugvg41VtA7qGfDZR'),
        ('127.0.0.1:4441',
         'jXZVtBmehoxYPotVrLdByFNNcB7jsryXhFPgqRa95i2x1mknbzSef1oGjnzfiwRtzReimfugvg41VtA7qGfDZR'),
    )
    for address, pastel_id in fixtures:
        Masternode.create(ext_address=address, pastel_id=pastel_id)
def setUp(self):
    """Switch to the client PastelID, build an in-memory DB, and wipe chunk storage."""
    switch_pastelid(CLIENT_PASTELID, PASSPHRASE)
    MASTERNODE_DB.init(':memory:')
    MASTERNODE_DB.connect(reuse_if_open=True)
    MASTERNODE_DB.create_tables(DB_MODELS)
    # cleanup chunk storage left over from earlier runs; note that if the
    # first directory is already missing the second rmtree is skipped
    import shutil
    try:
        shutil.rmtree(Settings.CHUNK_DATA_DIR)
        shutil.rmtree(Settings.TEMP_STORAGE_DIR)
    except FileNotFoundError:
        pass
def setUp(self):
    """Seed an in-memory DB with three masternodes and three chunks, then index."""
    MASTERNODE_DB.init(':memory:')
    MASTERNODE_DB.connect(reuse_if_open=True)
    MASTERNODE_DB.create_tables(DB_MODELS)
    for suffix in range(3):
        # masternode and chunk IDs differ only by the trailing digit
        Masternode.create(
            ext_address='127.0.0.1:444{}'.format(suffix),
            pastel_id='jXZVtBmehoxYPotVrLdByFNNcB8jsryXhFPgqRa95i2x1mknbzSef1oGjnzfiwRtzReimfugvg41VtA7qGfDZ{}'.format(suffix))
        Chunk.create(
            chunk_id='1231231231231231232323934384834890089238429382938429384934{}'.format(suffix),
            image_hash=b'asdasdasd')
    index_new_chunks()
def create_database():
    """Create the on-disk masternode database file and all model tables."""
    # bind the database to the configured file path, then open the connection
    MASTERNODE_DB.init(Settings.MN_DATABASE_FILE)
    MASTERNODE_DB.connect(reuse_if_open=True)
    # create tables for every model in DB_MODELS
    # NOTE(review): presumably safe on re-run via peewee's safe=True default — confirm
    MASTERNODE_DB.create_tables(DB_MODELS)
""" PyNode entrypoint """ from peewee import logger as peewee_logger from bitcoinrpc.authproxy import log as bitcoinrpc_logger from core_modules.settings import Settings from core_modules.database import MASTERNODE_DB from pynode.masternode_daemon import MasterNodeDaemon from os import path from utils.create_tables import create_database if __name__ == "__main__": peewee_logger.disabled = True bitcoinrpc_logger.disabled = True if not path.exists(Settings.MN_DATABASE_FILE): print("Database file {} does not exist".format( Settings.MN_DATABASE_FILE)) print("Creating database...") create_database() print("Database {} created!".format(Settings.MN_DATABASE_FILE)) # initialize the database MASTERNODE_DB.init(Settings.MN_DATABASE_FILE) mnd = MasterNodeDaemon() mnd.run_event_loop()
def setUp(self):
    """Create a fresh in-memory database for each test; silence warnings."""
    # suppress warning noise emitted during the tests
    warnings.simplefilter('ignore')
    MASTERNODE_DB.init(':memory:')
    MASTERNODE_DB.connect(reuse_if_open=True)
    MASTERNODE_DB.create_tables(DB_MODELS)
def setUp(self):
    """Create a fresh in-memory database with all model tables for each test."""
    MASTERNODE_DB.init(':memory:')
    MASTERNODE_DB.connect(reuse_if_open=True)
    MASTERNODE_DB.create_tables(DB_MODELS)