Example #1
0
def main():
    """Entry point: ask for a listen port, then start either the GUI test
    harness (port == -1) or the real peer manager plus server, and finally
    hand control to the Qt event loop."""
    # For test only, ask for the port
    port_no = int(input("Enter port :\n"))

    # Initialise the graphical environment
    gui_app = QtGui.QApplication(sys.argv)

    # Build the data containers shared with the GUI
    meta_data, _net = dataMgr.CreateContainers()
    consensus_obj = consensus.Consensus()

    if port_no == -1:
        # Test case for easy gui work
        print("GUI TEST MODE SELECTED")
        manager = peersMgr.TestPeer(consensus_obj, meta_data)
        manager.start()
        creator = serverMgr.TestPeerCreator(manager.message_queue, meta_data)
    else:
        # Real mode: peer manager plus the listening server
        manager = peersMgr.PeersManager(consensus_obj, meta_data)
        manager.start()

        server = serverMgr.ServerManager(manager.get_peer_Q(), meta_data, port_no)
        server.start()

        creator = serverMgr.PeerCreator(manager.get_peer_Q(), meta_data)

    # Build the main window and run Qt's event loop until the GUI closes
    wind = mainGui.initialize_gui(
        consensus_obj.vis_data.model, meta_data.model,
        manager.get_mess_Q(), creator)
    gui_app.exec_()
Example #2
0
    def initialise_consensus(self):
        """Rebuild the consensus state from the loaded spectra.

        Creates one Consensus object per entry in ``self.spectral_data`` and
        resets the clustered/unclustered bookkeeping lists.
        """
        # One Consensus wrapper per spectrum (replaces the old append loop,
        # which also carried a dead `con = None` initialiser).
        self.consensus_data = [consensus.Consensus(spec)
                               for spec in self.spectral_data]
        self.clustered = []
        self.unclustered = []
Example #3
0
def run():
    """Main node loop for the blockchain prototype.

    Loads configuration, initialises chain/keystore/P2P/parser/OOR and the
    BLS/DKG consensus state, performs a bootstrap, then loops forever:
    processing received blocks, external and user transactions, OOR and
    network queries, BLS shares, and DKG renewals.

    NOTE(review): Python 2 code (``str.encode('hex')``, ``ConfigParser``,
    ``e.message``). On any unhandled exception inside a loop stage the node
    stops P2P and exits the process with ``sys.exit(0)``.
    """
    #Check old processes are not running
    check_old_processes_running()

    #Load config
    config_data = ConfigParser.RawConfigParser()
    config_data.read('chain_config.cfg')
    EXT_TX_PER_LOOP = config_data.getint(
        'Transaction generation and processing', 'ext_tx_per_loop')
    USER_TX_PER_LOOP = config_data.getint(
        'Transaction generation and processing', 'user_tx_per_loop')
    LOOPS_PER_TX = config_data.getint('Transaction generation and processing',
                                      'loops_per_tx')
    START_TIME = config_data.getint('Transaction generation and processing',
                                    'start_time')
    DKG_RENEWAL_INTERVAL = config_data.getint('Consensus',
                                              'dkg_renewal_interval')
    BLOCK_TIME = config_data.getint('General', 'block_time')
    TIMEOUT = config_data.getint('General', 'timeout')
    DKG_TIMEOUT = config_data.getint('Consensus', 'dkg_timeout')

    #Telemetry initialization
    start_time = time.time()

    init_logger()

    delays_blocks = open_log_block_process_delay()
    delays_txs = open_log_delay_create_txs()

    #Modules initialization
    mainLog.info("Initializing Chain")
    chain = init_chain()
    last_block = chain.get_head_block().header.number
    mainLog.debug("Last block: %s", last_block)

    mainLog.info("Initializing Keystore")
    keys, addresses = init_keystore()
    mainLog.info("Loaded %s keys", len(keys))
    mainLog.info("----------------LOADED ADDRESSES---------------------")
    mainLog.info([add.encode("HEX") for add in addresses])
    mainLog.info("----------------END ADDRESS LIST---------------------")

    mainLog.info("Initializing P2P")
    p2p = init_p2p(chain.get_head_block().header.number)

    mainLog.info("Initializing Parser")
    user = init_user()
    try:
        user.read_transactions("./transactions.txt")
    except Exception as e:
        mainLog.critical("Exception while reading user transactions")
        mainLog.exception(e)
        p2p.stop()
        sys.exit(0)

    mainLog.info("Initializing OOR")
    oor = init_oor()

    #Variables initialization
    # Loop-state flags: `end` never becomes truthy here, so the main loop
    # only terminates via sys.exit(); `count` tracks signer-selection retries.
    end = 0
    count = 0
    dkg_on = False
    exit_from_dkg = False
    processed_user = 0
    user_tx_count = 0

    # Snapshot of the current chain head used by the consensus machinery.
    block_num = chain.get_head_block().header.number
    timestamp = chain.get_head_block().header.timestamp
    last_random_no = chain.get_head_block().header.random_number.encode('hex')
    current_group_sig = chain.get_head_block().header.group_sig
    current_group_key = chain.get_current_group_key()

    my_dkgIDs = []

    myIPs = IPSet()
    for i in range(len(keys)):
        myIPs.update(chain.get_own_ips(keys[i].address))
    mainLog.info("Own IPs at startup are: %s", myIPs)

    dkg_group = chain.get_current_dkg_group()
    in_dkg_group, my_dkgIDs = find_me_in_dkg_group(dkg_group, addresses)

    mainLog.info("Initializing Consensus")
    # NOTE(review): current_group_key is passed twice (positions 4 and 7) —
    # verify against cons.Consensus' signature whether the last argument
    # should really be the group key again.
    consensus = cons.Consensus(dkg_group, my_dkgIDs, last_random_no,
                               current_group_key, block_num, current_group_sig,
                               current_group_key)

    isMaster = load_master_private_keys(consensus, my_dkgIDs)
    # NOTE(review): create_shares stays undefined when the node is neither
    # master nor in the DKG group; later reads are short-circuited by
    # `in_dkg_group and create_shares`, or follow the DKG trigger below
    # which assigns it True.
    if not in_dkg_group:
        consensus.store_ids(dkg_group)
    else:
        mainLog.warning(
            "TODO: nodes that belong to the DKG group and connect after the DKG do not have private keys, so they shouldn't create shares. Needs to be disabled!"
        )
        if not isMaster:
            create_shares = False
    if isMaster:
        consensus.create_shares(last_random_no, block_num, count)
        create_shares = True
    # Cache of already-seen BLS/DKG shares — presumably used to avoid
    # re-processing and re-broadcasting duplicates (TODO confirm in
    # Share_Cache's definition).
    cache = Share_Cache()

    before = time.time()
    last_random_no, block_num, count = perform_bootstrap(
        chain, p2p, consensus, delays_blocks, delays_txs, DKG_RENEWAL_INTERVAL,
        last_random_no, block_num, count)
    after = time.time()
    elapsed = after - before
    mainLog.info("Bootstrap finished. Elapsed time: %s", elapsed)
    timestamp = chain.get_head_block().header.timestamp
    current_group_sig = chain.get_head_block().header.group_sig
    current_group_key = chain.get_current_group_key()
    last_random_no = chain.get_head_block().header.random_number.encode('hex')

    from_bootstrap = True

    # Main event loop: each iteration runs every stage once (blocks, txs,
    # signing, user txs, OOR, queries, shares, DKG).
    while (not end):

        #Process new blocks. DOES NOT support bootstrap
        try:
            block = p2p.get_block()
            while block is not None:
                #FALSE: Only nodes that do NOT belong to the DKG get stuck here until they receive the block with the new group key
                mainLog.info("Received new block no. %s", block.number)
                mainLog.info("Block Data: Group Signature: %s --Random number: %s --Group Key: %s", block.header.group_sig, \
                             block.header.random_number.encode('hex'),  block.header.group_pubkey)
                res = False
                try:
                    signer = consensus.get_next_signer(block.count)
                    expected_message = str(last_random_no) + str(
                        block_num) + str(count)
                    #Use in case the OR in the next line does not work
                    #                    if exit_from_dkg or dkg_on:
                    #                        usePrevGroupKey = True
                    #                    else:
                    #                        usePrevGroupKey = False
                    if consensus.verify_group_sig(expected_message,
                                                  block.header.group_sig,
                                                  exit_from_dkg or dkg_on):
                        mainLog.debug("Verify Group Signature OK")
                    else:
                        raise BlsInvalidGroupSignature()
                    if in_dkg_group and exit_from_dkg:
                        # We ONLY enter here if the node belongs to the DKG group and just finished a new DKG
                        exit_from_dkg = False
                        if block.header.group_pubkey != consensus.get_current_group_key(
                        ):
                            mainLog.error(
                                "FATAL ERROR. A node in the DKG group received a block with a Group Public Key not matching the generated from the DKG."
                            )
                            raise Exception(
                                "Unexpected group key in block header. Stopping"
                            )
                        # NOTE(review): signing_addr is only bound in the DKG
                        # trigger stage further below — reaching this line
                        # before a DKG has been triggered would raise
                        # NameError (caught by the generic handler).
                        signer = chain.extract_first_ip_from_address(
                            signing_addr)
                    elif dkg_on:
                        # We ONLY enter here if the nodes DOES NOT belong to the DKG group and is waiting for a current DKG to finish
                        signer = chain.extract_first_ip_from_address(
                            signing_addr)
                        consensus.set_current_group_key(
                            block.header.group_pubkey)
                        dkg_on = False
                    mainLog.debug(
                        "Verifying new block signature, signer should be %s",
                        signer)
                    mainLog.debug("Owner of the previous IP is address %s",
                                  chain.get_addr_from_ip(signer).encode("HEX"))
                    mainLog.debug("Coinbase in the block is: %s",
                                  block.header.coinbase.encode("HEX"))
                    res = chain.verify_block_signature(block, signer)
                except UnsignedBlock as e:
                    mainLog.exception(e)
                    mainLog.error("Unsigned block. Skipping")
                    res = False
                except InvalidBlockSigner as e:
                    mainLog.exception(e)
                    mainLog.error(
                        "Block no. %s signautre is invalid! Ignoring.",
                        block.number)
                    res = False
                except BlsInvalidGroupSignature as e:
                    mainLog.exception(e)
                    mainLog.error(
                        "Block no. %s: invalid unexpected or invalid BLS group signature! Ignoring.",
                        block.number)
                    res = False
                except Exception as e:
                    # NOTE(review): this error call passes block.number but the
                    # format string has no placeholder for it.
                    mainLog.error(
                        "Unrecoverable error when checking block signature. Exiting.",
                        block.number)
                    mainLog.exception(e)
                    raise e
                if res:
                    # correct block
                    before = time.time()
                    chain.add_block(block)
                    after = time.time()
                    delay = after - before
                    delays_blocks.write(
                        str(block.number) + ',' + str(delay) + '\n')
                    delays_txs.write("Added new block no." +
                                     str(block.number) + '\n')
                    timestamp = chain.get_head_block().header.timestamp
                    block_num = chain.get_head_block().header.number
                    last_random_no = block.header.random_number.encode('hex')
                    if from_bootstrap:
                        from_bootstrap = False
                        consensus.bootstrap_only_set_random_no_manual(
                            last_random_no)
                        consensus.bootstrap_only_set_group_sig_manual(
                            block.header.group_sig)
                    #after a correct block: reset BLS and create and broadcast new shares (like receiving a new block)
                    consensus.calculate_next_signer(block_num)
                    consensus.reset_bls()
                    if in_dkg_group and create_shares:
                        count = 0
                        new_shares = consensus.create_shares(
                            last_random_no, block_num, count)
                        for share in new_shares:
                            p2p.broadcast_share(share)
                            cache.store_bls(share)
                            mainLog.info("Sent a new share to the network")
                else:
                    mainLog.error(
                        "Received an erroneous block. Ignoring block...")

                block = p2p.get_block()
        except Exception as e:
            mainLog.critical("Exception while processing a received block")
            mainLog.exception(e)
            p2p.stop()
            sys.exit(0)

        #Process transactions from the network
        processed = 0
        try:
            tx_ext = p2p.get_tx()
            while tx_ext is not None:
                #Check that the transaction has not been sent from this node or already processed
                processed = processed + 1
                if not (chain.in_chain(tx_ext) or chain.in_pool(tx_ext)):
                    mainLog.info("Received external transaction: to: %s hash %s", \
                    tx_ext.to.encode('HEX'), tx_ext.hash.encode('HEX'))
                    try:
                        chain.add_pending_transaction(tx_ext)
                        # Correct tx
                        p2p.broadcast_tx(tx_ext)
                    except Exception as e:
                        mainLog.info("Discarded invalid external transaction: to: %s", \
                        tx_ext.to.encode("HEX"))
                        mainLog.exception(e)
                # Cap the number of external txs handled per loop iteration.
                if processed < EXT_TX_PER_LOOP:
                    tx_ext = p2p.get_tx()
                else:
                    tx_ext = None
        except Exception as e:
            mainLog.critical(
                "Exception while processing a received transaction")
            mainLog.exception(e)
            p2p.stop()
            sys.exit(0)

        #Check if the node has to sign the next block. Control also timeouts
        #Before we wait for the block time
        try:
            timestamp = chain.get_head_block().header.timestamp
            block_num = chain.get_head_block().header.number
            if ((time.time() - timestamp) >= BLOCK_TIME):
                #Time to create a new block
                if (time.time() - timestamp) >= TIMEOUT:
                    #The expected signer didn't create a block. Trigger a recalculation of the random number to select a new signer
                    #TODO: does NOT work because it will enter all the time when the timeout expires
                    #                    count = count + 1
                    #                    timeout_expired =  True
                    #                    if count == 0:
                    #                        consensus.reset_bls()
                    #                    consensus.create_share(count)
                    #                    p2p.broadcast_share(new_share)
                    #                    mainLog.info("Timeout expired. Recalculated random no and sent a new share to the network")
                    mainLog.info("Contextual information: Current time: %s --Last block timestamp: %s --Last random number: %s --Last block number: %s", \
                                 time.time(), timestamp, consensus.get_current_random_no(), block_num)
                    raise Exception(
                        "FATAL ERROR, Block tiemout expired. The feature to re-calculte the random number after a block timeout exprity is not implemented. Stopping..."
                    )
                if (consensus.shares_ready() or exit_from_dkg) and not dkg_on:
                    if not exit_from_dkg:
                        #Normal operation
                        signer = consensus.get_next_signer(count)
                        signing_addr = chain.get_addr_from_ip(signer)
                        #When we exit a new DKG round, the variable signing_addr stores the next signer (we are temporarily overriding the BLS RN generation)
                    if signing_addr in addresses:
                        exit_from_dkg = False
                        mainLog.info(
                            "This node has to sign a block, selected IP: %s",
                            signer)
                        mainLog.info("Associated address: %s",
                                     signing_addr.encode("HEX"))
                        new_block = chain.create_block(signing_addr, consensus.get_current_random_no(), \
                                    consensus.get_current_group_key(), consensus.get_current_group_sig(), count)
                        try:
                            key_pos = addresses.index(signing_addr)
                        except:
                            raise Exception(
                                "FATAL ERROR: This node does not own the indicated key to sign the block (not present in the keystore)"
                            )
                        sig_key = keys[key_pos]
                        new_block.sign(sig_key.privkey)
                        mainLog.info("Created new block no. %s, timestamp %s, coinbase %s", \
                            new_block.header.number, new_block.header.timestamp, new_block.header.coinbase.encode("HEX"))
                        mainLog.info(
                            "New block signature data: v %s -- r %s -- s %s",
                            new_block.v, new_block.r, new_block.s)
                        mainLog.info("Block Group Signature: %s --Random number: %s --Group Key: %s", new_block.header.group_sig, \
                             new_block.header.random_number.encode('hex'),  new_block.header.group_pubkey)
                        mainLog.info("This block contains %s transactions",
                                     new_block.transaction_count)
                        #                        mainLog.info("Sleeping 2s to give way to clock drift...")
                        #                        time.sleep(2)
                        #Like receiving a new block
                        before = time.time()
                        chain.add_block(new_block)
                        after = time.time()
                        delay = after - before
                        delays_blocks.write(
                            str(new_block.number) + ',' + str(delay) + '\n')
                        delays_txs.write("Added new block no." +
                                         str(new_block.number) + '\n')
                        p2p.broadcast_block(new_block)
                        #after a correct block, create and broadcast new share
                        count = 0
                        #timeout_expired = False
                        block_num = new_block.number
                        consensus.calculate_next_signer(block_num)
                        last_random_no = consensus.get_current_random_no()
                        consensus.reset_bls()
                        # NOTE(review): fixed 10-second pause after publishing
                        # a block — the reason is not evident from this file;
                        # confirm whether it is needed for propagation.
                        time.sleep(10)
                        if in_dkg_group and create_shares:
                            count = 0
                            new_shares = consensus.create_shares(
                                last_random_no, block_num, count)
                            for share in new_shares:
                                p2p.broadcast_share(share)
                                cache.store_bls(share)
                                mainLog.info("Sent a new share to the network")

        except Exception as e:
            mainLog.critical(
                "Exception while checking if the node has to sign the next block"
            )
            mainLog.exception(e)
            p2p.stop()
            sys.exit(0)

        # Process transactions from the user
        # Only inject user transactions after START_TIME (or immediately if
        # master) and never during an active DKG; LOOPS_PER_TX throttles to
        # one transaction every LOOPS_PER_TX loop iterations.
        if ((time.time() - start_time) > START_TIME
                or isMaster) and not dkg_on:
            if user_tx_count == LOOPS_PER_TX - 1:
                try:
                    tx_int = user.get_tx()
                    while tx_int is not None:
                        before = time.time()
                        processed_user = processed_user + 1
                        user_tx_count = 0
                        try:
                            try:
                                key_pos = addresses.index(tx_int["from"])
                                #mainLog.debug("Found key in %s", key_pos)
                            except:
                                raise Exception(
                                    "Key indicated in from field is not in present in the keystore"
                                )
                            key = keys[key_pos]
                            tx = chain.parse_transaction(tx_int)
                            tx.sign(key.privkey)
                            mainLog.info("Processing user transaction, from: %s --  to: %s -- hash %s -- value %s", \
                            tx_int["from"].encode("HEX"), tx_int["to"].encode("HEX"), tx.hash.encode("HEX"), tx_int["value"])
                            #mainLog.debug("TX signed. Info: v %s -- r %s -- s %s -- NONCE %s", tx.v, \
                            #tx.r, str(tx.s), tx.nonce)
                            # correct tx
                            try:
                                chain.add_pending_transaction(tx)
                            except Exception as e:
                                raise e
                            p2p.broadcast_tx(tx)
                            after = time.time()
                            delay = after - before
                            delays_txs.write(
                                str(tx.hash.encode("HEX")) + ',' + str(delay) +
                                '\n')
                            #mainLog.info("Sent transaction to the network, from: %s --  to: %s --  value: %s", \
                            #tx_int["from"].encode("HEX"), tx.to.encode("HEX"), tx.ip_network)
    #                        seen_tx.append(tx.hash)
                        except Exception as e:
                            mainLog.error(
                                "Error when creating user transaction, ignoring transaction."
                            )
                            mainLog.exception(e.message)
#                        Temporarily diabled because we want 1 tx per 2 loops
#                        if processed < USER_TX_PER_LOOP:
#                            tx_int = user.get_tx()
#                        else:
#                            tx_int = None
                        tx_int = None
                except Exception as e:
                    mainLog.exception(e)
                    p2p.stop()
                    sys.exit(0)
            else:
                user_tx_count = user_tx_count + 1

        #answer queries from OOR
        try:
            nonce, afi, address = oor.get_query()
            if nonce is not None and afi is not None and address is not None:
                info = chain.query_eid(ipaddr=address, nonce=nonce)
                oor.send(info)
        except Exception as e:
            mainLog.critical("Exception while answering queries from OOR")
            mainLog.exception(e)
            p2p.stop()
            sys.exit(0)

#########Answer queries from the network
#blocks
        try:
            block_numbers = p2p.get_block_queries()
            if block_numbers is not None:
                mainLog.info("Answering query for block nos. %s",
                             block_numbers)
                response = []
                for number in block_numbers:
                    response.append(chain.get_block_by_number(number))
                p2p.answer_block_queries(response)
        except Exception as e:
            mainLog.critical(
                "Exception while answering queries from the network")
            mainLog.exception(e)
            p2p.stop()
            sys.exit(0)

        #transaction pool
        try:
            if p2p.tx_pool_query():
                mainLog.info("Answering tx pool query")
                pool = chain.get_pending_transactions()
                p2p.answer_tx_pool_query(pool)
        except Exception as e:
            mainLog.critical("Exception while answering the transaction pool")
            mainLog.exception(e)
            # Stop P2P
            p2p.stop()
            sys.exit(0)


########Consensus
#Get shares from the network
        try:
            share = p2p.get_share()
            while share is not None and not dkg_on:
                mainLog.info("Received new BLS share from P2P.")
                if not cache.in_bls_cache(share):
                    mainLog.info("Share not in cache, processing")
                    if share.block_number == block_num:
                        msg = str(last_random_no) + str(block_num) + str(count)
                        res = consensus.store_share(share, msg, block_num)
                    elif share.block_number > block_num:
                        mainLog.debug(
                            "Receive a share for a future block number. Saving for later..."
                        )
                        mainLog.debug(
                            "Current block no. %s, block no. in share: %s",
                            block_num, share.block_number)
                        cache.store_future_bls(share)
                    else:
                        mainLog.debug(
                            "Receive a share for a past block number. VERY STRANGE!!!  Discarding..."
                        )
                    cache.store_bls(share)
                    p2p.broadcast_share(share)
                share = p2p.get_share()
            # Drain shares that were stored earlier for the (now current) block.
            while cache.pending_future_bls(block_num):
                share = cache.get_future_bls(block_num)
                msg = str(last_random_no) + str(block_num) + str(count)
                res = consensus.store_share(share, msg, block_num)
        except Exception as e:
            mainLog.critical("Exception while processing received shares")
            mainLog.exception(e)
            # Stop P2P
            p2p.stop()
            sys.exit(0)

        if (time.time() - timestamp) > (BLOCK_TIME *
                                        0.75) and not consensus.shares_ready():
            mainLog.warning(
                "This node has not computed yet Group Signature...")
            if (time.time() - timestamp) > (BLOCK_TIME *
                                            0.99) and not isMaster:
                mainLog.warning(
                    "It is nearly block time and we don't have Group Signature. Activating boostrap mode."
                )
                from_bootstrap = True

        #DKG management

        #Trigger new DKG
        try:
            if (
                (block_num + 1) % DKG_RENEWAL_INTERVAL == 0
            ) and not dkg_on and not exit_from_dkg and consensus.shares_ready(
            ):
                mainLog.info(
                    "Next block needs new Group Key. Triggering DKG renewal.")
                dkg_on = True
                create_shares = True
                dkg_group = chain.get_current_dkg_group()
                in_dkg_group, my_dkgIDs = find_me_in_dkg_group(
                    dkg_group, addresses)
                if in_dkg_group:
                    to_send = consensus.new_dkg(dkg_group, my_dkgIDs)
                    for dkg_share in to_send:
                        cache.store_dkg(dkg_share)
                        p2p.send_dkg_share(dkg_share)
                    #For cases when one node does ALL the DKG
                    if consensus.all_node_dkgs_finished():
                        dkg_on = False
                        exit_from_dkg = True
                        mainLog.info(
                            "DKG Finished sucessfully for all node IDs. Exiting loop and resuming normal operation."
                        )
                else:
                    # Configure nodes that do not participate in the DKG so they can verfiy BLS shares later
                    consensus.store_ids(dkg_group)
                #Define new signer that has to be in the dkg_group. Selected randomly from the people in the group (temporal override of the BLS RN generation)
                random_no = chain.get_block_by_number(
                    block_num).header.random_number.encode('hex')
                random_pos = compress_random_no_to_int(random_no,
                                                       16) % len(dkg_group)
                # signing_addr will be used in block RX and block creation code in the beginning of the loop
                signing_addr = dkg_group[random_pos]
        except Exception as e:
            mainLog.critical("Exception while creating DKG shares")
            mainLog.exception(e)
            # Stop P2P
            p2p.stop()
            sys.exit(0)

        #Collect DKG shares for the new DKG
        try:
            #During DKG, the prototype only works on DKG
            if in_dkg_group:
                #WE STAY HERE FOR THE WHOLE DKG
                dkg_share = p2p.get_dkg_share()
                while dkg_on and dkg_share is not None:
                    mainLog.info("Received new DKG share from P2P")
                    if not cache.in_dkg_cache(dkg_share):
                        if dkg_share.to in my_dkgIDs:
                            #Next fix if current fix does not work
                            #if dkg_share.to in my_dkgIDs and consensus.allSharesReceived(dkg_sahre.to):
                            consensus.verify_dkg_contribution(dkg_share)
                            if consensus.all_node_dkgs_finished():
                                dkg_on = False
                                exit_from_dkg = True
                                mainLog.info(
                                    "DKG Finished sucessfully for all node IDs. Exiting loop and resuming normal operation."
                                )
                                #                                if not isMaster:
                                #                                    mainLog.info("Sleeping for 1hmin to give time to master for its keys")
                                #                                    time.sleep(30*88)
                                time.sleep(120)
                            elif (time.time() - timestamp) >= DKG_TIMEOUT:
                                mainLog.critical(
                                    "Fatal Error. DKG renewal timeout expired. Stopping..."
                                )
                                raise Exception
                        # Send shares that are NOT for me
                        else:
                            p2p.send_dkg_share(dkg_share)
                        cache.store_dkg(dkg_share)
                    dkg_share = p2p.get_dkg_share()
            elif dkg_on:
                mainLog.info(
                    "This node is not participating in the DKG. Will sleep for one block time and wait for a block with the new public key"
                )
                time.sleep(BLOCK_TIME)
                if (time.time() - timestamp) >= DKG_TIMEOUT:
                    mainLog.critical(
                        "Fatal Error. DKG renewal timeout expired. Stopping..."
                    )
                    raise Exception
        except Exception as e:
            mainLog.critical("Exception while processing received DKG shares")
            mainLog.exception(e)
            # Stop P2P
            p2p.stop()
            sys.exit(0)
def main():
    """Entry point: parse CLI options, start a blockchain node with its
    worker threads (request handler, broadcast listener, miner), then
    serve RPC-like commands on the main thread until interrupted.

    Note: this code targets Python 2 (it uses the print statement).
    """
    # Argument and command-line options parsing
    parser = argparse.ArgumentParser(description='Blockchain simulation')
    parser.add_argument('-i', '--ip', metavar='ip', dest='ipaddr',
                        help='Specify listen IP address', default='127.0.0.1')
    parser.add_argument('-p', '--port', metavar='port', dest='port',
                        help='Specify listen port', default=9000)
    parser.add_argument('--peers', dest='peers', nargs='*',
                        help='Specify peers IP addresses', default=[])
    parser.add_argument('--miner', dest='miner', action='store_true',
                        help='Start the node immediately mining')
    parser.add_argument('--log', dest='loglevel', type=_log_level_to_int, nargs='?', default='warning',
                        help='Set the logging output level {0}'.format(_LOG_LEVEL_STRINGS))
    args = parser.parse_args()
    
    # File logging is always DEBUG; the console handler below uses the
    # user-selected level, so the file keeps a full trace regardless.
    logging.basicConfig(filename='tmp/log/example.log', filemode='w', level=logging.DEBUG,
        format='%(asctime)s %(levelname)s: %(message)s', datefmt='%d/%m/%Y %I:%M:%S')
    # set up logging to console
    console = logging.StreamHandler()
    console.setLevel(args.loglevel)
    logging.getLogger('').addHandler(console)

    threads = []
    #sqldb.databaseLocation = 'blocks/blockchain.db'
    # Consensus engine with a fixed proof difficulty of 5.
    cons = consensus.Consensus(difficulty=5)
    n = Node(args.ipaddr, args.port)

    # Connect to predefined peers
    if args.peers:
        # argparse nargs='*' normally yields a list; guard in case a single
        # string slipped through (e.g. from a config source).
        iplist = args.peers if isinstance(args.peers, list) else [args.peers]
        for ipaddr in iplist:
            n.addPeer(ipaddr)
    else: # Connect to localhost
        logging.info('Connecting to localhost...')
        n.connect()
    # Give the sockets a moment to establish before syncing.
    time.sleep(1)

    # Connect and check own node database
    logging.info('checking database')
    sqldb.dbConnect()
    n.bchain = sqldb.dbCheck()

    # Thread to listen request messages
    msg_thread = threading.Thread(name='REQ/REP', target=n.messageHandler)
    msg_thread.start()
    threads.append(msg_thread)

    # Thread to listen broadcast messages
    listen_thread = threading.Thread(name='PUB/SUB', target=n.listen)
    listen_thread.start()
    threads.append(listen_thread)
    #

    # Check peers most recent block (thread to check periodically)
    n.sync()

    # Miner thread
    miner_thread = threading.Thread(name='Miner', target=n.mine,
         kwargs={'cons': cons})
    miner_thread.start()
    threads.append(miner_thread)

    #sync_thread = threading.Thread(name='sync', target=n.sync)
    #sync_thread.start()
    #threads.append(listen_thread)
    # --miner: signal the miner to start right away.
    # NOTE(review): n.start / n.f are threading events defined on Node
    # (not visible here) — presumably "start mining" and a miner flag;
    # confirm against the Node class.
    if args.miner:
        n.start.set()
        n.f.set()

    # Main thread
    try:
        while True:
            # rpc-like commands (#TODO pass ip/port)
            n.rpcServer()
    # Exit main and threads
    except (KeyboardInterrupt, StopException):
        pass
    finally:
        #n.exit(args.ipaddr)
        #n.timer.cancel()
        # Shutdown: set every Node event so no worker thread stays blocked,
        # close sockets, then join all threads before printing chain info.
        n.k.set()
        n.e.set()
        n.f.set()
        n.start.set()
        n.close()
        for t in threads:
           t.join()
        print n.bchain.Info()
Example #5
0
def main():
    """Entry point: parse CLI options and an optional config file, start a
    blockchain node with its worker threads (request handler, broadcast
    listener, miner), then serve RPC-like commands until interrupted.

    Command-line values act as defaults; when the configuration file exists,
    its [node] section overrides them.
    """
    # Argument and command-line options parsing
    parser = argparse.ArgumentParser(description='Blockchain simulation')
    parser.add_argument('-i',
                        '--ip',
                        metavar='ip',
                        dest='ipaddr',
                        help='Specify listen IP address',
                        default='127.0.0.1')
    parser.add_argument('-p',
                        '--port',
                        metavar='port',
                        dest='port',
                        help='Specify listen port',
                        default=9000)
    parser.add_argument('--peers',
                        dest='peers',
                        nargs='*',
                        help='Specify peers IP addresses',
                        default=[])
    parser.add_argument('--miner',
                        dest='miner',
                        action='store_true',
                        help='Start the node immediately mining')
    parser.add_argument(
        '--log',
        dest='loglevel',
        type=_log_level_to_int,
        nargs='?',
        default='warning',
        help='Set the logging output level {0}'.format(_LOG_LEVEL_STRINGS))
    parser.add_argument('-c',
                        '--config',
                        dest='config_file',
                        default='node.conf',
                        type=str,
                        help='Specify the configuration file')
    args = parser.parse_args()
    args.diff = 5  # default mining difficulty; may be overridden by config

    # Configuration file parsing (defaults to command-line arguments if not exists)
    cfgparser = SafeConfigParser({
        'ip': args.ipaddr,
        'port': str(args.port),
        'peers': args.peers,
        'miner': str(args.miner).lower(),
        'loglevel': 'warning',
        'diff': '5'
    })
    if cfgparser.read(args.config_file):
        # BUGFIX: the 'ip' value was previously assigned to args.peers, so
        # the configured listen address was silently discarded (and the
        # stray value was overwritten by the 'peers' entry two lines below).
        args.ipaddr = cfgparser.get('node', 'ip')
        args.port = int(cfgparser.get('node', 'port'))
        args.peers = cfgparser.get('node', 'peers').split('\n')
        args.miner = cfgparser.getboolean('node', 'miner')
        args.diff = int(cfgparser.get('node', 'diff'))
        args.loglevel = _log_level_to_int(cfgparser.get('node', 'loglevel'))

    # File logging is always DEBUG; the console handler below uses the
    # user-selected level, so the file keeps a full trace regardless.
    logging.basicConfig(filename='log/example.log',
                        filemode='w',
                        level=_log_level_to_int('debug'),
                        format='%(asctime)s %(levelname)s: %(message)s',
                        datefmt='%d/%m/%Y %I:%M:%S')
    # set up logging to console
    console = logging.StreamHandler()
    console.setLevel(args.loglevel)
    logging.getLogger('').addHandler(console)

    threads = []
    #sqldb.databaseLocation = 'blocks/blockchain.db'
    # NOTE(review): args.diff is parsed above but never used — presumably it
    # was meant to be passed to Consensus; confirm the constructor signature
    # before wiring it through.
    cons = consensus.Consensus()

    n = Node(args.ipaddr, args.port)

    # Connect to predefined peers
    if args.peers:
        # Guard in case a single string was supplied instead of a list.
        iplist = args.peers if isinstance(args.peers, list) else [args.peers]
        for ipaddr in iplist:
            n.addPeer(ipaddr)
    else:  # Connect to localhost
        logging.info('Connecting to localhost...')
        n.connect()
    # Give the sockets a moment to establish before syncing.
    time.sleep(1)

    # Connect and check own node database
    logging.info('checking database')
    sqldb.dbConnect()
    n.bchain = sqldb.dbCheck()

    # Thread to listen request messages
    msg_thread = threading.Thread(name='REQ/REP', target=n.messageHandler)
    msg_thread.start()
    threads.append(msg_thread)

    # Thread to listen broadcast messages
    listen_thread = threading.Thread(name='PUB/SUB', target=n.listen)
    listen_thread.start()
    threads.append(listen_thread)
    #

    # Check peers most recent block
    n.sync()

    # Miner thread
    miner_thread = threading.Thread(name='Miner',
                                    target=n.mine,
                                    kwargs={'cons': cons})
    miner_thread.start()
    threads.append(miner_thread)

    # --miner (or config 'miner'): signal the miner to start right away.
    # NOTE(review): n.start / n.f are threading events defined on Node
    # (not visible here); confirm semantics against the Node class.
    if args.miner:
        n.start.set()
        n.f.set()

    # Main program thread
    try:
        while True:
            # rpc-like commands
            n.rpcServer()
    # Exit main and threads
    except (KeyboardInterrupt, StopException):
        pass
    finally:
        # Shutdown: set every Node event so no worker thread stays blocked,
        # close sockets, then join all threads before printing chain info.
        n.k.set()
        n.e.set()
        n.f.set()
        n.start.set()
        n.close()
        for t in threads:
            t.join()
        print(n.bchain.Info())
Example #6
0
def validateChallenge(block, stake):
    """Return True when *block* meets the current consensus target.

    A block is valid when its hash, interpreted as a base-16 integer,
    falls strictly below the target obtained from a fresh Consensus
    instance.

    NOTE(review): the *stake* parameter is accepted but never used here;
    it is kept for interface compatibility with callers — confirm whether
    it should factor into the target.
    """
    target = consensus.Consensus().target
    # Direct boolean return instead of if/return True/return False.
    return int(block.hash, 16) < target
Example #7
0
# Build a DKG participant group from the address file opened earlier
# (all_addr / all_addresses / random_no are defined before this chunk).
all_addr.close()

#Randomly select participants from all the addresses
# Selection is without replacement: pop() removes the chosen address so it
# cannot be picked again. Each iteration derives the next pseudo-random
# value by hashing the previous one (deterministic given the seed).
dkg_group = []
for i in range(DKG_NUMBER_PARTICIPANTS):
    random_pos = random_no % len(all_addresses)
    dkg_group.append(utils.normalize_address(all_addresses.pop(random_pos)))
    random_no = utils.compress_random_no_to_int(
        hashlib.sha256(str(random_no)).hexdigest(), 16)

print "Selected the following addresses for the DKG:"
for elem in dkg_group:
    print elem.encode('hex')

#Generate DKG shares
# NOTE(review): dkg_group is passed twice (participants and validators?) —
# confirm the Consensus constructor's parameter meaning.
cons = consensus.Consensus(dkg_group, dkg_group, random_no_string, "0x00", 0,
                           "0x00")
to_send = cons.new_dkg(dkg_group, dkg_group)
#Since we have given him all nodes, we can join the DKG shares directly

# Because this process owns every participant, the DKG must be complete
# immediately; anything else is a fatal inconsistency.
if cons.all_node_dkgs_finished():
    print "Group key is: ", cons.get_current_group_key()
    print "Now writing private keys in file"
else:
    print "Fatal error, we own all nodes, DKG should be finished. Exiting"
    sys.exit(1)

#Store private keys in file
try:
    priv_keys = open('master-private-dkg-keys.txt', 'w')
except Exception as e:
    print e