# Example 1
def multiPoolNodesCreated(request,
                          tconf,
                          looper,
                          tdir,
                          cliTempLogger,
                          namesOfPools=("pool1", "pool2")):
    """Create and start one test network per name in *namesOfPools*.

    For each pool: builds pool/domain genesis transactions, spins up four
    nodes (Alpha..Delta prefixed with the pool name), waits for them to
    connect and elect, and attaches a pool CLI.

    :param request: pytest request fixture (unused here, kept for fixture
        signature compatibility)
    :param tconf: shared test config object; its NETWORK_NAME is temporarily
        repointed at each pool while that pool is being built
    :param looper: event loop driving the nodes
    :param tdir: base temporary directory
    :param cliTempLogger: logger passed through to the CLI builder
    :param namesOfPools: names of the networks to create
    :return: list of TestMultiNode, one per pool, each with .poolCli set
    """
    multiNodes = []
    for poolName in namesOfPools:
        newPoolTxnNodeNames = [
            poolName + n for n in ("Alpha", "Beta", "Gamma", "Delta")
        ]
        config_helper = ConfigHelper(tconf, chroot=tdir)
        ledger_dir = os.path.join(config_helper.ledger_base_dir, poolName)
        newPoolTxnData = getPoolTxnData(poolName, newPoolTxnNodeNames)
        newTdirWithPoolTxns = custom_tdir_with_pool_txns(
            newPoolTxnData, ledger_dir, tconf.poolTransactionsFile)
        newTdirWithDomainTxns = custom_tdir_with_domain_txns(
            newPoolTxnData, ledger_dir, domainTxnOrderedFields(),
            tconf.domainTransactionsFile)
        testPoolNode = TestMultiNode(poolName, newPoolTxnNodeNames, tdir,
                                     tconf, newPoolTxnData,
                                     newTdirWithPoolTxns,
                                     newTdirWithDomainTxns, None)

        poolCLIBabyGen = CliBuilder(tdir, newTdirWithPoolTxns,
                                    newTdirWithDomainTxns, looper, tconf,
                                    cliTempLogger)
        poolCLIBaby = next(poolCLIBabyGen(poolName))

        # Ugly hack to build several networks: the shared config's
        # NETWORK_NAME is repointed at this pool.  Restore it in a finally
        # block so a failure while building one pool does not leak the
        # wrong network name into the rest of the test session
        # (the original code only restored it on the success path).
        network_bak = tconf.NETWORK_NAME
        tconf.NETWORK_NAME = poolName
        try:
            tdirWithNodeKeepInited(tdir, tconf, NodeConfigHelper,
                                   newPoolTxnData, newPoolTxnNodeNames)

            nodes = []
            for nm in newPoolTxnNodeNames:
                config_helper = NodeConfigHelper(nm, tconf, chroot=tdir)
                node = TestNode(nm,
                                config_helper=config_helper,
                                config=tconf,
                                pluginPaths=None)
                looper.add(node)
                nodes.append(node)
            looper.run(checkNodesConnected(nodes))
            ensureElectionsDone(looper=looper, nodes=nodes)

            poolCli = poolCLI(tdir, tconf, poolCLIBaby, newPoolTxnData,
                              newPoolTxnNodeNames, nodes)
            testPoolNode.poolCli = poolCli
            multiNodes.append(testPoolNode)
        finally:
            tconf.NETWORK_NAME = network_bak

    return multiNodes
# Example 2
def clean(config, full, network_name):
    """Remove the node's runtime directories.

    Always removes the log, keys, and genesis directories; with *full*
    truthy, also wipes the ledger and log base directories.  When
    *network_name* is given, the config is repointed at that network first.
    """
    if network_name:
        config.NETWORK_NAME = network_name
    helper = ConfigHelper(config)

    # Collect targets first, then delete in the same order as before.
    targets = [helper.log_dir, helper.keys_dir, helper.genesis_dir]
    if full:
        targets.append(helper.ledger_base_dir)
        targets.append(helper.log_base_dir)

    for target in targets:
        shutil.rmtree(target)
    def __init__(self,
                 timeout: int = TIMEOUT,
                 backup_format: str = BACKUP_FORMAT,
                 test_mode: bool = False,
                 deps: List[str] = DEPS,
                 backup_target: str = None,
                 files_to_preserve: List[str] = None,
                 backup_dir: str = None,
                 backup_name_prefix: str = None,
                 backup_num: int = BACKUP_NUM,
                 hold_ext: str = '',
                 config=None):
        """Resolve backup/upgrade settings from arguments or config defaults,
        then open the node-control TCP server socket on localhost:30003."""
        self.config = config or getConfig()
        self.test_mode = test_mode
        self.timeout = timeout or TIMEOUT

        helper = ConfigHelper(self.config)
        self.backup_dir = backup_dir or helper.backup_dir
        self.backup_target = backup_target or helper.genesis_dir

        self.tmp_dir = TMP_DIR
        self.backup_format = backup_format
        self.deps = deps

        # Files that must survive a backup/restore cycle, unless the caller
        # supplied an explicit list.
        default_preserve = [
            self.config.lastRunVersionFile, self.config.nextVersionFile,
            self.config.upgradeLogFile, self.config.lastVersionFilePath,
            self.config.restartLogFile
        ]
        self.files_to_preserve = files_to_preserve or default_preserve
        self.backup_num = backup_num

        self.backup_name_prefix = (
            backup_name_prefix
            or '{}_backup_'.format(self.config.NETWORK_NAME))
        self.packages_to_hold = ' '.join([PACKAGES_TO_HOLD, hold_ext])

        # Non-blocking TCP/IP server socket, reusable address.
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.server.setblocking(0)

        # Fixed local control endpoint.
        self.server_address = ('localhost', 30003)

        logger.info('Node control tool is starting up on {} port {}'.format(
            *self.server_address))
        self.server.bind(self.server_address)

        # Accept at most one pending connection.
        self.server.listen(1)
def migrate_nodes_data():
    """Flatten the legacy ``<ledger_data_dir>/nodes/<node>`` layout into
    ``<ledger_data_dir>/<node>`` and fix ownership.

    No-op when the legacy directory does not exist.
    """
    config = getConfigOnce()
    config_helper = ConfigHelper(config)

    # Move data
    old_nodes_data_dir = os.path.join(config_helper.ledger_data_dir, 'nodes')
    new_node_data_dir = config_helper.ledger_data_dir
    try:
        visit_dirs = os.listdir(old_nodes_data_dir)
    except FileNotFoundError:
        # Nothing to migrate.  Bail out here: the original code fell through
        # with an empty list and then crashed on rmtree of the missing
        # directory below.
        return
    for node_name in visit_dirs:
        move_path = os.path.join(old_nodes_data_dir, node_name)
        to_path = os.path.join(new_node_data_dir, node_name)
        ext_copytree(move_path, to_path)
    shutil.rmtree(old_nodes_data_dir)
    set_own_perm("indy", [new_node_data_dir])
    def __init__(self,
                 timeout: int = TIMEOUT,
                 backup_format: str = BACKUP_FORMAT,
                 test_mode: bool = False,
                 backup_target: str = None,
                 files_to_preserve: List[str] = None,
                 backup_dir: str = None,
                 backup_name_prefix: str = None,
                 backup_num: int = BACKUP_NUM,
                 hold_ext: str = '',
                 config=None):
        """Resolve backup/restore settings from arguments or config defaults,
        enable file logging, start listening, and enter the main loop."""
        self.config = config or getConfig()

        self.test_mode = test_mode
        self.timeout = timeout or TIMEOUT

        # Space-separated extra packages to hold, kept as a list.
        self.hold_ext = hold_ext.split(" ")

        helper = ConfigHelper(self.config)
        self.backup_dir = backup_dir or helper.backup_dir
        self.backup_target = backup_target or helper.genesis_dir

        self.tmp_dir = TMP_DIR
        self.backup_format = backup_format

        # Files that must survive a backup/restore cycle, unless the caller
        # supplied an explicit list.
        default_preserve = [
            self.config.lastRunVersionFile, self.config.nextVersionFile,
            self.config.upgradeLogFile, self.config.lastVersionFilePath,
            self.config.restartLogFile
        ]
        self.files_to_preserve = files_to_preserve or default_preserve
        self.backup_num = backup_num

        self.backup_name_prefix = (
            backup_name_prefix
            or '{}_backup_'.format(self.config.NETWORK_NAME))

        # Side effects, in order: log file, listening socket, main loop.
        self._enable_file_logging()

        self._listen()

        self.start()
# Example 6
    return False


def remove(ledger_name):
    """Delete the on-disk data directories of a non-built-in ledger.

    Built-in ledgers (domain/config/pool/audit) are refused.  Relies on the
    module-level ``config_helper`` (set in the ``__main__`` guard) and the
    ``warn`` confirmation helper.
    """
    exceptions = ["domain", "config", "pool", "audit"]
    if ledger_name in exceptions:
        # Guard clause instead of a trailing else — built-in ledgers are
        # never deletable.
        print('Can`t delete built in ledger: ' + ledger_name)
        return

    # A ledger's data lives in directories named "<ledger_name>_*".
    # list(rglob(...)) replaces the manual append loop.
    directories_path = list(
        Path(config_helper.ledger_data_dir).rglob(ledger_name + "_*"))

    # Truthiness instead of `not len(...)`.
    if not directories_path:
        print('Ledger doesn`t exist: ' + ledger_name)

    elif warn(ledger_name, directories_path):
        for path in directories_path:
            shutil.rmtree(str(path))
        print('Ledger removed successfully!')


if __name__ == '__main__':
    config = getConfig()
    config_helper = ConfigHelper(config)
    # Fail with a readable usage message instead of a bare ValueError from
    # tuple unpacking when the argument count is wrong.  SystemExit is a
    # builtin, so no extra import is needed.
    if len(argv) != 2:
        raise SystemExit('Usage: {} <ledger_name>'.format(argv[0]))
    script, ledger_name = argv
    remove(ledger_name)
        d=adict()
        d.name = st
        d.nym = cf.get(st,'nym')
        d.fromnym= cf.get(st,'from')
        d.role = cf.get(st,'role')
        d.verkey = cf.get(st,'verkey')
        
        nyms.append(d)

    print(nyms)
    
    getConfig().NETWORK_NAME = args.network

    chroot = None
    config_helper = ConfigHelper(getConfig(),chroot = chroot)
    os.makedirs(config_helper.genesis_dir, exist_ok=True)
    genesis_dir = config_helper.genesis_dir
    keys_dir = config_helper.keys_dir
    print(keys_dir)
    domainLedger = init_domain_ledger(args.appendToLedgers, genesis_dir,
                                       getConfig(), getTxnOrderedFields())
    domain(domainLedger , nyms)
    
    
    genesis_dir = setup_clibase_dir(getConfig(), args.network)
    keys_dir = os.path.join(genesis_dir, "keys")
    
    domainLedger = init_domain_ledger(args.appendToLedgers, genesis_dir,
                                          getConfig(), getTxnOrderedFields())
    domain(domainLedger ,nyms)
def main():
    """Explore validator stats from the ``*_info.json`` files in a base dir.

    Parses command-line options, configures file logging, groups the info /
    version / additional JSON files by node name, and prints them either as
    raw JSON (``--json``), as a tree (``-v``), or as a stats summary.
    """
    global logger

    def check_unsigned(s):
        # argparse type-checker: accept only strings parsing to int > 0.
        # NOTE(review): currently only referenced by the disabled socket
        # options below — kept for when that feature is re-enabled.
        res = None
        try:
            res = int(s)
        except ValueError:
            pass
        if res is None or res <= 0:
            raise argparse.ArgumentTypeError(("{!r} is incorrect, "
                                              "should be int > 0").format(s, ))
        else:
            return res

    # `config` is a module-level object — presumably set at import time;
    # confirm against the surrounding module.
    config_helper = ConfigHelper(config)

    parser = argparse.ArgumentParser(
        description=(
            "Tool to explore and gather statistics about running validator"),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Output-format flags; --json and --nagios take precedence over -v.
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="Verbose mode (command line)")
    parser.add_argument("--json",
                        action="store_true",
                        help="Format output as JSON (ignores -v)")
    parser.add_argument("--nagios",
                        action="store_true",
                        help="Format output as NAGIOS output (ignores -v)")

    statfile_group = parser.add_argument_group(
        "statfile", "settings for exploring validator stats from stat file")

    statfile_group.add_argument("--basedir",
                                metavar="PATH",
                                default=config_helper.node_info_dir,
                                help=("Path to stats files"))
    # statfile_group.add_argument(
    #     "--watch", action="store_true", help="Watch for stats file updates"
    # )

    # socket group is disabled for now due the feature is unsupported
    # socket_group = parser.add_argument_group(
    #     "socket", "settings for exploring validator stats from socket"
    # )
    #
    # socket_group.add_argument(
    #     "--listen", action="store_true",
    #     help="Listen socket for stats (ignores --statfile)"
    # )
    #
    # socket_group.add_argument(
    #     "-i", "--ip", metavar="IP", default=config.STATS_SERVER_IP,
    #     help="Server IP"
    # )
    # socket_group.add_argument(
    #     "-p", "--port", metavar="PORT", default=config.STATS_SERVER_PORT,
    #     type=check_unsigned, help="Server port"
    # )

    other_group = parser.add_argument_group("other", "other settings")

    # Default log file: <log_base_dir>/<script_basename>.log
    other_group.add_argument("--log",
                             metavar="FILE",
                             default=os.path.join(
                                 config_helper.log_base_dir,
                                 os.path.basename(sys.argv[0] + ".log")),
                             help="Path to log file")

    args = parser.parse_args()

    # Drop any pre-existing handlers, then log to the requested file only.
    remove_log_handlers()

    if args.log:
        set_log_owner(args.log)

    Logger().enableFileLogging(args.log)

    logger.debug("Cmd line arguments: {}".format(args))

    # is not supported for now
    # if args.listen:
    #     logger.info("Starting server on {}:{} ...".format(
    #       args.ip, args.port))
    #     print("Starting server on {}:{} ...".format(args.ip, args.port))
    #
    #     loop = asyncio.get_event_loop()
    #     coro = asyncio.start_server(accept_client,
    #                                 args.ip, args.port, loop=loop)
    #     server = loop.run_until_complete(coro)
    #
    #     logger.info("Serving on {}:{} ...".format(args.ip, args.port))
    #     print('Serving on {} ...'.format(server.sockets[0].getsockname()))
    #
    #     # Serve requests until Ctrl+C is pressed
    #     try:
    #         loop.run_forever()
    #     except KeyboardInterrupt:
    #         pass
    #
    #     logger.info("Stopping server ...")
    #     print("Stopping server ...")
    #
    #     # Close the server
    #     server.close()
    #     for task in clients.keys():
    #         task.cancel()
    #     loop.run_until_complete(server.wait_closed())
    #     loop.close()
    # else:
    all_paths = glob(os.path.join(args.basedir, "*_info.json"))

    # Group files by node: name is the part of the basename before the
    # first "_"; classify each file as additional/version/info by substring.
    files_by_node = dict()

    for path in all_paths:
        bn = os.path.basename(path)
        if not bn:
            continue
        node_name = bn.split("_", maxsplit=1)[0]
        if "additional" in bn:
            files_by_node.setdefault(node_name,
                                     {}).update({"additional": path})
        elif "version" in bn:
            files_by_node.setdefault(node_name, {}).update({"version": path})
        else:
            files_by_node.setdefault(node_name, {}).update({"info": path})
    if not files_by_node:
        print('There are no info files in {}'.format(args.basedir))
        return

    # --json: dump everything (all nodes, all files) as one JSON document
    # and exit early on success.
    if args.json:
        allf = []
        for n, ff in files_by_node.items():
            allf.extend([v for k, v in ff.items()])
        out_json = compile_json_ouput(allf)
        if out_json:
            print(json.dumps(out_json, sort_keys=True))
            sys.exit(0)

    # Per-node output: merge the info + version files, then either print a
    # tree (-v) or a one-shot stats summary.
    for node in files_by_node:
        inf_ver = [
            v for k, v in files_by_node[node].items()
            if k in ["info", "version"]
        ]
        json_data = compile_json_ouput(inf_ver)
        if json_data:
            if args.verbose:
                print("{}".format(os.linesep).join(
                    create_print_tree(json_data, lines=[])))
            else:
                print(
                    get_stats_from_file(json_data, args.verbose, args.json,
                                        args.nagios))

        print('\n')
    # In verbose mode, also dump each node's "additional" file as a tree.
    if args.verbose:
        for node in files_by_node:
            file_path = files_by_node[node].get("additional", "")
            if not file_path:
                continue
            json_data = read_json(file_path)
            if json_data:
                print("{}".format(os.linesep).join(
                    create_print_tree(json_data, lines=[])))

    logger.info("Done")
        d.client_ip = cf.get(st, 'client_ip')
        d.node_port = cf.get(st, 'node_port')
        d.client_port = cf.get(st, 'client_port')
        d.dest = cf.get(st, 'dest')
        d.bls_key = cf.get(st, 'bls_key')
        d.bls_pop = cf.get(st, 'bls_pop')
        d.fromnym = cf.get(st, 'from')
        d.services = cf.get(st, 'services')
        d.islocal = cf.get(st, 'islocal')
        nodes.append(d)
    print(nodes)

    getConfig().NETWORK_NAME = args.network

    chroot = None
    config_helper = ConfigHelper(getConfig(), chroot=chroot)
    os.makedirs(config_helper.genesis_dir, exist_ok=True)
    genesis_dir = config_helper.genesis_dir
    keys_dir = config_helper.keys_dir

    poolLedger = init_pool_ledger(args.appendToLedgers, genesis_dir,
                                  getConfig())

    genesis_dir = setup_clibase_dir(getConfig(), args.network)
    keys_dir = os.path.join(genesis_dir, "keys")
    poolLedger1 = init_pool_ledger(args.appendToLedgers, genesis_dir,
                                   getConfig())

    genesis_protocol_version = None

    seq_no = 1