Example #1
def main():
    parser = argparse_ArgumentParser("Input parameters")
    parser.add_argument("--input_file_name",
                        default="input_toy.yaml",
                        help="Input parameters file name")
    parser.add_argument("--graph_files_dir",
                        default="",
                        help="Graph files' folder path")
    parser.add_argument("--out_dir_name",
                        default="/results",
                        help="Output directory name")
    args = parser.parse_args()
    with open(args.input_file_name, 'r') as f:
        inputs = yaml_load(f, yaml_Loader)

    # Override the output directory if one was passed or out_comp_nm is still the default "/results/res"
    if args.out_dir_name or inputs['out_comp_nm'] == "/results/res":
        if not os_path.exists(inputs['dir_nm'] + args.out_dir_name):
            os_mkdir(inputs['dir_nm'] + args.out_dir_name)
        inputs['out_comp_nm'] = args.out_dir_name + "/res"

    inputs['graph_files_dir'] = ''
    if args.graph_files_dir:
        if not os_path.exists(inputs['dir_nm'] + args.graph_files_dir):
            os_mkdir(inputs['dir_nm'] + args.graph_files_dir)
        inputs['graph_files_dir'] = args.graph_files_dir

    with open(inputs['dir_nm'] + inputs['out_comp_nm'] + "_input.yaml",
              'w') as outfile:
        yaml_dump(inputs, outfile, default_flow_style=False)

    logging_basicConfig(filename=inputs['dir_nm'] + inputs['out_comp_nm'] +
                        "_logs.yaml",
                        level=logging_INFO)
    start_time_read = time_time()
    myGraph = read_graphs(inputs)
    read_time = time_time() - start_time_read

    myGraphName = inputs['dir_nm'] + inputs['graph_files_dir'] + "/res_myGraph"
    with open(myGraphName, 'wb') as f:
        pickle_dump(myGraph, f)

    tot_time = time_time() - start_time  # start_time is defined at module level, outside this snippet

    out_comp_nm = inputs['dir_nm'] + inputs['out_comp_nm']
    # Write to yaml file instead
    with open(out_comp_nm + '_runtime_performance.out', "a") as fid:
        print("Read network time (s) = ",
              read_time,
              "[",
              round(100 * float(read_time) / tot_time, 2),
              "%]",
              file=fid)
        print("Total time (s) = ", tot_time, file=fid)
Example #2
def color_logging_setup():
    #
    # Set logging to INFO by default (log everything except DEBUG).
    #
    # Also try to add colors to the logging output if it goes to a capable
    # device (not a file, and a terminal that supports colors).
    #
    # Adding the ANSI escape codes to the logging level name is admittedly
    # an ugly hack, but it is the easiest way (fewer changes).
    #
    # An elegant way of doing this is described here:
    #  http://stackoverflow.com/questions/384076/how-can-i-color-python-logging-output
    #
    proc_name = current_process().name
    proc_pid = os.getpid()
    proc_info = " {}/{}:".format(proc_name, proc_pid) if proc_name != 'MainProcess' else ''
    fmt_str = "%(asctime)s %(levelname)s:{} %(message)s".format(proc_info)
    out_dev_istty = getattr(sys.stdout, 'isatty', None)

    if out_dev_istty is not None and out_dev_istty():
        if '256color' in os.environ.get('TERM', ''):
            for lvl in LOGGING_LEVELS.keys():
                logging_addLevelName(LOGGING_LEVELS[lvl]['level'],
                                     "\033[{0}{1}".format(
                                         LOGGING_LEVELS[lvl]['256color'],
                                         LOGGING_LEVELS[lvl]['name']))
            fmt_str = "\033[38;5;250m%(asctime)s\033[0m %(levelname)s:{} " \
                      "%(message)s\033[0m".format(proc_info)
        elif 'xterm' in os.environ.get('TERM', ''):
            for lvl in LOGGING_LEVELS.keys():
                logging_addLevelName(LOGGING_LEVELS[lvl]['level'],
                                     "\033[{0}{1}".format(
                                         LOGGING_LEVELS[lvl]['xterm'],
                                         LOGGING_LEVELS[lvl]['name']))
            fmt_str = "\033[37m%(asctime)s\033[0m %(levelname)s:{} " \
                      "%(message)s\033[0m".format(proc_info)
        else:
            logging_addLevelName(LOGGING_LEVELS['NORMAL']['level'],
                                 LOGGING_LEVELS['NORMAL']['name'])

    logging_basicConfig(format=fmt_str, level=logging_level_INFO,
                        stream=sys.stdout)
    logger = logging_getLogger()

    return logger
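color_logging_setup() depends on a module-level LOGGING_LEVELS mapping that is not shown here. Judging from the keys it reads ('level', 'name', '256color', 'xterm'), a minimal sketch might look like the following; the actual entries and colour codes in the source are unknown:

from logging import INFO, WARNING, ERROR

# Hypothetical reconstruction: each entry carries the numeric level, the
# display name passed to addLevelName, and ANSI colour prefixes for
# 256-colour and plain xterm terminals. The trailing 'm' closes the escape
# sequence; the format string appends "\033[0m" to reset the colour.
LOGGING_LEVELS = {
    'NORMAL':  {'level': INFO,    'name': 'INFO',
                '256color': '38;5;40m',  'xterm': '32m'},
    'WARNING': {'level': WARNING, 'name': 'WARNING',
                '256color': '38;5;220m', 'xterm': '33m'},
    'ERROR':   {'level': ERROR,   'name': 'ERROR',
                '256color': '38;5;196m', 'xterm': '31m'},
}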
Example #3
def main():
    import _dimgx
    _dimgx._logexception = exitonraise(_dimgx._logexception) # WARNING: monkey patch; pylint: disable=protected-access
    args = buildparser().parse_args(sys_argv[1:])
    logging_basicConfig(format=args.log_format)
    getLogger().setLevel(logging_getLevelName(args.log_level))
    dc_kw = kwargs_from_env()

    # TODO: hack to work around github:docker/docker-py#706
    if DOCKER_TLS_VERIFY == '0':
        dc_kw['tls'].assert_hostname = False

    dc = AutoVersionClient(**dc_kw)
    layers_dict = inspectlayers(dc, args.image)
    top_most_layer_id, selected_layers = selectlayers(args, layers_dict)

    if not selected_layers:
        _LOGGER.warning('no known layers selected')

    if args.target is None:
        printlayerinfo(args, selected_layers)
    else:
        extractlayers(dc, args, selected_layers, top_most_layer_id)
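The monkey patch above wraps _dimgx._logexception with exitonraise, whose definition is not part of this example. Going only by the name and how it is used, it presumably turns a logged exception into a process exit; a rough sketch under that assumption:

from functools import wraps

def exitonraise(wrapped):
    # Hypothetical sketch, not the real dimgx helper: run the original
    # exception logger, then terminate instead of letting execution continue.
    @wraps(wrapped)
    def wrapper(*args, **kwargs):
        wrapped(*args, **kwargs)
        raise SystemExit(1)
    return wrapper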
Example #4
def main():
    import _dimgx
    _dimgx._logexception = exitonraise(_dimgx._logexception)  # WARNING: monkey patch; pylint: disable=protected-access
    args = buildparser().parse_args(sys_argv[1:])
    logging_basicConfig(format=args.log_format)
    getLogger().setLevel(logging_getLevelName(args.log_level))
    patch_broken_tarfile_29760()
    dc_kw = kwargs_from_env()

    # TODO: hack to work around github:docker/docker-py#706
    if DOCKER_TLS_VERIFY == '0':
        dc_kw['tls'].assert_hostname = False

    dc = AutoVersionClient(**dc_kw)
    layers_dict = inspectlayers(dc, args.image)
    top_most_layer_id, selected_layers = selectlayers(args, layers_dict)

    if not selected_layers:
        _LOGGER.warning('no known layers selected')

    if args.target is None:
        printlayerinfo(args, selected_layers)
    else:
        extractlayers(dc, args, selected_layers, top_most_layer_id)
Example #5
def main():
    parser = argparse_ArgumentParser("Input parameters")
    parser.add_argument("--input_file_name",
                        default="input_toy.yaml",
                        help="Input parameters file name")
    parser.add_argument("--out_dir_name",
                        default="/results",
                        help="Output directory name")
    parser.add_argument("--train_test_files_dir",
                        default="",
                        help="Train test file path")
    parser.add_argument("--graph_files_dir",
                        default="",
                        help="Graph files' folder path")

    parser.add_argument("--n_pts",
                        default=1,
                        help="number of partitions (computers)")
    parser.add_argument(
        "--seed_mode",
        help="Seed mode - specify 'cliques' for the cliques algo")
    parser.add_argument("--search_method", help="Sampling algorithm")
    parser.add_argument("--model_dir", help="Directory containing model")
    parser.add_argument("--ptnum", default='0', help="partition number")
    parser.add_argument("--explore_prob",
                        default=0.01,
                        help="probability of exploring")
    parser.add_argument("--prob_metropolis",
                        default=0.1,
                        help="metropolis probability")
    parser.add_argument("--T0", default=0.88, help="isa T0")
    parser.add_argument("--alpha", default=1.8, help="isa alpha")
    parser.add_argument("--classi_thresh",
                        default=0.5,
                        help="Classification threshold")
    parser.add_argument("--transfer2tmp",
                        default=True,
                        help="Transfer to tmp folder")

    args = parser.parse_args()

    with open(args.input_file_name, 'r') as f:
        inputs = yaml_load(f, yaml_Loader)

    if args.classi_thresh:
        inputs['classi_thresh'] = float(args.classi_thresh)
    if args.seed_mode:
        inputs['seed_mode'] = args.seed_mode
    if args.search_method:
        inputs['search_method'] = args.search_method
    if args.model_dir:
        inputs['model_dir'] = args.model_dir
    if args.explore_prob:
        inputs['explore_prob'] = float(args.explore_prob)
    if args.prob_metropolis:
        inputs['prob_metropolis'] = float(args.prob_metropolis)
    if args.T0:
        inputs['T0'] = float(args.T0)
    if args.alpha:
        inputs['alpha'] = float(args.alpha)

    # Override the output directory if one was passed or out_comp_nm is still the default "/results/res"
    if args.out_dir_name or inputs['out_comp_nm'] == "/results/res":
        if not os_path.exists(inputs['dir_nm'] + args.out_dir_name):
            os_mkdir(inputs['dir_nm'] + args.out_dir_name)
        inputs['out_comp_nm'] = args.out_dir_name + "/res"

    inputs['train_test_files_dir'] = ''
    if args.train_test_files_dir:
        if not os_path.exists(inputs['dir_nm'] + args.train_test_files_dir):
            os_mkdir(inputs['dir_nm'] + args.train_test_files_dir)
        inputs['train_test_files_dir'] = args.train_test_files_dir

    inputs['graph_files_dir'] = ''
    if args.graph_files_dir:
        if not os_path.exists(inputs['dir_nm'] + args.graph_files_dir):
            os_mkdir(inputs['dir_nm'] + args.graph_files_dir)
        inputs['graph_files_dir'] = args.graph_files_dir

    with open(inputs['dir_nm'] + inputs['out_comp_nm'] + "_input_sample.yaml",
              'w') as outfile:
        yaml_dump(inputs, outfile, default_flow_style=False)

    logging_basicConfig(filename=inputs['dir_nm'] + inputs['out_comp_nm'] +
                        "_logs.yaml",
                        level=logging_INFO)
    # fin_list_graphs = control(myGraph,inputs,n=50)
    out_comp_nm = inputs['dir_nm'] + inputs['out_comp_nm']
    out_comp_nm_model = inputs['dir_nm'] + inputs['model_dir']

    modelfname = out_comp_nm_model + "_model"
    scalerfname = out_comp_nm_model + "_scaler"

    max_sizeF = inputs['dir_nm'] + inputs[
        'train_test_files_dir'] + "/res_max_size_search_par"
    with open(max_sizeF, 'rb') as f:
        max_size = pickle_load(f)

    with open(scalerfname, 'rb') as f:
        scaler = pickle_load(f)

    myGraph = None
    if inputs['seed_mode'] == "cliques":
        myGraphName = inputs['dir_nm'] + inputs[
            'graph_files_dir'] + "/res_myGraph"
        with open(myGraphName, 'rb') as f:
            myGraph = pickle_load(f)

    ptns = int(args.n_pts)
    if inputs['seed_mode'] == 'n_nodes':
        seed_nodes_dir = out_comp_nm + "_seed_nodes"
    else:
        seed_nodes_dir = inputs['dir_nm'] + inputs[
            'graph_files_dir'] + "/" + inputs['seed_mode'] + "_n_pts_" + str(
                ptns) + "/res_seed_nodes"

    seed_nodes_F = seed_nodes_dir + args.ptnum
    with open(seed_nodes_F, 'rb') as f:
        seed_nodes = pickle_load(f)

    start_time_sample = time_time()
    out_comp_nm = inputs['dir_nm'] + inputs['out_comp_nm']

    num_comp = sample(inputs, myGraph, modelfname, scaler, seed_nodes,
                      max_size, args.transfer2tmp)

    sample_time = time_time() - start_time_sample
    sample_time_avg = sample_time / num_comp
    folNm_out = "/tmp/" + out_comp_nm + "_orig_comps"  # CHECK WHICH NODE's TMP IS BEING USED

    pred_comp_list = [
        pickle_load(open(folNm_out + "/" + seed_node, 'rb'))
        for seed_node in seed_nodes
        if os_path.exists(folNm_out + "/" + seed_node)
    ]

    with open(out_comp_nm + "_pred_comp_list" + args.ptnum, "wb") as f:
        pickle_dump(pred_comp_list, f)
    tot_time = time_time() - start_time  # start_time is defined at module level, outside this snippet

    with open(out_comp_nm + '_runtime_performance.out', "a") as fid:
        print("--- Runtime performance ---", file=fid)
        print("Sample time (s) = ",
              sample_time,
              "[",
              round(100 * float(sample_time) / tot_time, 2),
              "%]",
              file=fid)
        print("Average sample time (s) = ", sample_time_avg, file=fid)
        print("Total time (s) = ", tot_time, file=fid)
Example #6
    def __init__(self, config, log_level):
        self.logger = getLogger("Feshie unpacker")
        self.logger.setLevel(log_level)
        logging_basicConfig(format='%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s')
        self.database = FeshieDb(config, log_level)
Example #7
def main():
    parser = argparse_ArgumentParser("Input parameters")
    parser.add_argument("--input_file_name",
                        default="input_toy.yaml",
                        help="Input parameters file name")
    parser.add_argument("--out_dir_name",
                        default="/results",
                        help="Output directory name")
    parser.add_argument("--train_test_files_dir",
                        default="",
                        help="Train test file path")
    parser.add_argument("--n_pts",
                        default=1,
                        help="number of partitions (computers)")
    parser.add_argument("--over_t", help="Overlap threshold")
    parser.add_argument("--model_dir", help="Directory containing model")
    parser.add_argument("--sample_dir", help="Sample files dir + /res")
    parser.add_argument("--sample_folders_prefix",
                        help="Input parameters file name /results..")
    parser.add_argument(
        "--sample_folders_prefix_final",
        help="Input file name to use final merged results Use as /results..")
    parser.add_argument(
        "--sample_folders_list",
        nargs='+',
        help="Input parameters file name /results.. separated by commas")
    parser.add_argument("--graph_files_dir",
                        default="",
                        help="Graph files' folder path")

    parser.add_argument("--overlap_method",
                        default=1,
                        help="Overlap method option: qi, default: jaccard")
    parser.add_argument("--infer_overlap_threshold",
                        default='n',
                        help="y or n")

    args = parser.parse_args()
    with open(args.input_file_name, 'r') as f:
        inputs = yaml_load(f, yaml_Loader)

    if args.overlap_method:
        inputs['overlap_method'] = args.overlap_method
    if args.over_t:
        inputs['over_t'] = float(args.over_t)
    if args.sample_dir:
        inputs['sample_dir'] = args.sample_dir
    if args.model_dir:
        inputs['model_dir'] = args.model_dir
    if args.infer_overlap_threshold:
        inputs['infer_overlap_threshold'] = args.infer_overlap_threshold

    # Override the output directory if one was passed or out_comp_nm is still the default "/results/res"
    if args.out_dir_name or inputs['out_comp_nm'] == "/results/res":
        if not os_path.exists(inputs['dir_nm'] + args.out_dir_name):
            os_mkdir(inputs['dir_nm'] + args.out_dir_name)
        inputs['out_comp_nm'] = args.out_dir_name + "/res"

    inputs['train_test_files_dir'] = ''
    if args.train_test_files_dir:
        if not os_path.exists(inputs['dir_nm'] + args.train_test_files_dir):
            os_mkdir(inputs['dir_nm'] + args.train_test_files_dir)
        inputs['train_test_files_dir'] = args.train_test_files_dir

    inputs['graph_files_dir'] = ''
    if args.graph_files_dir:
        if not os_path.exists(inputs['dir_nm'] + args.graph_files_dir):
            os_mkdir(inputs['dir_nm'] + args.graph_files_dir)
        inputs['graph_files_dir'] = args.graph_files_dir

    logging_basicConfig(filename=inputs['dir_nm'] + inputs['out_comp_nm'] +
                        "_logs.yaml",
                        level=logging_INFO)
    # fin_list_graphs = control(myGraph,inputs,n=50)

    if "sample_dir" not in inputs:
        inputs['sample_dir'] = inputs['out_comp_nm']

    myGraphName = inputs['dir_nm'] + inputs['graph_files_dir'] + "/res_myGraph"
    with open(myGraphName, 'rb') as f:
        myGraph = pickle_load(f)

    if 'infer_overlap_threshold' in inputs:
        if inputs['infer_overlap_threshold'] == 'y':
            pp_flag = 0
            if inputs['dir_nm'] == 'yeast':
                pp_flag = 1
            if 'overlap_method' in inputs:
                if inputs['overlap_method'] == 'qi':
                    inputs['over_t'] = get_overlap_threshold_qi(
                        inputs, pp_flag, myGraph)
                else:
                    inputs['over_t'] = get_overlap_threshold(
                        inputs, pp_flag, myGraph)

    with open(inputs['dir_nm'] + inputs['out_comp_nm'] + "_input_pp.yaml",
              'w') as outfile:
        yaml_dump(inputs, outfile, default_flow_style=False)

    out_comp_nm = inputs['dir_nm'] + inputs['out_comp_nm']
    out_comp_nm_sample = inputs['dir_nm'] + inputs['sample_dir']
    out_comp_nm_model = inputs['dir_nm'] + inputs['model_dir']

    modelfname = out_comp_nm_model + "_model"
    scalerfname = out_comp_nm_model + "_scaler"

    with open(scalerfname, 'rb') as f:
        scaler = pickle_load(f)

    pred_comp_list = []
    sdndap = pred_comp_list.append  # local alias to the list's append (micro-optimization)

    if args.sample_folders_list:
        for folder in args.sample_folders_list:
            allfiles = './' + inputs['dir_nm'] + folder + '/res_pred_comp_list*'
            for fname in glob(allfiles, recursive=True):
                with open(fname, 'rb') as f:
                    pred_comp_tmp = pickle_load(f)
                for snode in pred_comp_tmp:
                    sdndap(snode)
    elif args.sample_folders_prefix_final:
        allsubd = './' + inputs[
            'dir_nm'] + args.sample_folders_prefix_final + '*/res_pred.out'
        for fname in glob(allsubd, recursive=True):
            with open(fname) as f:
                complexes_score = [
                    line.rstrip().split() for line in f.readlines()
                ]
                pred_comp_tmp = [(frozenset(comp[:-1]), float(comp[-1]))
                                 for comp in complexes_score]

            for snode in pred_comp_tmp:
                sdndap(snode)
    elif args.sample_folders_prefix:
        allsubd = './' + inputs[
            'dir_nm'] + args.sample_folders_prefix + '*/res_pred_comp_list*'
        for fname in glob(allsubd, recursive=True):
            with open(fname, 'rb') as f:
                pred_comp_tmp = pickle_load(f)
            for snode in pred_comp_tmp:
                sdndap(snode)
    else:
        for i in range(int(args.n_pts)):
            with open(out_comp_nm_sample + "_pred_comp_list" + str(i),
                      'rb') as f:
                pred_comp_tmp = pickle_load(f)
            for snode in pred_comp_tmp:
                sdndap(snode)
    len_pred_comp_list = 'No. of complexes before pp = ' + str(
        len(pred_comp_list))
    logging_info(len_pred_comp_list)
    test_complex_path = inputs['dir_nm'] + inputs[
        'train_test_files_dir'] + "/res_test_known_complex_nodes_list"
    test_prot_list = get_prot_list(test_complex_path)

    train_complex_path = inputs['dir_nm'] + inputs[
        'train_test_files_dir'] + "/res_train_known_complex_nodes_list"
    train_prot_list = get_prot_list(train_complex_path)

    protlistfname = inputs['dir_nm'] + inputs[
        'train_test_files_dir'] + "/res_protlist"
    with open(protlistfname, 'rb') as f:
        prot_list = pickle_load(f)

    start_time_pp = time_time()
    fin_list_graphs = postprocess(pred_comp_list, modelfname, scaler, inputs,
                                  myGraph, prot_list, train_prot_list,
                                  test_prot_list)
    pp_time = time_time() - start_time_pp

    tot_time = time_time() - start_time  # start_time is defined at module level, outside this snippet

    # Write to yaml file instead
    with open(out_comp_nm + '_runtime_performance.out', "a") as fid:
        print("--- Runtime performance ---", file=fid)
        print("Post processing complex time (s) = ",
              pp_time,
              "[",
              round(100 * float(pp_time) / tot_time, 2),
              "%]",
              file=fid)
        print("Total time (s) = ", tot_time, file=fid)
Example #8
def main():
    parser = argparse_ArgumentParser("Input parameters")
    parser.add_argument("--input_file_name",
                        default="input_toy.yaml",
                        help="Input parameters file name")
    parser.add_argument("--out_dir_name",
                        default="/results",
                        help="Output directory name, by default - /results")
    parser.add_argument(
        "--seed_mode",
        help="Seed mode - specify 'cliques' for the cliques algo")
    parser.add_argument("--train_test_files_dir",
                        default="",
                        help="Train test file path")

    parser.add_argument("--search_method", help="Sampling algorithm")
    parser.add_argument("--model_dir", help="Directory containing model")
    parser.add_argument("--python_command",
                        default="python",
                        help="python / python3")
    parser.add_argument(
        "--read_flag",
        default=0,
        type=int,
        help="1 when you want to read from file for evaluation")
    parser.add_argument(
        "--complex_file_name",
        default=
        "humap/results_2stageclustering_comparison/humap_2stage_clustering_res.txt",
        help="complexes file name")
    parser.add_argument("--evaluate_additional_metrics",
                        default=1,
                        type=int,
                        help="1 to also evaluate additional metrics")

    args = parser.parse_args()
    rf = args.read_flag
    rf_nm = args.complex_file_name

    with open(args.input_file_name, 'r') as f:
        inputs = yaml_load(f, yaml_Loader)

    if args.model_dir:
        inputs['model_dir'] = args.model_dir

    # Override the output directory if one was passed or out_comp_nm is still the default "/results/res"
    if args.out_dir_name or inputs['out_comp_nm'] == "/results/res":
        if not os_path.exists(inputs['dir_nm'] + args.out_dir_name):
            os_mkdir(inputs['dir_nm'] + args.out_dir_name)
        inputs['out_comp_nm'] = args.out_dir_name + "/res"

    inputs['train_test_files_dir'] = ''
    if args.train_test_files_dir:
        if not os_path.exists(inputs['dir_nm'] + args.train_test_files_dir):
            os_mkdir(inputs['dir_nm'] + args.train_test_files_dir)
        inputs['train_test_files_dir'] = args.train_test_files_dir

    with open(inputs['dir_nm'] + inputs['out_comp_nm'] + "_input_eval.yaml",
              'w') as outfile:
        yaml_dump(inputs, outfile, default_flow_style=False)

    logging_basicConfig(filename=inputs['dir_nm'] + inputs['out_comp_nm'] +
                        "_logs.yaml",
                        level=logging_INFO)
    # fin_list_graphs = control(myGraph,inputs,n=50)

    # eval_complex(rf,rf_nm,inputs,known_complex_nodes_list,prot_list,myGraph,fin_list_graphs)

    known_complex_nodes_listfname = inputs[
        'dir_nm'] + "/res_known_complex_nodes_list"

    protlistfname = inputs['dir_nm'] + inputs[
        'train_test_files_dir'] + "/res_protlist"
    with open(protlistfname, 'rb') as f:
        prot_list = pickle_load(f)
    with open(known_complex_nodes_listfname, 'rb') as f:
        known_complex_nodes_list = pickle_load(f)

    out_comp_nm = inputs['dir_nm'] + inputs['out_comp_nm']

    if not rf:
        with open(inputs['dir_nm'] + inputs["out_comp_nm"] + '_pred.out',
                  "r") as fn:
            lines = fn.readlines()

        fin_list_graphs = []
        for line in lines:
            words = line.split()
            fin_list_graphs.append((set(words[:-1]), words[-1]))
        N_pred_complexes = len(fin_list_graphs)

        with open(out_comp_nm + '_metrics.out', "a") as fid:
            print("No. of predicted complexes = ", N_pred_complexes, file=fid)
        if N_pred_complexes == 0:
            print("0 predicted complexes")
            return

    pythonCommand = args.python_command
    if rf == 1:
        if rf_nm in (0, '0'):  # argparse yields strings, so accept '0' as well
            rf_nm = out_comp_nm + '_pred.out'

        with open(rf_nm) as fn:
            fin_list_graphs = [(set(line.rstrip('\n').split()), 1)
                               for line in fn]  # Space separated text only

        # TRAINING SET EVALUATION
        with open(out_comp_nm + '_metrics.out', "a") as fid:
            print("\n --- On training set ---", file=fid)

        train_complex_path = inputs['dir_nm'] + inputs[
            'train_test_files_dir'] + "/res_train_known_complex_nodes_list"
        try:
            with open(train_complex_path + "_prot_list", 'rb') as f:
                train_prot_list = pickle_load(f)
        except Exception:
            train_prot_list = get_prot_list(train_complex_path)

        with open(train_complex_path, 'rb') as f:
            train_complex_list = pickle_load(f)

        eval_complex(rf, rf_nm, inputs, train_complex_list, train_prot_list,
                     fin_list_graphs, "_train")

        if args.evaluate_additional_metrics:
            try:
                run_metrics(train_complex_list, fin_list_graphs, out_comp_nm,
                            "_train")
            except Exception:
                print("Error in running additional metrics for train")

        # TEST SET EVALUATION
        with open(out_comp_nm + '_metrics.out', "a") as fid:
            print("\n --- On test set ---", file=fid)

        test_complex_path = inputs['dir_nm'] + inputs[
            'train_test_files_dir'] + "/res_test_known_complex_nodes_list"
        try:
            with open(test_complex_path + "_prot_list", 'rb') as f:
                test_prot_list = pickle_load(f)
        except Exception:
            test_prot_list = get_prot_list(test_complex_path)

        with open(test_complex_path, 'rb') as f:
            test_complex_list = pickle_load(f)

        eval_complex(rf, rf_nm, inputs, test_complex_list, test_prot_list,
                     fin_list_graphs, "_test")

        if args.evaluate_additional_metrics:
            try:
                run_metrics(test_complex_list, fin_list_graphs, out_comp_nm,
                            "_test")
            except Exception:
                print("Error in running additional metrics for test")
        # ON BOTH SETS
        with open(out_comp_nm + '_metrics.out', "a") as fid:
            print("\n --- On both sets ---", file=fid)
        eval_complex(rf, rf_nm, inputs, known_complex_nodes_list, prot_list,
                     fin_list_graphs, "_both")

        if args.evaluate_additional_metrics:
            try:
                run_metrics(known_complex_nodes_list, fin_list_graphs,
                            out_comp_nm, "")
            except Exception:
                print("Error in running additional metrics for both")
    else:
        start_time_eval = time_time()

        # TRAINING SET EVALUATION
        with open(out_comp_nm + '_metrics.out', "a") as fid:
            print("\n --- On training set ---", file=fid)

        train_complex_path = inputs['dir_nm'] + inputs[
            'train_test_files_dir'] + "/res_train_known_complex_nodes_list"
        try:
            with open(train_complex_path + "_prot_list", 'rb') as f:
                train_prot_list = pickle_load(f)
        except Exception:
            train_prot_list = get_prot_list(train_complex_path)

        with open(train_complex_path, 'rb') as f:
            train_complex_list = pickle_load(f)

        eval_complex(rf, rf_nm, inputs, train_complex_list, train_prot_list,
                     fin_list_graphs, "_train")

        if args.evaluate_additional_metrics:
            try:
                run_metrics(train_complex_list, fin_list_graphs, out_comp_nm,
                            "_train")
            except Exception:
                print("Error in running additional metrics for train")

        # TEST SET EVALUATION
        with open(out_comp_nm + '_metrics.out', "a") as fid:
            print("\n --- On test set ---", file=fid)

        test_complex_path = inputs['dir_nm'] + inputs[
            'train_test_files_dir'] + "/res_test_known_complex_nodes_list"
        try:
            with open(test_complex_path + "_prot_list", 'rb') as f:
                test_prot_list = pickle_load(f)
        except Exception:
            test_prot_list = get_prot_list(test_complex_path)

        with open(test_complex_path, 'rb') as f:
            test_complex_list = pickle_load(f)

        eval_complex(rf, rf_nm, inputs, test_complex_list, test_prot_list,
                     fin_list_graphs, "_test")

        if args.evaluate_additional_metrics:
            try:
                run_metrics(test_complex_list, fin_list_graphs, out_comp_nm,
                            "_test")
            except Exception:
                print("Error in running additional metrics for test")

        # BOTH SETS EVALUATION
        with open(out_comp_nm + '_metrics.out', "a") as fid:
            print("\n --- On both sets ---", file=fid)
        eval_complex(rf, rf_nm, inputs, known_complex_nodes_list, prot_list,
                     fin_list_graphs, "_both")

        if args.evaluate_additional_metrics:
            try:
                run_metrics(known_complex_nodes_list, fin_list_graphs,
                            out_comp_nm, "")
            except Exception:
                print("Error in running additional metrics for both")

        if not os_path.exists(out_comp_nm + "_edge_pr_files"):
            os_mkdir(out_comp_nm + "_edge_pr_files")
        for pref in ["", "_train", "_test"]:
            # model dir not outcompnm
            out_comp_nm_model = inputs['dir_nm'] + inputs['model_dir']
            results_wprob = out_comp_nm + '_tot_pred_edges_unique_max_comp_prob_inKnown' + pref + '.out'
            input_pos = out_comp_nm_model + pref + '_tot_known_edges_unique.out'
            outfname = out_comp_nm + "_edge_pr_files/res" + '_edge_pr_curve' + pref
            os_system(pythonCommand +
                      " functions_py3/prcurve_overlay_noneg.py --labels All " +
                      "--results_wprob " + results_wprob +
                      " --input_positives " + input_pos + " --output_file " +
                      outfname)

        fname = out_comp_nm + "_pred.out"
        figname = out_comp_nm + "_sizewise_scores_pred.png"
        sizewise_scores(fname, figname)
        eval_time = time_time() - start_time_eval

        tot_time = time_time() - start_time  # start_time is defined at module level, outside this snippet

        # Write to yaml file instead
        with open(out_comp_nm + '_runtime_performance.out', "a") as fid:
            print("--- Runtime performance ---", file=fid)
            print("Evaluate complex time (s) = ",
                  eval_time,
                  "[",
                  round(100 * float(eval_time) / tot_time, 2),
                  "%]",
                  file=fid)
            print("Total time (s) = ", tot_time, file=fid)
Example #9
# ========================================================================
class HashedBytesIo(BytesIO):

    # ---- Constructor ---------------------------------------------------

    def __init__(self, initial_bytes=None, hashimpl=sha256):
        super().__init__(initial_bytes)
        self._hash_obj = hashimpl()

    # ---- Public properties ---------------------------------------------

    @property
    def hash_obj(self):
        return self._hash_obj

    # ---- Public hooks --------------------------------------------------

    def write(self, b):
        super().write(b)
        self._hash_obj.update(b)


# ---- Initialization ----------------------------------------------------

# Suppress dimgx logging messages during testing
logging_basicConfig(format=_LOG_FMT)
getLogger('dimgx').setLevel(_LOG_LVL)

# Make sure tarfile.TarFile.next is patched for testing
patch_broken_tarfile_29760()
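As a quick illustration of HashedBytesIo: every write() both stores the bytes and feeds them to the hash, so the digest of the accumulated payload is available without re-reading the buffer. A minimal usage sketch:

from hashlib import sha256

buf = HashedBytesIo()  # hashimpl defaults to sha256
buf.write(b'hello ')
buf.write(b'world')
assert buf.getvalue() == b'hello world'
# The incremental digest matches hashing the whole payload at once.
assert buf.hash_obj.hexdigest() == sha256(b'hello world').hexdigest()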
Example #10
def main():
    """
    Command line program to configure and deploy CN-Series
    """
    try:
        fmt_str = '%(asctime)s %(levelname)s: %(message)s'

        logging_basicConfig(
            format=fmt_str, level=logging_level_INFO,
            stream=sys.stdout)

        logging_getLogger("paramiko").setLevel(logging_level_WARN)

        #
        # The default signal handler for SIGINT / CTRL-C raises a KeyboardInterrupt
        # exception which prints a possibly very long traceback. To avoid it we
        # install a custom signal handler
        #
        signal_set_handler(signal_SIGINT, custom_signal_handler)

        args = get_args()
        # Panorama info:
        pan_hostname = args.pn_ip
        pan_username = args.pn_user
        pan_password = args.pn_pass
        pan_template_stack = args.pn_tmpl
        pan_dg = args.pn_dg
        pan_cg = args.pn_cg
        cn_auth_code = args.auth_code
        cn_tokens = args.tokens
        cn_bundle = args.cn_bnd

        # Kubernetes info:
        k8s_ip = args.k8s_ip
        ctl_ip = args.ctl_ip
        k8s_username = args.k8s_user
        k8s_password = args.k8s_pass
        k8s_port = args.k8s_port
        if args.k8s_mode == 'lite' or args.k8s_mode == 'full':
            k8s_mode = args.k8s_mode
        else:
            error("Sorry I don't support this mode. Only lite or full are supported.")
            sys.exit()
        k8s_name = args.k8s_name

        pv_type = args.pv_type

        cn_pin_id = args.cn_pin_id
        cn_pin_value = args.cn_pin_value

        if not cn_pin_id or not cn_pin_value:
            if k8s_mode == 'full':
                error("You selected full mode. CN Series registration pin id and value is required.")
                sys.exit()

        if args.k8s_type == 'native':
            k8s_type = 'Native-Kubernetes'
        elif args.k8s_type == 'openshift':
            k8s_type = 'OpenShift'
        else:
            error("Sorry I don't support this type yet. only native or openshift is supported.")
            sys.exit()

        if k8s_type == 'Native-Kubernetes':
            yaml_base_url = BASE_URL + "native/"
            if not pv_type:
                error("PV Type is required for Native deployment.")
                sys.exit()
        elif k8s_type == 'OpenShift':
            yaml_base_url = BASE_URL + "openshift/"

        ctl = 'kubectl' if k8s_type == 'Native-Kubernetes' else 'oc'

        cn_images_dict = {
            'cn_mgmt_image': args.cn_mgmt_image,
            'cn_ngfw_image': args.cn_ngfw_image,
            'cn_init_image': args.cn_init_image,
            'cn_cni_image': args.cn_cni_image
        }

        panorama_dict = {
            'pan_hostname': pan_hostname,
            'pan_username': pan_username,
            'pan_password': pan_password,
            'device_group': pan_dg,
            'template_stack': pan_template_stack,
            'cn_auth_code': cn_auth_code,
            'cn_tokens': cn_tokens,
            'c_group': pan_cg,
            'cn_bundle': cn_bundle,
            'auth_key': ''
        }

        k8s_dict = {
            'k8s_cluster_name': k8s_name,
            'ctl_ip': ctl_ip,
            'k8s_cluster_ip': k8s_ip,
            'k8s_port': k8s_port,
            'k8s_type': k8s_type,
            'svc_acocunt_b64': '',
            'yaml_base_url' : yaml_base_url,
            'k8s_mode': k8s_mode,
            'pv_type': pv_type,
            'cn_pin_id': cn_pin_id,
            'cn_pin_value': cn_pin_value,
            'ctl': ctl
        }

        try:
            info("Establishing API connection with Panorama.")
            pn_api_conn = create_panos_device(pan_hostname, pan_username, pan_password)
            info("Establishing SSH connection with Panorama.")
            pn_ssh_conn = ssh_login(pan_hostname, pan_username, pan_password)
            info("Establishing SSH connection with k8s master.")
            k8s_ssh_conn = ssh_login(ctl_ip, k8s_username, k8s_password)
            if not (pn_api_conn and pn_ssh_conn and k8s_ssh_conn):
                info("Without connection to both the kubernetes cluster and Panorama I can not work.")
                sys.exit()
        except Exception:
            error("Something went wrong while establishing connections, exiting...")
            sys.exit()

        panorama_version = check_panos_version(pn_api_conn)

        if int(panorama_version.split('.')[0]) >= 10:
            info("Panorama PAN-OS version is {}".format(panorama_version))
        else:
            error("Panorama PAN-OS version is {}. I need Panorama that running PAN-OS 10.0 or later, Exiting....".format(panorama_version))
            sys.exit()

        commit_required = False

        info("checking for Kubernetes plugin.")
        k8s_plugin_version = check_k8s_plugin(pn_api_conn)
        if k8s_plugin_version:
            info("Kubernetes plugin version is {}".format(k8s_plugin_version.split('-')[1]))
        else:
            error("Kubernetes plugin is not installed, I will install the latest plugin")
            info("Updating plugin list")
            update_plugin_list(pn_api_conn)

            for p in range(3):
                latest_k8s = find_latest_k8s_plugin(pn_api_conn)
                if latest_k8s['name']:
                    if latest_k8s['downloaded'] == 'no':
                        download_plugin(pn_ssh_conn, latest_k8s['name'])
                    else:
                        info("Kubernetes plugin {} Downloaded.".format(latest_k8s['name']))
                        break
                    if not wait_for_panos(pn_api_conn, time.time() + 60 * 5):
                        error("Download job taking more than expected, exiting...")
                        sys.exit()
                    # Give the download some time
                    time.sleep(10)
                else:
                    error("No Kubernetes plugin found. Check Panorama connection or install the plugin manually.")
                    sys.exit()
                info("Checking if plugin is downloaded properly.")

            for p in range(3):
                if latest_k8s['downloaded'] != 'no':
                    info("Installing kubernetes plugin.")
                    install_k8s_plugin(pn_ssh_conn, latest_k8s['name'])
                    commit_required = True
                    if not wait_for_panos(pn_api_conn, time.time() + 60 * 5):
                        error("Download job taking more than expected, exiting...")
                        sys.exit()
                    info("Installation complete. I will check again if the plugin is installed properly.")
                    # Give the install some time
                    time.sleep(10)
                    k8s_plugin_version = check_k8s_plugin(pn_api_conn)
                    if k8s_plugin_version:
                        info("Kubernetes plugin version is {}".format(k8s_plugin_version.split('-')[1]))
                        break
                    else:
                        info("Plugin installation was not successful I will try again.")
                else:
                    info("Plugin is not installed, exiting.")
                    sys.exit()

        if commit_required:
            info("Committing configuration")
            panorama_commit(pn_api_conn)

        if check_device_group(pn_api_conn, pan_dg):
            info("Device group {} Found.".format(pan_dg))
        else:
            error("Device Group {} was not found in Panorama. "
                  "I will add the device group to Panorama config.".format(pan_dg))
            configure_device_group(pn_ssh_conn, pan_dg)

        if check_template_stack(pn_api_conn, pan_template_stack):
            info("Template Stack {} Found.".format(pan_template_stack))
        else:
            error("Template Stack {} was not found in Panorama. "
                  "I will add a Template and Template Stack to Panorama config.".format(pan_template_stack))
            configure_template(pn_ssh_conn, pan_template_stack + "-tmp")
            configure_template_stack(pn_ssh_conn, pan_template_stack)

        if check_collector_group(pn_api_conn, pan_cg):
            info("Collector group {} found.".format(pan_cg))
        else:
            info("Collector group {} not found. "
                 "I will add a dummy one you can add log collector to it later.".format(pan_cg))
            configure_collector_group(pn_ssh_conn, pan_cg)

        info("Applying CN-Series License.")

        activate_license(pn_ssh_conn, panorama_dict['cn_auth_code'], panorama_dict['cn_tokens'])

        info("Creating k8s service account for Panorama Plugin.")
        k8s_dict['svc_acocunt_b64'] = create_k8s_plugin_svc_account(k8s_ssh_conn, yaml_base_url, ctl)
        info("Configure Panorama Plugin")
        configure_panorama(pn_ssh_conn, panorama_dict, k8s_dict)

        info("Creating bootstrapping authentication key")
        panorama_dict['auth_key'] = create_auth_key(pn_ssh_conn)

        # Committing changes to Panorama.
        panorama_commit(pn_api_conn)

        info("Deploying CN-Series")
        if create_cn_series(k8s_ssh_conn, yaml_base_url, cn_images_dict, panorama_dict, k8s_dict):
            info("CN-Series is deployed successfully.")
            info("Depending on the image download speed, it will take some time to pull images and finish deployment.")
            info("")
            info("=======================================================================================================")
            info("")
            info("I AM DONE! You can now monitor the CN-Series deployment using the following command from the k8s master")
            info("")
            info("kubectl get pods -n kube-system")
            info("")
            info("")
            info("The script will keep checking for the pods status every 5 min. Installation will take about 15 min.")
            info("You can exit now and monitor manually if you prefer")
            info("=======================================================================================================")
            info("")
            info("")

        info("I will sleep for 5 min then I will start checking the pods status.")
        time.sleep(300)

        success = False
        for c_pod in range(6):
            if check_pods_status(k8s_ssh_conn, ctl):
                info("All pods are running. I will now check if all containers are ready.")
                for c_c in range(6):
                    if check_container_status(k8s_ssh_conn, ctl):
                       info("All containers are ready.")
                       success = True
                       break
                    else:
                       info("Not all containers are ready. I will check again after 5 min.")
                       time.sleep(300)
                break
            else:
                info("Not all pods are running. I will check again after 5 min.")
                time.sleep(300)

        if success:
            info("*******************************************************************************************************")
            info("")
            info("")
            info("Installation done successfully.")
            info("")
            info("")
            info("*******************************************************************************************************")
        else:
            error("Seem like there is some errors during deployment. Please log in the k8s cluster and check the status.")

        pn_ssh_conn.close()
        k8s_ssh_conn.close()
    except Exception:
        error("An error occurred that I couldn't handle!")
Example #11
def main(argv):
    logging_basicConfig(level=INFO)
    logger = getLogger(__file__)
    logger.setLevel(INFO)

    environment = OpenAIGym(
        gym_id='MoveToBeacon-bbueno5000-v0',
        monitor=FLAGS.monitor,
        monitor_safe=FLAGS.monitor_safe,
        monitor_video=FLAGS.monitor_video,
        visualize=FLAGS.visualize)

    # if FLAGS.agent_config is not None:
    #     with open(FLAGS.agent_config, 'r') as fp:
    #         agent_config = json.load(fp=fp)
    # else:
    #     raise TensorForceError(
    #         "No agent configuration provided.")

    # if FLAGS.network is not None:
    #     with open(FLAGS.network, 'r') as fp:
    #         network = json.load(fp=fp)
    # else:
    #     network = None
    #     logger.info(
    #         "No network configuration provided.")

    network_spec = [
        dict(type='flatten'),
        dict(type='dense', size=32),
        dict(type='dense', size=32)
        ]

    agent = PPOAgent(
        states=environment.states,
        actions=environment.actions,
        network=network_spec
        )

    if FLAGS.load:
        load_dir = path.dirname(FLAGS.load)
        if not path.isdir(load_dir):
            raise OSError(
                "Could not load agent from {}: No such directory.".format(load_dir))
        agent.restore_model(FLAGS.load)

    if FLAGS.save:
        save_dir = path.dirname(FLAGS.save)
        if not path.isdir(save_dir):
            try:
                mkdir(save_dir, 0o755)
            except OSError:
                raise OSError(
                    "Cannot save agent to dir {} ()".format(save_dir))

    if FLAGS.debug:
        logger.info("-" * 16)
        logger.info("Configuration:")
        logger.info(agent)

    runner = Runner(
        agent=agent,
        environment=environment,
        repeat_actions=1)

    if FLAGS.debug:
        report_episodes = 1
    else:
        report_episodes = 100

    logger.info(
        "Starting {agent} for Environment {env}".format(
            agent=agent, env=environment))

    def episode_finished(r, id_):
        if r.episode % report_episodes == 0:
            steps_per_second = r.timestep / (time() - r.start_time)
            logger.info("Finished episode {:d} after {:d} timesteps. Steps Per Second {:0.2f}".format(
                r.agent.episode, r.episode_timestep, steps_per_second))
            logger.info("Episode reward: {}".format(r.episode_rewards[-1]))
            logger.info("Average of last 500 rewards: {:0.2f}".format(
                sum(r.episode_rewards[-500:]) / min(500, len(r.episode_rewards))))
            logger.info("Average of last 100 rewards: {:0.2f}".format(
                sum(r.episode_rewards[-100:]) / min(100, len(r.episode_rewards))))
        if FLAGS.save and FLAGS.save_episodes is not None and not r.episode % FLAGS.save_episodes:
            logger.info("Saving agent to {}".format(FLAGS.save))
            r.agent.save_model(FLAGS.save)
        return True

    runner.run(
        num_timesteps=FLAGS.timesteps,
        num_episodes=FLAGS.num_episodes,
        max_episode_timesteps=FLAGS.max_episode_timesteps,
        deterministic=FLAGS.deterministic,
        episode_finished=episode_finished,
        testing=FLAGS.test,
        sleep=FLAGS.sleep)

    runner.close()

    logger.info("Learning completed.")
    logger.info("Total episodes: {ep}".format(ep=runner.agent.episode))
Example #12
def main():
    parser = argparse_ArgumentParser("Input parameters")
    parser.add_argument("--input_file_name", default="input_toy.yaml", help="Input parameters file name")
    parser.add_argument("--out_dir_name", default="/results", help="Output directory name")
    parser.add_argument("--train_test_files_dir", default="", help="Train test file path")    
    parser.add_argument("--graph_files_dir", default="", help="Graph files' folder path") 
    parser.add_argument("--seed_mode", help="Seed mode - specify 'cliques' for the cliques algo")
    parser.add_argument("--max_size_thres", help="Max size threshold")    
    parser.add_argument("--n_pts", default=1, help="number of partitions (computers)")
    args = parser.parse_args()

    with open(args.input_file_name, 'r') as f:
        inputs = yaml_load(f, yaml_Loader)

    if args.seed_mode:
        inputs['seed_mode'] = args.seed_mode
    if args.max_size_thres:
        inputs['max_size_thres'] = int(args.max_size_thres)        

    # Override the output directory if one was passed or out_comp_nm is still the default "/results/res"
    if args.out_dir_name or inputs['out_comp_nm'] == "/results/res":
        if not os_path.exists(inputs['dir_nm'] + args.out_dir_name):
            os_mkdir(inputs['dir_nm'] + args.out_dir_name)
        inputs['out_comp_nm'] = args.out_dir_name + "/res"
        
    inputs['train_test_files_dir'] = ''
    if args.train_test_files_dir:
        if not os_path.exists(inputs['dir_nm'] + args.train_test_files_dir):
            os_mkdir(inputs['dir_nm'] + args.train_test_files_dir)
        inputs['train_test_files_dir'] = args.train_test_files_dir    

    inputs['graph_files_dir'] = ''
    if args.graph_files_dir:
        if not os_path.exists(inputs['dir_nm'] + args.graph_files_dir):
            os_mkdir(inputs['dir_nm'] + args.graph_files_dir)
        inputs['graph_files_dir'] = args.graph_files_dir             

    with open(inputs['dir_nm'] + inputs['out_comp_nm'] + "_input_sample_partition.yaml", 'w') as outfile:
        yaml_dump(inputs, outfile, default_flow_style=False)

    logging_basicConfig(filename=inputs['dir_nm'] + inputs['out_comp_nm'] + "_logs.yaml", level=logging_INFO)
        
    neig_dicts_folder = inputs['dir_nm'] + inputs['graph_files_dir'] + "/neig_dicts"

    num_comp = inputs['num_comp']
    max_size_thres = inputs['max_size_thres']
    max_size_trainF = inputs['dir_nm'] + inputs['train_test_files_dir'] + "/res_max_size_train"
    with open(max_size_trainF, 'rb') as f:
        max_size_train = pickle_load(f)

    max_size = max_size_train
    
    max_sizeF_feat = inputs['dir_nm'] + inputs['train_test_files_dir'] + "/res_max_size_search"
    if os_path.exists(max_sizeF_feat):
        with open(max_sizeF_feat, 'rb') as f:
            max_size = pickle_load(f)
    else:            
        with open(inputs['dir_nm'] + inputs['comf_nm']) as f:
            sizes = [len(line.rstrip().split()) for line in f.readlines()]    
        max_size = max(sizes)
        q1 = np_percentile(sizes, 25)
        q3 = np_percentile(sizes, 75)
        max_wo_outliers = math_ceil(q3 + 4.5 * (q3 - q1))  # Maximum after removing outliers
        max_size = min(max_size, max_wo_outliers)
        
        
    if max_size >= max_size_thres:
        max_size = max_size_thres
        
    out_comp_nm = inputs['dir_nm'] + inputs['out_comp_nm']

    with open(out_comp_nm + '_metrics.out', "a") as fid:
        print("Max number of steps for complex growth = ", max_size, file=fid)  # NOT actual max size since you merge later
    
    max_sizeF = inputs['dir_nm'] + inputs['train_test_files_dir'] + "/res_max_size_search_par"
    
    with open(max_sizeF, 'wb') as f:
        pickle_dump(max_size, f)

    seed_mode = inputs['seed_mode']

    if seed_mode == "all_nodes":
        #graph_nodes = list(myGraph.nodes())
        seed_nodes = rand_perm(os_listdir(neig_dicts_folder))
    elif seed_mode == "n_nodes":
        seed_nodes = rand_perm(os_listdir(neig_dicts_folder))[:num_comp]
    elif seed_mode == "all_nodes_known_comp":
        protlistfname = inputs['dir_nm'] + inputs['train_test_files_dir'] + "/res_protlist"
        with open(protlistfname, 'rb') as f:
            prot_list = pickle_load(f)        
        seed_nodes = list(prot_list)
    elif seed_mode == "cliques":
        myGraphName = inputs['dir_nm'] + inputs['graph_files_dir'] + "/res_myGraph"
        with open(myGraphName, 'rb') as f:
            myGraph = pickle_load(f)        
        clique_list = list(nx_find_cliques(myGraph))
        to_rem = []
        # Removing 2 node and big complexes
        for comp in clique_list:
            if len(comp) <= 2 or len(comp) >= max_size:
                to_rem.append(comp)

        for comp in to_rem:
            clique_list.remove(comp)

        seed_nodes = clique_list  # Remove duplicates later.

    # partition
    ptns = int(args.n_pts)

    nc = len(seed_nodes)
    if seed_mode == 'n_nodes':
        seed_nodes_F = out_comp_nm + "_seed_nodes"
        each_ptn = nc // ptns
        for i in range(ptns - 1):
            with open(seed_nodes_F + str(i), 'wb') as f:
                pickle_dump(seed_nodes[i * each_ptn:(i + 1) * each_ptn], f)
        with open(seed_nodes_F + str(ptns - 1), 'wb') as f:
            pickle_dump(seed_nodes[(ptns - 1) * each_ptn:], f)
    else:
        seed_nodes_dir = inputs['dir_nm'] + inputs['graph_files_dir'] + "/" + seed_mode + "_n_pts_" + str(ptns)

        if not os_path.exists(seed_nodes_dir):
            os_mkdir(seed_nodes_dir)
            seed_nodes_F = seed_nodes_dir + "/res_seed_nodes"
            each_ptn = nc // ptns
            for i in range(ptns - 1):
                with open(seed_nodes_F + str(i), 'wb') as f:
                    pickle_dump(seed_nodes[i * each_ptn:(i + 1) * each_ptn], f)

            with open(seed_nodes_F + str(ptns - 1), 'wb') as f:
                pickle_dump(seed_nodes[(ptns - 1) * each_ptn:], f)
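The size cap in Example #12 trims outliers with a Tukey-style fence, q3 + 4.5*(q3 - q1), before applying max_size_thres. A small self-contained check of that arithmetic with invented sizes:

import math

import numpy as np

# Invented complex sizes with one clear outlier.
sizes = [3, 4, 4, 5, 6, 7, 8, 30]
q1 = np.percentile(sizes, 25)                        # 4.0
q3 = np.percentile(sizes, 75)                        # 7.25
max_wo_outliers = math.ceil(q3 + 4.5 * (q3 - q1))    # ceil(21.875) == 22
max_size = min(max(sizes), max_wo_outliers)          # 30 capped to 22
print(max_size)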
Example #13
    def __init__(self, config, log_level=WARN):
        self.logger = getLogger("Data dump")
        self.logger.setLevel(log_level)
        logging_basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        self.database = FeshieDb(config, log_level)
Example #14
# ========================================================================
class HashedBytesIo(BytesIO):

    # ---- Constructor ---------------------------------------------------

    def __init__(self, initial_bytes=None, hashimpl=sha256):
        super().__init__(initial_bytes)
        self._hash_obj = hashimpl()

    # ---- Public properties ---------------------------------------------

    @property
    def hash_obj(self):
        return self._hash_obj

    # ---- Public hooks --------------------------------------------------

    def write(self, b):
        super().write(b)
        self._hash_obj.update(b)

# ---- Initialization ----------------------------------------------------

# Suppress dimgx logging messages during testing
logging_basicConfig(format=_LOG_FMT)
getLogger('dimgx').setLevel(_LOG_LVL)

# Make sure tarfile.TarFile.next is patched for testing
patch_broken_tarfile_29760()
Example #15
# CRITICAL + 1 is above every standard level, so logging is fully silenced
# when no explicit level name is configured.
_LOG_LEVEL = CRITICAL + 1 if not _LOG_LEVEL else logging_getLevelName(_LOG_LEVEL)

#---- Classes ------------------------------------------------------------

#=========================================================================
class HashedBytesIo(BytesIO):

    #---- Constructor ----------------------------------------------------

    def __init__(self, initial_bytes=None, hashimpl=sha256):
        super().__init__(initial_bytes)
        self._hash_obj = hashimpl()

    #---- Public properties ----------------------------------------------

    @property
    def hash_obj(self):
        return self._hash_obj

    #---- Public hooks ---------------------------------------------------

    def write(self, b):
        super().write(b)
        self._hash_obj.update(b)

#---- Initialization -----------------------------------------------------

# Suppress dimgx logging messages during testing
logging_basicConfig(format=_DEFAULT_LOG_FMT)
getLogger('dimgx').setLevel(_LOG_LEVEL)