def data_consumers(args, q, save_dir, i, search_space):
    # Derive a distinct per-process seed by reversing the integer part of the
    # current unix timestamp and keeping at most nine digits.
    set_random_seed(int(str(time.time()).split('.')[0][::-1][:9]))
    file_name = 'log_%s_%d' % ('gpus', i)
    logger = setup_logger(file_name,
                          save_dir,
                          i,
                          log_level='DEBUG',
                          filename='%s.txt' % file_name)
    while True:
        msg = q.get()
        if msg == 'done':
            logger.info('thread %d end' % i)
            break
        iterations = msg['iterate']
        run_experiments_bananas_paradigm(args, save_dir, i, iterations, logger,
                                         search_space)
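

# Hedged producer-side sketch (not in the original): data_consumers drains a
# queue of {'iterate': ...} messages and stops at a 'done' sentinel, so the
# producer must enqueue one sentinel per consumer process. The names
# feed_consumers, n_gpus, and iteration_batches are illustrative only.
def feed_consumers(args, save_dir, search_space, n_gpus, iteration_batches):
    import multiprocessing as mp
    q = mp.Queue()
    workers = [
        mp.Process(target=data_consumers,
                   args=(args, q, save_dir, i, search_space))
        for i in range(n_gpus)
    ]
    for w in workers:
        w.start()
    for iterations in iteration_batches:
        q.put({'iterate': iterations})  # one batch of iterations per message
    for _ in workers:
        q.put('done')  # one sentinel per consumer so every process exits
    for w in workers:
        w.join()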


def main(args):
    set_random_seed(args.seed)
    encoding_sample_pipeline1, x_idx = meta_neural_net_experiment_pipeline1(
        samples=args.sample_nums)
    encoding_sample_pipeline2, _ = meta_neural_net_experiment_pipeline2(
        samples=args.sample_nums)
    encoding_full = meta_neural_net_experiment_full()
    # with open(args.save_dir, 'wb') as f:
    #     pickle.dump(encoding_sample_pipeline1, f)
    #     pickle.dump(encoding_sample_pipeline2, f)
    #     pickle.dump(encoding_full, f)
    #     pickle.dump(x_idx, f)
    print(encoding_sample_pipeline1)
    print(encoding_sample_pipeline2)
    print(encoding_full)
    plot_combine_graph(encoding_1=encoding_sample_pipeline1,
                       encoding_2=encoding_sample_pipeline2,
                       idx=x_idx,
                       encoding_all=encoding_full)
    plot_combine_graph2_log_scale(encoding_1=encoding_sample_pipeline1,
                                  encoding_2=encoding_sample_pipeline2,
                                  idx=x_idx,
                                  encoding_all=encoding_full)
    print('kl divergence ################')
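    # The KL-divergence computation itself is cut off in the original snippet.
    # A minimal sketch, assuming the encodings can be normalized into discrete
    # distributions (scipy.stats.entropy(p, qk) returns D(p || qk) when both
    # arguments are given):
    #   import numpy as np
    #   from scipy.stats import entropy
    #   p = np.asarray(encoding_sample_pipeline1, dtype=float)
    #   q_full = np.asarray(encoding_full, dtype=float)
    #   print(entropy(p / p.sum(), q_full / q_full.sum()))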
Example #3
def genotype_to_index_form(genotype, ops_list):
    # NOTE: the original snippet omits the signature; the function name and
    # parameters here are assumed from the body, which rewrites a DARTS-style
    # genotype so each (op_name, input_node) pair becomes (input_node, op_index).
    normal_cell = genotype.normal
    reduce_cell = genotype.reduce
    normal_cell_new = [(cell[1], ops_list.index(cell[0]))
                       for cell in normal_cell]
    reduce_cell_new = [(cell[1], ops_list.index(cell[0]))
                       for cell in reduce_cell]
    genotype_new = Genotype(normal=normal_cell_new,
                            reduce=reduce_cell_new,
                            normal_concat=genotype.normal_concat,
                            reduce_concat=genotype.reduce_concat)
    return genotype_new
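
# Hedged usage sketch (illustrative values), assuming the DARTS Genotype
# namedtuple and the DARTS PRIMITIVES op list, neither of which is shown in
# the original snippet:
#
#   from collections import namedtuple
#   Genotype = namedtuple('Genotype',
#                         'normal normal_concat reduce reduce_concat')
#   ops_list = ['none', 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect',
#               'sep_conv_3x3', 'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5']
#   g = Genotype(normal=[('sep_conv_3x3', 0), ('skip_connect', 1)],
#                normal_concat=[2, 3],
#                reduce=[('max_pool_3x3', 0), ('sep_conv_5x5', 1)],
#                reduce_concat=[2, 3])
#   genotype_to_index_form(g, ops_list)
#   # -> normal=[(0, 4), (1, 3)], reduce=[(0, 1), (1, 5)]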


if __name__ == '__main__':
    # Before running this script, adjust predictor_list, search_space, trails,
    # seq_len, gpu, load_dir, and save_dir to your environment.
    parser = argparse.ArgumentParser(
        description='Predictor comparison parameters!')
    parser.add_argument('--nums', type=int, default=10000)
    parser.add_argument('--seed', type=int, default=112)
    parser.add_argument(
        '--save_dir',
        type=str,
        default='/home/aurora/data_disk_new/train_output_2021/darts_save_path/darts.pkl')

    args = parser.parse_args()
    set_random_seed(args.seed)

    total_archs = gen_arch_wo_key_lists(args.nums, OPS, args.save_dir)


def main(args):
    file_name = 'log_%s_%d' % ('gpus', args.gpu)
    logger = setup_logger(file_name,
                          args.save_dir,
                          args.gpu,
                          log_level='DEBUG',
                          filename='%s.txt' % file_name)
    logger.info(args)
    if args.search_space == 'nasbench_101':
        with open(nas_bench_101_all_data, 'rb') as fpkl:
            all_data = pickle.load(fpkl)
    else:
        raise NotImplementedError(
            f'The search space {args.search_space} is not supported yet!')

    for k in range(args.trails):
        seed = random_id_int(4)
        set_random_seed(seed)
        s_results_dict = defaultdict(list)
        k_results_dict = defaultdict(list)
        logger.info(
            f'======================  Trial {k} begins, setting seed to {seed}  ==========================='
        )
        for budget in args.search_budget:
            train_data, test_data = data.dataset_split_idx(all_data, budget)
            logger.info(
                f'budget: {budget}, train data size: {len(train_data)}, test data size: {len(test_data)}'
            )
            for epochs in args.train_iterations:
                if args.compare_supervised == 'T':
                    logger.info(
                        f'====  predictor type: SUPERVISED, load pretrain model False, '
                        f'search budget is {budget}. Training epoch is {epochs} ===='
                    )
                    spearman_corr, kendalltau_corr, duration = predictor_retrain_compare(
                        args,
                        'SS_RL',
                        train_data,
                        test_data,
                        flag=False,
                        train_epochs=epochs,
                        logger=logger)
                    if math.isnan(spearman_corr):
                        spearman_corr = 0
                    if math.isnan(kendalltau_corr):
                        kendalltau_corr = 0
                    s_results_dict[f'supervised#{budget}#{epochs}'].append(
                        spearman_corr)
                    k_results_dict[f'supervised#{budget}#{epochs}'].append(
                        kendalltau_corr)
                # Renamed the loop variable so it no longer shadows the
                # builtin dir().
                for predictor_type, model_dir in zip(args.predictor_list,
                                                     args.load_dir):
                    logger.info(
                        f'====  predictor type: {predictor_type}, load pretrain model True. '
                        f'Search budget is {budget}. Training epoch is {epochs}. '
                        f'The model save dir is {model_dir.split("/")[-1][:-3]}  ===='
                    )
                    spearman_corr, kendalltau_corr, duration = predictor_retrain_compare(
                        args,
                        predictor_type,
                        train_data,
                        test_data,
                        flag=True,
                        load_dir=model_dir,
                        train_epochs=epochs,
                        logger=logger)
                    if math.isnan(spearman_corr):
                        spearman_corr = 0
                    if math.isnan(kendalltau_corr):
                        kendalltau_corr = 0
                    s_results_dict[f'{predictor_type}#{budget}#{epochs}'].append(
                        spearman_corr)
                    k_results_dict[f'{predictor_type}#{budget}#{epochs}'].append(
                        kendalltau_corr)
        file_id = random_id(6)
        save_path = os.path.join(
            args.save_dir,
            f'{file_id}_{args.predictor_list[0]}_{args.search_space.split("_")[-1]}_{args.gpu}_{k}.pkl'
        )
        with open(save_path, 'wb') as fp:
            pickle.dump(s_results_dict, fp)
            pickle.dump(k_results_dict, fp)
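

def load_results(save_path):
    # Hedged helper (not in the original): each results file above stores two
    # dicts via consecutive pickle.dump calls, so they must be read back with
    # two consecutive pickle.load calls in the same order.
    with open(save_path, 'rb') as fp:
        s_results = pickle.load(fp)  # Spearman correlations per setting key
        k_results = pickle.load(fp)  # Kendall-tau correlations per setting key
    return s_results, k_results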