def meta_neural_net_experiment_pipeline2(samples):
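    # Sample `samples` random, non-isomorphic architectures from the search
    # space and compute the distribution of their path encodings.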
    search_space = build_datasets('nasbench_data_distribution')
    test_data_total = search_space.generate_random_dataset(
        num=samples, allow_isomorphisms=False)
    encoding_2 = path_encoding_distribution(test_data_total)
    print(f'The number of samples is {len(test_data_total)}')
    return encoding_2
Example #2
def main(args):
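    # Set up a per-GPU logger, load all architecture data for the chosen
    # search space, then run predictor_unsupervised for every predictor in
    # args.predictor_list.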
    file_name = 'log_%s_%d' % ('gpus', args.gpu)
    logger = setup_logger(file_name,
                          args.save_dir,
                          args.gpu,
                          log_level='DEBUG',
                          filename='%s.txt' % file_name)
    logger.info(args)
    if args.search_space == 'darts':
        with open(args.darts_file_path, 'rb') as f:
            if args.darts_training_nums:
                all_data = pickle.load(f)[:args.darts_training_nums]
            else:
                all_data = pickle.load(f)
    else:
        nasbench_datas = data.build_datasets(args)
        all_data = data.dataset_all(args, nasbench_datas)
    for predictor in args.predictor_list:
        logger.info(
            f'==================  predictor type: {predictor}  ======================'
        )
        predictor_unsupervised(args,
                               predictor,
                               all_data,
                               train_epochs=args.epochs,
                               logger=logger)
Example #3
def ansyc_multiple_process_train(args, save_dir):
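    # Producer/consumer training across multiple GPUs: one producer process
    # feeds a shared queue and one consumer process per GPU trains from it.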
    q = Queue(10)
    data_lists = [build_datasets(args) for _ in range(args.gpus)]

    p_producer = Process(target=data_producers, args=(args, q))
    p_consumers = [
        Process(target=data_consumers,
                args=(args, q, save_dir, i, data_lists[i]))
        for i in range(args.gpus)
    ]

    p_producer.start()
    for p in p_consumers:
        p.start()

    p_producer.join()
    for p in p_consumers:
        p.join()
Example #4
def ansyc_multiple_process_train(args, save_dir):
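    # Same producer/consumer scheme as above, but the per-GPU datasets are
    # built from the search space given by meta_neuralnet_params.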
    q = Queue(10)
    metann_params = meta_neuralnet_params(args.search_space)
    data_lists = [
        build_datasets(metann_params['search_space']) for _ in range(args.gpus)
    ]

    p_producer = Process(target=data_producers, args=(args, q))
    p_consumers = [
        Process(target=data_consumers,
                args=(args, q, save_dir, i, data_lists[i]))
        for i in range(args.gpus)
    ]

    p_producer.start()
    for p in p_consumers:
        p.start()

    p_producer.join()
    for p in p_consumers:
        p.join()
Example #5
                        choices=['best', 'last'],
                        help='name of output files')
    parser.add_argument('--filter_none',
                        type=str,
                        default='y',
                        choices=['y', 'n'],
                        help='whether to filter out None entries (y/n)')
    parser.add_argument(
        '--save_dir',
        type=str,
        default=
        '/home/albert_wei/fdisk_a/train_output_2021/npenas/search_space_analysis/',
        help='output directory')
    args = parser.parse_args()

    ss = build_datasets(args.search_space, args.dataset,
                        args.nasbench_nlp_type, args.filter_none)

    if args.search_space == 'nasbench_case2':
        save_dir = os.path.join(args.save_dir, 'nasbench_101.pkl')
        ops = ops_nasbench_101
    elif args.search_space == 'nasbench_201':
        save_dir = os.path.join(args.save_dir,
                                f'{args.search_space}_{args.dataset}.pkl')
        ops = ops_nasbench_201
    elif args.search_space == 'nasbench_nlp':
        save_dir = os.path.join(
            args.save_dir, f'{args.search_space}_{args.nasbench_nlp_type}.pkl')
        ops = ops_nasbench_nlp
    elif args.search_space == 'nasbench_asr':
        save_dir = os.path.join(args.save_dir,
                                f'{args.search_space}_{args.filter_none}.pkl')
Example #6

def generate_nasbench_101_bench_keys_vals(nasbench_data):
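    # Dump the NAS-Bench-101 keys and architectures to a pickle file for reuse.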
    save_path = os.path.join(nas_bench_101_base_path, 'nasbench_archs.pkl')
    keys = nasbench_data.total_keys
    archs = nasbench_data.total_archs
    with open(save_path, 'wb') as fw:
        pickle.dump(keys, fw)
        pickle.dump(archs, fw)


def generate_nasbench_101_all_datas(nasbench_data, args):
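    # Build the full dataset with data.dataset_all and cache it as a pickle file.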
    all_data = data.dataset_all(args, nasbench_data)
    save_path = os.path.join(nas_bench_101_base_path, 'all_data_new.pkl')
    with open(save_path, 'wb') as fb:
        pickle.dump(all_data, fb)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Args for NASBench_101 init!')
    #  unsupervised_ged: SS_RL
    parser.add_argument('--search_space', type=str, default='nasbench_101',
                        choices=['nasbench_101'],
                        help='The search space.')
    args = parser.parse_args()
    args.seq_len = 120
    nasbench_datasets = data.build_datasets(args)
    generate_nasbench_101_bench_keys_vals(nasbench_datasets)
    generate_nasbench_101_all_datas(nasbench_datasets, args)

Example #7
def meta_neural_net_experiment_full():
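    # Compute the path-encoding distribution over every architecture in the
    # search space instead of a random sample.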
    search_space = build_datasets('nasbench_data_distribution')
    total_encodings = search_space.get_all_path_encooding()
    distributions = path_encoding_dict_distribution(total_encodings)
    print(f'The number of samples is {len(total_encodings)}')
    return distributions
Example #8
def main(args):
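    # Predictor comparison: load the full dataset for the chosen search space,
    # then, over several trials and search budgets, record Spearman/Kendall-tau
    # correlations and run times for each predictor.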
    file_name = 'log_%s_%d' % ('gpus', args.gpu)
    logger = setup_logger(file_name,
                          args.save_dir,
                          args.gpu,
                          log_level='DEBUG',
                          filename='%s.txt' % file_name)
    logger.info(args)
    if args.search_space == 'nasbench_101':
        with open(nas_bench_101_all_data, 'rb') as fpkl:
            all_data = pickle.load(fpkl)
    elif args.search_space == 'nasbench_201':
        nasbench_datas = data.build_datasets(args)
        all_data = data.dataset_all(args, nasbench_datas)
    elif args.search_space == 'darts':
        with open(darts_converted_with_label, 'rb') as fb:
            all_data = pickle.load(fb)
    else:
        raise NotImplementedError(
            f'The search space {args.search_space} is not supported yet!')

    for k in range(args.trails):
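        # Each trial draws a fresh random seed and accumulates correlation and
        # timing results per predictor and budget.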
        seed = random_id_int(4)
        set_random_seed(seed)
        s_results_dict = defaultdict(list)
        k_results_dict = defaultdict(list)
        duration_dict = defaultdict(list)
        logger.info(
            f'======================  Trial {k} begins, setting seed to {seed}  ==========================='
        )
        for budget in args.search_budget:
            train_data, test_data = data.dataset_split_idx_predictive_comparison(
                all_data, budget)
            print(
                f'budget: {budget}, train data size: {len(train_data)}, test data size: {len(test_data)}'
            )
            if args.compare_supervised == 'T':
                logger.info(
                    f'====  predictor type: SUPERVISED, load pretrained model: False, '
                    f'search budget is {budget}. Training epoch is {args.epochs} ===='
                )
                spearman_corr, kendalltau_corr, duration = predictor_comparision(
                    args,
                    'SS_RL',
                    train_data,
                    test_data,
                    flag=False,
                    train_epochs=args.epochs,
                    logger=logger)
                if math.isnan(spearman_corr):
                    spearman_corr = 0
                if math.isnan(kendalltau_corr):
                    kendalltau_corr = 0
                s_results_dict[f'supervised#{budget}#{args.epochs}'].append(
                    spearman_corr)
                k_results_dict[f'supervised#{budget}#{args.epochs}'].append(
                    kendalltau_corr)
                duration_dict[f'supervised#{budget}#{args.epochs}'].append(
                    duration)
            for predictor_type, model_dir in zip(args.predictor_list, args.load_dir):
                logger.info(
                    f'====  predictor type: {predictor_type}, load pretrained model: True. '
                    f'Search budget is {budget}. Training epoch is {args.epochs}. '
                    f'The model save dir is {model_dir.split("/")[-1][:-3]}  ====')
                spearman_corr, kendalltau_corr, duration = predictor_comparision(
                    args,
                    predictor_type,
                    train_data,
                    test_data,
                    flag=True,
                    load_dir=model_dir,
                    train_epochs=args.epochs,
                    logger=logger)
                if math.isnan(spearman_corr):
                    spearman_corr = 0
                if math.isnan(kendalltau_corr):
                    kendalltau_corr = 0
                s_results_dict[predictor_type + '#' + str(budget) + '#' +
                               str(args.epochs)].append(spearman_corr)
                k_results_dict[predictor_type + '#' + str(budget) + '#' +
                               str(args.epochs)].append(kendalltau_corr)
                duration_dict[predictor_type + '#' + str(budget) + '#' +
                              str(args.epochs)].append(duration)
        file_id = random_id(6)
        save_path = os.path.join(
            args.save_dir,
            f'{file_id}_{args.predictor_list[0]}_{args.search_space.split("_")[-1]}_{args.gpu}_{k}.pkl'
        )
        with open(save_path, 'wb') as fp:
            pickle.dump(s_results_dict, fp)
            pickle.dump(k_results_dict, fp)
            pickle.dump(duration_dict, fp)
Example #9
                        default=0.005,
                        help='Loss used to train architecture.')
    parser.add_argument(
        '--save_path',
        type=str,
        default=
        '/home/albert_wei/Disk_A/train_output_npenas/prediction_compare/prediction_compare.pkl',
        help='Path of the pickle file where the comparison results are saved.')

    args = parser.parse_args()

    algos_dict = dict()
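    # One result list per algorithm, filled across the trials below.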
    total_algos = [
        'bananas_true', 'bananas_false', 'neural_predictor_uncertainty',
        'neural_predictor'
    ]
    for a in total_algos:
        algos_dict[a] = []
    # ns = [50, 100, 150]
    ns = [20]
    set_random_seed(args.seed)
    search_space = build_datasets(args.search_space)
    start = time.time()
    for i in range(args.trials):
        meta_neural_net_experiment(i, args.lr, algos_dict, ns, search_space)
        print(algos_dict)
    duration = time.time() - start
    print(duration)
    with open(args.save_path, 'wb') as f:
        pickle.dump(algos_dict, f)