def run_experiments(args, save_dir):
    """Run every configured NAS algorithm for the requested number of trials.

    Args:
        args: parsed CLI namespace; must provide ``trials``,
            ``output_filename``, ``search_space`` and ``algo_params``.
        save_dir: directory where the per-trial pickle files are written.

    Side effects:
        Writes one pickle per trial to
        ``<save_dir>/<output_filename>_<trial>.pkl`` containing
        ``[algorithm_params, metann_params, results, walltimes]``.
    """
    trials = args.trials
    out_file = args.output_filename
    metann_params = meta_neuralnet_params(args.search_space)
    algorithm_params = algo_params(args.algo_params)
    num_algos = len(algorithm_params)
    logging.info(algorithm_params)

    for i in range(trials):
        results = []
        walltimes = []

        for j in range(num_algos):
            # run NAS algorithm and measure its wall-clock time
            print('\n* Running algorithm: {}'.format(algorithm_params[j]))
            starttime = time.time()
            algo_result = run_nas_algorithm(algorithm_params[j], metann_params)
            algo_result = np.round(algo_result, 5)

            # add walltime and results
            walltimes.append(time.time() - starttime)
            results.append(algo_result)

        # print and pickle results
        filename = os.path.join(save_dir, '{}_{}.pkl'.format(out_file, i))
        print('\n* Trial summary: (params, results, walltimes)')
        print(algorithm_params)
        print(metann_params)
        print(results)
        print(walltimes)
        print('\n* Saving to file {}'.format(filename))
        # the `with` statement closes the file; the explicit f.close()
        # in the original was redundant and has been removed
        with open(filename, 'wb') as f:
            pickle.dump([algorithm_params, metann_params, results, walltimes],
                        f)
# Пример #2 (Example #2 — scrape marker, commented out to keep the file parseable)
# 0
def run_experiments(args, save_dir):
    """Run every configured NAS algorithm for the requested number of trials.

    Args:
        args: parsed CLI namespace; must provide ``trials``,
            ``output_filename``, ``save_specs``, ``search_space`` and
            ``algo_params``.
        save_dir: directory where the per-trial pickle files are written.

    Side effects:
        Sets ``os.environ['search_space']`` (read by the deferred imports),
        then writes one pickle per trial to
        ``<save_dir>/<output_filename>_<trial>.pkl`` containing
        ``[algorithm_params, metann_params, results, walltimes, run_data]``.
    """
    # the search-space env var must be set before these modules are imported,
    # hence the deliberate function-level imports
    os.environ['search_space'] = args.search_space

    from nas_algorithms import run_nas_algorithm
    from data import Data

    trials = args.trials
    out_file = args.output_filename
    save_specs = args.save_specs
    metann_params = meta_neuralnet_params(args.search_space)
    algorithm_params = algo_params(args.algo_params)
    num_algos = len(algorithm_params)
    logging.info(algorithm_params)

    # set up search space; pop the keys consumed here so `mp` holds only the
    # meta-neural-net parameters passed on to the algorithm
    mp = copy.deepcopy(metann_params)
    ss = mp.pop('search_space')
    dataset = mp.pop('dataset')
    search_space = Data(ss, dataset=dataset)

    for i in range(trials):
        results = []
        walltimes = []
        run_data = []

        for j in range(num_algos):
            # run NAS algorithm and measure its wall-clock time
            print('\n* Running algorithm: {}'.format(algorithm_params[j]))
            starttime = time.time()
            algo_result, run_datum = run_nas_algorithm(algorithm_params[j],
                                                       search_space, mp)
            algo_result = np.round(algo_result, 5)

            # remove unnecessary dict entries that take up space
            for d in run_datum:
                if not save_specs:
                    # pop with a default so a missing 'spec' key cannot
                    # raise KeyError (sibling keys below are guarded too)
                    d.pop('spec', None)
                for key in ['encoding', 'adjacency', 'path', 'dist_to_min']:
                    if key in d:
                        d.pop(key)

            # add walltime, results, run_data
            walltimes.append(time.time() - starttime)
            results.append(algo_result)
            run_data.append(run_datum)

        # print and pickle results
        filename = os.path.join(save_dir, '{}_{}.pkl'.format(out_file, i))
        print('\n* Trial summary: (params, results, walltimes)')
        print(algorithm_params)
        print(metann_params)
        print(results)
        print(walltimes)
        print('\n* Saving to file {}'.format(filename))
        # the `with` statement closes the file; the explicit f.close()
        # in the original was redundant and has been removed
        with open(filename, 'wb') as f:
            pickle.dump([
                algorithm_params, metann_params, results, walltimes, run_data
            ], f)
# Пример #3 (Example #3 — scrape marker, commented out to keep the file parseable)
# 0
def _result_paths(save_dir, alg, save_type, trials):
    """Build the (final, temporary) JSON result paths for one algorithm.

    The filename encodes algo name, optional encoding type, query budget,
    the save type ('valid'/'test') and the trial count; the temporary file
    is the final name plus a '.tmp' suffix.
    """
    parts = [alg['algo_name']]
    if 'encoding_type' in alg:
        parts.append(alg['encoding_type'])
    parts.append(str(alg['total_queries']))
    base = '{}_{}-{}.json'.format('_'.join(parts), save_type, trials)
    filename = os.path.join(save_dir, base)
    return filename, filename + '.tmp'


def run_experiments(args, save_dir):
    """Run each NAS algorithm for `args.trials` trials, saving JSON results.

    Args:
        args: parsed CLI namespace; must provide ``trials``, ``save_specs``,
            ``save_type`` ('valid' or 'test'), ``search_space`` and
            ``algo_params``.
        save_dir: directory where the per-algorithm JSON files are written.

    Side effects:
        Sets ``os.environ['search_space']`` (read by the deferred imports).
        For every algorithm, writes ``<...>.json`` with one entry per trial
        (keyed by the trial index as a string) and maintains a ``.json.tmp``
        checkpoint so interrupted runs can be resumed.
    """
    # the search-space env var must be set before these modules are imported,
    # hence the deliberate function-level imports
    os.environ['search_space'] = args.search_space

    from nas_algorithms import run_nas_algorithm
    from data import Data

    trials = args.trials
    save_specs = args.save_specs
    metann_params = meta_neuralnet_params(args.search_space)
    algorithm_params = algo_params(args.algo_params)
    num_algos = len(algorithm_params)

    # set up search space; pop the keys consumed here so `mp` holds only the
    # meta-neural-net parameters passed on to the algorithm
    mp = copy.deepcopy(metann_params)
    ss = mp.pop('search_space')
    dataset = mp.pop('dataset')
    search_space = Data(ss, dataset=dataset)

    for i in range(num_algos):
        results = {}
        alg = algorithm_params[i]
        logging.info('[{}/{}] Running algorithm: {}'.format(
            i + 1, num_algos, alg))
        filename, tmpname = _result_paths(save_dir, alg, args.save_type,
                                          trials)

        # load previous results so an interrupted run resumes where it left off
        s_j = 0
        if os.path.exists(tmpname):
            with open(tmpname, 'r') as json_file:
                results = json.load(json_file)
            # keys present in the checkpoint are COMPLETED trials (they are
            # written only after the trial finishes), so resume at max + 1;
            # the original resumed at max, needlessly re-running the last
            # completed trial
            s_j = max(int(k) for k in results.keys()) + 1
            logging.info("Experiment will be resumed from #{}".format(s_j))

        if os.path.exists(filename):
            logging.info(
                "Experiment already saved! No more trials required for {}".
                format(alg))
        else:
            for j in range(s_j, trials):
                # run NAS algorithm
                result = {
                    'error': [],
                    'exec_time': [],
                    'opt_time': [],
                    'train_epoch': [],
                }

                starttime = time.time()
                algo_result, run_datum = run_nas_algorithm(
                    alg, search_space, mp)

                # collect per-query metrics; remove unnecessary dict entries
                # that take up space
                for d in run_datum:
                    if args.save_type == 'valid':
                        result['error'].append(d['val_loss'] / 100.0)
                    elif args.save_type == 'test':
                        result['error'].append(d['test_loss'] / 100.0)

                    result['opt_time'].append(d['opt_time'])
                    result['exec_time'].append(d['training_time'])
                    result['train_epoch'].append(d['epochs'])

                    if not save_specs:
                        # pop with a default so a missing 'spec' key cannot
                        # raise KeyError (sibling keys below are guarded too)
                        d.pop('spec', None)
                    for key in [
                            'encoding', 'adjacency', 'path', 'dist_to_min'
                    ]:
                        if key in d:
                            d.pop(key)

                results[str(j)] = result
                # checkpoint after every completed trial
                with open(tmpname, 'w') as json_file:
                    json_file.write(json.dumps(results))

                walltime = time.time() - starttime
                logging.info("{} - trial #{} takes {:.1f} sec".format(
                    alg, j, walltime))

            # saving final JSON result
            with open(filename, 'w') as json_file:
                json_file.write(json.dumps(results))

        # the checkpoint is superseded by the final file; drop it
        if os.path.exists(tmpname):
            os.remove(tmpname)