def analysis():
    # Load NASBench
    nasbench = NasbenchWrapper('nasbench_analysis/nasbench_data/108_e/nasbench_full.tfrecord')

    test_error = []
    valid_error = []

    search_space_1 = SearchSpace1()

    # Exercise the sampler: query 10,000 randomly sampled architectures
    # (the query results are intentionally discarded here).
    for _ in range(10000):
        adjacency_matrix, node_list = search_space_1.sample(with_loose_ends=False)
        # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
        adjacency_list = adjacency_matrix.astype(int).tolist()
        node_list = [INPUT, *node_list, OUTPUT]
        model_spec = api.ModelSpec(matrix=adjacency_list, ops=node_list)
        nasbench.query(model_spec)

    for adjacency_matrix, ops, model_spec in search_space_1.generate_search_space_without_loose_ends():
        # Query NASBench
        data = nasbench.query(model_spec)
        for item in data:
            test_error.append(1 - item['test_accuracy'])
            valid_error.append(1 - item['validation_accuracy'])

    print('Number of architectures', len(test_error) // len(data))

    plt.figure()
    plt.title(
        'Distribution of test error in search space (no. architectures {})'.format(
            int(len(test_error) / len(data))))
    plt.hist(test_error, bins=800, density=True)
    ax = plt.gca()
    ax.set_xscale('log')
    ax.set_yscale('log')
    plt.xlabel('Test error')
    plt.grid(True, which="both", ls="-", alpha=0.5)
    plt.tight_layout()
    plt.xlim(right=0.3)  # a left limit of 0 is invalid on a log-scaled axis
    plt.savefig('nasbench_analysis/search_spaces/export/search_space_1/test_error_distribution.pdf', dpi=600)
    plt.show()

    plt.figure()
    plt.title('Distribution of validation error in search space (no. architectures {})'.format(
        int(len(valid_error) / len(data))))
    plt.hist(valid_error, bins=800, density=True)
    ax = plt.gca()
    ax.set_xscale('log')
    ax.set_yscale('log')
    plt.xlabel('Validation error')
    plt.grid(True, which="both", ls="-", alpha=0.5)
    plt.tight_layout()
    plt.xlim(right=0.3)  # a left limit of 0 is invalid on a log-scaled axis
    plt.savefig('nasbench_analysis/search_spaces/export/search_space_1/valid_error_distribution.pdf', dpi=600)
    plt.show()

    print('test_error', min(test_error), 'valid_error', min(valid_error))
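
# Note: NasbenchWrapper.query() is assumed to return one result dict per
# training repeat (NAS-Bench-101 stores three runs per architecture at 108
# epochs), which is why dividing the error-list length by len(data) above
# recovers the number of distinct architectures.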
Example #2
def eval_directory(path):
    """Evaluates all one-shot architecture methods in the directory."""
    # Read in config
    nasbench = NasbenchWrapper(
        dataset_file='/results/nasbench_only108.tfrecord')
    with open(os.path.join(path, 'config.json')) as fp:
        config = json.load(fp)
    # Accumulate all one-shot models
    one_shot_architectures = glob.glob(
        os.path.join(path, 'one_shot_architecture_*.obj'))
    # Sort them by date
    one_shot_architectures.sort(key=natural_keys)
    # Eval all of them
    test_errors = []
    valid_errors = []
    for model in one_shot_architectures:
        test, valid, _, _ = eval_one_shot_model(config=config, model=model)
        test_errors.append(test)
        valid_errors.append(valid)

    with open(os.path.join(path, 'one_shot_validation_errors.obj'),
              'wb') as fp:
        pickle.dump(valid_errors, fp)

    with open(os.path.join(path, 'one_shot_test_errors.obj'), 'wb') as fp:
        pickle.dump(test_errors, fp)
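
# Hypothetical usage, assuming an experiment directory containing a
# config.json plus one_shot_architecture_<epoch>.obj checkpoints:
#
#     eval_directory('experiments/darts/search_space_1/run_1')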
Example #3
def main():
    # Select the search space to search in
    if args.search_space == '1':
        search_space = SearchSpace1()
    elif args.search_space == '2':
        search_space = SearchSpace2()
    elif args.search_space == '3':
        search_space = SearchSpace3()
    else:
        raise ValueError('Unknown search space')

    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    model = Network(args.init_channels,
                    CIFAR_CLASSES,
                    args.layers,
                    criterion,
                    output_weights=args.output_weights,
                    steps=search_space.num_intermediate_nodes,
                    search_space=search_space)
    model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    train_transform, valid_transform = utils._data_transforms_cifar10(args)
    train_data = dset.CIFAR10(root=args.data,
                              train=True,
                              download=True,
                              transform=train_transform)

    num_train = len(train_data)
    indices = list(range(num_train))
    split = int(np.floor(args.train_portion * num_train))

    train_queue = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True)

    valid_queue = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(
            indices[split:num_train]),
        pin_memory=True)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs), eta_min=args.learning_rate_min)

    architect = Architect(model, args)

    nasbench = None

    for epoch in range(args.epochs):
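        # Note: stepping the scheduler at the top of the epoch and reading the
        # LR via get_lr() follows the original DARTS loop; PyTorch >= 1.1
        # expects scheduler.step() after the optimizer updates instead.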
        scheduler.step()
        lr = scheduler.get_lr()[0]
        logging.info('epoch %d lr %e', epoch, lr)

        # Save the one shot model architecture weights for later analysis
        arch_filename = os.path.join(
            args.save, 'one_shot_architecture_{}.obj'.format(epoch))
        with open(arch_filename, 'wb') as filehandler:
            numpy_tensor_list = []
            for tensor in model.arch_parameters():
                numpy_tensor_list.append(tensor.detach().cpu().numpy())
            pickle.dump(numpy_tensor_list, filehandler)

        # Save the entire one-shot-model
        filepath = os.path.join(args.save,
                                'one_shot_model_{}.obj'.format(epoch))
        torch.save(model.state_dict(), filepath)

        logging.info('architecture')
        logging.info(numpy_tensor_list)

        # training
        train_acc, train_obj = train(train_queue, valid_queue, model,
                                     architect, criterion, optimizer, lr,
                                     epoch)
        logging.info('train_acc %f', train_acc)

        # validation
        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc %f', valid_acc)

        utils.save(model, os.path.join(args.save, 'weights.pt'))

        logging.info('STARTING EVALUATION')
        if nasbench is None:
            nasbench = NasbenchWrapper(
                dataset_file='/nasbench_data/nasbench_only108.tfrecord')
        test, valid, runtime, params = naseval.eval_one_shot_model(
            config=args.__dict__,
            model=arch_filename,
            nasbench_results=nasbench)
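        # Pick one of the three NAS-Bench-101 evaluation repeats at random.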
        index = np.random.choice(list(range(3)))
        logging.info(
            'TEST ERROR: %.3f | VALID ERROR: %.3f | RUNTIME: %f | PARAMS: %d' %
            (test[index], valid[index], runtime[index], params[index]))

    if args.s3_bucket is not None:
        for root, dirs, files in os.walk(args.save):
            for f in files:
                if 'one_shot_model' not in f:
                    path = os.path.join(root, f)
                    upload_to_s3(path, args.s3_bucket, path)
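
# For reference, a sketch of the checkpoint layout written in the loop above
# and consumed later by naseval.eval_one_shot_model (names are illustrative):
#
#     model_list = pickle.load(open(arch_filename, 'rb'))
#     alphas_mixed_op = model_list[0]   # op weights per intermediate node
#     alphas_output = model_list[1]     # input weights of the output node
#     alphas_inputs = model_list[2:]    # input weights per intermediate node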
Example #4
                    help='Multiplicative factor'
                    ' across budgets.')
parser.add_argument('--space', type=int, default=1, help='NASBench space')
parser.add_argument('--seed', type=int, default=1, help='Seed')
args = parser.parse_args()

min_budget = args.min_budget
max_budget = args.max_budget
eta = args.eta

args.working_directory = os.path.join(
    args.working_directory,
    "search_space_{}/run{}-seed{}".format(args.space, args.run_id, args.seed))

nasbench = NasbenchWrapper(
    dataset_file=
    'src/nasbench_analysis/nasbench_data/108_e/nasbench_only108.tfrecord')

if args.array_id == 1:
    os.makedirs(args.working_directory, exist_ok=True)

    NS = NameServer(run_id=args.run_id,
                    nic_name='eth0',
                    working_directory=args.working_directory)
    ns_host, ns_port = NS.start()

    # BOHB is usually so cheap that we can
    # afford to run a worker on the master node, too.
    worker = worker(min_budget=min_budget,
                    max_budget=max_budget,
                    eta=eta,
Example #5
def eval_one_shot_model(config, model):
    nasbench = NasbenchWrapper(
        dataset_file=
        '/home/darts_weight_sharing_analysis/cnn/bohb/src/nasbench_analysis/nasbench_data/108_e/nasbench_only108.tfrecord'
    )
    model_list = pickle.load(open(model, 'rb'))

    alphas_mixed_op = model_list[0]
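    # Discretize: softmax the op weights, then argmax picks one primitive per node.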
    chosen_node_ops = softmax(alphas_mixed_op, axis=-1).argmax(-1)

    node_list = [PRIMITIVES[i] for i in chosen_node_ops]
    alphas_output = model_list[1]
    alphas_inputs = model_list[2:]

    if config['search_space'] == '1':
        search_space = SearchSpace1()
        num_inputs = list(search_space.num_parents_per_node.values())[3:-1]
        parents_node_3, parents_node_4 = \
            [get_top_k(softmax(alpha, axis=1), num_input) for num_input, alpha in zip(num_inputs, alphas_inputs)]
        output_parents = get_top_k(softmax(alphas_output), num_inputs[-1])
        parents = {
            '0': [],
            '1': [0],
            '2': [0, 1],
            '3': parents_node_3,
            '4': parents_node_4,
            '5': output_parents
        }
        node_list = [INPUT, *node_list, CONV1X1, OUTPUT]

    elif config['search_space'] == '2':
        search_space = SearchSpace2()
        num_inputs = list(search_space.num_parents_per_node.values())[2:]
        parents_node_2, parents_node_3, parents_node_4 = \
            [get_top_k(softmax(alpha, axis=1), num_input) for num_input, alpha in zip(num_inputs[:-1], alphas_inputs)]
        output_parents = get_top_k(softmax(alphas_output), num_inputs[-1])
        parents = {
            '0': [],
            '1': [0],
            '2': parents_node_2,
            '3': parents_node_3,
            '4': parents_node_4,
            '5': output_parents
        }
        node_list = [INPUT, *node_list, CONV1X1, OUTPUT]

    elif config['search_space'] == '3':
        search_space = SearchSpace3()
        num_inputs = list(search_space.num_parents_per_node.values())[2:]
        parents_node_2, parents_node_3, parents_node_4, parents_node_5 = \
            [get_top_k(softmax(alpha, axis=1), num_input) for num_input, alpha in zip(num_inputs[:-1], alphas_inputs)]
        output_parents = get_top_k(softmax(alphas_output), num_inputs[-1])
        parents = {
            '0': [],
            '1': [0],
            '2': parents_node_2,
            '3': parents_node_3,
            '4': parents_node_4,
            '5': parents_node_5,
            '6': output_parents
        }
        node_list = [INPUT, *node_list, OUTPUT]

    else:
        raise ValueError('Unknown search space')

    adjacency_matrix = search_space.create_nasbench_adjacency_matrix(parents)
    # Convert the adjacency matrix into the list format NASBench expects
    adjacency_list = adjacency_matrix.astype(int).tolist()
    model_spec = api.ModelSpec(matrix=adjacency_list, ops=node_list)
    # Query nasbench
    data = nasbench.query(model_spec)
    valid_error, test_error, runtime, params = [], [], [], []
    for item in data:
        test_error.append(1 - item['test_accuracy'])
        valid_error.append(1 - item['validation_accuracy'])
        runtime.append(item['training_time'])
        params.append(item['trainable_parameters'])
    return test_error, valid_error, runtime, params
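

# `get_top_k` is imported from nasbench_analysis.utils. A minimal sketch of
# the behaviour assumed above -- the indices of the k largest entries of a
# (possibly (1, n)-shaped) weight array; the real helper may differ:
def get_top_k_sketch(weights, k):
    import numpy as np
    # Flatten so both softmax matrices and 1-D vectors are handled uniformly.
    return sorted(np.argsort(np.asarray(weights).flatten())[-k:].tolist())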
Example #6
    # Eval all models on nasbench
    test_errors = []
    valid_errors = []
    for model in random_ws_archs:
        test, valid = eval_random_ws_model(config=config, model=model)
        test_errors.append(test)
        valid_errors.append(valid)

    with open(os.path.join(path, 'one_shot_validation_errors.obj'),
              'wb') as fp:
        pickle.dump(valid_errors, fp)

    with open(os.path.join(path, 'one_shot_test_errors.obj'), 'wb') as fp:
        pickle.dump(test_errors, fp)


def main():
    for directory in get_directory_list("experiments/enas/"):
        try:
            eval_directory(directory)
        except Exception as e:
            print('error', e, directory)


if __name__ == '__main__':
    nasbench = NasbenchWrapper(
        dataset_file=
        '/home/ANONYMOUS/projects/darts_weight_sharing_analysis/nasbench_analysis/nasbench_data/108_e/nasbench_full.tfrecord'
    )
    main()
Example #7
def analysis():
    search_space_1 = SearchSpace(num_parents_per_node={
        '0': 0,
        '1': 1,
        '2': 2,
        '3': 2,
        '4': 2,
        '5': 2
    },
                                 search_space_number=1,
                                 num_intermediate_nodes=4)

    # Load NASBench
    nasbench = NasbenchWrapper(
        '/home/siemsj/projects/darts_weight_sharing_analysis/nasbench_full.tfrecord'
    )

    test_error = []
    valid_error = []

    search_space_creator = search_space_1.create_search_space(
        with_loose_ends=False, upscale=False)
    for adjacency_matrix, ops, model_spec in search_space_creator:
        # Query NASBench
        data = nasbench.query(model_spec)
        for item in data:
            test_error.append(1 - item['test_accuracy'])
            valid_error.append(1 - item['validation_accuracy'])

    print('Number of architectures', len(test_error) // len(data))

    plt.figure()
    plt.title(
        'Distribution of test error in search space (no. architectures {})'.
        format(int(len(test_error) / len(data))))
    plt.hist(test_error, bins=800, density=True)
    ax = plt.gca()
    ax.set_xscale('log')
    ax.set_yscale('log')
    plt.xlabel('Test error')
    plt.grid(True, which="both", ls="-", alpha=0.5)
    plt.tight_layout()
    plt.xlim(right=0.3)  # a left limit of 0 is invalid on a log-scaled axis
    plt.savefig(
        'nasbench_analysis/search_spaces/export/search_space_1/test_error_distribution.pdf',
        dpi=600)
    plt.show()

    plt.figure()
    plt.title(
        'Distribution of validation error in search space (no. architectures {})'
        .format(int(len(valid_error) / len(data))))
    plt.hist(valid_error, bins=800, density=True)
    ax = plt.gca()
    ax.set_xscale('log')
    ax.set_yscale('log')
    plt.xlabel('Validation error')
    plt.grid(True, which="both", ls="-", alpha=0.5)
    plt.tight_layout()
    plt.xlim(right=0.3)  # a left limit of 0 is invalid on a log-scaled axis
    plt.savefig(
        'nasbench_analysis/search_spaces/export/search_space_1/valid_error_distribution.pdf',
        dpi=600)
    plt.show()

    print('test_error', min(test_error), 'valid_error', min(valid_error))
Example #8
    # Eval all of them
    test_errors = []
    valid_errors = []
    for model in one_shot_architectures:
        test, valid, _, _ = eval_one_shot_model(config=config, model=model)
        test_errors.append(test)
        valid_errors.append(valid)

    with open(os.path.join(path, 'one_shot_validation_errors.obj'),
              'wb') as fp:
        pickle.dump(valid_errors, fp)

    with open(os.path.join(path, 'one_shot_test_errors.obj'), 'wb') as fp:
        pickle.dump(test_errors, fp)


def main():
    directories = get_directory_list("experiments/inductive_bias/")
    directories.sort(key=natural_keys)
    for directory in directories:
        try:
            eval_directory(directory)
        except Exception as e:
            print('error', e, directory)


if __name__ == '__main__':
    nasbench = NasbenchWrapper(
        dataset_file='/results/nasbench_only108.tfrecord')
    main()
Example #9
def eval_directory(path, nasbench):
    """Evaluates all one-shot architecture methods in the directory."""
    # Read in config (head reconstructed from the identical pattern in
    # Example #2; the opening of this excerpt is truncated in the source)
    with open(os.path.join(path, 'config.json')) as fp:
        config = json.load(fp)
    # Accumulate all one-shot models
    one_shot_architectures = glob.glob(
        os.path.join(path, 'one_shot_architecture_*.obj'))
    # Sort them by date
    one_shot_architectures.sort(key=natural_keys)
    # Eval all of them
    test_errors = []
    valid_errors = []
    for model in one_shot_architectures:
        test, valid, _, _ = eval_one_shot_model(config=config,
                                                model=model,
                                                nasbench=nasbench)
        test_errors.append(test)
        valid_errors.append(valid)

    with open(os.path.join(path, 'one_shot_validation_errors.obj'),
              'wb') as fp:
        pickle.dump(valid_errors, fp)

    with open(os.path.join(path, 'one_shot_test_errors.obj'), 'wb') as fp:
        pickle.dump(test_errors, fp)


def main(nasbench):
    directories = get_directory_list("experiments_2/random_ws/")
    directories.sort(key=natural_keys)
    for directory in directories:
        try:
            eval_directory(directory, nasbench)
        except Exception as e:
            print('error', e, directory)


if __name__ == '__main__':
    nasbench = NasbenchWrapper(
        dataset_file='/home/ydingau/data/nasbench_only108.tfrecord')
    main(nasbench=nasbench)
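
# Design note: this variant threads the already-loaded NasbenchWrapper through
# main()/eval_directory() instead of re-reading the tfrecord inside each
# helper, avoiding the repeated dataset-loading cost.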
    """Evaluates all one-shot architecture methods in the directory."""
    # Read in config
    with open(os.path.join(path, 'config.json')) as fp:
        config = json.load(fp)
    # Accumulate all one-shot models
    # random_ws_archs = glob.glob(os.path.join(path, 'full_val_architecture_epoch_*.obj'))
    random_ws_archs = glob.glob(os.path.join(path, file))

    # Sort them by date
    random_ws_archs.sort(key=natural_keys)
    # Eval all models on nasbench
    test_errors = []
    valid_errors = []

    for model in random_ws_archs:
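        # Note: only the last model's result survives this loop; it alone is
        # pickled below.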
        result = eval_random_ws_model(config=config, model=model)

    with open(os.path.join(path, 'local_search_result.obj'), 'wb') as fp:
        pickle.dump(result, fp)


def main():
    eval_directory('experiments/ft0_nov29/', file='local_search_300.obj')


if __name__ == '__main__':
    nasbench = NasbenchWrapper(
        dataset_file=
        '/home/ubuntu/nas_benchmark_datasets/nasbench_full.tfrecord')
    main()
Example #11

        print("arch parameters:")
        print(arch_parameters)
        print("model spec")
        print(model_spec.matrix)
        print(model_spec.ops)
        # print('adjacency_matrix_ss:')
        # print(adjacency_matrix_ss)
        # print('ops_ss:')
        # print(ops_ss)
        print()
    print(model.model)

def main():
    understanding(args.model_path)
    # Load NASBench
    # eval_directory_on_epoch(args.model_path, args.epoch)


parser = argparse.ArgumentParser("correlation_analysis")
parser.add_argument('--data', type=str, default='../data', help='location of the darts corpus')
parser.add_argument('--model_path', default="/Users/liqi17thu/Desktop/darts/search_space_1/search-baseline-20200623-134823-0-1",
                    help='Path to where the models are stored.')
parser.add_argument('--epoch', type=int, help='Epoch', default=108)
args = parser.parse_args()

if __name__ == '__main__':
    nasbench = NasbenchWrapper('/Users/liqi17thu/Documents/GitHub/nasbench/nasbench_full.tfrecord')
    main()
Example #12
    with open(os.path.join(path, 'nb_valid_errors_{}.obj'.format(epoch)),
              'wb') as fp:
        pickle.dump(nb_valid_errors, fp)

    with open(os.path.join(path, 'one_shot_test_errors_{}.obj'.format(epoch)),
              'wb') as fp:
        pickle.dump(one_shot_test_errors, fp)


def main():
    # Load NASBench
    eval_directory_on_epoch(args.model_path, args.epoch)


parser = argparse.ArgumentParser("correlation_analysis")
parser.add_argument('--data',
                    type=str,
                    default='../data',
                    help='location of the darts corpus')
parser.add_argument(
    '--model_path',
    default=
    "experiments/darts/search_space_1/search-baseline-20190821-171946-0-1",
    help='Path to where the models are stored.')
parser.add_argument('--epoch', type=int, help='Epoch')
args = parser.parse_args()

if __name__ == '__main__':
    nasbench = NasbenchWrapper(
        'nasbench_analysis/nasbench_data/108_e/nasbench_full.tfrecord')
    main()
Example #13
import argparse
import os
import pickle

import numpy as np
import torch
import torch.nn.functional as F
from nasbench import api

from nasbench_analysis.search_spaces.search_space_1 import SearchSpace1
from nasbench_analysis.search_spaces.search_space_2 import SearchSpace2
from nasbench_analysis.search_spaces.search_space_3 import SearchSpace3
from nasbench_analysis.utils import get_top_k, INPUT, OUTPUT, CONV1X1, NasbenchWrapper, natural_keys
from optimizers.darts.genotypes import PRIMITIVES


nasbench = NasbenchWrapper(
    dataset_file='/nfs/data/xiangning/data/nasbench_only108.tfrecord')


def softmax(weights, axis=-1):
    return F.softmax(torch.Tensor(weights), axis).data.cpu().numpy()
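

# `natural_keys` (imported from nasbench_analysis.utils above) sorts filenames
# by their numeric suffix. A sketch of a natural-sort key with the same
# intent, assuming the usual re-based recipe (the repo's version may differ):
def natural_keys_sketch(text):
    import re
    # Split on digit runs so 'epoch_10' sorts after 'epoch_2'.
    return [int(c) if c.isdigit() else c for c in re.split(r'(\d+)', text)]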


def get_directory_list(path):
    """Find directory containing config.json files"""
    directory_list = []
    # return nothing if path is a file
    if os.path.isfile(path):
        return []
    # add dir to directory_list if it contains config.json
    if len([f for f in os.listdir(path) if f == 'config.json']) > 0:
        directory_list.append(path)
    # Recurse into subdirectories (assumed completion; the source excerpt
    # is cut off at this point)
    for d in os.listdir(path):
        new_path = os.path.join(path, d)
        if os.path.isdir(new_path):
            directory_list += get_directory_list(new_path)
    return directory_list

    with open(os.path.join(path, 'correlation_{}.obj'.format(epoch)), 'wb') as fp:
        pickle.dump(correlations, fp)
        print(os.path.join(path, 'correlation_{}.obj'.format(epoch)))

    with open(os.path.join(path, 'nb_test_errors_{}.obj'.format(epoch)), 'wb') as fp:
        pickle.dump(nb_test_errors, fp)

    with open(os.path.join(path, 'nb_valid_errors_{}.obj'.format(epoch)), 'wb') as fp:
        pickle.dump(nb_valid_errors, fp)

    with open(os.path.join(path, 'one_shot_test_errors_{}.obj'.format(epoch)), 'wb') as fp:
        pickle.dump(one_shot_test_errors, fp)


def main():
    # Load NASBench
    eval_directory_on_epoch(args.model_path, args.epoch)


parser = argparse.ArgumentParser("correlation_analysis")
parser.add_argument('--data', type=str, default='../data', help='location of the darts corpus')
parser.add_argument('--model_path', default="experiments/darts/search_space_1/search-baseline-20190821-171946-0-1",
                    help='Path to where the models are stored.')
parser.add_argument('--epoch', type=int, help='Epoch')
args = parser.parse_args()

if __name__ == '__main__':
    nasbench = NasbenchWrapper('/home/ANONYMOUS/projects/darts_weight_sharing_analysis/nasbench_full.tfrecord')
    main()
Example #14
    random_ws_archs = glob.glob(
        os.path.join(path, 'full_val_architecture_epoch_*.obj'))
    # Sort them by date
    random_ws_archs.sort(key=natural_keys)
    # Eval all models on nasbench
    test_errors = []
    valid_errors = []
    for model in random_ws_archs:
        test, valid = eval_random_ws_model(config=config, model=model)
        test_errors.append(test)
        valid_errors.append(valid)

    with open(os.path.join(path, 'one_shot_validation_errors.obj'),
              'wb') as fp:
        pickle.dump(valid_errors, fp)

    with open(os.path.join(path, 'one_shot_test_errors.obj'), 'wb') as fp:
        pickle.dump(test_errors, fp)


def main():
    for directory in get_directory_list("experiments/random_ws/"):
        eval_directory(directory)


if __name__ == '__main__':
    nasbench = NasbenchWrapper(
        dataset_file=
        'nasbench_analysis/nasbench_data/108_e/nasbench_full.tfrecord')
    main()