Example #1
0
def get_nasbench201_api(dataset=None):
    """
    Load the NAS-Bench-201 tabular data plus the full learning-curve data.

    Args:
        dataset: one of 'cifar10', 'cifar100', 'ImageNet16-120'.

    Returns:
        dict with keys 'raw_data' (the original nasbench201 pickle, which
        does not include full LC info) and 'full_lc_data' (the full learning
        curves for the requested dataset).

    Raises:
        ValueError: if `dataset` is not one of the supported names.

    TODO: the raw data is a subset of the full LC datasets, so it may be
    possible to get rid of it.
    """
    with open(os.path.join(get_project_root(), 'data', 'nb201_all.pickle'),
              'rb') as f:
        nb201_data = pickle.load(f)

    # The full-LC files are large, so only the one for `dataset` is loaded.
    lc_files = {
        'cifar10': 'nb201_cifar10_full_training.pickle',
        'cifar100': 'nb201_cifar100_full_training.pickle',
        'ImageNet16-120': 'nb201_ImageNet16_full_training.pickle',
    }
    if dataset not in lc_files:
        # Bug fix: previously an unrecognized dataset left `full_lc_data`
        # unbound and raised an opaque NameError at the return statement.
        raise ValueError('Unknown dataset: {}'.format(dataset))
    with open(os.path.join(get_project_root(), 'data', lc_files[dataset]),
              'rb') as f:
        full_lc_data = pickle.load(f)

    return {'raw_data': nb201_data, 'full_lc_data': full_lc_data}
Example #2
0
def get_nlp_api(dataset=None):
    """
    Load the NAS-Bench-NLP data.

    Returns a dict holding the raw pickle contents ('nlp_data') and the
    list of architecture keys ('nlp_arches'). The `dataset` argument is
    accepted for signature consistency with the other loaders.
    """
    pickle_path = os.path.join(get_project_root(), 'data', 'nb_nlp.pickle')
    with open(pickle_path, 'rb') as handle:
        loaded = pickle.load(handle)
    return {'nlp_data': loaded, 'nlp_arches': list(loaded.keys())}
Example #3
0
def get_nasbench201_api(dataset=None):
    """
    Load the NAS-Bench-201 full-training data for one dataset.

    Args:
        dataset: one of 'cifar10', 'cifar100', 'ImageNet16-120'.

    Returns:
        dict with key 'nb201_data' mapping to the unpickled benchmark data.

    Raises:
        ValueError: if `dataset` is not a supported name.
    """
    filenames = {
        'cifar10': 'nb201_cifar10_full_training.pickle',
        'cifar100': 'nb201_cifar100_full_training.pickle',
        'ImageNet16-120': 'nb201_ImageNet16_full_training.pickle',
    }
    if dataset not in filenames:
        # Bug fix: previously an unrecognized dataset left `data` unbound
        # and raised an opaque NameError at the return statement.
        raise ValueError('Unknown dataset: {}'.format(dataset))
    path = os.path.join(get_project_root(), 'data', filenames[dataset])
    with open(path, 'rb') as f:
        data = pickle.load(f)
    return {'nb201_data': data}
Example #4
0
def get_darts_api(dataset=None):
    """
    Load the nb301 training data (which contains full learning curves) and
    the two nb301 surrogate models.

    Returns:
        dict with 'nb301_data' (raw training data), 'nb301_arches' (its
        keys), and 'nb301_model' ([performance_model, runtime_model]).
    """
    import nasbench301
    data_folder = os.path.join(get_project_root(), 'data')
    with open(os.path.join(data_folder, 'nb301_full_training.pickle'), 'rb') as f:
        nb301_data = pickle.load(f)
    nb301_arches = list(nb301_data.keys())

    # Bug fix: the original passed a single pre-concatenated string
    # (data_folder + 'nb301_models/...') to os.path.join, which bypasses
    # proper path joining; use separate components instead.
    performance_model = nasbench301.load_ensemble(
        os.path.join(data_folder, 'nb301_models', 'xgb_v1.0'))
    runtime_model = nasbench301.load_ensemble(
        os.path.join(data_folder, 'nb301_models', 'lgb_runtime_v1.0'))
    nb301_model = [performance_model, runtime_model]
    return {'nb301_data': nb301_data, 'nb301_arches': nb301_arches,
            'nb301_model': nb301_model}
Example #5
0
    def get_configspace(path_to_configspace_obj=os.path.join(
        get_project_root(), "search_spaces/darts/configspace.json")):
        """
        Returns the ConfigSpace object for the search space.

        Args:
            path_to_configspace_obj: path to ConfigSpace json encoding

        Returns:
            ConfigSpace.ConfigurationSpace: a ConfigSpace object
        """
        with open(path_to_configspace_obj, 'r') as json_file:
            encoded = json_file.read()
        return config_space_json_r_w.read(encoded)
 def __init__(self, config, batch_size=64, method_type='jacov'):
     """
     Set up a zero-cost predictor.

     Args:
         config: experiment config; `config.data` is overwritten to point
             at the project's data directory, and `config.dataset` selects
             the number of classes for non-'jacov' methods.
         batch_size: minibatch size stored for later use.
         method_type: zero-cost method name (default 'jacov').
     """
     # Make cuDNN deterministic so repeated scoring runs are reproducible.
     torch.backends.cudnn.deterministic = True
     torch.backends.cudnn.benchmark = False
     self.batch_size = batch_size
     self.method_type = method_type
     self.device = torch.device(
         "cuda:0" if torch.cuda.is_available() else "cpu")
     # Redirect the data path to the repo-local data directory.
     config.data = "{}/data".format(get_project_root())
     self.config = config
     if method_type == 'jacov':
         # Only a single output class is used for the 'jacov' method.
         self.num_classes = 1
     else:
         num_classes_dic = {
             'cifar10': 10,
             'cifar100': 100,
             'ImageNet16-120': 120
         }
         # NOTE(review): an unsupported dataset raises KeyError here —
         # presumably intended; confirm against callers.
         self.num_classes = num_classes_dic[self.config.dataset]
Example #7
0
    def __init__(self, config, batch_size=64, method_type='jacov'):
        """
        Set up a zero-cost evaluator.

        Available zero-cost method types: 'jacov', 'snip', 'synflow',
        'grad_norm', 'fisher', 'grasp'.
        """
        # Deterministic cuDNN so repeated measurements agree.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

        self.batch_size = batch_size
        self.dataload = 'random'
        self.num_imgs_or_batches = 1
        self.method_type = method_type
        use_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda:0" if use_cuda else "cpu")
        # Redirect the data path to the repo-local data directory.
        config.data = "{}/data".format(get_project_root())
        self.config = config
        classes_by_dataset = {
            'cifar10': 10,
            'cifar100': 100,
            'ImageNet16-120': 120
        }
        # dict.get yields None for datasets outside the known benchmarks,
        # matching the original membership-check-plus-None behavior.
        self.num_classes = classes_by_dataset.get(self.config.dataset)
import unittest
import logging
import torch
import os

from naslib.search_spaces import HierarchicalSearchSpace
from naslib.optimizers import DARTSOptimizer, GDASOptimizer
from naslib.utils import utils, setup_logger

# Write test logs to <repo parent>/tmp/tests.log and silence everything
# below FATAL so test output stays clean.
logger = setup_logger(
    os.path.join(utils.get_project_root().parent, "tmp", "tests.log"))
logger.handlers[0].setLevel(logging.FATAL)

# Minimal search configuration shared by the optimizer tests.
config = utils.AttrDict()
config.dataset = 'cifar10'
config.search = utils.AttrDict()
config.search.grad_clip = None
config.search.learning_rate = 0.01
config.search.momentum = 0.1
config.search.weight_decay = 0.1
config.search.arch_learning_rate = 0.01
config.search.arch_weight_decay = 0.1
config.search.tau_max = 10
config.search.tau_min = 1
config.search.epochs = 2

# Tiny constant batches (2 CIFAR-sized images each) for quick step tests.
data_train = (torch.ones([2, 3, 32, 32]), torch.ones([2]).long())
data_val = (torch.ones([2, 3, 32, 32]), torch.ones([2]).long())

# NOTE(review): only data_train is moved to GPU in this visible chunk;
# data_val may be handled below the chunk boundary — confirm.
if torch.cuda.is_available():
    data_train = tuple(x.cuda() for x in data_train)
Example #9
0
def get_nasbench101_api(dataset=None):
    """
    Load the NAS-Bench-101 tabular benchmark.

    Returns a dict with the `nasbench.api` module ('api') and the loaded
    benchmark object ('nb101_data'). `dataset` is unused.
    """
    from nasbench import api
    tfrecord_path = os.path.join(get_project_root(), 'data',
                                 'nasbench_only108.tfrecord')
    return {'api': api, 'nb101_data': api.NASBench(tfrecord_path)}
Example #10
0
import os
import pickle
import torch.nn as nn

from naslib.search_spaces.core import primitives as ops
from naslib.search_spaces.core.graph import Graph, EdgeData
from naslib.search_spaces.core.primitives import AbstractPrimitive
from naslib.search_spaces.core.query_metrics import Metric

from naslib.utils.utils import get_project_root

from .primitives import ResNetBasicblock

# Load the NAS-Bench-201 tabular data once, at module import time.
with open(os.path.join(get_project_root(), 'data', 'nb201_all.pickle'), 'rb') as f:
    nb201_data = pickle.load(f)


class NasBench201SearchSpace(Graph):
    """
    Implementation of the nasbench 201 search space.
    It also has an interface to the tabular benchmark of nasbench 201.
    """

    # Graph scopes the optimizer operates on (the three cell stages).
    OPTIMIZER_SCOPE = [
        "stage_1",
        "stage_2",
        "stage_3",
    ]

    # This search space supports tabular-benchmark queries.
    QUERYABLE = True
Example #11
0
import numpy as np

from naslib.search_spaces.core import primitives as ops
from naslib.search_spaces.core.graph import Graph, EdgeData
from naslib.search_spaces.core.primitives import AbstractPrimitive
from naslib.search_spaces.core.query_metrics import Metric

from naslib.utils.utils import get_project_root

from .primitives import ReLUConvBN


# Load the NAS-Bench-101 data at import time -- requires TF 1.x.
from nasbench import api

# NOTE(review): `os` and `get_project_root` are used here but not imported
# in the visible snippet — confirm they are imported elsewhere in the file.
nb101_datadir = os.path.join(get_project_root(), 'data', 'nasbench_only108.tfrecord')
nasbench = api.NASBench(nb101_datadir)

# data = nasbench.query(cell)

class NasBench101SearchSpace(Graph):
    """
    Contains the interface to the tabular benchmark of nasbench 101.
    """

    OPTIMIZER_SCOPE = [
        "stack_1",
        "stack_2",
        "stack_3",
    ]