Example 1
def train(train_queue, net, criterion, optimizer, params):
    net.train()
    train_loss = 0
    correct = 0
    total = 0

    for step, (inputs, targets) in enumerate(train_queue):
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        outputs, outputs_aux = net(inputs)
        if config_dict()['problem'] == 'regression':
            targets = targets.float()
        loss = criterion(outputs, targets)

        if params['auxiliary']:
            loss_aux = criterion(outputs_aux, targets)
            loss += params['auxiliary_weight'] * loss_aux

        loss.backward()
        nn.utils.clip_grad_norm_(net.parameters(), params['grad_clip'])
        optimizer.step()

        if config_dict()['problem'] == 'classification':
            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
        else:
            # Regression: accumulate the loss with a negative sign, so a larger returned value means a smaller loss.
            train_loss -= loss.item()
            total += targets.size(0)

    if config_dict()['problem'] == 'classification':
        return 100.*correct/total, train_loss/total
    else:
        return train_loss / total, train_loss / total
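For context, a minimal self-contained sketch of the same training step: a main loss plus a weighted auxiliary-head loss, followed by gradient clipping. The toy model, device handling and constants are placeholders, not part of the original code.

import torch
import torch.nn as nn

class ToyAuxNet(nn.Module):
    """Tiny stand-in for a network that returns (main_logits, auxiliary_logits)."""
    def __init__(self, n_classes=10):
        super().__init__()
        self.body = nn.Linear(32, 64)
        self.head = nn.Linear(64, n_classes)
        self.aux_head = nn.Linear(64, n_classes)

    def forward(self, x):
        h = torch.relu(self.body(x))
        return self.head(h), self.aux_head(h)

device = 'cuda' if torch.cuda.is_available() else 'cpu'
net = ToyAuxNet().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.025, momentum=0.9)

inputs = torch.randn(8, 32, device=device)
targets = torch.randint(0, 10, (8,), device=device)

optimizer.zero_grad()
outputs, outputs_aux = net(inputs)
loss = criterion(outputs, targets) + 0.4 * criterion(outputs_aux, targets)  # auxiliary_weight = 0.4
loss.backward()
nn.utils.clip_grad_norm_(net.parameters(), 5.0)  # grad_clip = 5
optimizer.step()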
Example 2
def infer(valid_queue, net, criterion):
    net.eval()
    test_loss = 0
    correct = 0
    total = 0

    with torch.no_grad():
        for step, (inputs, targets) in enumerate(valid_queue):
            inputs, targets = inputs.to(device), targets.to(device)
            outputs, _ = net(inputs)
            if config_dict()['problem'] == 'regression':
                targets = targets.float()
            loss = criterion(outputs, targets)

            if config_dict()['problem'] == 'classification':
                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()
            else:
                # Regression: accumulate the loss with a negative sign, so a larger returned value means a smaller loss.
                test_loss -= loss.item()
                total += targets.size(0)

            # if step % args.report_freq == 0:
            #     logging.info('valid %03d %e %f', step, test_loss/total, 100.*correct/total)
    if config_dict()['problem'] == 'classification':
        acc = 100. * correct / total
        return acc, test_loss / total
    else:
        return test_loss / total, test_loss / total
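The classification accuracy bookkeeping above boils down to taking the arg-max over the class dimension and comparing element-wise with the integer targets; a small sketch with made-up logits:

import torch

outputs = torch.tensor([[0.1, 2.0, -1.0],
                        [1.5, 0.2, 0.3]])    # logits for 2 samples, 3 classes
targets = torch.tensor([1, 0])

_, predicted = outputs.max(1)                 # index of the largest logit per row
correct = predicted.eq(targets).sum().item()  # 2
accuracy = 100. * correct / targets.size(0)   # 100.0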
Example 3
def evolution_search():
    for exp_type in config_dict()['exp_order']:
        save_dir = f'{os.path.dirname(os.path.abspath(__file__))}/search-{args.save}-{exp_type}-{dataset}-{time.strftime("%Y%m%d-%H%M%S")}'
        utils.create_exp_dir(save_dir)
        fh = logging.FileHandler(os.path.join(save_dir, 'log.txt'))
        fh.setFormatter(logging.Formatter(log_format))
        logging.getLogger().addHandler(fh)

        np.random.seed(args.seed)
        logging.info("args = %s", args)

        # setup NAS search problem
        if exp_type == 'micro':  # NASNet search space
            n_var, lb, ub = set_micro_exp(args)
        elif exp_type == 'macro':  # modified GeneticCNN search space
            n_var, lb, ub = set_macro_exp(args)
        elif exp_type in ('micromacro', 'micro_garbage', 'macro_garbage'):  # combined micro + macro search space
            n_var_mac, lb_mac, ub_mac = set_macro_exp(args)
            n_var_mic, lb_mic, ub_mic = set_micro_exp(args)
            n_var = n_var_mic + n_var_mac
            lb = np.array([*lb_mac, *lb_mic])
            ub = np.array([*ub_mac, *ub_mic])
        else:
            raise NameError('Unknown search space type')

        problem = NAS(n_var=n_var, search_space=exp_type,
                      n_obj=2, n_constr=0, lb=lb, ub=ub,
                      init_channels=args.init_channels, layers=args.layers,
                      epochs=args.epochs, save_dir=save_dir, batch_size=args.batch_size)

        # configure the nsga-net method
        method = engine.nsganet(pop_size=args.pop_size,
                                n_offsprings=args.n_offspring,
                                eliminate_duplicates=True)

        if args.termination == 'ngens':
            termination = ('n_gen', args.n_gens)
        elif args.termination == 'time':
            termination = TimeTermination(time.time(), args.max_time)
        else:
            raise NameError('Unknown termination criterion')

        res = minimize(problem,
                       method,
                       callback=do_every_generations,
                       termination=termination)

        val_accs = res.pop.get('F')[:, 0]

        if exp_type == 'microtomacro' or exp_type == 'micro':
            best_idx = np.where(val_accs == np.min(val_accs))[0][0]
            best_genome = res.pop[best_idx].X
            with open(f'{save_dir}/best_genome.pkl', 'wb') as pkl_file:
                pickle.dump(best_genome, pkl_file)
        if exp_type == 'microtomacro':
            set_config('micro_creator', make_micro_creator(best_genome))

    return (100 - np.min(val_accs)) / 100
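Since the first column of res.pop.get('F') holds the objective to minimize (the validation error), the best individual is simply the row with the smallest value in that column. A numpy-only sketch with made-up objective values:

import numpy as np

# Hypothetical objective matrix: column 0 = validation error, column 1 = a second objective (e.g. FLOPs).
F = np.array([[12.5, 300.0],
              [9.8, 450.0],
              [11.2, 280.0]])

val_accs = F[:, 0]
best_idx = np.where(val_accs == np.min(val_accs))[0][0]  # -> 1
print(best_idx, (100 - np.min(val_accs)) / 100)          # 1 0.902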
Example 4
    def __init__(self, gene, in_channels, out_channels, idx, preact=False):
        """
        Constructor.
        :param gene: list, element of genome describing connections in this phase.
        :param in_channels: int, number of input channels.
        :param out_channels: int, number of output channels.
        :param idx: int, index in the network.
        :param preact: should we use the preactivation scheme?
        """
        super(ResidualPhase, self).__init__()

        self.channel_flag = in_channels != out_channels  # Flag to tell us if we need to increase channel size.
        self.first_conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=1 if idx != 0 else KERNEL_SIZE_3,
            stride=1,
            bias=False)
        self.dependency_graph = ResidualPhase.build_dependency_graph(gene)

        if preact:
            node_constructor = PreactResidualNode

        # elif config_dict()['custom_cell_for_macro']:
        #     node_constructor = cell_1on1

        else:
            node_constructor = config_dict()['micro_creator']

        nodes = []
        for i in range(len(gene)):
            if len(self.dependency_graph[i + 1]) > 0:
                nodes.append(node_constructor(out_channels, out_channels))
            else:
                nodes.append(None)  # Module list will ignore NoneType.

        self.nodes = nn.ModuleList(nodes)

        #
        # At this point, we know which nodes will be receiving input from where.
        # So, we build the 1x1 convolutions that will deal with the depth-wise concatenations.
        #
        conv1x1s = [Identity()] + [
            Identity() for _ in range(max(self.dependency_graph.keys()))
        ]
        for node_idx, dependencies in self.dependency_graph.items():
            if len(dependencies) > 1:
                conv1x1s[node_idx] = \
                    nn.Conv2d(len(dependencies) * out_channels, out_channels, kernel_size=1, bias=False)

        self.processors = nn.ModuleList(conv1x1s)
        self.out = nn.Sequential(nn.BatchNorm2d(out_channels),
                                 nn.ReLU(inplace=True))
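The 1x1 "processor" convolutions exist to squeeze the depth-wise concatenation of several dependency outputs back down to out_channels. A self-contained sketch of that pattern (shapes chosen arbitrarily):

import torch
import torch.nn as nn

out_channels = 16
dep_a = torch.randn(1, out_channels, 8, 8)   # output of one dependency node
dep_b = torch.randn(1, out_channels, 8, 8)   # output of another dependency node

merged = torch.cat([dep_a, dep_b], dim=1)    # (1, 2 * out_channels, 8, 8)
conv1x1 = nn.Conv2d(2 * out_channels, out_channels, kernel_size=1, bias=False)
print(conv1x1(merged).shape)                 # torch.Size([1, 16, 8, 8])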
Example 5
    def __init__(self,
                 root,
                 train=True,
                 transform=None,
                 target_transform=None,
                 download=False):
        self.root = os.path.expanduser(root)
        self.transform = transform
        self.target_transform = target_transform
        self.train = train  # training set or test set

        if download:
            self.download()

        if self.train:
            if config_dict()["dataset"] == 'cifar10':
                self.train_data = []
                self.train_labels = []
                for fentry in self.train_list:
                    f = fentry[0]
                    file = os.path.join(self.root, self.base_folder, f)
                    with open(file, 'rb') as fo:
                        if sys.version_info[0] == 2:
                            entry = pickle.load(fo)
                        else:
                            entry = pickle.load(fo, encoding='latin1')
                    self.train_data.append(entry['data'])
                    if 'labels' in entry:
                        self.train_labels += entry['labels']
                    else:
                        self.train_labels += entry['fine_labels']

                self.train_data = np.concatenate(self.train_data)
            else:
                self.train_data = np.load(
                    f'{FILE_PATH}/../data/{config_dict()["dataset"]}/X_train.npy'
                )
                if self.train_data.ndim == 3:
                    self.train_data = self.train_data[:, :, :, None]
                self.train_labels = np.load(
                    f'{FILE_PATH}/../data/{config_dict()["dataset"]}/y_train.npy'
                )

        else:
            if config_dict()["dataset"] == 'cifar10':
                f = self.test_list[0][0]
                file = os.path.join(self.root, self.base_folder, f)
                with open(file, 'rb') as fo:
                    if sys.version_info[0] == 2:
                        entry = pickle.load(fo)
                    else:
                        entry = pickle.load(fo, encoding='latin1')
                self.test_data = entry['data']
                if 'labels' in entry:
                    self.test_labels = entry['labels']
                else:
                    self.test_labels = entry['fine_labels']
            else:
                self.test_data = np.load(
                    f'{FILE_PATH}/../data/{config_dict()["dataset"]}/X_test.npy'
                )
                if self.test_data.ndim == 3:
                    self.test_data = self.test_data[:, :, :, None]
                self.test_labels = np.load(
                    f'{FILE_PATH}/../data/{config_dict()["dataset"]}/y_test.npy'
                )
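For the non-CIFAR branch the data are plain .npy arrays; a minimal sketch of a numpy-backed dataset in the same spirit (class name, shapes and random arrays are illustrative only, not part of the original code):

import numpy as np
from torch.utils.data import Dataset

class NumpyDataset(Dataset):
    """Wraps X/y arrays the way the class above wraps X_train.npy / y_train.npy."""
    def __init__(self, data, labels, transform=None):
        # Add a trailing channel axis for single-channel data, as done above.
        self.data = data[:, :, :, None] if data.ndim == 3 else data
        self.labels = labels
        self.transform = transform

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        img, target = self.data[idx], self.labels[idx]
        if self.transform is not None:
            img = self.transform(img)
        return img, target

ds = NumpyDataset(np.random.rand(100, 28, 28).astype('float32'), np.random.randint(0, 10, 100))
print(len(ds), ds[0][0].shape)  # 100 (28, 28, 1)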
Example 6
def main(macro_genome, micro_genome, epochs, search_space='micro',
         save='Design_1', expr_root='search', seed=0, gpu=0, init_channels=24,
         layers=11, auxiliary=False, cutout=False, drop_path_prob=0.0, batch_size=128):

    # ---- train logger ----------------- #
    save_pth = os.path.join(expr_root, '{}'.format(save))
    utils.create_exp_dir(save_pth)
    log_format = '%(asctime)s %(message)s'
    logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                        format=log_format, datefmt='%m/%d %I:%M:%S %p')

    # ---- parameter values setting ----- #
    CIFAR_CLASSES = config_dict()['n_classes']
    INPUT_CHANNELS = config_dict()['n_channels']
    learning_rate = 0.025
    momentum = 0.9
    weight_decay = 3e-4
    data_root = '../data'
    cutout_length = 16
    auxiliary_weight = 0.4
    grad_clip = 5
    report_freq = 50
    train_params = {
        'auxiliary': auxiliary,
        'auxiliary_weight': auxiliary_weight,
        'grad_clip': grad_clip,
        'report_freq': report_freq,
    }

    if search_space == 'micro' or search_space == 'micro_garbage':
        genome = micro_genome
        genotype = micro_encoding.decode(genome)
        model = Network(init_channels, CIFAR_CLASSES, config_dict()['n_channels'], layers, auxiliary, genotype)
    elif search_space == 'macro' or search_space == 'macro_garbage':
        genome = macro_genome
        genotype = macro_encoding.decode(genome)
        channels = [(INPUT_CHANNELS, init_channels),
                    (init_channels, 2*init_channels),
                    (2*init_channels, 4*init_channels)]
        model = EvoNetwork(genotype, channels, CIFAR_CLASSES, (config_dict()['INPUT_HEIGHT'], config_dict()['INPUT_WIDTH']), decoder='residual')
    elif search_space == 'micromacro':
        genome = [macro_genome, micro_genome]
        macro_genotype = macro_encoding.decode(macro_genome)
        micro_genotype = micro_encoding.decode(micro_genome)
        genotype = [macro_genotype, micro_genotype]
        set_config('micro_creator', make_micro_creator(micro_genotype, convert=False))
        channels = [(INPUT_CHANNELS, init_channels),
                    (init_channels, 2 * init_channels),
                    (2 * init_channels, 4 * init_channels)]
        model = EvoNetwork(macro_genotype, channels, CIFAR_CLASSES,
                           (config_dict()['INPUT_HEIGHT'], config_dict()['INPUT_WIDTH']), decoder='residual')

    else:
        raise NameError('Unknown search space type')

    # logging.info("Genome = %s", genome)
    logging.info("Architecture = %s", genotype)

    torch.cuda.set_device(gpu)
    cudnn.benchmark = True
    torch.manual_seed(seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(seed)

    n_params = sum(np.prod(v.size()) for v in model.parameters() if v.requires_grad) / 1e6
    model = model.to(device)

    logging.info("param size = %fMB", n_params)

    if config_dict()['problem'] == 'classification':
        criterion = nn.CrossEntropyLoss()
    else:
        criterion = nn.MSELoss()
    criterion = criterion.cuda()


    parameters = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = torch.optim.SGD(
        parameters,
        learning_rate,
        momentum=momentum,
        weight_decay=weight_decay
    )

    CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
    CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]

    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor()
    ])

    if cutout:
        train_transform.transforms.append(utils.Cutout(cutout_length))

    train_transform.transforms.append(transforms.Normalize(CIFAR_MEAN, CIFAR_STD))

    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
    ])

    train_data = my_cifar10.CIFAR10(root=data_root, train=True, download=False, transform=train_transform)
    valid_data = my_cifar10.CIFAR10(root=data_root, train=False, download=False, transform=valid_transform)

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=batch_size,
        # sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True, num_workers=1)

    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=batch_size,
        # sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
        pin_memory=True, num_workers=1)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, int(epochs))

    for epoch in range(epochs):
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        model.droprate = drop_path_prob * epoch / epochs

        train_acc, train_obj = train(train_queue, model, criterion, optimizer, train_params)
        logging.info(f'train_{config_dict()["performance_measure"]} %f', train_acc)

    valid_acc, valid_obj = infer(valid_queue, model, criterion)
    logging.info(f'valid_{config_dict()["performance_measure"]} %f', valid_acc)

    # calculate for flops
    model = add_flops_counting_methods(model)
    model.eval()
    model.start_flops_count()
    random_data = torch.randn(1, INPUT_CHANNELS, config_dict()['INPUT_HEIGHT'], config_dict()['INPUT_WIDTH'])
    model(random_data.to(device))
    n_flops = np.round(model.compute_average_flops_cost() / 1e6, 4)
    logging.info('flops = %f', n_flops)

    # save to file
    # os.remove(os.path.join(save_pth, 'log.txt'))
    with open(os.path.join(save_pth, 'log.txt'), "w") as file:
        file.write("Genome = {}\n".format(genome))
        file.write("Architecture = {}\n".format(genotype))
        file.write("param size = {}MB\n".format(n_params))
        file.write("flops = {}MB\n".format(n_flops))
        file.write("valid_acc = {}\n".format(valid_acc))

    # logging.info("Architecture = %s", genotype))

    return {
        'valid_acc': valid_acc,
        'params': n_params,
        'flops': n_flops,
    }
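A brief sketch of the scheduling logic used in the training loop above: a cosine-annealed learning rate over the full budget and a drop-path probability that ramps linearly with the epoch. The model, optimizer and constants are placeholders:

import torch
import torch.nn as nn

epochs, drop_path_prob = 25, 0.2
model = nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.025, momentum=0.9)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, epochs)

for epoch in range(epochs):
    model.droprate = drop_path_prob * epoch / epochs  # ramps from 0.0 towards drop_path_prob
    # ... one epoch of training would run here ...
    scheduler.step()  # recent PyTorch versions expect step() after the epoch's optimizer updates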
Example 7
def main(args):
    save_dir = f'{os.path.dirname(os.path.abspath(__file__))}/../train/train-{args.save}-{time.strftime("%Y%m%d-%H%M%S")}'
    utils.create_exp_dir(save_dir)
    data_root = '../data'
    CIFAR_CLASSES = config_dict()['n_classes']
    INPUT_CHANNELS = config_dict()['n_channels']

    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    if args.auxiliary and args.net_type == 'macro':
        logging.info(
            'auxiliary head classifier not supported for macro search space models'
        )
        sys.exit(1)

    logging.info("args = %s", args)

    cudnn.enabled = True
    cudnn.benchmark = True
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    best_acc = 0  # initialize an artificial best accuracy so far

    # Data
    train_transform, valid_transform = utils._data_transforms_cifar10(args)
    # train_data = torchvision.datasets.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
    # valid_data = torchvision.datasets.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)

    train_data = my_cifar10.CIFAR10(root=data_root,
                                    train=True,
                                    download=False,
                                    transform=train_transform)
    valid_data = my_cifar10.CIFAR10(root=data_root,
                                    train=False,
                                    download=False,
                                    transform=valid_transform)

    train_queue = torch.utils.data.DataLoader(train_data,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              pin_memory=True,
                                              num_workers=1)

    valid_queue = torch.utils.data.DataLoader(valid_data,
                                              batch_size=128,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=1)

    # Model
    if args.net_type == 'micro':
        logging.info("==> Building micro search space encoded architectures")
        genotype = eval("genotypes.%s" % args.arch)
        net = NetworkCIFAR(args.init_channels,
                           num_classes=CIFAR_CLASSES,
                           num_channels=INPUT_CHANNELS,
                           layers=args.layers,
                           auxiliary=args.auxiliary,
                           genotype=genotype,
                           SE=args.SE)
    elif args.net_type == 'macro':
        genome = eval("macro_genotypes.%s" % args.arch)
        channels = [(INPUT_CHANNELS, 128), (128, 128), (128, 128)]
        net = EvoNetwork(
            genome,
            channels,
            CIFAR_CLASSES,
            (config_dict()['INPUT_HEIGHT'], config_dict()['INPUT_WIDTH']),
            decoder='dense')
    else:
        raise NameError(
            'Unknown network type, please only use supported network type')

    # logging.info("{}".format(net))
    logging.info("param size = %fMB", utils.count_parameters_in_MB(net))

    net = net.to(device)

    n_epochs = args.epochs

    parameters = filter(lambda p: p.requires_grad, net.parameters())

    criterion = nn.CrossEntropyLoss()
    criterion.to(device)
    optimizer = optim.SGD(parameters,
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, n_epochs, eta_min=args.min_learning_rate)

    for epoch in range(n_epochs):
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        net.droprate = args.droprate * epoch / args.epochs

        train(args, train_queue, net, criterion, optimizer)
        _, valid_acc = infer(args, valid_queue, net, criterion)

        if valid_acc > best_acc:
            utils.save(net, os.path.join(save_dir, 'weights.pt'))
            best_acc = valid_acc

    return best_acc
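The checkpointing at the end of the loop only writes weights when the validation accuracy improves. A minimal sketch of that keep-the-best pattern (model, path and accuracies are made up, and torch.save stands in for utils.save):

import os
import torch
import torch.nn as nn

net = nn.Linear(10, 2)
save_dir = '/tmp/example-run'
os.makedirs(save_dir, exist_ok=True)

best_acc = 0.0
for epoch, valid_acc in enumerate([71.2, 74.9, 73.1]):  # made-up validation accuracies
    if valid_acc > best_acc:
        torch.save(net.state_dict(), os.path.join(save_dir, 'weights.pt'))
        best_acc = valid_acc
print(best_acc)  # 74.9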
Example 8
import sys, os

sys.path.append('..')
from flask import render_template, Flask, redirect, request
from config import config_dict
import json

conf_dict = config_dict()

config_path = 'config.py'
app = Flask(__name__, instance_relative_config=False)
app.config.from_pyfile(config_path)


@app.route('/', methods=['GET'])
def main_reroute():
    return redirect('index.html')


@app.route('/index.html', methods=['GET'])
def index_html():
    animals = {"Dog": "Bark!", "Cat": "Meow!", "Cow": "Moo!", "Crow": "caw!"}
    to_js_array = json.dumps(animals)
    return render_template('index.html',
                           flask=conf_dict['say_me'],
                           meta_tag=to_js_array)


@app.route('/animal_noise.html', methods=['POST'])
def animal_noises():
    animal_noise = request.form['animal_dropdown']
    # Minimal response so the route returns a value; the rest of the original handler is not shown in this example.
    return animal_noise
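One way to exercise these routes without a browser is Flask's built-in test client; a short sketch that assumes the app object defined above (and that the referenced templates exist for the GET routes):

with app.test_client() as client:
    resp = client.get('/')                      # main_reroute -> 302 redirect to index.html
    print(resp.status_code, resp.location)

    resp = client.post('/animal_noise.html', data={'animal_dropdown': 'Dog'})
    print(resp.status_code)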
Example 9
parser.add_argument('--droprate', default=0, type=float, help='dropout probability (default: 0.0)')
# parser.add_argument('--init_channels', type=int, default=32, help='num of init channels')
parser.add_argument('--arch', type=str, default='NSGANet', help='which architecture to use')
parser.add_argument('--filter_increment', default=4, type=int, help='# of filter increment')
parser.add_argument('--SE', action='store_true', default=False, help='use Squeeze-and-Excitation')
parser.add_argument('--net_type', type=str, default='macro', help='(options) micro, macro')

log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')

SERVER_IP = '132.72.81.248'

pop_hist = []  # keep track of every evaluated architecture
ex = Experiment()
ex.add_config(config_dict())


# dataset = ArticularyWordRecognition,AtrialFibrillation,BasicMotions,CharacterTrajectories,Cricket,DuckDuckGeese,EigenWorms,Epilepsy,ERing,EthanolConcentration,FaceDetection,FingerMovements,HandMovementDirection,Handwriting,Heartbeat,InsectWingbeat,JapaneseVowels,Libras,LSST,MotorImagery,NATOPS,PEMS-SF,PenDigits,PhonemeSpectra,RacketSports,SelfRegulationSCP1,SelfRegulationSCP2,SpokenArabicDigits,StandWalkJump,UWaveGestureLibrary
# EEG_dataset_1 = BCI_IV_2a,BCI_IV_2b,HG
# EEG dataset_2 = NER15,Opportunity,MentalImageryLongWords


class TimeTermination(Termination):
    def __init__(self, start_time, n_max_seconds) -> None:
        super().__init__()
        self.start_time = start_time
        self.n_max_seconds = n_max_seconds

    def _do_continue(self, algorithm):
        return (time.time() - self.start_time) <= self.n_max_seconds
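TimeTermination just compares elapsed wall-clock time against a budget; the same check outside of pymoo, as a plain-Python sketch:

import time

start_time, n_max_seconds = time.time(), 2.0

generation = 0
while (time.time() - start_time) <= n_max_seconds:  # same condition as _do_continue
    generation += 1
    time.sleep(0.5)  # stand-in for evaluating one generation of architectures
print(f'stopped after {generation} generations')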
Example 10
def check_config():
    print(config.config_dict())