Example no. 1
def main():
    """Run inference with a pretrained NAS model on the CIFAR-10 test set.

    Relies on module-level ``args`` (parsed CLI options) and ``device``.
    Exits with status 1 when no GPU is available or when an unsupported
    option combination is requested.

    Raises:
        NameError: if ``args.net_type`` is neither 'micro' nor 'macro'.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    # The auxiliary head only exists for micro (cell-based) models.
    if args.auxiliary and args.net_type == 'macro':
        logging.info('auxiliary head classifier not supported for macro search space models')
        sys.exit(1)

    logging.info("args = %s", args)

    cudnn.enabled = True
    cudnn.benchmark = True
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    # Data: only the validation transform/loader is needed for inference.
    _, valid_transform = utils._data_transforms_cifar10(args)

    valid_data = torchvision.datasets.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=1)

    # Model
    if args.net_type == 'micro':
        logging.info("==> Building micro search space encoded architectures")
        # getattr() instead of eval(): args.arch is a user-supplied string and
        # eval() on it would allow arbitrary code execution.
        genotype = getattr(genotypes, args.arch)
        net = PyrmNASNet(args.init_channels, num_classes=10, layers=args.layers,
                         auxiliary=args.auxiliary, genotype=genotype,
                         increment=args.filter_increment, SE=args.SE)
    elif args.net_type == 'macro':
        genome = getattr(macro_genotypes, args.arch)
        channels = [(3, 128), (128, 128), (128, 128)]
        net = EvoNetwork(genome, channels, 10, (32, 32), decoder='dense')
    else:
        raise NameError('Unknown network type, please only use supported network type')

    logging.info("param size = %fMB", utils.count_parameters_in_MB(net))

    net = net.to(device)
    # no drop path during inference
    net.droprate = 0.0
    utils.load(net, args.model_path)

    criterion = nn.CrossEntropyLoss()
    criterion.to(device)

    # inference on original CIFAR-10 test images
    infer(valid_queue, net, criterion)
Example no. 2
def demo():
    """
    Demo creating a network.

    Builds an EvoNetwork from a hand-written macro genome, reports its
    parameter count, and runs a single forward pass on random data.
    """
    import misc.utils as utils

    # Three-phase macro genome; each inner list encodes one node's
    # incoming-connection bits within its phase.
    genome = [[[1], [0, 0], [0, 1, 0], [0, 1, 1, 1], [1, 0, 0, 1, 1], [0]],
              [[0], [0, 0], [0, 1, 0], [0, 1, 0, 1], [1, 1, 1, 1, 1], [0]],
              [[0], [0, 1], [1, 0, 1], [1, 0, 1, 1], [1, 0, 0, 1, 1], [0]]]

    # (in_channels, out_channels) per phase.
    channels = [(3, 128), (128, 128), (128, 128)]

    out_features = 10
    data = torch.randn(16, 3, 32, 32) * 100
    net = EvoNetwork(genome, channels, out_features, (32, 32), decoder='dense')
    print("param size = {}MB".format(utils.count_parameters_in_MB(net)))

    # torch.autograd.Variable is deprecated (a no-op since PyTorch 0.4);
    # tensors are passed to the model directly.
    output = net(data)

    print(output)
Example no. 3
                if self._auxiliary and self.training:
                    logits_aux = self.auxiliary_head(s1)
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0), -1))
        return logits, logits_aux


if __name__ == '__main__':
    # Smoke test: build a micro-search-space network from a known genotype
    # and run one forward pass on random CIFAR-10-shaped input.
    import misc.utils as utils
    import models.micro_genotypes as genotypes

    genome = genotypes.SimpleGenotype

    data = torch.randn(16, 3, 32, 32)

    model = NetworkCIFAR(C=48, num_classes=10, num_channels=3, layers=9, auxiliary=True, genotype=genome, SE=True)

    # no drop path during this forward pass
    model.droprate = 0.0

    # calculate number of trainable parameters
    print("param size = {}MB".format(utils.count_parameters_in_MB(model)))

    # torch.autograd.Variable is deprecated (a no-op since PyTorch 0.4);
    # tensors are passed to the model directly.
    output = model(data)

    # Also exercise a single Cell in isolation.
    cell = Cell(genome, 3, 3, 3, False, False)
    output2 = cell(data)
    print(output.shape)
Example no. 4
def main():
    """Train a discovered NAS architecture from scratch on CIFAR-10.

    Relies on module-level ``args`` (parsed CLI options) and ``device``.
    Saves the weights with the best validation accuracy to
    ``args.save/weights.pt``.

    Raises:
        NameError: if ``args.net_type`` is neither 'micro' nor 'macro'.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    # The auxiliary head only exists for micro (cell-based) models.
    if args.auxiliary and args.net_type == 'macro':
        logging.info(
            'auxiliary head classifier not supported for macro search space models'
        )
        sys.exit(1)

    logging.info("args = %s", args)

    cudnn.enabled = True
    cudnn.benchmark = True
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    best_acc = 0  # best validation accuracy seen so far

    # Data
    train_transform, valid_transform = utils._data_transforms_cifar10(args)
    train_data = torchvision.datasets.CIFAR10(root=args.data,
                                              train=True,
                                              download=True,
                                              transform=train_transform)
    valid_data = torchvision.datasets.CIFAR10(root=args.data,
                                              train=False,
                                              download=True,
                                              transform=valid_transform)

    train_queue = torch.utils.data.DataLoader(train_data,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              pin_memory=True,
                                              num_workers=2)

    valid_queue = torch.utils.data.DataLoader(valid_data,
                                              batch_size=128,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=2)

    # Model
    if args.net_type == 'micro':
        logging.info("==> Building micro search space encoded architectures")
        # getattr() instead of eval(): args.arch is a user-supplied string and
        # eval() on it would allow arbitrary code execution.
        genotype = getattr(genotypes, args.arch)
        net = PyrmNASNet(args.init_channels,
                         num_classes=10,
                         layers=args.layers,
                         auxiliary=args.auxiliary,
                         genotype=genotype,
                         increment=args.filter_increment,
                         SE=args.SE)
    elif args.net_type == 'macro':
        genome = getattr(macro_genotypes, args.arch)
        channels = [(3, 128), (128, 128), (128, 128)]
        net = EvoNetwork(genome, channels, 10, (32, 32), decoder='dense')
    else:
        raise NameError(
            'Unknown network type, please only use supported network type')

    logging.info("param size = %fMB", utils.count_parameters_in_MB(net))

    net = net.to(device)

    n_epochs = args.epochs

    # Only optimize parameters that require gradients.
    parameters = filter(lambda p: p.requires_grad, net.parameters())

    criterion = nn.CrossEntropyLoss()
    criterion.to(device)
    optimizer = optim.SGD(parameters,
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, n_epochs, eta_min=args.min_learning_rate)

    for epoch in range(n_epochs):
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # Linearly ramp the drop-path rate up over the course of training.
        net.droprate = args.droprate * epoch / args.epochs

        train(train_queue, net, criterion, optimizer)
        _, valid_acc = infer(valid_queue, net, criterion)

        # PyTorch >= 1.1 convention: step the LR scheduler AFTER the
        # optimizer updates of the epoch; stepping at the top of the loop
        # skips the schedule's initial learning rate.
        scheduler.step()

        # Checkpoint only when validation accuracy improves.
        if valid_acc > best_acc:
            utils.save(net, os.path.join(args.save, 'weights.pt'))
            best_acc = valid_acc
Example no. 5
def main(args):
    """Train a discovered NAS architecture on CIFAR-10 (local dataset copy).

    Args:
        args: parsed CLI options (seed, architecture, hyper-parameters, ...).

    Returns:
        The best validation accuracy reached during training.

    Raises:
        NameError: if ``args.net_type`` is neither 'micro' nor 'macro'.
    """
    # Timestamped experiment directory next to the repository's train/ folder.
    save_dir = f'{os.path.dirname(os.path.abspath(__file__))}/../train/train-{args.save}-{time.strftime("%Y%m%d-%H%M%S")}'
    utils.create_exp_dir(save_dir)
    data_root = '../data'
    CIFAR_CLASSES = config_dict()['n_classes']
    INPUT_CHANNELS = config_dict()['n_channels']

    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    # The auxiliary head only exists for micro (cell-based) models.
    if args.auxiliary and args.net_type == 'macro':
        logging.info(
            'auxiliary head classifier not supported for macro search space models'
        )
        sys.exit(1)

    logging.info("args = %s", args)

    cudnn.enabled = True
    cudnn.benchmark = True
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    best_acc = 0  # best validation accuracy seen so far

    # Data: local CIFAR-10 copy (download=False assumes it already exists).
    train_transform, valid_transform = utils._data_transforms_cifar10(args)

    train_data = my_cifar10.CIFAR10(root=data_root,
                                    train=True,
                                    download=False,
                                    transform=train_transform)
    valid_data = my_cifar10.CIFAR10(root=data_root,
                                    train=False,
                                    download=False,
                                    transform=valid_transform)

    train_queue = torch.utils.data.DataLoader(train_data,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              pin_memory=True,
                                              num_workers=1)

    valid_queue = torch.utils.data.DataLoader(valid_data,
                                              batch_size=128,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=1)

    # Model
    if args.net_type == 'micro':
        logging.info("==> Building micro search space encoded architectures")
        # getattr() instead of eval(): args.arch is a user-supplied string and
        # eval() on it would allow arbitrary code execution.
        genotype = getattr(genotypes, args.arch)
        net = NetworkCIFAR(args.init_channels,
                           num_classes=CIFAR_CLASSES,
                           num_channels=INPUT_CHANNELS,
                           layers=args.layers,
                           auxiliary=args.auxiliary,
                           genotype=genotype,
                           SE=args.SE)
    elif args.net_type == 'macro':
        genome = getattr(macro_genotypes, args.arch)
        channels = [(INPUT_CHANNELS, 128), (128, 128), (128, 128)]
        net = EvoNetwork(
            genome,
            channels,
            CIFAR_CLASSES,
            (config_dict()['INPUT_HEIGHT'], config_dict()['INPUT_WIDTH']),
            decoder='dense')
    else:
        raise NameError(
            'Unknown network type, please only use supported network type')

    logging.info("param size = %fMB", utils.count_parameters_in_MB(net))

    net = net.to(device)

    n_epochs = args.epochs

    # Only optimize parameters that require gradients.
    parameters = filter(lambda p: p.requires_grad, net.parameters())

    criterion = nn.CrossEntropyLoss()
    criterion.to(device)
    optimizer = optim.SGD(parameters,
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, n_epochs, eta_min=args.min_learning_rate)

    for epoch in range(n_epochs):
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # Linearly ramp the drop-path rate up over the course of training.
        net.droprate = args.droprate * epoch / args.epochs

        train(args, train_queue, net, criterion, optimizer)
        _, valid_acc = infer(args, valid_queue, net, criterion)

        # PyTorch >= 1.1 convention: step the LR scheduler AFTER the
        # optimizer updates of the epoch; stepping at the top of the loop
        # skips the schedule's initial learning rate.
        scheduler.step()

        # Checkpoint only when validation accuracy improves.
        if valid_acc > best_acc:
            utils.save(net, os.path.join(save_dir, 'weights.pt'))
            best_acc = valid_acc

    return best_acc