# Exemplo n.º 1
# 0
def main(args):
    """Train a model from ``models`` on the preprocessed dataset and log final top-1 accuracy.

    Args:
        args: Namespace with at least ``seed``, ``model``, ``optimizer``,
            ``initial_lr``, ``momentum``, ``weight_decay``, ``ending_lr`` and
            ``epochs`` (plus whatever ``data_preprocess``/``prepare_logger`` read).

    Raises:
        ValueError: if ``args.optimizer`` is not one of 'adam', 'sgd', 'rmsprop'.
    """
    reset_seed(args.seed)
    prepare_logger(args)

    logger.info("These are the hyper-parameters you want to tune:\n%s", pprint.pformat(vars(args)))

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    train_loader, test_loader = data_preprocess(args)
    model = models.__dict__[args.model]()
    model.to(device)

    criterion = nn.CrossEntropyLoss()
    if args.optimizer == 'adam':
        optimizer = optim.Adam(model.parameters(), lr=args.initial_lr, weight_decay=args.weight_decay)
    elif args.optimizer == 'sgd':
        optimizer = optim.SGD(model.parameters(), lr=args.initial_lr, momentum=args.momentum, weight_decay=args.weight_decay)
    elif args.optimizer == 'rmsprop':
        optimizer = optim.RMSprop(model.parameters(), lr=args.initial_lr, momentum=args.momentum, weight_decay=args.weight_decay)
    else:
        # Previously an unknown optimizer fell through to an unbound
        # optimizer_cls and crashed with a confusing NameError; fail fast instead.
        raise ValueError("Unsupported optimizer: %r" % args.optimizer)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs, eta_min=args.ending_lr)

    top1 = 0.0  # guards the final log if args.epochs == 0 (top1 was unbound before)
    for epoch in range(1, args.epochs + 1):
        train(model, train_loader, criterion, optimizer, scheduler, args, epoch, device)
        top1, _ = test(model, test_loader, criterion, args, epoch, device)
    logger.info("Final accuracy is: %.6f", top1)
# Exemplo n.º 2
# 0
def main(args):
    """Run a DARTS architecture search over the CNN search space.

    Args:
        args: Namespace with ``seed``, ``channels``, ``layers``, ``optimizer``,
            ``initial_lr``, ``momentum``, ``weight_decay``, ``lr_scheduler``,
            ``ending_lr``, ``epochs``, ``batch_size``, ``log_frequency``,
            ``unrolled`` and ``visualization``.

    Raises:
        ValueError: for an unrecognised ``args.optimizer`` or
            ``args.lr_scheduler`` (both previously crashed with NameError).
    """
    reset_seed(args.seed)
    prepare_logger(args)

    logger.info("These are the hyper-parameters you want to tune:\n%s",
                pprint.pformat(vars(args)))

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    train_loader, test_loader = data_preprocess(args)
    # 32x32 RGB input, 10 classes — presumably a CIFAR-10-style setup; confirm
    # against data_preprocess.
    model = CNN(32, 3, args.channels, 10, args.layers)
    model.to(device)

    criterion = nn.CrossEntropyLoss()
    if args.optimizer == 'adam':
        optimizer = optim.Adam(model.parameters(),
                               lr=args.initial_lr,
                               weight_decay=args.weight_decay)
    elif args.optimizer == 'sgd':
        optimizer = optim.SGD(model.parameters(),
                              lr=args.initial_lr,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay)
    elif args.optimizer == 'rmsprop':
        optimizer = optim.RMSprop(model.parameters(),
                                  lr=args.initial_lr,
                                  momentum=args.momentum,
                                  weight_decay=args.weight_decay)
    else:
        raise ValueError("Unsupported optimizer: %r" % args.optimizer)

    # NOTE: 'cosin' (sic) is the key callers pass; kept byte-identical.
    if args.lr_scheduler == 'cosin':
        scheduler = optim.lr_scheduler.CosineAnnealingLR(
            optimizer, args.epochs, eta_min=args.ending_lr)
    elif args.lr_scheduler == 'linear':
        scheduler = optim.lr_scheduler.StepLR(optimizer,
                                              step_size=15,
                                              gamma=0.1)
    else:
        # scheduler was previously left unbound and crashed later in the callback.
        raise ValueError("Unsupported lr_scheduler: %r" % args.lr_scheduler)

    trainer = DartsTrainer(
        model,
        loss=criterion,
        metrics=lambda output, target: accuracy(output, target),
        optimizer=optimizer,
        num_epochs=args.epochs,
        dataset_train=train_loader,
        dataset_valid=test_loader,
        batch_size=args.batch_size,
        log_frequency=args.log_frequency,
        unrolled=args.unrolled,
        callbacks=[
            LRSchedulerCallback(scheduler),
            ArchitectureCheckpoint("./checkpoints_layer5")
        ])

    if args.visualization:
        trainer.enable_visualization()
    trainer.train()
def main(args):
    """NNI trial entry point: fetch trial hyper-parameters, train, report metrics.

    Note: the incoming ``args`` is replaced wholesale by the parameter dict
    NNI generates for this trial, which is why it is accessed with dict keys
    below (unlike the attribute-style siblings in this file).

    Raises:
        ValueError: if the trial's 'optimizer' is not 'adam'/'sgd'/'rmsprop'.
    """
    args = nni.get_next_parameter()
    reset_seed(args['seed'])
    prepare_logger(args)

    logger.info("These are the hyper-parameters you want to tune:\n%s",
                pprint.pformat(args))

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    train_loader, test_loader = data_preprocess(args)
    model = models.__dict__[args['model']]()
    model.to(device)

    criterion = nn.CrossEntropyLoss()
    if args['optimizer'] == 'adam':
        optimizer = optim.Adam(model.parameters(),
                               lr=args['initial_lr'],
                               weight_decay=args['weight_decay'])
    elif args['optimizer'] == 'sgd':
        optimizer = optim.SGD(model.parameters(),
                              lr=args['initial_lr'],
                              momentum=args['momentum'],
                              weight_decay=args['weight_decay'])
    elif args['optimizer'] == 'rmsprop':
        optimizer = optim.RMSprop(model.parameters(),
                                  lr=args['initial_lr'],
                                  momentum=args['momentum'],
                                  weight_decay=args['weight_decay'])
    else:
        # Was an unbound optimizer_cls → NameError; fail fast with a clear message.
        raise ValueError("Unsupported optimizer: %r" % args['optimizer'])
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                     args['epochs'],
                                                     eta_min=args['ending_lr'])

    top1 = 0.0  # guards the final report if 'epochs' is 0
    for epoch in range(1, args['epochs'] + 1):
        train(model, train_loader, criterion, optimizer, scheduler, args,
              epoch, device)
        top1, _ = test(model, test_loader, criterion, args, epoch, device)
        # Report top-1 accuracy, consistent with the final metric below.
        # (Previously the discarded second return value ``_`` was reported.)
        nni.report_intermediate_result(top1)

    nni.report_final_result(top1)
    logger.info("Final accuracy is: %.6f", top1)
# Exemplo n.º 4
# 0
def main():
    """Entry point for the Bluetooth LED/FDC service loop.

    Optional argv: [1] numeric logger level, [2] 'refine' flag passed through
    to FDCManager. NOTE(review): any non-empty argv[2] string — including
    "False" — is truthy here; confirm the intended parsing with callers.

    Accepts client connections in a loop and processes their data until an
    unrecoverable error occurs; always wipes the LEDs and cleans up the
    socket on the way out.
    """
    global mainlogger
    global bm
    global lm
    global fm

    loggerlevel = 10  # default level; 10 == logging.DEBUG
    refine = False
    if len(os.sys.argv) > 1:
        loggerlevel = os.sys.argv[1]
        if len(os.sys.argv) > 2:
            refine = os.sys.argv[2]
    mainlogger = utils.prepare_logger(__name__, int(loggerlevel))
    mainlogger.info("Logger ready")
    mainlogger.info("Building up bluetooth connection")

    bm = BluetoothManager.BlueManager(mainlogger.getChild("BluetoothManager"))
    lm = LedManager.LedManager(mainlogger.getChild("LedManager"))
    fm = FDCManager.FDCManager(mainlogger.getChild("FDCManager"), refine)

    socket = bm.build_socket()
    client = None

    try:
        while True:
            client = connectionloop(socket)
            try:
                dataloop(client)
            # BUG FIX: the original compared the exception *instance* to the
            # exception *class* with ``==`` (always False), so every error fell
            # through to the generic "Error unhandeld" branch. Dedicated except
            # clauses restore the intended dispatch.
            except UnboundLocalError:
                mainlogger.error(traceback.format_exc())
            except ConnectionResetError:
                mainlogger.warning("Connection reset")
                mainlogger.warning("Connection reset error")
                continue
            except Exception as e:
                print(e)
                mainlogger.error(traceback.format_exc())
                mainlogger.error("Unknown/handled exception")
                break
    finally:
        lm.colorwipe()
        bm.clean_up(client, socket)
def main(args):
    """Train one of a fixed set of CIFAR-style models and report results to NNI.

    Args:
        args: Namespace with ``seed``, ``model``, ``optimizer``, ``initial_lr``,
            ``momentum``, ``weight_decay``, ``ending_lr`` and ``epochs``.

    Raises:
        ValueError: for an unrecognised ``args.model`` or ``args.optimizer``
            (both previously left a name unbound and crashed with NameError).
    """
    reset_seed(args.seed)
    prepare_logger(args)

    logger.info("These are the hyper-parameters you want to tune:\n%s", pprint.pformat(vars(args)))

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    train_loader, test_loader = data_preprocess(args)

    # Dispatch table instead of the if/elif chain; lambdas defer construction
    # until the chosen entry is looked up.
    model_factories = {
        'vgg16': lambda: VGG('VGG16'),
        'vgg19': lambda: VGG('VGG19'),
        'resnet18': ResNet18,
        'resnet34': ResNet34,
        'senet18': SENet18,
        'densenet121': densenet_cifar,
    }
    try:
        model = model_factories[args.model]()
    except KeyError:
        raise ValueError("Unsupported model: %r" % args.model)
    model.to(device)

    criterion = nn.CrossEntropyLoss()
    # Note: SGD/RMSprop take momentum and weight decay; the other optimizers
    # here are intentionally configured with the learning rate only.
    if args.optimizer == 'SGD':
        optimizer = optim.SGD(model.parameters(), lr=args.initial_lr, momentum=args.momentum, weight_decay=args.weight_decay)
    elif args.optimizer == 'Adadelta':
        optimizer = optim.Adadelta(model.parameters(), lr=args.initial_lr)
    elif args.optimizer == 'Adagrad':
        optimizer = optim.Adagrad(model.parameters(), lr=args.initial_lr)
    elif args.optimizer == 'Adam':
        optimizer = optim.Adam(model.parameters(), lr=args.initial_lr)
    elif args.optimizer == 'Adamax':
        optimizer = optim.Adamax(model.parameters(), lr=args.initial_lr)
    elif args.optimizer == 'RMSprop':
        optimizer = optim.RMSprop(model.parameters(), lr=args.initial_lr, momentum=args.momentum, weight_decay=args.weight_decay)
    else:
        raise ValueError("Unsupported optimizer: %r" % args.optimizer)

    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs, eta_min=args.ending_lr)

    best_top1 = 0
    top1 = 0.0  # guards the final log/report if args.epochs == 0
    for epoch in range(1, args.epochs + 1):
        train(model, train_loader, criterion, optimizer, scheduler, args, epoch, device)
        top1, _ = test(model, test_loader, criterion, args, epoch, device)

        if top1 > best_top1:
            best_top1 = top1
        nni.report_intermediate_result(top1)
    # Logs the last-epoch accuracy but reports the best one to NNI — kept as-is.
    logger.info("Final accuracy is: %.6f", top1)
    nni.report_final_result(best_top1)
# Exemplo n.º 6
# 0
def main(args):
    """Either run a DARTS search ('nas' without a fixed architecture), retrain a
    fixed NAS architecture, or train a regular model from ``models``.

    Args:
        args: Namespace with ``seed``, ``model``, ``fix_arch``,
            ``arc_checkpoint``, ``channels``, ``layers``, ``optimizer``,
            ``initial_lr``, ``momentum``, ``weight_decay``, ``ending_lr``,
            ``epochs``, ``batch_size``, ``log_frequency``, ``unrolled`` and
            ``visualization``.

    Raises:
        ValueError: if ``args.optimizer`` is not 'adam'/'sgd'/'rmsprop'
            (previously crashed with NameError on the unbound optimizer_cls).
    """
    reset_seed(args.seed)
    prepare_logger(args)

    logger.info("These are the hyper-parameters you want to tune:\n%s",
                pprint.pformat(vars(args)))

    if args.model == 'nas':
        logger.info("Using NAS.\n")
        # Fall back to a fresh search when the checkpoint to fix is missing.
        if args.fix_arch and not os.path.exists(args.arc_checkpoint):
            print(args.arc_checkpoint,
                  'does not exist, will not fix the architecture')
            args.fix_arch = False

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    if args.model == 'nas':
        if not args.fix_arch:
            # Search mode: DartsTrainer consumes raw datasets, not loaders.
            # NOTE(review): model is not moved to `device` here — presumably
            # DartsTrainer handles placement; confirm.
            model = CNN(32, 3, args.channels, 10, args.layers)
            trainset, testset = data_preprocess(args)
        else:
            # Retrain mode: fix the searched architecture and train normally.
            model = CNN(32, 3, args.channels, 10, args.layers)
            apply_fixed_architecture(model, args.arc_checkpoint)
            model.to(device)
            train_loader, test_loader = data_preprocess(args)
    else:
        train_loader, test_loader = data_preprocess(args)
        model = models.__dict__[args.model]()
        model.to(device)

    criterion = nn.CrossEntropyLoss()
    if args.optimizer == 'adam':
        optimizer = optim.Adam(model.parameters(),
                               lr=args.initial_lr,
                               weight_decay=args.weight_decay)
    elif args.optimizer == 'sgd':
        optimizer = optim.SGD(model.parameters(),
                              lr=args.initial_lr,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay)
    elif args.optimizer == 'rmsprop':
        optimizer = optim.RMSprop(model.parameters(),
                                  lr=args.initial_lr,
                                  momentum=args.momentum,
                                  weight_decay=args.weight_decay)
    else:
        raise ValueError("Unsupported optimizer: %r" % args.optimizer)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                     args.epochs,
                                                     eta_min=args.ending_lr)

    if args.model == 'nas' and not args.fix_arch:
        trainer = DartsTrainer(model,
                               loss=criterion,
                               metrics=lambda output, target: accuracyTopk(
                                   output, target, topk=(1, )),
                               optimizer=optimizer,
                               num_epochs=args.epochs,
                               dataset_train=trainset,
                               dataset_valid=testset,
                               batch_size=args.batch_size,
                               log_frequency=args.log_frequency,
                               unrolled=args.unrolled,
                               callbacks=[
                                   LRSchedulerCallback(scheduler),
                                   ArchitectureCheckpoint("./checkpoints")
                               ])
        if args.visualization:
            trainer.enable_visualization()
        trainer.train()
        trainer.export("final_arch.json")
    else:
        top1 = 0.0  # guards the final log/report if args.epochs == 0
        for epoch in range(1, args.epochs + 1):
            train(model, train_loader, criterion, optimizer, scheduler, args,
                  epoch, device)
            top1, _ = test(model, test_loader, criterion, args, epoch, device)
            nni.report_intermediate_result(top1)
        logger.info("Final accuracy is: %.6f", top1)
        nni.report_final_result(top1)