Example #1
                                   tanh_constant=1.1,
                                   cell_exit_extra_step=True)
    else:
        raise AssertionError

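    # classification loss, SGD with momentum, and a cosine-annealed learning rate for the shared weights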
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=0.05,
                                momentum=0.9,
                                weight_decay=1e-4)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                              T_max=num_epochs,
                                                              eta_min=0.001)

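    # EnasTrainer interleaves training of the shared model weights with
    # updates to the controller (mutator) that samples architectures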
    trainer = enas.EnasTrainer(model,
                               loss=criterion,
                               metrics=accuracy,
                               reward_function=reward_accuracy,
                               optimizer=optimizer,
                               callbacks=[
                                   LRSchedulerCallback(lr_scheduler),
                                   ArchitectureCheckpoint("./checkpoints")
                               ],
                               batch_size=args.batch_size,
                               num_epochs=num_epochs,
                               dataset_train=dataset_train,
                               dataset_valid=dataset_valid,
                               log_frequency=args.log_frequency,
                               mutator=mutator)
    trainer.train()
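
The metric and the reward above are plain callables; the `accuracy` and `reward_accuracy` helpers themselves are not part of the excerpt. A minimal sketch of what they could look like, assuming they receive classification logits and integer class targets (the bodies below are an assumption, not code from the example):

import torch


def accuracy(output, target, topk=(1,)):
    """Top-k accuracy as a dict, e.g. {"acc1": 0.93} (assumed metric helper)."""
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    return {"acc{}".format(k): correct[:k].reshape(-1).float().sum().item() / batch_size
            for k in topk}


def reward_accuracy(output, target):
    """Scalar top-1 accuracy used as the controller's reward (assumed helper)."""
    _, predicted = output.max(1)
    return (predicted == target).float().mean().item()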
Example #2
                                      shuffle=True,
                                      num_workers=0)
        dataloader_valid = DataLoader(dataset_valid,
                                      batch_size=args.batch_size,
                                      shuffle=True,
                                      num_workers=0)
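        # plain training run that bypasses the architecture search:
        # train on GPU if available, then exit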
        train(args, model, dataloader_train, dataloader_valid, criterion,
              optim,
              torch.device('cuda' if torch.cuda.is_available() else 'cpu'))
        exit(0)

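    # ENAS search with top-1 accuracy as both the reported metric and the controller reward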
    trainer = enas.EnasTrainer(
        model,
        loss=criterion,
        metrics=lambda output, target: accuracy(output, target, topk=(1, )),
        reward_function=reward_accuracy,
        optimizer=optim,
        callbacks=[
            LRSchedulerCallback(lr_scheduler),
            ArchitectureCheckpoint("./checkpoints")
        ],
        batch_size=args.batch_size,
        num_epochs=args.epochs,
        dataset_train=dataset_train,
        dataset_valid=dataset_valid,
        log_frequency=args.log_frequency)

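    # optionally enable NNI's NAS visualization before starting the search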
    if args.visualization:
        trainer.enable_visualization()
    trainer.train()
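
Both excerpts read their run settings from an `args` namespace (batch_size, epochs, log_frequency, visualization). A hypothetical argparse block along these lines would supply them; the flag names and defaults here are assumptions:

import argparse

parser = argparse.ArgumentParser("enas_search")
parser.add_argument("--batch-size", type=int, default=128)           # assumed default
parser.add_argument("--epochs", type=int, default=None)              # None: let the script choose
parser.add_argument("--log-frequency", type=int, default=10)         # log every N steps
parser.add_argument("--visualization", default=False, action="store_true")
args = parser.parse_args()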