Example #1
def main():
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    torch.cuda.set_device(args.gpu)
    cudnn.enabled = True
    logging.info("args = %s", args)

    genotype = eval("genotypes.%s" % args.arch)
    if args.dataset in LARGE_DATASETS:
        model = NetworkLarge(args.init_channels, CLASSES, args.layers,
                             args.auxiliary, genotype)
    else:
        model = Network(args.init_channels, CLASSES, args.layers,
                        args.auxiliary, genotype)
    model = model.cuda()
    utils.load(model, args.model_path)

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    _, test_transform = utils.data_transforms(args.dataset, args.cutout,
                                              args.cutout_length)
    if args.dataset == "CIFAR100":
        test_data = dset.CIFAR100(root=args.data,
                                  train=False,
                                  download=True,
                                  transform=test_transform)
    elif args.dataset == "CIFAR10":
        test_data = dset.CIFAR10(root=args.data,
                                 train=False,
                                 download=True,
                                 transform=test_transform)
    elif args.dataset == "sport8":
        dset_cls = dset.ImageFolder
        val_path = '%s/Sport8/test' % args.data
        test_data = dset_cls(root=val_path, transform=test_transform)
    elif args.dataset == "mit67":
        dset_cls = dset.ImageFolder
        val_path = '%s/MIT67/test' % args.data
        test_data = dset_cls(root=val_path, transform=test_transform)
    elif args.dataset == "flowers102":
        dset_cls = dset.ImageFolder
        val_path = '%s/flowers102/test' % args.tmp_data_dir
        test_data = dset_cls(root=val_path, transform=test_transform)
    test_queue = torch.utils.data.DataLoader(test_data,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             pin_memory=False,
                                             num_workers=2)

    model.drop_path_prob = 0.0
    test_acc, test_obj = infer(test_queue, model, criterion)
    logging.info('Test_acc %f', test_acc)
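The infer helper these evaluation scripts call is not shown. A minimal sketch of the usual DARTS-style version, assuming the utils.AvgrageMeter and utils.accuracy helpers from the same codebase:

def infer(test_queue, model, criterion):
    # Average the loss and top-1 accuracy over the test set.
    objs, top1 = utils.AvgrageMeter(), utils.AvgrageMeter()
    model.eval()
    with torch.no_grad():
        for step, (input, target) in enumerate(test_queue):
            input = input.cuda()
            target = target.cuda(non_blocking=True)
            logits, _ = model(input)  # DARTS networks return (logits, aux_logits)
            loss = criterion(logits, target)
            prec1, = utils.accuracy(logits, target, topk=(1,))
            n = input.size(0)
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
    return top1.avg, objs.avg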
Example #2
def main():
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype)
    if args.parallel:
        model = nn.DataParallel(model).cuda()
    else:
        model = model.cuda()
    model.load_state_dict(
        torch.load(args.model_path, map_location='cuda:0')['state_dict'])

    print("param size = {:.1f}MB".format(
        floor(utils.count_parameters_in_MB(model), 1)))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    validdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    valid_queue = torch.utils.data.DataLoader(valid_data,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=4)

    model.drop_path_prob = args.drop_path_prob
    # dummy input (note: constructed but never used below)
    input = torch.randn(1, 3, 224, 224)
    input = input.cuda()
    valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model,
                                                      criterion)
    logging.info('valid_acc_top1 %f', valid_acc_top1)
    logging.info('valid_acc_top5 %f', valid_acc_top5)
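Most of these scripts log utils.count_parameters_in_MB(model); the DARTS reference implementation of that helper is essentially:

def count_parameters_in_MB(model):
    # Parameter count in millions, excluding the auxiliary classification head.
    return sum(np.prod(v.size()) for name, v in model.named_parameters()
               if "auxiliary" not in name) / 1e6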
Example #3
def get_imagenet_tuned_model(load_weights=True):
    network = NetworkImageNet(48, IMAGENET_CLASSES, 14, True, 0.4,
                              IMAGENET_TUNED,
                              CrossEntropyLabelSmooth(IMAGENET_CLASSES, 0.1))
    if load_weights:
        device = torch.device('cpu')
        state_dict = torch.load('weights/imagenet_tuned.pt',
                                map_location=device)
        # If the checkpoint was profiled with thop, strip its bookkeeping keys:
        # state_dict = {k: v for k, v in state_dict.items()
        #               if 'total_ops' not in k and 'total_params' not in k}
        network.load_state_dict(state_dict)
    return network
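A hypothetical usage sketch for the loader above (the dummy input shape and the (logits, aux_logits) forward signature follow the DARTS ImageNet convention and may differ in this variant):

model = get_imagenet_tuned_model(load_weights=False)  # skip weights if the .pt file is absent
model.eval()
with torch.no_grad():
    logits, _ = model(torch.randn(1, 3, 224, 224))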
Example #4
def main():
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype)
    #start_epochs = 0
    model.drop_path_prob = 0
    stat(model, (3, 224, 224))

    genotype = eval("genotypes.%s" % "MY_DARTS")
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype)

    model.drop_path_prob = 0
    stat(model, (3, 224, 224))
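The stat call in Example #4 comes from the torchstat package, which prints a per-layer table of parameters, memory, and FLOPs for the given input shape:

from torchstat import stat  # pip install torchstat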
Example #5
def main():
    args = parser.parse_args()
    assert torch.cuda.is_available()

    image_ph = tf.placeholder(tf.uint8, (None, None, 3))
    image_proc = preprocess_for_eval(image_ph, args.image_size,
                                     args.image_size)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    model = NetworkImageNet(args.num_conv_filters, args.num_classes,
                            args.num_cells, False, PNASNet)
    model.drop_path_prob = 0
    model.eval()
    model.load_state_dict(torch.load('data/PNASNet-5_Large.pth'))
    model = model.cuda()

    c1, c5 = 0, 0
    val_dataset = datasets.ImageFolder(args.valdir)
    for i, (image, label) in enumerate(val_dataset):
        tf_image_proc = sess.run(image_proc, feed_dict={image_ph: image})
        image = torch.from_numpy(tf_image_proc.transpose((2, 0, 1)))
        image = Variable(image).cuda()
        logits, _ = model(image.unsqueeze(0))
        top5 = logits.data.cpu().numpy().squeeze().argsort()[::-1][:5]
        top1 = top5[0]
        # the TF checkpoint uses 1001 classes (index 0 is background), so
        # ImageFolder labels are shifted by one
        if label + 1 == top1:
            c1 += 1
        if label + 1 in top5:
            c5 += 1
        print('Test: [{0}/{1}]\t'
              'Prec@1 {2:.3f}\t'
              'Prec@5 {3:.3f}\t'.format(i + 1, len(val_dataset), c1 / (i + 1.),
                                        c5 / (i + 1.)))
Example #6
def eval_arch(genotype_file, ckpt_path):
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)

  np.random.seed(args.seed)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled=True
  torch.cuda.manual_seed(args.seed)
  logging.info("args = %s", args)

  if args.arch is not None:
    genotype = eval("genotypes.%s" % args.arch)
  else:
    # fall back to a genotype serialized as JSON
    tmp_dict = json.load(open(genotype_file, 'r'))
    genotype = genotypes.Genotype(**tmp_dict)

  print(genotype)
  model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
  model.drop_path_prob = 0.
  num_gpus = torch.cuda.device_count()   
  if num_gpus > 1:
    model = nn.DataParallel(model).cuda()
  else:
    model = model.cuda()
  logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
  model.load_state_dict(torch.load(ckpt_path), strict=False)

  criterion = nn.CrossEntropyLoss()
  criterion = criterion.cuda()
  criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
  criterion_smooth = criterion_smooth.cuda()

  validdir = os.path.join(args.data, 'ILSVRC2012_img_val')
  normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  valid_data = dset.ImageFolder(
    validdir,
    transforms.Compose([
      transforms.Resize(256),
      transforms.CenterCrop(224),
      transforms.ToTensor(),
      normalize,
    ]))
  valid_queue = torch.utils.data.DataLoader(
    valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=4)

  valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model, criterion)
  logging.info('valid_acc_top1 %f', valid_acc_top1)
  logging.info('valid_acc_top5 %f', valid_acc_top5)
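Several of the ImageNet scripts build a CrossEntropyLabelSmooth criterion; the DARTS reference implementation is essentially:

class CrossEntropyLabelSmooth(nn.Module):
    def __init__(self, num_classes, epsilon):
        super(CrossEntropyLabelSmooth, self).__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, inputs, targets):
        log_probs = self.logsoftmax(inputs)
        # one-hot targets, smoothed toward the uniform distribution
        targets = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
        targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
        loss = (-targets * log_probs).mean(0).sum()
        return loss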
Example #7
def main():
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info(
        "device = %s" %
        ('cuda:{}'.format(args.gpu) if torch.cuda.is_available() else 'cpu'))
    logging.info("args = %s", args)

    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype).to(device)
    model.load_state_dict(torch.load(args.model_path)["state_dict"])

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss().to(device)

    validdir = os.path.join(args.data, "val")
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]),
    )

    valid_queue = torch.utils.data.DataLoader(
        valid_data,
        batch_size=args.batch_size,
        shuffle=False,
        pin_memory=True,
        num_workers=4,
    )

    model.drop_path_prob = args.drop_path_prob
    valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model,
                                                      criterion)
    logging.info("valid_acc_top1 %f", valid_acc_top1)
    logging.info("valid_acc_top5 %f", valid_acc_top5)
Example #8
def main():
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype)
    model = model.cuda()
    model.load_state_dict(torch.load(args.model_path)['state_dict'])

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    validdir = os.path.join(args.data, 'valid')
    normalize = transforms.Normalize(mean=[0.4802, 0.4481, 0.3975],
                                     std=[0.2302, 0.2265, 0.2262])
    valid_data = dset.ImageFolder(
        validdir, transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))

    valid_queue = torch.utils.data.DataLoader(valid_data,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=4)

    model.drop_path_prob = args.drop_path_prob
    valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model,
                                                      criterion)
    logging.info('valid_acc_top1 %f', valid_acc_top1)
    logging.info('valid_acc_top5 %f', valid_acc_top5)
Example #9
def main():
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    cudnn.enabled = True
    logging.info("args = %s", args)

    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype)
    model = nn.DataParallel(model)
    model = model.cuda()
    model.load_state_dict(torch.load(args.model_path)['state_dict'],
                          strict=False)

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    validdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    valid_queue = torch.utils.data.DataLoader(valid_data,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=False,
                                              num_workers=4)

    model.module.drop_path_prob = 0.0
    valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model,
                                                      criterion)
    logging.info('Valid_acc_top1 %f', valid_acc_top1)
    logging.info('Valid_acc_top5 %f', valid_acc_top5)
Example #10
    def build_model_from_config(self):
        genotype = Genotype(
            normal=[
                ("skip_connect", 1),
                ("skip_connect", 0),
                ("sep_conv_3x3", 2),
                ("sep_conv_3x3", 1),
                ("sep_conv_5x5", 2),
                ("sep_conv_3x3", 0),
                ("sep_conv_5x5", 3),
                ("sep_conv_5x5", 2),
            ],
            normal_concat=range(2, 6),
            reduce=[
                ("max_pool_3x3", 1),
                ("sep_conv_3x3", 0),
                ("sep_conv_5x5", 1),
                ("dil_conv_5x5", 2),
                ("sep_conv_3x3", 1),
                ("sep_conv_3x3", 3),
                ("sep_conv_5x5", 1),
                ("max_pool_3x3", 2),
            ],
            reduce_concat=range(2, 6),
        )
        activation_function = activation_map[self.context.get_hparam(
            "activation")]

        model = NetworkImageNet(
            genotype,
            activation_function,
            self.context.get_hparam("init_channels"),
            self.context.get_hparam("num_classes"),
            self.context.get_hparam("layers"),
            auxiliary=self.context.get_hparam("auxiliary"),
            do_SE=self.context.get_hparam("do_SE"),
            drop_path_prob=self.context.get_hparam("drop_path_prob"),
            drop_prob=self.context.get_hparam("drop_prob"),
        )

        ema_model = EMAWrapper(self.context.get_hparam("ema_decay"), model)
        return ema_model
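EMAWrapper is defined elsewhere in this repository. As a hedged sketch (not the actual implementation), an exponential-moving-average wrapper of this kind keeps a decayed shadow copy of the wrapped model's weights:

class EMAWrapper(nn.Module):
    # Hypothetical minimal sketch; the real class lives in the same repo.
    def __init__(self, decay, model):
        super().__init__()
        self.decay = decay
        self.model = model
        self.shadow = {k: v.detach().clone()
                       for k, v in model.state_dict().items()}

    def forward(self, *args, **kwargs):
        return self.model(*args, **kwargs)

    @torch.no_grad()
    def update_ema(self):
        # shadow <- decay * shadow + (1 - decay) * current, per tensor
        for k, v in self.model.state_dict().items():
            if v.dtype.is_floating_point:
                self.shadow[k].mul_(self.decay).add_(v, alpha=1 - self.decay)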
Example #11
    def build_model(self) -> nn.Module:
        genotype = Genotype(
            normal=[
                ("skip_connect", 1),
                ("skip_connect", 0),
                ("sep_conv_3x3", 2),
                ("sep_conv_3x3", 1),
                ("sep_conv_5x5", 2),
                ("sep_conv_3x3", 0),
                ("sep_conv_5x5", 3),
                ("sep_conv_5x5", 2),
            ],
            normal_concat=range(2, 6),
            reduce=[
                ("max_pool_3x3", 1),
                ("sep_conv_3x3", 0),
                ("sep_conv_5x5", 1),
                ("dil_conv_5x5", 2),
                ("sep_conv_3x3", 1),
                ("sep_conv_3x3", 3),
                ("sep_conv_5x5", 1),
                ("max_pool_3x3", 2),
            ],
            reduce_concat=range(2, 6),
        )
        activation_function = activation_map[self.context.get_hparam("activation")]

        model = NetworkImageNet(
            genotype,
            activation_function,
            self.context.get_hparam("init_channels"),
            self.context.get_hparam("num_classes"),
            self.context.get_hparam("layers"),
            auxiliary=self.context.get_hparam("auxiliary"),
            do_SE=self.context.get_hparam("do_SE"),
        )

        # If loading backbone weights, either skip reset_parameters() or call
        # it before the backbone weights are loaded.
        reset_parameters(model)
        return model
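reset_parameters is likewise defined elsewhere; a minimal sketch of the usual pattern, assuming it simply re-initializes every submodule that knows how to reset itself:

def reset_parameters(model):
    # Hypothetical sketch: delegate to each submodule's own reset_parameters().
    for m in model.modules():
        if hasattr(m, "reset_parameters"):
            m.reset_parameters()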
Example #12
    def write_pytorch_weight(self):
        model = NetworkImageNet(216, 1001, 12, False, PNASNet)
        model.drop_path_prob = 0
        model.eval()

        self.used_keys = []
        self.convert_conv(model.conv0, 'conv0/weights')
        self.convert_bn(model.conv0_bn, 'conv0_bn/gamma', 'conv0_bn/beta',
                        'conv0_bn/moving_mean', 'conv0_bn/moving_variance')
        self.convert_cell(model.stem1, 'cell_stem_0/')
        self.convert_cell(model.stem2, 'cell_stem_1/')

        for i in range(12):
            self.convert_cell(model.cells[i], 'cell_{}/'.format(i))

        self.convert_fc(model.classifier, 'final_layer/FC/weights',
                        'final_layer/FC/biases')

        print('Conversion complete!')
        print('Check 1: whether all TF variables are used...')
        assert len(self.weight_dict) == len(self.used_keys)
        print('Pass!')

        model = model.cuda()
        image = self.tf_image_proc.transpose((2, 0, 1))
        image = Variable(self.Tensor(image)).cuda()
        logits, _ = model(image.unsqueeze(0))
        self.pytorch_logits = logits.data.cpu().numpy()

        print('Check 2: whether logits have small diff...')
        assert np.max(np.abs(self.tf_logits - self.pytorch_logits)) < 1e-5
        print('Pass!')

        model_path = 'data/PNASNet-5_Large.pth'
        torch.save(model.state_dict(), model_path)
        print('PyTorch model saved to {}'.format(model_path))
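The convert_* helpers copy TensorFlow variables into the corresponding PyTorch modules. As a hedged sketch of the pattern (not the exact code), the fully-connected conversion has to transpose, because TF stores FC weights as (in, out) while PyTorch expects (out, in):

def convert_fc(self, fc, weight_key, bias_key):
    # Hypothetical sketch of one convert_* helper.
    fc.weight.data = self.Tensor(self.weight_dict[weight_key]).t()
    fc.bias.data = self.Tensor(self.weight_dict[bias_key])
    self.used_keys += [weight_key, bias_key]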
Example #13
def main():
    if not torch.cuda.is_available():
        print('No GPU device available')
        sys.exit(1)

    ## step 1 construct the selected network
    genotype = eval("genotypes.%s" % args.selected_arch)
    CLASSES = 1000
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype)

    num_gpus = torch.cuda.device_count()
    if num_gpus > 1:
        model = nn.DataParallel(model)
    model = model.cuda()

    ## step 2 load pretrained model parameter
    model_CKPT = torch.load(args.model_path)
    model.load_state_dict(model_CKPT['state_dict'])
    # drop_path_prob lives on the wrapped module under DataParallel
    if num_gpus > 1:
        model.module.drop_path_prob = 0
    else:
        model.drop_path_prob = 0
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    ## step 3 load test data
    valid_queue = load_data_cifar(args)

    ## step 4. inference on test data
    valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model,
                                                      criterion)
    print('-----------------------------------------------')
    print('Valid_acc_top1: %f,  Valid_acc_top5: %f' %
          (valid_acc_top1, valid_acc_top5))
    print('-----------------------------------------------')
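load_data_cifar is not shown; a minimal sketch, assuming the standard CIFAR-10 test split and the usual normalization constants:

def load_data_cifar(args):
    # Hypothetical sketch of the test loader this script assumes.
    test_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2470, 0.2435, 0.2616)),
    ])
    test_data = dset.CIFAR10(root=args.data, train=False, download=True,
                             transform=test_transform)
    return torch.utils.data.DataLoader(test_data,
                                       batch_size=args.batch_size,
                                       shuffle=False,
                                       pin_memory=True,
                                       num_workers=2)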
Example #14
def main():
    # Scale learning rate based on global batch size.
    if not args.no_scale_lr:
        scale = float(args.batch_size * args.world_size) / 128.0
        args.learning_rate = scale * args.learning_rate

    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info('args = %s', args)

    # Get data loaders.
    traindir = os.path.join(args.data, 'train')
    validdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=0.4,
                               contrast=0.4,
                               saturation=0.4,
                               hue=0.2),
        transforms.ToTensor(),
        normalize,
    ])
    val_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])

    if 'lmdb' in args.data:
        train_data = imagenet_lmdb_dataset(traindir, transform=train_transform)
        valid_data = imagenet_lmdb_dataset(validdir, transform=val_transform)
    else:
        train_data = dset.ImageFolder(traindir, transform=train_transform)
        valid_data = dset.ImageFolder(validdir, transform=val_transform)

    train_sampler = torch.utils.data.distributed.DistributedSampler(train_data)
    train_queue = torch.utils.data.DataLoader(train_data,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=8,
                                              sampler=train_sampler)

    valid_queue = torch.utils.data.DataLoader(valid_data,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=8)

    # Set up the network.
    if os.path.isfile(args.genotype):
        logging.info('Loading genotype from: %s' % args.genotype)
        genotype = torch.load(args.genotype, map_location='cpu')
    else:
        genotype = eval('genotypes.%s' % args.genotype)
    if not isinstance(genotype, list):
        genotype = [genotype]

    # If num channels not provided, find the max under 600M MAdds.
    if args.init_channels < 0:
        if args.local_rank == 0:
            flops, num_params, init_channels = find_max_channels(
                genotype, args.layers, args.cell_type, args.max_M_flops * 1e6)
            logging.info('Num flops = %.2fM', flops / 1e6)
            logging.info('Num params = %.2fM', num_params / 1e6)
        else:
            init_channels = 0
        # All reduce with world_size 1 is sum.
        init_channels = torch.Tensor([init_channels]).cuda()
        init_channels = utils.reduce_tensor(init_channels, 1)
        args.init_channels = int(init_channels.item())
    logging.info('Num channels = %d', args.init_channels)

    # Create model and loss.
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype, args.cell_type)
    model = model.cuda()
    model = DDP(model, delay_allreduce=True)

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()
    logging.info('param size = %fM', utils.count_parameters_in_M(model))

    # Set up network weights optimizer.
    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    if args.lr_scheduler == 'exp':
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                    args.decay_period,
                                                    gamma=args.gamma)
    elif args.lr_scheduler == 'cosine':
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, float(args.epochs), eta_min=args.min_learning_rate)

    # Train.
    global_step = 0
    best_acc_top1 = 0
    for epoch in range(args.epochs):
        # Shuffle the sampler, update lrs.
        train_queue.sampler.set_epoch(epoch + args.seed)
        # Change lr.
        if epoch >= args.warmup_epochs:
            scheduler.step()
        model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs

        # Training.
        train_acc, train_obj, global_step = train(train_queue, model,
                                                  criterion_smooth, optimizer,
                                                  epoch, args.learning_rate,
                                                  args.warmup_epochs,
                                                  global_step)
        logging.info('train_acc %f', train_acc)
        writer.add_scalar('train/acc', train_acc, global_step)

        # Validation.
        valid_acc_top1, valid_acc_top5, valid_obj = infer(
            valid_queue, model, criterion)
        logging.info('valid_acc_top1 %f', valid_acc_top1)
        logging.info('valid_acc_top5 %f', valid_acc_top5)
        writer.add_scalar('val/acc_top1', valid_acc_top1, global_step)
        writer.add_scalar('val/acc_top5', valid_acc_top5, global_step)
        writer.add_scalar('val/loss', valid_obj, global_step)

        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True

        if args.local_rank == 0:
            utils.save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_acc_top1': best_acc_top1,
                    'optimizer': optimizer.state_dict(),
                }, is_best, args.save)
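utils.save_checkpoint follows the usual DARTS convention: always write the latest state, and copy it aside when it is the best so far. Essentially:

import os
import shutil

def save_checkpoint(state, is_best, save):
    filename = os.path.join(save, 'checkpoint.pth.tar')
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, os.path.join(save, 'model_best.pth.tar'))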
Example #15
def main():
    args.save = 'eval-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
    utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))

    log_format = '%(asctime)s %(message)s'
    logging.basicConfig(stream=sys.stdout,
                        level=logging.INFO,
                        format=log_format,
                        datefmt='%m/%d %I:%M:%S %p')
    fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
    fh.setFormatter(logging.Formatter(log_format))
    logging.getLogger().addHandler(fh)

    CLASSES = 1000

    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype)
    if args.parallel:
        model = nn.DataParallel(model).cuda()
    else:
        model = model.cuda()

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    traindir = os.path.join(args.data, 'train')
    validdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=0.4,
                                   contrast=0.4,
                                   saturation=0.4,
                                   hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    train_queue = torch.utils.data.DataLoader(train_data,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              pin_memory=True,
                                              num_workers=4)

    valid_queue = torch.utils.data.DataLoader(valid_data,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=4)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                args.decay_period,
                                                gamma=args.gamma)

    best_acc_top1 = 0
    for epoch in range(args.epochs):
        lr = scheduler.get_last_lr()[0]
        logging.info('epoch %d lr %e', epoch, lr)

        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

        # training
        train_acc, train_obj = train(train_queue, model, criterion_smooth,
                                     optimizer)
        logging.info('train_acc %f', train_acc)

        # validation
        valid_acc_top1, valid_acc_top5, valid_obj = infer(
            valid_queue, model, criterion)
        logging.info('valid_acc_top1 %f', valid_acc_top1)
        logging.info('valid_acc_top5 %f', valid_acc_top5)

        # update the learning-rate scheduler
        scheduler.step()

        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True

        utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc_top1': best_acc_top1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.save)
Example #16
def main():
    if not torch.cuda.is_available():
        logging.info('No GPU device available')
        sys.exit(1)
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    torch.cuda.set_device(args.gpu)

    logging.info("args = %s", args)
    logging.info("unparsed_args = %s", unparsed)
    num_gpus = torch.cuda.device_count()
    genotype = eval("genotypes.%s" % args.arch)
    print('---------Genotype---------')
    logging.info(genotype)
    print('--------------------------')
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype)
    #start_epochs = 0

    if num_gpus > 1:
        model = nn.DataParallel(model, device_ids)
        model = model.cuda()
    else:
        model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    if args.resume_path != '':
        state = utils.load_checkpoint(args.resume_path)
        # start_epochs = state['epoch']
        model.load_state_dict(state['state_dict'])

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    data_dir = args.tmp_data_dir
    traindir = os.path.join(data_dir, 'train')
    validdir = os.path.join(data_dir, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=0.4,
                                   contrast=0.4,
                                   saturation=0.4,
                                   hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    train_queue = torch.utils.data.DataLoader(train_data,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              pin_memory=True,
                                              num_workers=args.workers)

    valid_queue = torch.utils.data.DataLoader(valid_data,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=args.workers)

    #    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_period, gamma=args.gamma)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))
    best_acc_top1 = 0
    best_acc_top5 = 0
    for epoch in range(args.epochs):
        if args.lr_scheduler == 'cosine':
            scheduler.step()
            current_lr = scheduler.get_last_lr()[0]
        elif args.lr_scheduler == 'linear':
            current_lr = adjust_lr(optimizer, epoch)
        else:
            print('Wrong lr type, exit')
            sys.exit(1)
        logging.info('Epoch: %d lr %e', epoch, current_lr)
        if epoch < 5 and args.batch_size > 256:
            for param_group in optimizer.param_groups:
                param_group['lr'] = current_lr * (epoch + 1) / 5.0
            logging.info('Warming-up Epoch: %d, LR: %e', epoch,
                         current_lr * (epoch + 1) / 5.0)
        if num_gpus > 1:
            model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        else:
            model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        epoch_start = time.time()
        train_acc, train_obj = train(train_queue, model, criterion_smooth,
                                     optimizer)
        logging.info('Train_acc: %f', train_acc)

        valid_acc_top1, valid_acc_top5, valid_obj = infer(
            valid_queue, model, criterion)
        logging.info('Valid_acc_top1: %f', valid_acc_top1)
        logging.info('Valid_acc_top5: %f', valid_acc_top5)
        epoch_duration = time.time() - epoch_start
        logging.info('Epoch time: %ds.', epoch_duration)
        is_best = False
        if valid_acc_top5 > best_acc_top5:
            best_acc_top5 = valid_acc_top5
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True
        utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc_top1': best_acc_top1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.save)
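The 'linear' branch above calls adjust_lr, which is defined alongside main in the PC-DARTS ImageNet script; it is essentially a linear decay with a faster taper over the last five epochs:

def adjust_lr(optimizer, epoch):
    # Linear LR decay as in PC-DARTS.
    if args.epochs - epoch > 5:
        lr = args.learning_rate * (args.epochs - 5 - epoch) / (args.epochs - 5)
    else:
        lr = args.learning_rate * (args.epochs - epoch) / ((args.epochs - 5) * 5)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr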
Example #17
def main():
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)

  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled=True
  torch.cuda.manual_seed(args.seed)
  logging.info('gpu device = %d' % args.gpu)
  logging.info("args = %s", args)
  #6/28 04:21:06 PM args = Namespace(arch='DARTS', auxiliary=False, auxiliary_weight=0.4, batch_size=128, data='../data/imagenet/', decay_period=1, drop_path_prob=0, epochs=250, gamma=0.97, gpu=0, grad_clip=5.0, init_channels=48, label_smooth=0.1, layers=14, learning_rate=0.1, momentum=0.9, parallel=False, report_freq=100, save='eval-EXP-20190628-162106', seed=0, weight_decay=3e-05)

  genotype = eval("genotypes.%s" % args.arch)
  logging.info('genotype = %s', genotype)
  #06/28 04:21:06 PM genotype = Genotype(normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect', 0), ('skip_connect', 0), ('dil_conv_3x3', 2)], normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('max_pool_3x3', 1)], reduce_concat=[2, 3, 4, 5])

  model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
  #model = Network(args.init_channels=48, CLASSES=1000, args.layers=14, args.auxiliary, genotype)
  if args.parallel:
    model = nn.DataParallel(model).cuda()
  else:
    model = model.cuda()

  logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
  #06/28 04:21:08 PM param size = 4.718752MB

  criterion = nn.CrossEntropyLoss()
  criterion = criterion.cuda()
  criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
  criterion_smooth = criterion_smooth.cuda()

  optimizer = torch.optim.SGD(
    model.parameters(),
    args.learning_rate,
    momentum=args.momentum,
    weight_decay=args.weight_decay
    )

  traindir = os.path.join(args.data, 'train')
  validdir = os.path.join(args.data, 'val')
  normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  train_data = dset.ImageFolder(
    traindir,
    transforms.Compose([
      transforms.RandomResizedCrop(224),
      transforms.RandomHorizontalFlip(),
      transforms.ColorJitter(
        brightness=0.4,
        contrast=0.4,
        saturation=0.4,
        hue=0.2),
      transforms.ToTensor(),
      normalize,
    ]))
  valid_data = dset.ImageFolder(
    validdir,
    transforms.Compose([
      transforms.Resize(256),
      transforms.CenterCrop(224),
      transforms.ToTensor(),
      normalize,
    ]))

  train_queue = torch.utils.data.DataLoader(
    train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=4)

  valid_queue = torch.utils.data.DataLoader(
    valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=4)

  scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_period, gamma=args.gamma)

  best_acc_top1 = 0
  for epoch in range(args.epochs):
    scheduler.step()
    logging.info('epoch %d lr %e', epoch, scheduler.get_last_lr()[0])
    model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

    train_acc, train_obj = train(train_queue, model, criterion_smooth, optimizer)
    logging.info('train_acc %f', train_acc)

    valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model, criterion)
    logging.info('valid_acc_top1 %f', valid_acc_top1)
    logging.info('valid_acc_top5 %f', valid_acc_top5)

    is_best = False
    if valid_acc_top1 > best_acc_top1:
      best_acc_top1 = valid_acc_top1
      is_best = True

    utils.save_checkpoint({
      'epoch': epoch + 1,
      'state_dict': model.state_dict(),
      'best_acc_top1': best_acc_top1,
      'optimizer' : optimizer.state_dict(),
      }, is_best, args.save)
Example #18
def main():
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)
    if args.parallel:  # multi gpu
        num_gpus = torch.cuda.device_count()
        logging.info('num of gpu devices = %d' % num_gpus)
    else:  # single gpu
        torch.cuda.set_device(args.gpu)
        logging.info('gpu device = %d' % args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info("args = %s", args)

    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype)
    if args.parallel:
        model = nn.DataParallel(model).cuda()
    else:
        model = model.cuda()

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    traindir = os.path.join(args.data, 'train')
    validdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=0.4,
                                   contrast=0.4,
                                   saturation=0.4,
                                   hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    train_queue = torch.utils.data.DataLoader(train_data,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              pin_memory=True,
                                              num_workers=4)

    valid_queue = torch.utils.data.DataLoader(valid_data,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=4)

    if args.lr_scheduler == 'step':
        # DARTS code
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                    args.decay_period,
                                                    gamma=args.gamma)
    elif args.lr_scheduler == 'cosine' or args.lr_scheduler == 'linear':
        # PCDARTS code
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, float(args.epochs))
    else:
        raise ValueError("Wrong learning rate scheduler")

    # ---- resume ---- #
    start_epoch = 0
    best_acc_top1 = 0.0
    best_acc_top5 = 0.0
    best_acc_epoch = 0
    if args.resume:
        # in multi-gpu???
        if os.path.isfile(args.resume):
            logging.info("=> loading checkpoint {}".format(args.resume))
            device = torch.device("cuda")
            checkpoint = torch.load(args.resume, map_location=device)
            start_epoch = checkpoint['epoch']
            best_acc_top1 = checkpoint['best_acc_top1']
            best_acc_top5 = checkpoint['best_acc_top5']
            best_acc_epoch = checkpoint['best_acc_epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            logging.info(
                "=> loaded checkpoint {} (trained until epoch {})".format(
                    args.resume, start_epoch - 1))
        else:
            raise ValueError("Wrong args.resume")
    else:
        logging.info("=> training from scratch")

    for epoch in range(start_epoch, args.epochs):
        if args.lr_scheduler == 'cosine' or args.lr_scheduler == 'step':
            scheduler.step()
            current_lr = scheduler.get_last_lr()[0]
        elif args.lr_scheduler == 'linear':
            current_lr = adjust_lr(optimizer, epoch)

        if epoch < 5 and args.batch_size > 256:
            for param_group in optimizer.param_groups:
                param_group['lr'] = args.learning_rate * (epoch + 1) / 5.0
            logging.info('Warming-up epoch: %d, LR: %e', epoch,
                         args.learning_rate * (epoch + 1) / 5.0)
        else:
            logging.info('epoch %d lr %e', epoch, current_lr)

        if args.parallel:
            model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        else:
            model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

        epoch_start = time.time()
        train_acc, train_obj = train(train_queue, model, criterion_smooth,
                                     optimizer)
        logging.info('train_acc %f', train_acc)

        valid_acc_top1, valid_acc_top5, valid_obj = infer(
            valid_queue, model, criterion)
        is_best = (valid_acc_top1 > best_acc_top1)
        if is_best:
            best_acc_top1 = valid_acc_top1
            best_acc_top5 = valid_acc_top5
            best_acc_epoch = epoch + 1
            utils.save(model, os.path.join(args.save, 'best_weights.pt'))
        logging.info('valid_acc %f %f, best_acc %f %f (at epoch %d)',
                     valid_acc_top1, valid_acc_top5, best_acc_top1,
                     best_acc_top5, best_acc_epoch)
        logging.info('epoch time %d sec.', time.time() - epoch_start)

        utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'best_acc_top1': best_acc_top1,
                'best_acc_top5': best_acc_top5,
                'best_acc_epoch': best_acc_epoch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }, is_best, args.save)

    utils.save(model, os.path.join(args.save, 'weights.pt'))
Example #19
def main():
    global best_top1, args, logger

    args.distributed = False
    if 'WORLD_SIZE' in os.environ:
        args.distributed = int(os.environ['WORLD_SIZE']) > 1

    # commented because it is now set as an argparse param.
    # args.gpu = 0
    args.world_size = 1

    if args.distributed:
        args.gpu = args.local_rank % torch.cuda.device_count()
        torch.cuda.set_device(args.gpu)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')
        args.world_size = torch.distributed.get_world_size()

    # note the gpu is used for directory creation and log files
    # which is needed when run as multiple processes
    args = utils.initialize_files_and_args(args)
    logger = utils.logging_setup(args.log_file_path)

    if args.fp16:
        assert torch.backends.cudnn.enabled, "fp16 mode requires cudnn backend to be enabled."

    if args.static_loss_scale != 1.0:
        if not args.fp16:
            logger.info(
                "Warning:  if --fp16 is not used, static_loss_scale will be ignored."
            )

    # load the correct ops dictionary
    op_dict_to_load = "operations.%s" % args.ops
    logger.info('loading op dict: ' + str(op_dict_to_load))
    op_dict = eval(op_dict_to_load)

    # load the correct primitives list
    primitives_to_load = "genotypes.%s" % args.primitives
    logger.info('loading primitives:' + primitives_to_load)
    primitives = eval(primitives_to_load)
    logger.info('primitives: ' + str(primitives))
    # create model
    genotype = eval("genotypes.%s" % args.arch)
    # get the number of output channels
    classes = dataset.class_dict[args.dataset]
    # create the neural network
    if args.dataset == 'imagenet':
        model = NetworkImageNet(args.init_channels,
                                classes,
                                args.layers,
                                args.auxiliary,
                                genotype,
                                op_dict=op_dict,
                                C_mid=args.mid_channels)
        flops_shape = [1, 3, 224, 224]
    else:
        model = NetworkCIFAR(args.init_channels,
                             classes,
                             args.layers,
                             args.auxiliary,
                             genotype,
                             op_dict=op_dict,
                             C_mid=args.mid_channels)
        flops_shape = [1, 3, 32, 32]
    model.drop_path_prob = 0.0
    # if args.pretrained:
    #     logger.info("=> using pre-trained model '{}'".format(args.arch))
    #     model = models.__dict__[args.arch](pretrained=True)
    # else:
    #     logger.info("=> creating model '{}'".format(args.arch))
    #     model = models.__dict__[args.arch]()

    if args.flops:
        model = model.cuda()
        logger.info("param size = %fMB", utils.count_parameters_in_MB(model))
        logger.info("flops_shape = " + str(flops_shape))
        logger.info("flops = " +
                    utils.count_model_flops(model, data_shape=flops_shape))
        return

    if args.sync_bn:
        import apex
        logger.info("using apex synced BN")
        model = apex.parallel.convert_syncbn_model(model)

    model = model.cuda()
    if args.fp16:
        model = network_to_half(model)
    if args.distributed:
        # By default, apex.parallel.DistributedDataParallel overlaps communication with
        # computation in the backward pass.
        # model = DDP(model)
        # delay_allreduce delays all communication to the end of the backward pass.
        model = DDP(model, delay_allreduce=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    # Scale learning rate based on global batch size
    args.learning_rate = args.learning_rate * float(
        args.batch_size * args.world_size) / 256.
    init_lr = args.learning_rate / args.warmup_lr_divisor
    optimizer = torch.optim.SGD(model.parameters(),
                                init_lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # epoch_count = args.epochs - args.start_epoch
    # scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(epoch_count))
    # scheduler = warmup_scheduler.GradualWarmupScheduler(
    #     optimizer, args.warmup_lr_divisor, args.warmup_epochs, scheduler)

    if args.fp16:
        optimizer = FP16_Optimizer(optimizer,
                                   static_loss_scale=args.static_loss_scale,
                                   dynamic_loss_scale=args.dynamic_loss_scale)

    # Optionally resume from a checkpoint
    if args.resume or args.evaluate:
        if args.evaluate:
            args.resume = args.evaluate
        # Use a local scope to avoid dangling references
        def resume():
            global best_top1  # assign the module-level best, not a local
            if os.path.isfile(args.resume):
                logger.info("=> loading checkpoint '{}'".format(args.resume))
                checkpoint = torch.load(
                    args.resume,
                    map_location=lambda storage, loc: storage.cuda(args.gpu))
                args.start_epoch = checkpoint['epoch']
                if 'best_top1' in checkpoint:
                    best_top1 = checkpoint['best_top1']
                model.load_state_dict(checkpoint['state_dict'])
                # An FP16_Optimizer instance's state dict internally stashes the master params.
                optimizer.load_state_dict(checkpoint['optimizer'])
                # TODO(ahundt) make sure scheduler loading isn't broken
                # (the cosine scheduler above is commented out, so the
                # 'lr_scheduler' branch would raise NameError if taken)
                if 'lr_scheduler' in checkpoint:
                    scheduler.load_state_dict(checkpoint['lr_scheduler'])
                elif 'lr_schedule' in checkpoint:
                    lr_schedule = checkpoint['lr_schedule']
                logger.info("=> loaded checkpoint '{}' (epoch {})".format(
                    args.resume, checkpoint['epoch']))
            else:
                logger.info("=> no checkpoint found at '{}'".format(
                    args.resume))

        resume()

    # # Data loading code
    # traindir = os.path.join(args.data, 'train')
    # valdir = os.path.join(args.data, 'val')

    # if(args.arch == "inception_v3"):
    #     crop_size = 299
    #     val_size = 320 # I chose this value arbitrarily, we can adjust.
    # else:
    #     crop_size = 224
    #     val_size = 256

    # train_dataset = datasets.ImageFolder(
    #     traindir,
    #     transforms.Compose([
    #         transforms.RandomResizedCrop(crop_size),
    #         transforms.RandomHorizontalFlip(),
    #         autoaugment.ImageNetPolicy(),
    #         # transforms.ToTensor(),  # Too slow, moved to data_prefetcher()
    #         # normalize,
    #     ]))
    # val_dataset = datasets.ImageFolder(valdir, transforms.Compose([
    #         transforms.Resize(val_size),
    #         transforms.CenterCrop(crop_size)
    #     ]))

    # train_sampler = None
    # val_sampler = None
    # if args.distributed:
    #     train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    #     val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)

    # train_loader = torch.utils.data.DataLoader(
    #     train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
    #     num_workers=args.workers, pin_memory=True, sampler=train_sampler, collate_fn=fast_collate)

    # val_loader = torch.utils.data.DataLoader(
    #     val_dataset,
    #     batch_size=args.batch_size, shuffle=False,
    #     num_workers=args.workers, pin_memory=True,
    #     sampler=val_sampler,
    #     collate_fn=fast_collate)

    # Get preprocessing functions (i.e. transforms) to apply on data
    # normalize_as_tensor = False because we normalize and convert to a
    # tensor in our custom prefetching function, rather than as part of
    # the transform preprocessing list.
    train_transform, valid_transform = utils.get_data_transforms(
        args, normalize_as_tensor=False)
    # Get the training queue, select training and validation from training set
    train_loader, val_loader = dataset.get_training_queues(
        args.dataset,
        train_transform,
        valid_transform,
        args.data,
        args.batch_size,
        train_proportion=1.0,
        collate_fn=fast_collate,
        distributed=args.distributed,
        num_workers=args.workers)

    if args.evaluate:
        if args.dataset == 'cifar10':
            # evaluate best model weights on cifar 10.1
            # https://github.com/modestyachts/CIFAR-10.1
            train_transform, valid_transform = utils.get_data_transforms(args)
            # Get the training queue, select training and validation from training set
            # Get the training queue, use full training and test set
            train_queue, valid_queue = dataset.get_training_queues(
                args.dataset,
                train_transform,
                valid_transform,
                args.data,
                args.batch_size,
                train_proportion=1.0,
                search_architecture=False)
            test_data = cifar10_1.CIFAR10_1(root=args.data,
                                            download=True,
                                            transform=valid_transform)
            test_queue = torch.utils.data.DataLoader(
                test_data,
                batch_size=args.batch_size,
                shuffle=False,
                pin_memory=True,
                num_workers=args.workers)
            eval_stats = evaluate(args,
                                  model,
                                  criterion,
                                  train_queue=train_queue,
                                  valid_queue=valid_queue,
                                  test_queue=test_queue)
            with open(args.stats_file, 'w') as f:
                # TODO(ahundt) fix "TypeError: 1869 is not JSON serializable" to include arg info, see train.py
                # arg_dict = vars(args)
                # arg_dict.update(eval_stats)
                # json.dump(arg_dict, f)
                json.dump(eval_stats, f)
            logger.info("flops = " + utils.count_model_flops(model))
            logger.info(utils.dict_to_log_string(eval_stats))
            logger.info('\nEvaluation of Loaded Model Complete! Save dir: ' +
                        str(args.save))
        else:
            validate(val_loader, model, criterion, args)
        return

    lr_schedule = cosine_power_annealing(
        epochs=args.epochs,
        max_lr=args.learning_rate,
        min_lr=args.learning_rate_min,
        warmup_epochs=args.warmup_epochs,
        exponent_order=args.lr_power_annealing_exponent_order,
        restart_lr=args.restart_lr)
    epochs = np.arange(args.epochs) + args.start_epoch

    stats_csv = args.epoch_stats_file
    stats_csv = stats_csv.replace('.json', '.csv')
    with tqdm(epochs,
              dynamic_ncols=True,
              disable=args.local_rank != 0,
              leave=False) as prog_epoch:
        best_stats = {}
        stats = {}
        epoch_stats = []
        best_epoch = 0
        for epoch, learning_rate in zip(prog_epoch, lr_schedule):
            if args.distributed and train_loader.sampler is not None:
                train_loader.sampler.set_epoch(int(epoch))
            # update the learning rate
            for param_group in optimizer.param_groups:
                param_group['lr'] = learning_rate
            # scheduler.step()
            model.drop_path_prob = args.drop_path_prob * float(epoch) / float(
                args.epochs)
            # train for one epoch
            train_stats = train(train_loader, model, criterion, optimizer,
                                int(epoch), args)
            if args.prof:
                break
            # evaluate on validation set
            top1, val_stats = validate(val_loader, model, criterion, args)
            stats.update(train_stats)
            stats.update(val_stats)
            # stats['lr'] = '{0:.5f}'.format(scheduler.get_lr()[0])
            stats['lr'] = '{0:.5f}'.format(learning_rate)
            stats['epoch'] = epoch

            # remember best top1 and save checkpoint
            if args.local_rank == 0:
                is_best = top1 > best_top1
                best_top1 = max(top1, best_top1)
                stats['best_top1'] = '{0:.3f}'.format(best_top1)
                if is_best:
                    best_epoch = epoch
                    best_stats = copy.deepcopy(stats)
                stats['best_epoch'] = best_epoch

                stats_str = utils.dict_to_log_string(stats)
                logger.info(stats_str)
                save_checkpoint(
                    {
                        'epoch': epoch,
                        'arch': args.arch,
                        'state_dict': model.state_dict(),
                        'best_top1': best_top1,
                        'optimizer': optimizer.state_dict(),
                        # 'lr_scheduler': scheduler.state_dict()
                        'lr_schedule': lr_schedule,
                        'stats': best_stats
                    },
                    is_best,
                    path=args.save)
                prog_epoch.set_description(
                    'Overview ***** best_epoch: {0} best_valid_top1: {1:.2f} ***** Progress'
                    .format(best_epoch, best_top1))
            epoch_stats += [copy.deepcopy(stats)]
            with open(args.epoch_stats_file, 'w') as f:
                json.dump(epoch_stats, f, cls=utils.NumpyEncoder)
            utils.list_of_dicts_to_csv(stats_csv, epoch_stats)
        stats_str = utils.dict_to_log_string(best_stats, key_prepend='best_')
        logger.info(stats_str)
        with open(args.stats_file, 'w') as f:
            arg_dict = vars(args)
            arg_dict.update(best_stats)
            json.dump(arg_dict, f, cls=utils.NumpyEncoder)
        with open(args.epoch_stats_file, 'w') as f:
            json.dump(epoch_stats, f, cls=utils.NumpyEncoder)
        utils.list_of_dicts_to_csv(stats_csv, epoch_stats)
        logger.info('Training of Final Model Complete! Save dir: ' +
                    str(args.save))
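The loop above reads its per-epoch learning rates from a precomputed cosine_power_annealing schedule rather than stepping a torch scheduler object. As a rough illustrative sketch of such a schedule (linear warmup followed by a cosine curve sharpened by a power term; the function name, arguments, and exact formula here are assumptions, and the repository's implementation may differ):

import numpy as np

def cosine_power_annealing_sketch(epochs, max_lr, min_lr,
                                  warmup_epochs=0, exponent_order=2):
    """Illustrative only: linear warmup, then cosine decay raised to a
    power so that larger exponents decay the rate more aggressively."""
    lrs = np.empty(epochs)
    for epoch in range(epochs):
        if epoch < warmup_epochs:
            # ramp linearly from near zero up to max_lr
            lrs[epoch] = max_lr * (epoch + 1) / warmup_epochs
        else:
            progress = (epoch - warmup_epochs) / max(1, epochs - warmup_epochs - 1)
            cos_term = 0.5 * (1 + np.cos(np.pi * progress))
            lrs[epoch] = min_lr + (max_lr - min_lr) * cos_term ** exponent_order
    return lrs

Each value is then written into optimizer.param_groups exactly as the loop above does.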
Example #20
def main():
  parser = argparse.ArgumentParser("Common Argument Parser")
  parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
  parser.add_argument('--dataset', type=str, default='cifar10', help='which dataset:\
                      cifar10, mnist, emnist, fashion, svhn, stl10, devanagari')
  parser.add_argument('--batch_size', type=int, default=64, help='batch size')
  parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
  parser.add_argument('--learning_rate_min', type=float, default=1e-8, help='min learning rate')
  parser.add_argument('--lr_power_annealing_exponent_order', type=float, default=2,
                      help='Cosine Power Annealing Schedule Base, larger numbers make '
                           'the exponential more dominant, smaller make cosine more dominant, '
                           '1 returns to standard cosine annealing.')
  parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
  parser.add_argument('--weight_decay', '--wd', dest='weight_decay', type=float, default=3e-4, help='weight decay')
  parser.add_argument('--partial', default=1/8, type=float, help='partially adaptive parameter p in Padam')
  parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
  parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
  parser.add_argument('--epochs', type=int, default=2000, help='num of training epochs')
  parser.add_argument('--start_epoch', default=1, type=int, metavar='N',
                      help='manual epoch number (useful for restarts)')
  parser.add_argument('--warmup_epochs', type=int, default=5, help='num of warmup training epochs')
  parser.add_argument('--warm_restarts', type=int, default=20, help='warm restarts of cosine annealing')
  parser.add_argument('--init_channels', type=int, default=36, help='num of init channels')
  parser.add_argument('--mid_channels', type=int, default=32, help='C_mid channels in choke SharpSepConv')
  parser.add_argument('--layers', type=int, default=20, help='total number of layers')
  parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
  parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
  parser.add_argument('--mixed_auxiliary', action='store_true', default=False, help='Learn weights for auxiliary networks during training. Overrides auxiliary flag')
  parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')
  parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
  parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
  parser.add_argument('--autoaugment', action='store_true', default=False, help='use cifar10 autoaugment https://arxiv.org/abs/1805.09501')
  parser.add_argument('--random_eraser', action='store_true', default=False, help='use random eraser')
  parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
  parser.add_argument('--save', type=str, default='EXP', help='experiment name')
  parser.add_argument('--seed', type=int, default=0, help='random seed')
  parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
  parser.add_argument('--ops', type=str, default='OPS', help='which operations to use, options are OPS and DARTS_OPS')
  parser.add_argument('--primitives', type=str, default='PRIMITIVES',
                      help='which primitive layers to use inside a cell search space,'
                           ' options are PRIMITIVES, SHARPER_PRIMITIVES, and DARTS_PRIMITIVES')
  parser.add_argument('--optimizer', type=str, default='sgd', help='which optimizer to use, options are padam and sgd')
  parser.add_argument('--load', type=str, default='',  metavar='PATH', help='load weights at specified location')
  parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
  parser.add_argument('--flops', action='store_true', default=False, help='count FLOPs (floating point operations) and exit.')
  parser.add_argument('-e', '--evaluate', dest='evaluate', type=str, metavar='PATH', default='',
                      help='evaluate model at specified path on training, test, and validation datasets')
  parser.add_argument('--multi_channel', action='store_true', default=False, help='perform multi channel search, a completely separate search space')
  parser.add_argument('--load_args', type=str, default='',  metavar='PATH',
                      help='load command line args from a json file, this will override '
                           'all currently set args except for --evaluate, and arguments '
                           'that did not exist when the json file was originally saved out.')
  parser.add_argument('--layers_of_cells', type=int, default=8, help='total number of cells in the whole network, default is 8 cells')
  parser.add_argument('--layers_in_cells', type=int, default=4,
                      help='Total number of nodes in each cell, aka number of steps,'
                           ' default is 4 nodes, which implies 8 ops')
  parser.add_argument('--weighting_algorithm', type=str, default='scalar',
                      help='which weighting algorithm to use, options are '
                           '"max_w" ((1. - max_w + w) * op) and "scalar" (w * op)')
  # TODO(ahundt) remove final path and switch back to genotype
  parser.add_argument('--load_genotype', type=str, default=None, help='Name of genotype to be used')
  parser.add_argument('--simple_path', default=True, action='store_false', help='Final model is a simple path (MultiChannelNetworkModel)')
  args = parser.parse_args()

  args = utils.initialize_files_and_args(args)

  logger = utils.logging_setup(args.log_file_path)

  if not torch.cuda.is_available():
    logger.info('no gpu device available')
    sys.exit(1)

  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled=True
  torch.cuda.manual_seed(args.seed)
  logger.info('gpu device = %d' % args.gpu)
  logger.info("args = %s", args)

  DATASET_CLASSES = dataset.class_dict[args.dataset]
  DATASET_CHANNELS = dataset.inp_channel_dict[args.dataset]
  DATASET_MEAN = dataset.mean_dict[args.dataset]
  DATASET_STD = dataset.std_dict[args.dataset]
  logger.info('output channels: ' + str(DATASET_CLASSES))

  # load the correct ops dictionary
  op_dict_to_load = "operations.%s" % args.ops
  logger.info('loading op dict: ' + str(op_dict_to_load))
  op_dict = eval(op_dict_to_load)

  # load the correct primitives list
  primitives_to_load = "genotypes.%s" % args.primitives
  logger.info('loading primitives: ' + primitives_to_load)
  primitives = eval(primitives_to_load)
  logger.info('primitives: ' + str(primitives))

  genotype = eval("genotypes.%s" % args.arch)
  # create the neural network

  criterion = nn.CrossEntropyLoss()
  criterion = criterion.cuda()
  if args.multi_channel:
    final_path = None
    if args.load_genotype is not None:
      genotype = getattr(genotypes, args.load_genotype)
      print(genotype)
      if type(genotype[0]) is str:
        logger.info('Path :%s', genotype)
    # TODO(ahundt) remove final path and switch back to genotype
    cnn_model = MultiChannelNetwork(
      args.init_channels, DATASET_CLASSES, layers=args.layers_of_cells, criterion=criterion, steps=args.layers_in_cells,
      weighting_algorithm=args.weighting_algorithm, genotype=genotype)
    flops_shape = [1, 3, 32, 32]
  elif args.dataset == 'imagenet':
    cnn_model = NetworkImageNet(args.init_channels, DATASET_CLASSES, args.layers, args.auxiliary, genotype, op_dict=op_dict, C_mid=args.mid_channels)
    flops_shape = [1, 3, 224, 224]
  else:
    cnn_model = NetworkCIFAR(args.init_channels, DATASET_CLASSES, args.layers, args.auxiliary, genotype, op_dict=op_dict, C_mid=args.mid_channels)
    flops_shape = [1, 3, 32, 32]
  cnn_model = cnn_model.cuda()

  logger.info("param size = %fMB", utils.count_parameters_in_MB(cnn_model))
  if args.flops:
    logger.info('flops_shape = ' + str(flops_shape))
    logger.info("flops = " + utils.count_model_flops(cnn_model, data_shape=flops_shape))
    return

  optimizer = torch.optim.SGD(
      cnn_model.parameters(),
      args.learning_rate,
      momentum=args.momentum,
      weight_decay=args.weight_decay
      )

  # Get preprocessing functions (i.e. transforms) to apply on data
  train_transform, valid_transform = utils.get_data_transforms(args)
  if args.evaluate:
    # evaluate the train dataset without augmentation
    train_transform = valid_transform

  # Get the training queue, use full training and test set
  train_queue, valid_queue = dataset.get_training_queues(
    args.dataset, train_transform, valid_transform, args.data, args.batch_size, train_proportion=1.0, search_architecture=False)

  test_queue = None
  if args.dataset == 'cifar10':
    # evaluate best model weights on cifar 10.1
    # https://github.com/modestyachts/CIFAR-10.1
    test_data = cifar10_1.CIFAR10_1(root=args.data, download=True, transform=valid_transform)
    test_queue = torch.utils.data.DataLoader(
      test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=8)

  if args.evaluate:
    # evaluate the loaded model, print the result, and return
    logger.info("Evaluating inference with weights file: " + args.load)
    eval_stats = evaluate(
      args, cnn_model, criterion, args.load,
      train_queue=train_queue, valid_queue=valid_queue, test_queue=test_queue)
    with open(args.stats_file, 'w') as f:
      arg_dict = vars(args)
      arg_dict.update(eval_stats)
      json.dump(arg_dict, f)
    logger.info("flops = " + utils.count_model_flops(cnn_model))
    logger.info(utils.dict_to_log_string(eval_stats))
    logger.info('\nEvaluation of Loaded Model Complete! Save dir: ' + str(args.save))
    return

  lr_schedule = cosine_power_annealing(
    epochs=args.epochs, max_lr=args.learning_rate, min_lr=args.learning_rate_min,
    warmup_epochs=args.warmup_epochs, exponent_order=args.lr_power_annealing_exponent_order)
  epochs = np.arange(args.epochs) + args.start_epoch
  # scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))
  epoch_stats = []

  stats_csv = args.epoch_stats_file
  stats_csv = stats_csv.replace('.json', '.csv')
  with tqdm(epochs, dynamic_ncols=True) as prog_epoch:
    best_valid_acc = 0.0
    best_epoch = 0
    best_stats = {}
    stats = {}
    epoch_stats = []
    weights_file = os.path.join(args.save, 'weights.pt')
    for epoch, learning_rate in zip(prog_epoch, lr_schedule):
      # update the drop_path_prob augmentation
      cnn_model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
      # update the learning rate
      for param_group in optimizer.param_groups:
        param_group['lr'] = learning_rate
      # scheduler.get_lr()[0]

      train_acc, train_obj = train(args, train_queue, cnn_model, criterion, optimizer)

      val_stats = infer(args, valid_queue, cnn_model, criterion)
      stats.update(val_stats)
      stats['train_acc'] = train_acc
      stats['train_loss'] = train_obj
      stats['lr'] = learning_rate
      stats['epoch'] = epoch

      if stats['valid_acc'] > best_valid_acc:
        # new best epoch, save weights
        utils.save(cnn_model, weights_file)
        best_epoch = epoch
        best_stats.update(copy.deepcopy(stats))
        best_valid_acc = stats['valid_acc']
        best_train_loss = train_obj
        best_train_acc = train_acc
      # else:
      #   # not best epoch, load best weights
      #   utils.load(cnn_model, weights_file)
      logger.info('epoch, %d, train_acc, %f, valid_acc, %f, train_loss, %f, valid_loss, %f, lr, %e, best_epoch, %d, best_valid_acc, %f, ' + utils.dict_to_log_string(stats),
                  epoch, train_acc, stats['valid_acc'], train_obj, stats['valid_loss'], learning_rate, best_epoch, best_valid_acc)
      epoch_stats += [copy.deepcopy(stats)]
      with open(args.epoch_stats_file, 'w') as f:
        json.dump(epoch_stats, f, cls=utils.NumpyEncoder)
      utils.list_of_dicts_to_csv(stats_csv, epoch_stats)

    # get stats from best epoch including cifar10.1
    eval_stats = evaluate(args, cnn_model, criterion, weights_file, train_queue, valid_queue, test_queue)
    with open(args.stats_file, 'w') as f:
      arg_dict = vars(args)
      arg_dict.update(eval_stats)
      json.dump(arg_dict, f, cls=utils.NumpyEncoder)
    with open(args.epoch_stats_file, 'w') as f:
      json.dump(epoch_stats, f, cls=utils.NumpyEncoder)
    logger.info(utils.dict_to_log_string(eval_stats))
    logger.info('Training of Final Model Complete! Save dir: ' + str(args.save))
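Example #20 ramps cnn_model.drop_path_prob linearly from 0 toward args.drop_path_prob over the epochs. Inside DARTS-style cells that probability usually drives a drop-path (stochastic depth) operation along the lines of the sketch below; this is the common generic form, not necessarily this repository's exact helper.

import torch

def drop_path(x, drop_prob):
    """Zero an entire sample's path with probability drop_prob and
    rescale survivors so the expected activation stays unchanged."""
    if drop_prob <= 0.:
        return x
    keep_prob = 1. - drop_prob
    # one Bernoulli draw per sample, broadcast over channels and space
    mask = torch.bernoulli(
        torch.full((x.size(0), 1, 1, 1), keep_prob, device=x.device))
    return x.div(keep_prob).mul(mask)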
Example #21
                             transforms.Resize(256),
                             transforms.CenterCrop(224),
                             transforms.ToTensor(),
                             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                  std=[0.229, 0.224, 0.225])
                         ]))
val_sampler = torch.utils.data.distributed.DistributedSampler(
    val_dataset, num_replicas=hvd.size(), rank=hvd.rank())
val_loader = torch.utils.data.DataLoader(val_dataset,
                                         batch_size=args.val_batch_size,
                                         sampler=val_sampler,
                                         **kwargs)

# Set up standard ResNet-50 model.
genotype = eval("genotypes.%s" % args.arch)
model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                genotype)
# model = models.resnet50()

if args.cuda:
    # Move model to GPU.
    model.cuda()

# optionally load pretrained model weights, e.g.:
# model_path = "./model_imagenet_20190112/checkpoint-60.pth"
# model.load_state_dict(torch.load(model_path))

criterion = nn.CrossEntropyLoss()
criterion = criterion.cuda()
criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
criterion_smooth = criterion_smooth.cuda()
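CrossEntropyLabelSmooth blends the one-hot target with a uniform distribution before applying cross entropy, which regularizes the large ImageNet models trained here. A sketch of the implementation these scripts typically pair with (the class name suffix and the epsilon default are assumptions):

import torch
import torch.nn as nn

class CrossEntropyLabelSmoothSketch(nn.Module):
    def __init__(self, num_classes, epsilon=0.1):
        super().__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.log_softmax = nn.LogSoftmax(dim=1)

    def forward(self, inputs, targets):
        log_probs = self.log_softmax(inputs)
        # one-hot targets smoothed toward the uniform distribution
        one_hot = torch.zeros_like(log_probs).scatter_(
            1, targets.unsqueeze(1), 1)
        smoothed = (1 - self.epsilon) * one_hot + self.epsilon / self.num_classes
        return (-smoothed * log_probs).mean(0).sum()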
Example #22
def main():
  if not torch.cuda.is_available():
    logger.info('no gpu device available')
    sys.exit(1)

  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled=True
  torch.cuda.manual_seed(args.seed)
  logger.info('gpu device = %d' % args.gpu)
  logger.info("args = %s", args)

  # load the correct ops dictionary
  op_dict_to_load = "operations.%s" % args.ops
  logger.info('loading op dict: ' + str(op_dict_to_load))
  op_dict = eval(op_dict_to_load)

  # load the correct primitives list
  primitives_to_load = "genotypes.%s" % args.primitives
  logger.info('loading primitives: ' + primitives_to_load)
  primitives = eval(primitives_to_load)
  logger.info('primitives: ' + str(primitives))

  genotype = eval("genotypes.%s" % args.arch)
  cnn_model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype, op_dict=op_dict, C_mid=args.mid_channels)
  if args.parallel:
    cnn_model = nn.DataParallel(cnn_model).cuda()
  else:
    cnn_model = cnn_model.cuda()

  logger.info("param size = %fMB", utils.count_parameters_in_MB(cnn_model))
  if args.flops:
    cnn_model.drop_path_prob = 0.0
    logger.info("flops = " + utils.count_model_flops(cnn_model, data_shape=[1, 3, 224, 224]))
    exit(1)

  criterion = nn.CrossEntropyLoss()
  criterion = criterion.cuda()
  criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
  criterion_smooth = criterion_smooth.cuda()

  optimizer = torch.optim.SGD(
    cnn_model.parameters(),
    args.learning_rate,
    momentum=args.momentum,
    weight_decay=args.weight_decay
    )

  traindir = os.path.join(args.data, 'train')
  validdir = os.path.join(args.data, 'val')
  normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  train_data = dset.ImageFolder(
    traindir,
    transforms.Compose([
      transforms.RandomResizedCrop(224),
      transforms.RandomHorizontalFlip(),
      autoaugment.ImageNetPolicy(),
      # transforms.ColorJitter(
      #   brightness=0.4,
      #   contrast=0.4,
      #   saturation=0.4,
      #   hue=0.2),
      transforms.ToTensor(),
      normalize,
    ]))
  valid_data = dset.ImageFolder(
    validdir,
    transforms.Compose([
      transforms.Resize(256),
      transforms.CenterCrop(224),
      transforms.ToTensor(),
      normalize,
    ]))

  train_queue = torch.utils.data.DataLoader(
    train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=8)

  valid_queue = torch.utils.data.DataLoader(
    valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=8)

  scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_period, gamma=args.gamma)

  prog_epoch = tqdm(range(args.epochs), dynamic_ncols=True)
  best_valid_acc = 0.0
  best_epoch = 0
  best_stats = {}
  best_acc_top1 = 0
  weights_file = os.path.join(args.save, 'weights.pt')
  for epoch in prog_epoch:
    scheduler.step()
    cnn_model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

    train_acc, train_obj = train.train(args, train_queue, cnn_model, criterion, optimizer)
    stats = train.infer(args, valid_queue, cnn_model, criterion)

    is_best = False
    if stats['valid_acc'] > best_valid_acc:
      # new best epoch, save weights
      utils.save(cnn_model, weights_file)
      best_epoch = epoch
      best_valid_acc = stats['valid_acc']

      best_stats = stats
      best_stats['lr'] = scheduler.get_lr()[0]
      best_stats['epoch'] = best_epoch
      best_train_loss = train_obj
      best_train_acc = train_acc
      is_best = True

    logger.info('epoch, %d, train_acc, %f, valid_acc, %f, train_loss, %f, valid_loss, %f, lr, %e, best_epoch, %d, best_valid_acc, %f, ' + utils.dict_to_log_string(stats),
                epoch, train_acc, stats['valid_acc'], train_obj, stats['valid_loss'], scheduler.get_lr()[0], best_epoch, best_valid_acc)
    checkpoint = {
          'epoch': epoch,
          'state_dict': cnn_model.state_dict(),
          'best_acc_top1': best_valid_acc,
          'optimizer' : optimizer.state_dict(),
    }
    checkpoint.update(stats)
    utils.save_checkpoint(checkpoint, is_best, args.save)

  best_epoch_str = utils.dict_to_log_string(best_stats, key_prepend='best_')
  logger.info(best_epoch_str)
  logger.info('Training of Final Model Complete! Save dir: ' + str(args.save))
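Nearly every example logs utils.count_parameters_in_MB(model). In DARTS-derived codebases this helper is conventionally a one-liner that skips the auxiliary tower so the reported size matches the deployed network; a sketch under that assumption:

import numpy as np

def count_parameters_in_MB_sketch(model):
    """Millions of parameters, excluding auxiliary-head weights."""
    return sum(np.prod(v.size()) for name, v in model.named_parameters()
               if 'auxiliary' not in name) / 1e6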
Example #23
genotype = eval("genotypes.%s" % args.arch)
# get the number of output channels
classes = dataset.class_dict[args.dataset]
# create the neural network
print('initializing module')
if args.arch == 'multi_channel':
    cnn_model = model_search.MultiChannelNetwork(always_apply_ops=True,
                                                 layers=4,
                                                 steps=2,
                                                 visualization=True,
                                                 genotype=None)
elif args.dataset == 'imagenet':
    cnn_model = NetworkImageNet(args.init_channels,
                                classes,
                                args.layers,
                                args.auxiliary,
                                genotype,
                                op_dict=op_dict,
                                C_mid=args.mid_channels)
    # workaround for graph generation limitations
    cnn_model.drop_path_prob = torch.zeros(1)
else:
    cnn_model = NetworkCIFAR(args.init_channels,
                             classes,
                             args.layers,
                             args.auxiliary,
                             genotype,
                             op_dict=op_dict,
                             C_mid=args.mid_channels)
    # workaround for graph generation limitations
    cnn_model.drop_path_prob = torch.zeros(1)
Example #24
def main():

    print('---------Genotype---------')
    logging.info(genotype)
    print('--------------------------')
    model = Network(args.init_channels, CLASSES, args.layers, genotype)
    model = model.cuda()
    # Arcface
    margin = ArcMarginProduct(512, CLASSES)
    margin = margin.cuda()

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    # A single SGD optimizer over the model and margin parameters covers
    # both the multi-GPU and single-GPU cases.
    optimizer = torch.optim.SGD([{
        'params': model.parameters(),
        'weight_decay': args.weight_decay
    }, {
        'params': margin.parameters(),
        'weight_decay': args.weight_decay
    }],
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if MULTI_GPU:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank])
        margin = torch.nn.parallel.DistributedDataParallel(
            margin, device_ids=[args.local_rank])
        # margin = torch.nn.DataParallel(margin)

    if resume:
        print('resuming: loading margin weights from ./margin.pt')
        checkpoint = torch.load('./margin.pt')
        margin.load_state_dict(checkpoint)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))

    best_acc_top5 = 0
    lr = args.learning_rate
    for epoch in range(args.epochs):
        if args.lr_scheduler == 'cosine':
            scheduler.step()
            current_lr = scheduler.get_lr()[0]
        elif args.lr_scheduler == 'linear':
            current_lr = adjust_lr(optimizer, epoch)
        else:
            print('Wrong lr type, exit')
            sys.exit(1)
        logging.info('Epoch: %d lr %e', epoch, current_lr)
        if epoch < 5 and args.batch_size > 256:
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr * (epoch + 1) / 5.0
            logging.info('Warming-up Epoch: %d, LR: %e', epoch,
                         lr * (epoch + 1) / 5.0)

        if MULTI_GPU:
            model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        else:
            model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

        epoch_start = time.time()
        train_acc, train_obj = train(data_loaders['train_dataset'], model,
                                     margin, criterion, optimizer, epoch)
        logging.info('Train_acc: %f', train_acc)

        valid_acc_top1 = infer(data_loaders, dataset, model, margin, epoch)
        global best_acc_top1
        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True

        state = {
            'epoch': epoch + 1,
            'model': model.module.state_dict() if MULTI_GPU else model.state_dict(),
            'margin': margin.module.state_dict() if MULTI_GPU else margin.state_dict(),
            'best_acc_top1': best_acc_top1,
            'optimizer': optimizer.state_dict(),
        }

        # always save the latest checkpoint, and keep a separate copy
        # of the best model so far
        filename = os.path.join('./', 'checkpoint.pth.tar')
        torch.save(state, filename)
        if is_best:
            # torch.save(model.state_dict(), model_path)
            torch.save(state, os.path.join('./', 'best_model.pth.tar'))
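ArcMarginProduct is an ArcFace-style classification head: it normalizes embeddings and class weights, adds an angular margin m to the ground-truth class, and scales the logits by s. A minimal sketch of that computation (the class name here and the s and m defaults are assumptions):

import torch
import torch.nn as nn
import torch.nn.functional as F

class ArcMarginSketch(nn.Module):
    def __init__(self, in_features, num_classes, s=30.0, m=0.5):
        super().__init__()
        self.weight = nn.Parameter(torch.empty(num_classes, in_features))
        nn.init.xavier_uniform_(self.weight)
        self.s, self.m = s, m

    def forward(self, embeddings, labels):
        # cosine similarity between normalized embeddings and class centers
        cosine = F.linear(F.normalize(embeddings), F.normalize(self.weight))
        theta = torch.acos(cosine.clamp(-1 + 1e-7, 1 - 1e-7))
        target = torch.cos(theta + self.m)
        one_hot = F.one_hot(labels, cosine.size(1)).to(cosine.dtype)
        # margin applies only to the ground-truth class, then scale by s
        return self.s * (one_hot * target + (1 - one_hot) * cosine)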
Example #25
def main():
    if not torch.cuda.is_available():
        logging.info('No GPU device available')
        sys.exit(1)
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled=True
    torch.cuda.manual_seed(args.seed)
    logging.info("args = %s", args)
    logging.info("unparsed args = %s", unparsed)
    num_gpus = torch.cuda.device_count()
    
    genotype = eval("genotypes.%s" % args.arch)
    print('---------Genotype---------')
    logging.info(genotype)
    print('--------------------------')
    if args.dataset in utils.LARGE_DATASETS:
        model = NetworkLarge(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
    else:
        model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
    if num_gpus > 1:
        model = torch.nn.DataParallel(model)
    model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
        )
    train_transform, valid_transform = utils.data_transforms(args.dataset, args.cutout, args.cutout_length)
    if args.dataset == "CIFAR100":
        train_data = dset.CIFAR100(root=args.tmp_data_dir, train=True, download=True, transform=train_transform)
        valid_data = dset.CIFAR100(root=args.tmp_data_dir, train=False, download=True, transform=valid_transform)
    elif args.dataset == "CIFAR10":
        train_data = dset.CIFAR10(root=args.tmp_data_dir, train=True, download=True, transform=train_transform)
        valid_data = dset.CIFAR10(root=args.tmp_data_dir, train=False, download=True, transform=valid_transform)
    elif args.dataset == 'mit67':
        dset_cls = dset.ImageFolder
        data_path = '%s/MIT67/train' % args.tmp_data_dir  
        val_path = '%s/MIT67/test' % args.tmp_data_dir 
        train_data = dset_cls(root=data_path, transform=train_transform)
        valid_data = dset_cls(root=val_path, transform=valid_transform)
    elif args.dataset == 'sport8':
        dset_cls = dset.ImageFolder
        data_path = '%s/Sport8/train' % args.tmp_data_dir 
        val_path = '%s/Sport8/test' % args.tmp_data_dir  
        train_data = dset_cls(root=data_path, transform=train_transform)
        valid_data = dset_cls(root=val_path, transform=valid_transform)
    elif args.dataset == "flowers102":
        dset_cls = dset.ImageFolder
        data_path = '%s/flowers102/train' % args.tmp_data_dir
        val_path = '%s/flowers102/test' % args.tmp_data_dir
        train_data = dset_cls(root=data_path, transform=train_transform)
        valid_data = dset_cls(root=val_path, transform=valid_transform)
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=args.workers)

    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=args.workers)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))
    best_acc = 0.0
    for epoch in range(args.epochs):
        scheduler.step()
        logging.info('Epoch: %d lr %e', epoch, scheduler.get_lr()[0])
        if num_gpus > 1:
            model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        else:
            model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        start_time = time.time()
        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('Train_acc: %f', train_acc)

        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        if valid_acc > best_acc:
            best_acc = valid_acc
        logging.info('Valid_acc: %f', valid_acc)
        end_time = time.time()
        duration = end_time - start_time
        print('Epoch time: %ds.' % duration)
        utils.save(model, os.path.join(args.save, 'weights.pt'))
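The per-dataset ImageFolder branches above differ only in the directory name under args.tmp_data_dir. A hypothetical helper that collapses them (directory names are taken from the paths in the example; the helper itself is an assumption, not part of the codebase):

import os
import torchvision.datasets as dset

FOLDER_DATASETS = {'mit67': 'MIT67', 'sport8': 'Sport8',
                   'flowers102': 'flowers102'}

def image_folder_data(name, root, train_transform, valid_transform):
    """Build train/valid ImageFolder datasets for <root>/<Dir>/{train,test}."""
    base = os.path.join(root, FOLDER_DATASETS[name])
    train_data = dset.ImageFolder(os.path.join(base, 'train'),
                                  transform=train_transform)
    valid_data = dset.ImageFolder(os.path.join(base, 'test'),
                                  transform=valid_transform)
    return train_data, valid_data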
Example #26
def main():
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)

  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled=True
  torch.cuda.manual_seed(args.seed)
  logging.info('gpu device = %d' % args.gpu)
  logging.info("args = %s", args)

  genotype = eval("genotypes.%s" % args.arch)
  if args.dataset in utils.LARGE_DATASETS:
    model = NetworkLarge(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
  else:
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
  model = model.cuda()

  logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

  criterion = nn.CrossEntropyLoss()
  criterion = criterion.cuda()
  optimizer = torch.optim.SGD(
      model.parameters(),
      args.learning_rate,
      momentum=args.momentum,
      weight_decay=args.weight_decay
      )

  train_transform, valid_transform = utils.data_transforms(args.dataset, args.cutout, args.cutout_length)
  if args.dataset == "CIFAR100":
    train_data = dset.CIFAR100(root=args.datapath, train=True, download=True, transform=train_transform)
    valid_data = dset.CIFAR100(root=args.datapath, train=False, download=True, transform=valid_transform)
  elif args.dataset == "CIFAR10":
    train_data = dset.CIFAR10(root=args.datapath, train=True, download=True, transform=train_transform)
    valid_data = dset.CIFAR10(root=args.datapath, train=False, download=True, transform=valid_transform)
  elif args.dataset == 'MIT67':
    dset_cls = dset.ImageFolder
    data_path = '%s/MIT67/train' % args.datapath  # 'data/MIT67/train'
    val_path = '%s/MIT67/test' % args.datapath  # 'data/MIT67/test'
    train_data = dset_cls(root=data_path, transform=train_transform)
    valid_data = dset_cls(root=val_path, transform=valid_transform)
  elif args.dataset == 'Sport8':
    dset_cls = dset.ImageFolder
    data_path = '%s/Sport8/train' % args.datapath  # 'data/Sport8/train'
    val_path = '%s/Sport8/test' % args.datapath  # 'data/Sport8/test'
    train_data = dset_cls(root=data_path, transform=train_transform)
    valid_data = dset_cls(root=val_path, transform=valid_transform)
  elif args.dataset == "flowers102":
    dset_cls = dset.ImageFolder
    data_path = '%s/flowers102/train' % args.datapath
    val_path = '%s/flowers102/test' % args.datapath
    train_data = dset_cls(root=data_path, transform=train_transform)
    valid_data = dset_cls(root=val_path, transform=valid_transform)

  train_queue = torch.utils.data.DataLoader(
      train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=2)

  valid_queue = torch.utils.data.DataLoader(
      valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)

  scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))
  best_acc = 0.0
  for epoch in range(args.epochs):
    scheduler.step()
    logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
    model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

    train_acc, train_obj = train(train_queue, model, criterion, optimizer)
    logging.info('train_acc %f', train_acc)

    valid_acc, valid_obj = infer(valid_queue, model, criterion)
    if valid_acc > best_acc:
        best_acc = valid_acc
    logging.info('valid_acc %f, best_acc %f', valid_acc, best_acc)

    utils.save(model, os.path.join(args.save, 'weights.pt'))
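The train and infer helpers these loops call usually measure top-1 (and top-5) accuracy with the standard utility found in DARTS-style repositories; a sketch:

import torch

def accuracy_sketch(output, target, topk=(1,)):
    """Return top-k accuracies in percent for a batch of logits."""
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res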
Example #27
def main():
    if not torch.cuda.is_available():
        logging.info('No GPU device available')
        sys.exit(1)
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info("args = %s", args)
    logging.info("unparsed args = %s", unparsed)
    num_gpus = torch.cuda.device_count()
    gpu_logger = GpuLogThread(list(range(num_gpus)),
                              writer,
                              seconds=10 if args.test else 300)
    gpu_logger.start()

    genotype = genotypes.load_genotype(args.arch, skip_cons=args.arch_pref_sc)
    print('---------Genotype---------')
    logging.info(genotype)
    print('--------------------------')
    if args.dataset == "CIFAR100":
        CLASSES = 100
    elif args.dataset == "CIFAR10":
        CLASSES = 10
    elif args.dataset == 'MIT67':
        dset_cls = dset.ImageFolder
        CLASSES = 67
    elif args.dataset == 'Sport8':
        dset_cls = dset.ImageFolder
        CLASSES = 8
    elif args.dataset == "flowers102":
        dset_cls = dset.ImageFolder
        CLASSES = 102
    if args.dataset in utils.LARGE_DATASETS:
        model = NetworkLarge(args.init_channels, CLASSES, args.layers,
                             args.auxiliary, genotype)
    else:
        model = Network(args.init_channels, CLASSES, args.layers,
                        args.auxiliary, genotype)
    if num_gpus > 1:
        model = nn.DataParallel(model)
    model = model.cuda()
    logging.info("param count = %d", utils.count_parameters(model))
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    train_transform, valid_transform = utils.data_transforms(
        args.dataset, args.cutout, args.cutout_length)
    if args.dataset == "CIFAR100":
        train_data = dset.CIFAR100(root=args.tmp_data_dir,
                                   train=True,
                                   download=True,
                                   transform=train_transform)
        valid_data = dset.CIFAR100(root=args.tmp_data_dir,
                                   train=False,
                                   download=True,
                                   transform=valid_transform)
    elif args.dataset == "CIFAR10":
        train_data = dset.CIFAR10(root=args.tmp_data_dir,
                                  train=True,
                                  download=True,
                                  transform=train_transform)
        valid_data = dset.CIFAR10(root=args.tmp_data_dir,
                                  train=False,
                                  download=True,
                                  transform=valid_transform)
    elif args.dataset == 'MIT67':
        dset_cls = dset.ImageFolder
        data_path = '%s/MIT67/train' % args.tmp_data_dir  # 'data/MIT67/train'
        val_path = '%s/MIT67/test' % args.tmp_data_dir  # 'data/MIT67/test'
        train_data = dset_cls(root=data_path, transform=train_transform)
        valid_data = dset_cls(root=val_path, transform=valid_transform)
    elif args.dataset == 'Sport8':
        dset_cls = dset.ImageFolder
        data_path = '%s/Sport8/train' % args.tmp_data_dir  # 'data/Sport8/train'
        val_path = '%s/Sport8/test' % args.tmp_data_dir  # 'data/Sport8/test'
        train_data = dset_cls(root=data_path, transform=train_transform)
        valid_data = dset_cls(root=val_path, transform=valid_transform)
    elif args.dataset == "flowers102":
        dset_cls = dset.ImageFolder
        data_path = '%s/flowers102/train' % args.tmp_data_dir
        val_path = '%s/flowers102/test' % args.tmp_data_dir
        train_data = dset_cls(root=data_path, transform=train_transform)
        valid_data = dset_cls(root=val_path, transform=valid_transform)

    train_iterator = utils.DynamicBatchSizeLoader(
        torch.utils.data.DataLoader(train_data,
                                    batch_size=args.batch_multiples,
                                    shuffle=True,
                                    pin_memory=True,
                                    num_workers=args.workers),
        args.batch_size_min)
    test_iterator = utils.DynamicBatchSizeLoader(
        torch.utils.data.DataLoader(valid_data,
                                    batch_size=args.batch_multiples,
                                    shuffle=False,
                                    pin_memory=True,
                                    num_workers=args.workers),
        args.batch_size_min)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))
    best_acc = 0.0
    for epoch in range(args.epochs):
        lr = scheduler.get_lr()[0]
        drop_path_prob = args.drop_path_prob * epoch / args.epochs
        logging.info('Epoch: %d lr %e', epoch, lr)
        if num_gpus > 1:
            model.module.drop_path_prob = drop_path_prob
        else:
            model.drop_path_prob = drop_path_prob
        epoch_start_time = time.time()
        train_acc, train_obj = train(train_iterator, test_iterator, model,
                                     criterion, optimizer, gpu_logger)
        logging.info('Train_acc: %f', train_acc)

        test_acc, test_obj = infer(test_iterator, model, criterion)
        if test_acc > best_acc:
            best_acc = test_acc
        logging.info('Valid_acc: %f', test_acc)
        epoch_duration = time.time() - epoch_start_time
        utils.save(model, os.path.join(args.save, 'weights.pt'))

        # log info
        print('Epoch time: %ds.' % epoch_duration)
        writer.add_scalar('epoch/lr', lr, epoch)
        writer.add_scalar('epoch/drop_path_prob', drop_path_prob, epoch)
        writer.add_scalar('epoch/seconds', epoch_duration, epoch)
        writer.add_scalar('epoch/batch_size', train_iterator.batch_size, epoch)
        writer.add_scalar('train/accuracy', train_acc, epoch)
        writer.add_scalar('train/loss', train_obj, epoch)
        writer.add_scalar('test/accuracy', test_acc, epoch)
        writer.add_scalar('test/loss', test_obj, epoch)

        scheduler.step()
    gpu_logger.stop()
Example #28
def main(args):
    place = fluid.CUDAPlace(fluid.dygraph.parallel.Env().dev_id) \
        if args.use_data_parallel else fluid.CUDAPlace(0)

    with fluid.dygraph.guard(place):
        genotype = eval("genotypes.%s" % args.arch)
        model = Network(C=args.init_channels,
                        num_classes=args.class_num,
                        layers=args.layers,
                        auxiliary=args.auxiliary,
                        genotype=genotype)

        logger.info("param size = {:.6f}MB".format(
            count_parameters_in_MB(model.parameters())))

        device_num = fluid.dygraph.parallel.Env().nranks
        step_per_epoch = int(args.trainset_num /
                             (args.batch_size * device_num))
        learning_rate = fluid.dygraph.ExponentialDecay(args.learning_rate,
                                                       step_per_epoch,
                                                       args.decay_rate,
                                                       staircase=True)

        clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=args.grad_clip)
        optimizer = fluid.optimizer.MomentumOptimizer(
            learning_rate,
            momentum=args.momentum,
            regularization=fluid.regularizer.L2Decay(args.weight_decay),
            parameter_list=model.parameters(),
            grad_clip=clip)

        if args.use_data_parallel:
            strategy = fluid.dygraph.parallel.prepare_context()
            model = fluid.dygraph.parallel.DataParallel(model, strategy)

        train_loader = fluid.io.DataLoader.from_generator(
            capacity=64,
            use_double_buffer=True,
            iterable=True,
            return_list=True)
        valid_loader = fluid.io.DataLoader.from_generator(
            capacity=64,
            use_double_buffer=True,
            iterable=True,
            return_list=True)

        train_reader = fluid.io.batch(reader.imagenet_reader(
            args.data_dir, 'train'),
                                      batch_size=args.batch_size,
                                      drop_last=True)
        valid_reader = fluid.io.batch(reader.imagenet_reader(
            args.data_dir, 'val'),
                                      batch_size=args.batch_size)
        if args.use_data_parallel:
            train_reader = fluid.contrib.reader.distributed_batch_reader(
                train_reader)

        train_loader.set_sample_list_generator(train_reader, places=place)
        valid_loader.set_sample_list_generator(valid_reader, places=place)

        save_parameters = (not args.use_data_parallel) or (
            args.use_data_parallel
            and fluid.dygraph.parallel.Env().local_rank == 0)
        best_top1 = 0
        for epoch in range(args.epochs):
            logger.info('Epoch {}, lr {:.6f}'.format(
                epoch, optimizer.current_step_lr()))
            train_top1, train_top5 = train(model, train_loader, optimizer,
                                           epoch, args)
            logger.info(
                "Epoch {}, train_top1 {:.6f}, train_top5 {:.6f}".format(
                    epoch, train_top1, train_top5))
            valid_top1, valid_top5 = valid(model, valid_loader, epoch, args)
            if valid_top1 > best_top1:
                best_top1 = valid_top1
                if save_parameters:
                    fluid.save_dygraph(model.state_dict(),
                                       args.model_save_dir + "/best_model")
            logger.info(
                "Epoch {}, valid_top1 {:.6f}, valid_top5 {:.6f}, best_valid_top1 {:.6f}"
                .format(epoch, valid_top1, valid_top5, best_top1))
Example #29
def main():
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype)
    if args.parallel:
        model = nn.DataParallel(model).cuda()
    else:
        model = model.cuda()

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    traindir = os.path.join(args.data, 'train')
    validdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=0.4,
                                   contrast=0.4,
                                   saturation=0.4,
                                   hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    train_queue = torch.utils.data.DataLoader(train_data,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              pin_memory=True,
                                              num_workers=4)

    valid_queue = torch.utils.data.DataLoader(valid_data,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=4)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                args.decay_period,
                                                gamma=args.gamma)

    best_acc_top1 = 0
    for epoch in range(args.epochs):
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

        train_acc, train_obj = train(train_queue, model, criterion_smooth,
                                     optimizer)
        logging.info('train_acc %f', train_acc)

        valid_acc_top1, valid_acc_top5, valid_obj = infer(
            valid_queue, model, criterion)
        logging.info('valid_acc_top1 %f', valid_acc_top1)
        logging.info('valid_acc_top5 %f', valid_acc_top5)

        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True

        utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc_top1': best_acc_top1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.save)
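utils.save_checkpoint in these scripts conventionally writes the latest state every epoch and keeps a separate copy of the best model; a sketch under that assumption:

import os
import shutil
import torch

def save_checkpoint_sketch(state, is_best, save_dir):
    path = os.path.join(save_dir, 'checkpoint.pth.tar')
    torch.save(state, path)
    if is_best:
        # keep a separate copy of the best weights seen so far
        shutil.copyfile(path, os.path.join(save_dir, 'model_best.pth.tar'))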
Example #30
def main():
    if not torch.cuda.is_available():
        logging.info('No GPU device available')
        sys.exit(1)

    num_gpus = torch.cuda.device_count()
    args.gpu = args.local_rank % num_gpus
    torch.cuda.set_device(args.gpu)

    np.random.seed(args.seed)
    cudnn.benchmark = True
    cudnn.deterministic = True

    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info("args = %s", args)
    logging.info("unparsed_args = %s", unparsed)

    torch.distributed.init_process_group(backend='nccl', init_method='env://')
    args.world_size = torch.distributed.get_world_size()
    args.batch_size = args.batch_size // args.world_size

    genotype = eval("genotypes.%s" % args.arch)
    logging.info('---------Genotype---------')
    logging.info(genotype)
    logging.info('--------------------------')
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype)
    model = model.cuda(args.gpu)
    model = apex.parallel.DistributedDataParallel(model, delay_allreduce=True)

    model_profile = Network(args.init_channels, CLASSES, args.layers,
                            args.auxiliary, genotype)
    model_profile = model_profile.cuda(args.gpu)
    model_input_size_imagenet = (1, 3, 224, 224)
    model_profile.drop_path_prob = 0
    flops, _ = profile(model_profile, model_input_size_imagenet)
    logging.info("flops = %fMB, param size = %fMB", flops,
                 count_parameters_in_MB(model))

    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # Prepare data
    total_iters = per_epoch_iters * args.epochs
    train_loader = get_train_dataloader(args.train_dir, args.batch_size,
                                        args.local_rank, total_iters)
    train_dataprovider = DataIterator(train_loader)
    val_loader = get_val_dataloader(args.test_dir)
    val_dataprovider = DataIterator(val_loader)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))

    start_epoch = 0
    best_acc_top1 = 0
    best_acc_top5 = 0
    checkpoint_tar = os.path.join(args.save, 'checkpoint.pth.tar')
    if os.path.exists(checkpoint_tar):
        logging.info('loading checkpoint {} ..........'.format(checkpoint_tar))
        checkpoint = torch.load(
            checkpoint_tar,
            map_location={'cuda:0': 'cuda:{}'.format(args.local_rank)})
        start_epoch = checkpoint['epoch'] + 1
        model.load_state_dict(checkpoint['state_dict'])
        logging.info("loaded checkpoint {} epoch = {}".format(
            checkpoint_tar, checkpoint['epoch']))

    # evaluation mode
    if args.eval:
        if args.eval_resume is not None:
            checkpoint = torch.load(args.eval_resume)
            model.module.drop_path_prob = 0
            model.load_state_dict(checkpoint['state_dict'])
            valid_acc_top1, valid_acc_top5 = infer(val_dataprovider,
                                                   model.module, val_iters)
            print('valid_acc_top1: {}'.format(valid_acc_top1))
        exit(0)

    for epoch in range(start_epoch, args.epochs):
        if args.lr_scheduler == 'cosine':
            scheduler.step()
            current_lr = scheduler.get_lr()[0]
        elif args.lr_scheduler == 'linear':
            current_lr = adjust_lr(optimizer, epoch)
        else:
            logging.info('Wrong lr type, exit')
            sys.exit(1)

        logging.info('Epoch: %d lr %e', epoch, current_lr)
        if epoch < 5 and args.batch_size > 256:
            for param_group in optimizer.param_groups:
                param_group['lr'] = current_lr * (epoch + 1) / 5.0
            logging.info('Warming-up Epoch: %d, LR: %e', epoch,
                         current_lr * (epoch + 1) / 5.0)
        model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        epoch_start = time.time()
        train_acc, train_obj = train(train_dataprovider, model,
                                     criterion_smooth, optimizer,
                                     per_epoch_iters)

        writer.add_scalar('Train/Loss', train_obj, epoch)
        writer.add_scalar('Train/LR', current_lr, epoch)

        if args.local_rank == 0 and (epoch % 5 == 0
                                     or args.epochs - epoch < 10):
            valid_acc_top1, valid_acc_top5 = infer(val_dataprovider,
                                                   model.module, val_iters)
            is_best = False
            if valid_acc_top5 > best_acc_top5:
                best_acc_top5 = valid_acc_top5
            if valid_acc_top1 > best_acc_top1:
                best_acc_top1 = valid_acc_top1
                is_best = True

            logging.info('Valid_acc_top1: %f', valid_acc_top1)
            logging.info('Valid_acc_top5: %f', valid_acc_top5)
            logging.info('best_acc_top1: %f', best_acc_top1)
            epoch_duration = time.time() - epoch_start
            logging.info('Epoch time: %ds.', epoch_duration)

            save_checkpoint_(
                {
                    'epoch': epoch,
                    'state_dict': model.state_dict(),
                    'best_acc_top1': best_acc_top1,
                    'optimizer': optimizer.state_dict(),
                }, args.save)
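The warm-up branch above overrides the scheduler during the first five epochs whenever the batch size exceeds 256, scaling the learning rate linearly toward its base value. Factored out, the rule is simply:

def warmup_lr(base_lr, epoch, warmup_epochs=5):
    """Linear warmup: reach base_lr after warmup_epochs epochs."""
    return base_lr * (epoch + 1) / warmup_epochs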