Example 1
    def write_pytorch_weight(self):
        model = NetworkImageNet(216, 1001, 12, False, PNASNet)
        model.drop_path_prob = 0
        model.eval()

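        # copy each group of TF variables into the matching PyTorch module,
        # recording which TF keys get used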
        self.used_keys = []
        self.convert_conv(model.conv0, 'conv0/weights')
        self.convert_bn(model.conv0_bn, 'conv0_bn/gamma', 'conv0_bn/beta',
                        'conv0_bn/moving_mean', 'conv0_bn/moving_variance')
        self.convert_cell(model.stem1, 'cell_stem_0/')
        self.convert_cell(model.stem2, 'cell_stem_1/')

        for i in range(12):
            self.convert_cell(model.cells[i], 'cell_{}/'.format(i))

        self.convert_fc(model.classifier, 'final_layer/FC/weights',
                        'final_layer/FC/biases')

        print('Conversion complete!')
        print('Check 1: whether all TF variables are used...')
        assert len(self.weight_dict) == len(self.used_keys)
        print('Pass!')

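        # sanity check: run one forward pass and compare against the TF logits
        # computed earlier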
        model = model.cuda()
        image = self.tf_image_proc.transpose((2, 0, 1))
        image = Variable(self.Tensor(image)).cuda()
        logits, _ = model(image.unsqueeze(0))
        self.pytorch_logits = logits.data.cpu().numpy()

        print('Check 2: whether logits have small diff...')
        assert np.max(np.abs(self.tf_logits - self.pytorch_logits)) < 1e-5
        print('Pass!')

        model_path = 'data/PNASNet-5_Large.pth'
        torch.save(model.state_dict(), model_path)
        print('PyTorch model saved to {}'.format(model_path))
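
The convert_* helpers are not shown in this excerpt. A minimal sketch of what convert_conv plausibly does, assuming self.weight_dict maps TF variable names to NumPy arrays in TF's HWIO layout (the helper body below is an assumption, not the original code):

    def convert_conv(self, conv, tf_name):
        # TF stores conv kernels as (H, W, C_in, C_out); PyTorch expects (C_out, C_in, H, W)
        weight = self.weight_dict[tf_name].transpose((3, 2, 0, 1))
        conv.weight.data = torch.from_numpy(weight.copy()).float()
        self.used_keys.append(tf_name)  # lets Check 1 verify every TF variable was consumed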
Example 2
def main():
    if not torch.cuda.is_available():
        logging.info('No GPU device available')
        sys.exit(1)
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    torch.cuda.set_device(args.gpu)

    logging.info("args = %s", args)
    logging.info("unparsed_args = %s", unparsed)
    num_gpus = torch.cuda.device_count()
    genotype = eval("genotypes.%s" % args.arch)
    print('---------Genotype---------')
    logging.info(genotype)
    print('--------------------------')
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype)
    #start_epochs = 0

    if num_gpus > 1:
        model = nn.DataParallel(model)
        model = model.cuda()
    else:
        model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    if args.resume_path:
        state = utils.load_checkpoint(args.resume_path)
        # start_epochs = state['epoch']
        model.load_state_dict(state['state_dict'])

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    data_dir = args.tmp_data_dir
    traindir = os.path.join(data_dir, 'train')
    validdir = os.path.join(data_dir, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=0.4,
                                   contrast=0.4,
                                   saturation=0.4,
                                   hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    train_queue = torch.utils.data.DataLoader(train_data,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              pin_memory=True,
                                              num_workers=args.workers)

    valid_queue = torch.utils.data.DataLoader(valid_data,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=args.workers)

    #    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_period, gamma=args.gamma)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))
    best_acc_top1 = 0
    best_acc_top5 = 0
    for epoch in range(args.epochs):
        if args.lr_scheduler == 'cosine':
            scheduler.step()
            current_lr = scheduler.get_lr()[0]
        elif args.lr_scheduler == 'linear':
            current_lr = adjust_lr(optimizer, epoch)
        else:
            print('Wrong lr type, exit')
            sys.exit(1)
        logging.info('Epoch: %d lr %e', epoch, current_lr)
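        # linear warm-up over the first 5 epochs when training with a large batch size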
        if epoch < 5 and args.batch_size > 256:
            for param_group in optimizer.param_groups:
                param_group['lr'] = current_lr * (epoch + 1) / 5.0
            logging.info('Warming-up Epoch: %d, LR: %e', epoch,
                         current_lr * (epoch + 1) / 5.0)
        if num_gpus > 1:
            model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        else:
            model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        epoch_start = time.time()
        train_acc, train_obj = train(train_queue, model, criterion_smooth,
                                     optimizer)
        logging.info('Train_acc: %f', train_acc)

        valid_acc_top1, valid_acc_top5, valid_obj = infer(
            valid_queue, model, criterion)
        logging.info('Valid_acc_top1: %f', valid_acc_top1)
        logging.info('Valid_acc_top5: %f', valid_acc_top5)
        epoch_duration = time.time() - epoch_start
        logging.info('Epoch time: %ds.', epoch_duration)
        is_best = False
        if valid_acc_top5 > best_acc_top5:
            best_acc_top5 = valid_acc_top5
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True
        utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc_top1': best_acc_top1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.save)
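
The 'linear' branch above calls an adjust_lr helper that is not part of this excerpt. A minimal sketch consistent with its call site, assuming a plain linear decay of args.learning_rate over args.epochs (the exact schedule in the original may differ):

def adjust_lr(optimizer, epoch):
    # linear decay: args.learning_rate at epoch 0, approaching zero at args.epochs
    lr = args.learning_rate * (1.0 - epoch / args.epochs)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr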
Example 3
def main():
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)
    if args.parallel:  # multi gpu
        num_gpus = torch.cuda.device_count()
        logging.info('num of gpu devices = %d' % num_gpus)
    else:  # single gpu
        torch.cuda.set_device(args.gpu)
        logging.info('gpu device = %d' % args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info("args = %s", args)

    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype)
    if args.parallel:
        model = nn.DataParallel(model).cuda()
    else:
        model = model.cuda()

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    traindir = os.path.join(args.data, 'train')
    validdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=0.4,
                                   contrast=0.4,
                                   saturation=0.4,
                                   hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    train_queue = torch.utils.data.DataLoader(train_data,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              pin_memory=True,
                                              num_workers=4)

    valid_queue = torch.utils.data.DataLoader(valid_data,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=4)

    if args.lr_scheduler == 'step':
        # DARTS code
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                    args.decay_period,
                                                    gamma=args.gamma)
    elif args.lr_scheduler == 'cosine' or args.lr_scheduler == 'linear':
        # PCDARTS code
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, float(args.epochs))
    else:
        raise ValueError("Wrong learning rate scheduler")

    # ---- resume ---- #
    start_epoch = 0
    best_acc_top1 = 0.0
    best_acc_top5 = 0.0
    best_acc_epoch = 0
    if args.resume:
        # TODO: verify loading also works for multi-GPU (DataParallel) checkpoints
        if os.path.isfile(args.resume):
            logging.info("=> loading checkpoint {}".format(args.resume))
            device = torch.device("cuda")
            checkpoint = torch.load(args.resume, map_location=device)
            start_epoch = checkpoint['epoch']
            best_acc_top1 = checkpoint['best_acc_top1']
            best_acc_top5 = checkpoint['best_acc_top5']
            best_acc_epoch = checkpoint['best_acc_epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            logging.info(
                "=> loaded checkpoint {} (trained until epoch {})".format(
                    args.resume, start_epoch - 1))
        else:
            raise ValueError("Wrong args.resume")
    else:
        logging.info("=> training from scratch")

    for epoch in range(start_epoch, args.epochs):
        if args.lr_scheduler == 'cosine' or args.lr_scheduler == 'step':
            scheduler.step()
            current_lr = scheduler.get_lr()[0]
        elif args.lr_scheduler == 'linear':
            current_lr = adjust_lr(optimizer, epoch)

        if epoch < 5 and args.batch_size > 256:
            for param_group in optimizer.param_groups:
                param_group['lr'] = args.learning_rate * (epoch + 1) / 5.0
            logging.info('Warming-up epoch: %d, LR: %e', epoch,
                         args.learning_rate * (epoch + 1) / 5.0)
        else:
            logging.info('epoch %d lr %e', epoch, current_lr)

        if args.parallel:
            model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        else:
            model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

        epoch_start = time.time()
        train_acc, train_obj = train(train_queue, model, criterion_smooth,
                                     optimizer)
        logging.info('train_acc %f', train_acc)

        valid_acc_top1, valid_acc_top5, valid_obj = infer(
            valid_queue, model, criterion)
        is_best = (valid_acc_top1 > best_acc_top1)
        if is_best:
            best_acc_top1 = valid_acc_top1
            best_acc_top5 = valid_acc_top5
            best_acc_epoch = epoch + 1
            utils.save(model, os.path.join(args.save, 'best_weights.pt'))
        logging.info('valid_acc %f %f, best_acc %f %f (at epoch %d)',
                     valid_acc_top1, valid_acc_top5, best_acc_top1,
                     best_acc_top5, best_acc_epoch)
        logging.info('epoch time %d sec.', time.time() - epoch_start)

        utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'best_acc_top1': best_acc_top1,
                'best_acc_top5': best_acc_top5,
                'best_acc_epoch': best_acc_epoch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }, is_best, args.save)

    utils.save(model, os.path.join(args.save, 'weights.pt'))
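
utils.save_checkpoint is used by most of these examples but never defined in them. A minimal sketch of the conventional implementation, assuming the usual checkpoint.pth.tar / model_best.pth.tar filenames (Example 12 resumes from a checkpoint.pth.tar in its save directory, which suggests this convention):

import os
import shutil

import torch

def save_checkpoint(state, is_best, save_dir):
    # always overwrite the rolling checkpoint; keep a separate copy of the best model so far
    filename = os.path.join(save_dir, 'checkpoint.pth.tar')
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, os.path.join(save_dir, 'model_best.pth.tar'))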
Example 4
# Horovod: wrap optimizer with DistributedOptimizer.
optimizer = hvd.DistributedOptimizer(optimizer,
                                     named_parameters=model.named_parameters(),
                                     compression=compression)

# Restore from a previous checkpoint, if initial_epoch is specified.
# Horovod: restore on the first worker which will broadcast weights to other workers.
if resume_from_epoch > 0 and hvd.rank() == 0:
    filepath = args.checkpoint_format.format(exp=args.save,
                                             epoch=resume_from_epoch)
    checkpoint = torch.load(filepath)
    model.load_state_dict(checkpoint['model'])
    optimizer.load_state_dict(checkpoint['optimizer'])

# Horovod: broadcast parameters & optimizer state.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)


def train(epoch, criterion):
    model.train()
    train_sampler.set_epoch(epoch)
    train_loss = Metric('train_loss')
    train_accuracy = Metric('train_accuracy')
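    # log the base learning rate from the first device, dividing out Horovod's
    # lr scaling by world size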
    if torch.cuda.current_device() == 0:
        for param_group in optimizer.param_groups:
            tmp_lr = param_group['lr']
        logging.info('epoch %d lr %e', epoch, tmp_lr / hvd.size())

    with tqdm(total=len(train_loader),
              desc='Train Epoch     #{}'.format(epoch + 1),
Example 5
def main():
  if not torch.cuda.is_available():
    logger.info('no gpu device available')
    sys.exit(1)

  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled=True
  torch.cuda.manual_seed(args.seed)
  logger.info('gpu device = %d' % args.gpu)
  logger.info("args = %s", args)

  # # load the correct ops dictionary
  op_dict_to_load = "operations.%s" % args.ops
  logger.info('loading op dict: ' + str(op_dict_to_load))
  op_dict = eval(op_dict_to_load)

  # load the correct primitives list
  primitives_to_load = "genotypes.%s" % args.primitives
  logger.info('loading primitives:' + primitives_to_load)
  primitives = eval(primitives_to_load)
  logger.info('primitives: ' + str(primitives))

  genotype = eval("genotypes.%s" % args.arch)
  cnn_model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype, op_dict=op_dict, C_mid=args.mid_channels)
  if args.parallel:
    cnn_model = nn.DataParallel(cnn_model).cuda()
  else:
    cnn_model = cnn_model.cuda()

  logger.info("param size = %fMB", utils.count_parameters_in_MB(cnn_model))
  if args.flops:
    cnn_model.drop_path_prob = 0.0
    logger.info("flops = " + utils.count_model_flops(cnn_model, data_shape=[1, 3, 224, 224]))
    exit(1)

  criterion = nn.CrossEntropyLoss()
  criterion = criterion.cuda()
  criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
  criterion_smooth = criterion_smooth.cuda()

  optimizer = torch.optim.SGD(
    cnn_model.parameters(),
    args.learning_rate,
    momentum=args.momentum,
    weight_decay=args.weight_decay
    )

  traindir = os.path.join(args.data, 'train')
  validdir = os.path.join(args.data, 'val')
  normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  train_data = dset.ImageFolder(
    traindir,
    transforms.Compose([
      transforms.RandomResizedCrop(224),
      transforms.RandomHorizontalFlip(),
      autoaugment.ImageNetPolicy(),
      # transforms.ColorJitter(
      #   brightness=0.4,
      #   contrast=0.4,
      #   saturation=0.4,
      #   hue=0.2),
      transforms.ToTensor(),
      normalize,
    ]))
  valid_data = dset.ImageFolder(
    validdir,
    transforms.Compose([
      transforms.Resize(256),
      transforms.CenterCrop(224),
      transforms.ToTensor(),
      normalize,
    ]))

  train_queue = torch.utils.data.DataLoader(
    train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=8)

  valid_queue = torch.utils.data.DataLoader(
    valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=8)

  scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_period, gamma=args.gamma)

  prog_epoch = tqdm(range(args.epochs), dynamic_ncols=True)
  best_valid_acc = 0.0
  best_epoch = 0
  best_stats = {}
  best_acc_top1 = 0
  weights_file = os.path.join(args.save, 'weights.pt')
  for epoch in prog_epoch:
    scheduler.step()
    if args.parallel:
      cnn_model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
    else:
      cnn_model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

    train_acc, train_obj = train.train(args, train_queue, cnn_model, criterion, optimizer)
    stats = train.infer(args, valid_queue, cnn_model, criterion)

    is_best = False
    if stats['valid_acc'] > best_valid_acc:
      # new best epoch, save weights
      utils.save(cnn_model, weights_file)
      best_epoch = epoch
      best_valid_acc = stats['valid_acc']

      best_stats = stats
      best_stats['lr'] = scheduler.get_lr()[0]
      best_stats['epoch'] = best_epoch
      best_train_loss = train_obj
      best_train_acc = train_acc
      is_best = True

    logger.info('epoch, %d, train_acc, %f, valid_acc, %f, train_loss, %f, valid_loss, %f, lr, %e, best_epoch, %d, best_valid_acc, %f, ' + utils.dict_to_log_string(stats),
                epoch, train_acc, stats['valid_acc'], train_obj, stats['valid_loss'], scheduler.get_lr()[0], best_epoch, best_valid_acc)
    checkpoint = {
          'epoch': epoch,
          'state_dict': cnn_model.state_dict(),
          'best_acc_top1': best_valid_acc,
          'optimizer' : optimizer.state_dict(),
    }
    checkpoint.update(stats)
    utils.save_checkpoint(checkpoint, is_best, args.save)

  best_epoch_str = utils.dict_to_log_string(best_stats, key_prepend='best_')
  logger.info(best_epoch_str)
  logger.info('Training of Final Model Complete! Save dir: ' + str(args.save))
Example 6
def main(args):
    place = fluid.CUDAPlace(fluid.dygraph.parallel.Env().dev_id) \
        if args.use_data_parallel else fluid.CUDAPlace(0)

    with fluid.dygraph.guard(place):
        genotype = eval("genotypes.%s" % args.arch)
        model = Network(C=args.init_channels,
                        num_classes=args.class_num,
                        layers=args.layers,
                        auxiliary=args.auxiliary,
                        genotype=genotype)

        logger.info("param size = {:.6f}MB".format(
            count_parameters_in_MB(model.parameters())))

        device_num = fluid.dygraph.parallel.Env().nranks
        step_per_epoch = int(args.trainset_num /
                             (args.batch_size * device_num))
        learning_rate = fluid.dygraph.ExponentialDecay(args.learning_rate,
                                                       step_per_epoch,
                                                       args.decay_rate,
                                                       staircase=True)

        clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=args.grad_clip)
        optimizer = fluid.optimizer.MomentumOptimizer(
            learning_rate,
            momentum=args.momentum,
            regularization=fluid.regularizer.L2Decay(args.weight_decay),
            parameter_list=model.parameters(),
            grad_clip=clip)

        if args.use_data_parallel:
            strategy = fluid.dygraph.parallel.prepare_context()
            model = fluid.dygraph.parallel.DataParallel(model, strategy)

        train_loader = fluid.io.DataLoader.from_generator(
            capacity=64,
            use_double_buffer=True,
            iterable=True,
            return_list=True)
        valid_loader = fluid.io.DataLoader.from_generator(
            capacity=64,
            use_double_buffer=True,
            iterable=True,
            return_list=True)

        train_reader = fluid.io.batch(reader.imagenet_reader(
            args.data_dir, 'train'),
                                      batch_size=args.batch_size,
                                      drop_last=True)
        valid_reader = fluid.io.batch(reader.imagenet_reader(
            args.data_dir, 'val'),
                                      batch_size=args.batch_size)
        if args.use_data_parallel:
            train_reader = fluid.contrib.reader.distributed_batch_reader(
                train_reader)

        train_loader.set_sample_list_generator(train_reader, places=place)
        valid_loader.set_sample_list_generator(valid_reader, places=place)

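        # with data parallelism, only the local rank-0 process saves parameters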
        save_parameters = (not args.use_data_parallel) or (
            args.use_data_parallel
            and fluid.dygraph.parallel.Env().local_rank == 0)
        best_top1 = 0
        for epoch in range(args.epochs):
            logger.info('Epoch {}, lr {:.6f}'.format(
                epoch, optimizer.current_step_lr()))
            train_top1, train_top5 = train(model, train_loader, optimizer,
                                           epoch, args)
            logger.info(
                "Epoch {}, train_top1 {:.6f}, train_top5 {:.6f}".format(
                    epoch, train_top1, train_top5))
            valid_top1, valid_top5 = valid(model, valid_loader, epoch, args)
            if valid_top1 > best_top1:
                best_top1 = valid_top1
                if save_parameters:
                    fluid.save_dygraph(model.state_dict(),
                                       args.model_save_dir + "/best_model")
            logger.info(
                "Epoch {}, valid_top1 {:.6f}, valid_top5 {:.6f}, best_valid_top1 {:6f}"
                .format(epoch, valid_top1, valid_top5, best_top1))
Example 7
def main():
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype)
    model.drop_path_prob = args.drop_path_prob
    if args.parallel:
        model = nn.DataParallel(model).cuda()

    else:
        model = model.cuda()
    #checkpoint = torch.load('./model_best.pth.tar')
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    loss_weight = np.array([3.04, 1, 3.45, 15.09, 4.51, 43.26, 32.45, 23.18])
    loss_weight = torch.from_numpy(loss_weight).float()
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    #model.load_state_dict(checkpoint['state_dict'])
    traindir = os.path.join(args.data, 'train')
    validdir = os.path.join(args.data, 'test')
    normalize = transforms.Normalize(mean=[0.763, 0.545, 0.570],
                                     std=[0.141, 0.152, 0.169])
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=0.4,
                                   contrast=0.4,
                                   saturation=0.4,
                                   hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    num_classes = len(train_data.classes)
    class_weight = np.array([
        1. / 213, 1. / 649, 1. / 188, 1. / 43, 1. / 144, 1. / 15, 1. / 20,
        1. / 28
    ])
    class_weight = torch.from_numpy(class_weight)

    train_targets = [sample[1] for sample in train_data.imgs]
    valid_targets = [sample[1] for sample in valid_data.imgs]

    train_samples_weight = [
        class_weight[class_id] for class_id in train_targets
    ]
    train_sampler = torch.utils.data.sampler.WeightedRandomSampler(
        train_samples_weight, len(train_data))

    valid_samples_weight = [
        class_weight[class_id] for class_id in valid_targets
    ]
    valid_sampler = torch.utils.data.sampler.WeightedRandomSampler(
        valid_samples_weight, len(valid_data))

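    # NOTE: the weighted samplers above are currently unused; both loaders
    # shuffle instead (the sampler arguments are commented out)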
    train_queue = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        shuffle=True,
        pin_memory=True,
        num_workers=4)  #, sampler = train_sampler)

    valid_queue = torch.utils.data.DataLoader(
        valid_data,
        batch_size=args.batch_size,
        shuffle=True,
        pin_memory=True,
        num_workers=4)  #, sampler = valid_sampler)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                args.decay_period,
                                                gamma=args.gamma)

    best_acc_top1 = 0
    for epoch in range(args.epochs):

        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        if args.parallel:
            model.module.drop_path_prob = args.drop_path_prob * (epoch / args.epochs)
        else:
            model.drop_path_prob = args.drop_path_prob * (epoch / args.epochs)

        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc %f', train_acc)
        scheduler.step()
        valid_acc_top1, valid_acc_top5, valid_obj = infer(
            valid_queue, model, criterion)
        logging.info('valid_acc_top1 %f', valid_acc_top1)
        logging.info('valid_acc_top5 %f', valid_acc_top5)
        utils.save(model, os.path.join(args.save, 'weights.pt'))

        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True

        utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc_top1': best_acc_top1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.save)
Example 8
def main():
    args.save = 'eval-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
    utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))

    log_format = '%(asctime)s %(message)s'
    logging.basicConfig(stream=sys.stdout,
                        level=logging.INFO,
                        format=log_format,
                        datefmt='%m/%d %I:%M:%S %p')
    fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
    fh.setFormatter(logging.Formatter(log_format))
    logging.getLogger().addHandler(fh)

    CLASSES = 1000

    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype)
    if args.parallel:
        model = nn.DataParallel(model).cuda()
    else:
        model = model.cuda()

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    traindir = os.path.join(args.data, 'train')
    validdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=0.4,
                                   contrast=0.4,
                                   saturation=0.4,
                                   hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    train_queue = torch.utils.data.DataLoader(train_data,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              pin_memory=True,
                                              num_workers=4)

    valid_queue = torch.utils.data.DataLoader(valid_data,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=4)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                args.decay_period,
                                                gamma=args.gamma)

    best_acc_top1 = 0
    for epoch in range(args.epochs):
        lr = scheduler.get_last_lr()[0]
        logging.info('epoch %d lr %e', epoch, lr)

        if args.parallel:
            model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        else:
            model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

        # training
        train_acc, train_obj = train(train_queue, model, criterion_smooth,
                                     optimizer)
        logging.info('train_acc %f', train_acc)

        # validation
        valid_acc_top1, valid_acc_top5, valid_obj = infer(
            valid_queue, model, criterion)
        logging.info('valid_acc_top1 %f', valid_acc_top1)
        logging.info('valid_acc_top5 %f', valid_acc_top5)

        # step the learning-rate scheduler
        scheduler.step()

        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True

        utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc_top1': best_acc_top1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.save)
Example 9
def main():
    # Scale learning rate based on global batch size.
    if not args.no_scale_lr:
        scale = float(args.batch_size * args.world_size) / 128.0
        args.learning_rate = scale * args.learning_rate

    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info('args = %s', args)

    # Get data loaders.
    traindir = os.path.join(args.data, 'train')
    validdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=0.4,
                               contrast=0.4,
                               saturation=0.4,
                               hue=0.2),
        transforms.ToTensor(),
        normalize,
    ])
    val_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])

    if 'lmdb' in args.data:
        train_data = imagenet_lmdb_dataset(traindir, transform=train_transform)
        valid_data = imagenet_lmdb_dataset(validdir, transform=val_transform)
    else:
        train_data = dset.ImageFolder(traindir, transform=train_transform)
        valid_data = dset.ImageFolder(validdir, transform=val_transform)

    train_sampler = torch.utils.data.distributed.DistributedSampler(train_data)
    train_queue = torch.utils.data.DataLoader(train_data,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=8,
                                              sampler=train_sampler)

    valid_queue = torch.utils.data.DataLoader(valid_data,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=8)

    # Set up the network.
    if os.path.isfile(args.genotype):
        logging.info('Loading genotype from: %s' % args.genotype)
        genotype = torch.load(args.genotype, map_location='cpu')
    else:
        genotype = eval('genotypes.%s' % args.genotype)
    if not isinstance(genotype, list):
        genotype = [genotype]

    # If num channels not provided, find the max under 600M MAdds.
    if args.init_channels < 0:
        if args.local_rank == 0:
            flops, num_params, init_channels = find_max_channels(
                genotype, args.layers, args.cell_type, args.max_M_flops * 1e6)
            logging.info('Num flops = %.2fM', flops / 1e6)
            logging.info('Num params = %.2fM', num_params / 1e6)
        else:
            init_channels = 0
        # All reduce with world_size 1 is sum.
        init_channels = torch.Tensor([init_channels]).cuda()
        init_channels = utils.reduce_tensor(init_channels, 1)
        args.init_channels = int(init_channels.item())
    logging.info('Num channels = %d', args.init_channels)

    # Create model and loss.
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype, args.cell_type)
    model = model.cuda()
    model = DDP(model, delay_allreduce=True)

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()
    logging.info('param size = %fM', utils.count_parameters_in_M(model))

    # Set up network weights optimizer.
    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    if args.lr_scheduler == 'exp':
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                    args.decay_period,
                                                    gamma=args.gamma)
    elif args.lr_scheduler == 'cosine':
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, float(args.epochs), eta_min=args.min_learning_rate)

    # Train.
    global_step = 0
    best_acc_top1 = 0
    for epoch in range(args.epochs):
        # Shuffle the sampler, update lrs.
        train_queue.sampler.set_epoch(epoch + args.seed)
        # Change lr.
        if epoch >= args.warmup_epochs:
            scheduler.step()
        model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs

        # Training.
        train_acc, train_obj, global_step = train(train_queue, model,
                                                  criterion_smooth, optimizer,
                                                  epoch, args.learning_rate,
                                                  args.warmup_epochs,
                                                  global_step)
        logging.info('train_acc %f', train_acc)
        writer.add_scalar('train/acc', train_acc, global_step)

        # Validation.
        valid_acc_top1, valid_acc_top5, valid_obj = infer(
            valid_queue, model, criterion)
        logging.info('valid_acc_top1 %f', valid_acc_top1)
        logging.info('valid_acc_top5 %f', valid_acc_top5)
        writer.add_scalar('val/acc_top1', valid_acc_top1, global_step)
        writer.add_scalar('val/acc_top5', valid_acc_top5, global_step)
        writer.add_scalar('val/loss', valid_obj, global_step)

        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True

        if args.local_rank == 0:
            utils.save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_acc_top1': best_acc_top1,
                    'optimizer': optimizer.state_dict(),
                }, is_best, args.save)
Example 10
def main():
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info("args = %s", args)

    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, NUM_CLASSES, args.layers,
                    config.optim.auxiliary, genotype)

    start_epoch = 0
    model.eval()
    model.drop_path_prob = 0.0  # disable drop path while measuring params and mult-adds
    # compute the params as well as the multi-adds
    params = count_parameters_in_MB(model)
    logging.info("Params = %.2fMB" % params)
    mult_adds = comp_multadds(model, input_size=config.data.input_size)
    logging.info("Mult-Adds = %.2fMB" % mult_adds)

    model.train()
    if len(args.gpus) > 1:
        model = nn.DataParallel(model)
    model = model.cuda()
    if config.optim.label_smooth:
        criterion = CrossEntropyLabelSmooth(NUM_CLASSES,
                                            config.optim.smooth_alpha)
    else:
        criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                config.optim.init_lr,
                                momentum=config.optim.momentum,
                                weight_decay=config.optim.weight_decay)

    imagenet = imagenet_data.ImageNet12(
        trainFolder=os.path.join(args.data_path, 'train'),
        testFolder=os.path.join(args.data_path, 'val'),
        num_workers=config.data.num_workers,
        type_of_data_augmentation=config.data.type_of_data_aug,
        data_config=config.data,
        size_images=config.data.input_size[1],
        scaled_size=config.data.scaled_size[1])
    train_queue, valid_queue = imagenet.getTrainTestLoader(
        config.data.batch_size)

    if config.optim.lr_schedule == 'cosine':
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, float(config.train_params.epochs))

    trainer = Trainer(train_queue, valid_queue, criterion, config,
                      args.report_freq)
    best_epoch = [0, 0, 0]  # [epoch, acc_top1, acc_top5]
    lr = config.optim.init_lr
    for epoch in range(start_epoch, config.train_params.epochs):
        if config.optim.lr_schedule == 'cosine':
            scheduler.step()
            current_lr = scheduler.get_lr()[0]
        elif config.optim.lr_schedule == 'linear':  # linear schedule with initial warm-up
            optimizer, current_lr = adjust_lr(optimizer,
                                              config.train_params.epochs, lr,
                                              epoch)
        else:
            print('Wrong lr type, exit')
            sys.exit(1)
        if epoch < 5:  # warm up during the first 5 epochs
            current_lr = lr * (epoch + 1) / 5.0
            for param_group in optimizer.param_groups:
                param_group['lr'] = current_lr
            logging.info('Warming-up Epoch: %d, LR: %e', epoch,
                         lr * (epoch + 1) / 5.0)

        logging.info('Epoch: %d lr %e', epoch, current_lr)
        if len(args.gpus) > 1:
            model.module.drop_path_prob = args.drop_path_prob * epoch / config.train_params.epochs
        else:
            model.drop_path_prob = args.drop_path_prob * epoch / config.train_params.epochs
        train_acc_top1, train_acc_top5, train_obj, batch_time, data_time = trainer.train(
            model, optimizer, epoch)
        with torch.no_grad():
            val_acc_top1, val_acc_top5, batch_time, data_time = trainer.infer(
                model, epoch)
        if val_acc_top1 > best_epoch[1]:
            best_epoch = [epoch, val_acc_top1, val_acc_top5]
            if epoch >= 0:  # 120
                utils.save_checkpoint(
                    {
                        'epoch': epoch + 1,
                        'state_dict': model.module.state_dict(),
                        'best_acc_top1': val_acc_top1,
                        'optimizer': optimizer.state_dict(),
                    },
                    save_path=args.save,
                    epoch=epoch,
                    is_best=True)
                if len(args.gpus) > 1:
                    utils.save(
                        model.module.state_dict(),
                        os.path.join(
                            args.save,
                            'weights_{}_{}.pt'.format(epoch, val_acc_top1)))
                else:
                    utils.save(
                        model.state_dict(),
                        os.path.join(
                            args.save,
                            'weights_{}_{}.pt'.format(epoch, val_acc_top1)))

        logging.info('BEST EPOCH %d  val_top1 %.2f val_top5 %.2f',
                     best_epoch[0], best_epoch[1], best_epoch[2])
        logging.info(
            'epoch: {} \t train_acc_top1: {:.4f} \t train_loss: {:.4f} \t val_acc_top1: {:.4f}'
            .format(epoch, train_acc_top1, train_obj, val_acc_top1))

    logging.info("Params = %.2fMB" % params)
    logging.info("Mult-Adds = %.2fMB" % mult_adds)
Example 11
def main():
    if not torch.cuda.is_available():
        logging.info('No GPU device available')
        sys.exit(1)
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled=True
    torch.cuda.manual_seed(args.seed)
    logging.info("args = %s", args)
    logging.info("unparsed_args = %s", unparsed)
    num_gpus = torch.cuda.device_count()   

    if not os.path.exists(args.base_path):
        os.makedirs(args.base_path)
    ckpt_dir = os.path.join(args.base_path, "ImageNet")
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)

    if args.arch is not None:
        genotype = eval("genotypes.%s" % args.arch)
    elif args.base_path is not None and args.genotype_name is not None:
        genotype_path = os.path.join(args.base_path, 'results_of_7q/genotype')
        genotype = get_genotype(genotype_path, args.genotype_name)
    else:
        raise ValueError("args.arch, or args.base_path plus args.genotype_name, must be given")
    print('---------Genotype---------')
    logging.info(genotype)
    print('--------------------------') 
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
    if num_gpus > 1:
        model = nn.DataParallel(model)
        model = model.cuda()
    else:
        model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
        )
    traindir = os.path.join(args.data, 'ILSVRC2012_img_train')
    validdir = os.path.join(args.data, 'ILSVRC2012_img_val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
                hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=args.workers)

    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=args.workers)

    best_acc_top1 = 0
    best_acc_top5 = 0

    ckpt_file = None
    start_epoch = 0
    if not args.from_scratch:
        # check whether a pre-trained model is available
        if args.load_file is not None and os.path.exists(args.load_file):
            ckpt_file = args.load_file
        else:
            # otherwise resume from the checkpoint whose filename carries the
            # highest epoch number (weights_<epoch>.pt)
            for f in os.listdir(ckpt_dir):
                tmp = f.split('.')
                if len(tmp) > 2:
                    continue
                tmp = int(tmp[0].split('_')[1])
                if tmp > start_epoch:
                    start_epoch = tmp
                    ckpt_file = os.path.join(ckpt_dir, f)
    if ckpt_file is not None:
        logging.info('====== Load ckpt ======')
        logging.info("Loading from %s" % ckpt_file)
        start_epoch = 249  # hard-coded resume epoch
        logging.info("Training Start at %d" % start_epoch)
        state = torch.load(ckpt_file)
        model.load_state_dict(state)


#    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_period, gamma=args.gamma, last_epoch=start_epoch)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))
 
    for epoch in range(start_epoch, args.epochs):
        if args.lr_scheduler == 'cosine':
            scheduler.step()
            current_lr = scheduler.get_lr()[0]
        elif args.lr_scheduler == 'linear':
            current_lr = adjust_lr(optimizer, epoch)
        else:
            print('Wrong lr type, exit')
            sys.exit(1)
        logging.info('Epoch: %d lr %e', epoch, current_lr)
        if epoch < 5 and args.batch_size > 256:
            for param_group in optimizer.param_groups:
                param_group['lr'] = current_lr * (epoch + 1) / 5.0
            logging.info('Warming-up Epoch: %d, LR: %e', epoch, current_lr * (epoch + 1) / 5.0)
        if num_gpus > 1:
            model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        else:
            model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        epoch_start = time.time()
        train_acc, train_obj = train(train_queue, model, criterion_smooth, optimizer)
        logging.info('Train_acc: %f', train_acc)

        valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model, criterion)
        logging.info('Valid_acc_top1: %f', valid_acc_top1)
        logging.info('Valid_acc_top5: %f', valid_acc_top5)
        epoch_duration = time.time() - epoch_start
        logging.info('Epoch time: %ds.', epoch_duration)
        # save current epoch model, and remove previous model
        try:
            last_model = os.path.join(ckpt_dir, 'weights_%d.pt'%(epoch-1))
            os.remove(last_model)
        except OSError:
            pass
        ckpt = {
          'epoch': epoch,
          'state_dict': model.state_dict(),
          'best_acc_top1': best_acc_top1,
          'optimizer' : optimizer.state_dict(),
               }
        utils.save(model, os.path.join(ckpt_dir, 'weights_%d.pt'%(epoch)))

        if valid_acc_top1 > best_acc_top1:
          try:
              last_model = os.path.join(ckpt_dir, 'weights_%.3f.pt'%(best_acc_top1))
              os.remove(last_model)
          except OSError:
              pass
          ckpt = {
            'epoch': epoch,
            'state_dict': model.state_dict(),
            'best_acc_top1': best_acc_top1,
            'optimizer' : optimizer.state_dict(),
                 }
          utils.save(model, os.path.join(ckpt_dir, 'weights_%.3f.pt'%(valid_acc_top1)))
          best_acc_top1 = valid_acc_top1
          best_acc_top5 = valid_acc_top5

        print("the best acc: %f(Top1); %f(Top5)"%(best_acc_top1, best_acc_top5))
Example 12
def main():
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)

  np.random.seed(args.seed)
  #torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled=True
  torch.cuda.manual_seed(args.seed)
  logging.info('gpu device num = %d' % args.ngpu)
  logging.info("args = %s", args)

  genotype = eval("genotypes.%s" % args.arch)
  model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype, args.residual_wei, args.shrink_channel)
  if args.parallel:
    model = nn.DataParallel(model).cuda()
    #model = nn.parallel.DistributedDataParallel(model).cuda()
  else:
    model = model.cuda()
  
  logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

  criterion = nn.CrossEntropyLoss()
  criterion = criterion.cuda()
  criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
  criterion_smooth = criterion_smooth.cuda()

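  # build parameter groups so BN and/or bias terms can be excluded from weight
  # decay (per args.bn_no_wd / args.bias_no_wd)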
  optimizer = torch.optim.SGD(
    #model.parameters(),
    utils.set_group_weight(model, args.bn_no_wd, args.bias_no_wd),
    args.learning_rate,
    momentum=args.momentum,
    weight_decay=args.weight_decay
    )

  resume = os.path.join(args.save, 'checkpoint.pth.tar')
  if os.path.exists(resume):
    print("=> loading checkpoint %s" % resume)
    #checkpoint = torch.load(resume)
    checkpoint = torch.load(resume, map_location = lambda storage, loc: storage.cuda(0))
    args.start_epoch = checkpoint['epoch']
    model.load_state_dict(checkpoint['state_dict'])
    #optimizer.load_state_dict(checkpoint['optimizer'])
    # restore only the optimizer's internal state (e.g. momentum buffers),
    # keeping the fresh param_groups
    opt_state = optimizer.state_dict()
    opt_state['state'] = checkpoint['optimizer']['state']
    optimizer.load_state_dict(opt_state)
    print('=> loaded checkpoint epoch %d' % args.start_epoch)
    if args.start_epoch >= args.epochs:
        print('training finished')
        sys.exit(0)

  traindir = os.path.join(args.data, 'train')
  validdir = os.path.join(args.data, 'val')
  normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  train_data = dset.ImageFolder(
    traindir,
    transforms.Compose([
      transforms.RandomResizedCrop(args.image_size),
      transforms.RandomHorizontalFlip(),
      transforms.ColorJitter(
        brightness=0.4,
        contrast=0.4,
        saturation=0.4,
        hue=0.1),
      transforms.ToTensor(),
      normalize,
    ]))
  valid_data = dset.ImageFolder(
    validdir,
    transforms.Compose([
      transforms.Resize(int((256.0 / 224) * args.image_size)),
      transforms.CenterCrop(args.image_size),
      transforms.ToTensor(),
      normalize,
    ]))

  train_queue = torch.utils.data.DataLoader(
    train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=nworker)

  valid_queue = torch.utils.data.DataLoader(
    valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=nworker)

  best_acc_top1 = 0
  for epoch in range(args.start_epoch, args.epochs):
    if args.lr_strategy == 'cos':
      lr = utils.set_lr(optimizer, epoch, args.epochs, args.learning_rate)
    #elif args.lr_strategy == 'step':
    #  scheduler.step()
    #  lr = scheduler.get_lr()[0]
    logging.info('epoch %d lr %e', epoch, lr)
    if args.parallel:
      model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
    else:
      model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
    train_acc, train_obj = train(train_queue, model, criterion_smooth, optimizer, epoch)
    logging.info('train_acc %f', train_acc)

    utils.save_checkpoint({
      'epoch': epoch + 1,
      'state_dict': model.state_dict(),
      'best_acc_top1': train_acc,
      'optimizer' : optimizer.state_dict(),
      }, False, args.save)

    #if epoch >= args.early_stop:
    #  break

  valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model, criterion)
  logging.info('valid_acc_top1 %f', valid_acc_top1)
  logging.info('valid_acc_top5 %f', valid_acc_top5)
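
utils.set_lr drives the 'cos' strategy above but is not shown. A minimal sketch matching its call signature, assuming a half-cosine decay from the base learning rate (an assumption; the original may use a different curve):

import math

def set_lr(optimizer, epoch, total_epochs, base_lr):
    # half-cosine decay: base_lr at epoch 0, approaching zero at the final epoch
    lr = 0.5 * base_lr * (1 + math.cos(math.pi * epoch / total_epochs))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr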
Example 13
def main():
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    rank, world_size = dist_util.dist_init(args.port, 'nccl')

    np.random.seed(args.seed)
    # torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    # logging.info('gpu device = %d' % args.gpu)
    if rank == 0:
        generate_date = str(datetime.now().date())
        utils.create_exp_dir(generate_date, args.save, scripts_to_save=glob.glob('*.py'))
        logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                            format=log_format, datefmt='%m/%d %I:%M:%S %p')
        fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
        fh.setFormatter(logging.Formatter(log_format))
        logging.getLogger().addHandler(fh)
        logging.info("args = %s", args)
        logger = tensorboardX.SummaryWriter('./runs/eval_imagenet_{}_{}'.format(args.arch, args.remark))

    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
    model = model.cuda()

    if rank == 0:
        logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

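    # Warm start begins below the target LR; presumably utils.WarmStart scales
    # it back up by warm_start_gamma (an assumption about that helper).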
    if args.warm_start:
        lr = args.warm_start_lr / args.warm_start_gamma
    else:
        lr = args.learning_rate

    optimizer = torch.optim.SGD(
        model.parameters(),
        lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )

    traindir = os.path.join(args.data, 'train')
    validdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(
            brightness=0.4,
            contrast=0.4,
            saturation=0.4,
            hue=0.2),
        transforms.ToTensor(),
        normalize,
    ])
    train_dataset = get_dataset(traindir, os.path.join(args.data, 'meta/train.txt'), train_transform)

    valid_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])
    valid_dataset = get_dataset(validdir, os.path.join(args.data, 'meta/val.txt'), valid_transform)

    # train_queue = torch.utils.data.DataLoader(
    #     train_dataset, batch_size=args.batch_size // world_size, sampler=DistributedSampler(train_dataset),
    #     pin_memory=True, num_workers=4)

    # valid_queue = torch.utils.data.DataLoader(
    #     valid_dataset, batch_size=args.batch_size // world_size, sampler=DistributedSampler(valid_dataset),
    #     pin_memory=True, num_workers=4)

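    # NOTE: without the DistributedSampler loaders commented out above, every
    # rank iterates the full dataset independently.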
    train_queue = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=4)

    valid_queue = torch.utils.data.DataLoader(
        valid_dataset, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=4)


    if args.warm_start:
        scheduler = utils.WarmStart(optimizer, gamma=args.warm_start_gamma)
    else:
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_period, gamma=args.gamma)

    best_acc_top1 = 0
    for epoch in range(args.epochs):
        '''if epoch == 0 or epoch == 1:
          for param_group in optimizer.param_groups:
            param_group['lr'] = args.warm_up_learning_rate
        elif epoch == 2:
          for param_group in optimizer.param_groups:
            param_group['lr'] = args.learning_rate
        else:
          scheduler.step()'''

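        # Pre-1.1 PyTorch idiom: the scheduler is stepped at the top of the
        # epoch; on PyTorch >= 1.1 this shifts the LR schedule by one epoch.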
        scheduler.step()

        if rank == 0:
            logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

        train_acc, train_obj = train(train_queue, model, criterion_smooth, optimizer, rank)
        if rank == 0:
            logging.info('train_acc %f', train_acc)
            logger.add_scalar("epoch_train_acc", train_acc, epoch)
            logger.add_scalar("epoch_train_loss", train_obj, epoch)

        valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model, criterion, rank)
        if rank == 0:
            logging.info('valid_acc_top1 %f', valid_acc_top1)
            logging.info('valid_acc_top5 %f', valid_acc_top5)
            logger.add_scalar("epoch_valid_acc_top1", valid_acc_top1, epoch)
            logger.add_scalar("epoch_valid_acc_top5", valid_acc_top5, epoch)

        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True

        if args.warm_start:
            # Roll back to the best checkpoint once warm start stops improving;
            # the original `if True:` forced this rollback on every epoch.
            if not is_best and not scheduler.lr_const:
                if rank == 0:
                    logging.info('warm start ended lr %e', scheduler.get_lr()[0])
                    logging.info("=> loading checkpoint '{}'".format(args.save))

                # checkpoint = torch.load(os.path.join(args.save, 'model_best.pth.tar'))

                checkpoint = torch.load(os.path.join(args.save, 'model_best.pth.tar'),
                                        map_location=lambda storage, loc: storage)

                best_acc_top1 = checkpoint['best_acc_top1']
                model.load_state_dict(checkpoint['state_dict'])
                optimizer.load_state_dict(checkpoint['optimizer'])
                scheduler.lr_const = True
                if rank == 0:
                    logging.info('return to last checkpoint')

                del checkpoint  # dropping the reference before empty_cache() seems crucial
                torch.cuda.empty_cache()

                # args.start_epoch = checkpoint['epoch']

        if rank == 0:
            utils.save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc_top1': best_acc_top1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.save)
Esempio n. 14
0
def main():
    if not torch.cuda.is_available():
        logging.info('No GPU device available')
        sys.exit(1)
    np.random.seed(config.seed)
    cudnn.benchmark = True
    torch.manual_seed(config.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(config.seed)
    logging.info("config = %s", config)
    num_gpus = torch.cuda.device_count()   
    genotype = config.genotype
    print('---------Genotype---------')
    logging.info(genotype)
    print('--------------------------') 
    model = Network(config.init_channels, CLASSES, config.layers, True, genotype)
    if num_gpus > 1:
        model = nn.DataParallel(model)
        model = model.cuda()
    else:
        model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, 0.1)
    criterion_smooth = criterion_smooth.cuda()

    optimizer = torch.optim.SGD(
        model.parameters(),
        config.lr,
        momentum=config.momentum,
        weight_decay=config.weight_decay
        )
    data_dir = os.path.join(config.data_path, 'imagenet')
    traindir = os.path.join(data_dir, 'train')
    validdir = os.path.join(data_dir, 'valid')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
                hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=config.batch_size, shuffle=True, pin_memory=True, num_workers=config.workers)

    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=config.batch_size, shuffle=False, pin_memory=True, num_workers=config.workers)

#    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, config.decay_period, gamma=config.gamma)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(config.epochs))
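    # NOTE: this scheduler is created but never stepped; adjust_lr() below
    # drives the learning rate instead.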
    best_acc_top1 = 0
    best_acc_top5 = 0
    lr = config.lr
    for epoch in range(config.epochs):
        current_lr = adjust_lr(optimizer, epoch)
        logging.info('Epoch: %d lr %e', epoch, current_lr)
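        # Linear warm-up over the first 5 epochs when using large batches.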
        if epoch < 5 and config.batch_size > 256:
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr * (epoch + 1) / 5.0
            logging.info('Warming-up Epoch: %d, LR: %e', epoch, lr * (epoch + 1) / 5.0)
        if num_gpus > 1:
            model.module.drop_path_prob = config.drop_path_prob * epoch / config.epochs
        else:
            model.drop_path_prob = config.drop_path_prob * epoch / config.epochs
        epoch_start = time.time()
        train_acc, train_obj = train(train_queue, model, criterion_smooth, optimizer)
        logging.info('Train_acc: %f', train_acc)

        valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model, criterion)
        logging.info('Valid_acc_top1: %f', valid_acc_top1)
        logging.info('Valid_acc_top5: %f', valid_acc_top5)
        epoch_duration = time.time() - epoch_start
        logging.info('Epoch time: %ds.', epoch_duration)
        is_best = False
        if valid_acc_top5 > best_acc_top5:
            best_acc_top5 = valid_acc_top5
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True
        utils.save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_acc_top1': best_acc_top1,
            'optimizer' : optimizer.state_dict(),
            }, is_best, config.path)
Esempio n. 15
0
class Trainer:
    def __init__(self,
                 args: Namespace,
                 genotype: Genotype,
                 my_dataset: MyDataset,
                 choose_cell=False):

        self.__args = args
        self.__dataset = my_dataset
        self.__previous_epochs = 0

        if args.seed is None:
            raise Exception('seed must be specified.')
        elif args.epochs is None:
            raise Exception('epochs must be specified.')
        if not (args.arch or args.arch_path):
            raise Exception('either arch or arch_path must be specified.')

        log_format = '%(asctime)s %(message)s'
        logging.basicConfig(stream=sys.stdout,
                            level=logging.INFO,
                            format=log_format,
                            datefmt='%m/%d %I:%M:%S %p')
        fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
        fh.setFormatter(logging.Formatter(log_format))
        logging.getLogger().addHandler(fh)
        np.random.seed(args.seed)
        cudnn.benchmark = True
        cudnn.enabled = True
        torch.manual_seed(args.seed)

        logging.info(f'gpu device = {args.gpu}')
        logging.info(f'args = {args}')

        logging.info(f'Train genotype: {genotype}')

        if my_dataset == MyDataset.CIFAR10:
            self.model = NetworkCIFAR(args.init_ch, 10, args.layers,
                                      args.auxiliary, genotype)
            train_transform, valid_transform = utils._data_transforms_cifar10(
                args)
            train_data = dset.CIFAR10(root=args.data,
                                      train=True,
                                      download=True,
                                      transform=train_transform)
            valid_data = dset.CIFAR10(root=args.data,
                                      train=False,
                                      download=True,
                                      transform=valid_transform)

        elif my_dataset == MyDataset.CIFAR100:
            self.model = NetworkCIFAR(args.init_ch, 100, args.layers,
                                      args.auxiliary, genotype)
            train_transform, valid_transform = utils._data_transforms_cifar100(
                args)
            train_data = dset.CIFAR100(root=args.data,
                                       train=True,
                                       download=True,
                                       transform=train_transform)
            valid_data = dset.CIFAR100(root=args.data,
                                       train=False,
                                       download=True,
                                       transform=valid_transform)

        elif my_dataset == MyDataset.ImageNet:
            self.model = NetworkImageNet(args.init_ch, 1000, args.layers,
                                         args.auxiliary, genotype)
            self.__criterion_smooth = CrossEntropyLabelSmooth(
                1000, args.label_smooth).to(device)
            traindir = os.path.join(args.data, 'train')
            validdir = os.path.join(args.data, 'val')
            normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                             std=[0.229, 0.224, 0.225])
            train_data = dset.ImageFolder(
                traindir,
                transforms.Compose([
                    transforms.RandomResizedCrop(224),
                    transforms.RandomHorizontalFlip(),
                    transforms.ColorJitter(brightness=0.4,
                                           contrast=0.4,
                                           saturation=0.4,
                                           hue=0.2),
                    transforms.ToTensor(),
                    normalize,
                ]))
            valid_data = dset.ImageFolder(
                validdir,
                transforms.Compose([
                    transforms.Resize(256),
                    transforms.CenterCrop(224),
                    transforms.ToTensor(),
                    normalize,
                ]))
        else:
            raise Exception(f'unsupported dataset: {my_dataset}')

        checkpoint = None
        if use_DataParallel:
            print('use Data Parallel')
            if args.checkpoint_path:
                checkpoint = torch.load(args.checkpoint_path)
                utils.load(self.model, checkpoint['state_dict'],
                           args.to_parallel)
                self.__previous_epochs = checkpoint['epoch']
                args.epochs -= self.__previous_epochs
                if args.epochs <= 0:
                    raise Exception('args.epochs is too small.')

            self.model = nn.DataParallel(self.model)
            self.__module = self.model.module
            torch.cuda.manual_seed_all(args.seed)
        else:
            if args.checkpoint_path:
                checkpoint = torch.load(args.checkpoint_path)
                utils.load(self.model, checkpoint['state_dict'],
                           args.to_parallel)
                args.epochs -= checkpoint['epoch']
                if args.epochs <= 0:
                    raise Exception('args.epochs is too small.')
            torch.cuda.manual_seed(args.seed)
            self.__module = self.model

        self.model.to(device)

        param_size = utils.count_parameters_in_MB(self.model)
        logging.info(f'param size = {param_size}MB')

        self.__criterion = nn.CrossEntropyLoss().to(device)

        self.__optimizer = torch.optim.SGD(self.__module.parameters(),
                                           args.lr,
                                           momentum=args.momentum,
                                           weight_decay=args.wd)
        if checkpoint:
            self.__optimizer.load_state_dict(checkpoint['optimizer'])

        num_workers = torch.cuda.device_count() * 4
        if choose_cell:
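            # Carve the training set into two disjoint subsets so cell choice
            # is validated on held-out training data.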
            num_train = len(train_data)  # 50000
            indices = list(range(num_train))
            split = int(np.floor(args.train_portion * num_train))  # 25000

            self.__train_queue = torch.utils.data.DataLoader(
                train_data,
                batch_size=args.batchsz,
                sampler=torch.utils.data.sampler.SubsetRandomSampler(
                    indices[:split]),
                pin_memory=True,
                num_workers=num_workers)

            self.__valid_queue = torch.utils.data.DataLoader(
                train_data,
                batch_size=args.batchsz,
                sampler=torch.utils.data.sampler.SubsetRandomSampler(
                    indices[split:]),
                pin_memory=True,
                num_workers=num_workers)
        else:
            self.__train_queue = torch.utils.data.DataLoader(
                train_data,
                batch_size=args.batchsz,
                shuffle=True,
                pin_memory=True,
                num_workers=num_workers)

            self.__valid_queue = torch.utils.data.DataLoader(
                valid_data,
                batch_size=args.batchsz,
                shuffle=False,
                pin_memory=True,
                num_workers=num_workers)

        # The original `== CIFAR10 or MyDataset.CIFAR100` was always truthy;
        # membership testing expresses the intended check.
        if my_dataset in (MyDataset.CIFAR10, MyDataset.CIFAR100):
            self.__scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
                self.__optimizer, args.epochs)
        elif my_dataset == MyDataset.ImageNet:
            self.__scheduler = torch.optim.lr_scheduler.StepLR(
                self.__optimizer, args.decay_period, gamma=args.gamma)
        else:
            raise Exception(f'unsupported dataset: {my_dataset}')

        if checkpoint:
            self.__scheduler.load_state_dict(checkpoint['scheduler'])

    def __train_epoch(self, train_queue, model, criterion, optimizer, epoch):
        objs = utils.AverageMeter()
        top1 = utils.AverageMeter()
        top5 = utils.AverageMeter()
        model.train()

        with tqdm(train_queue) as progress:
            progress.set_description_str(f'Train epoch {epoch}')

            for step, (x, target) in enumerate(progress):

                x, target = x.to(device), target.to(device, non_blocking=True)

                optimizer.zero_grad()
                logits, logits_aux = model(x)
                loss = criterion(logits, target)
                if self.__args.auxiliary:
                    loss_aux = criterion(logits_aux, target)
                    loss += self.__args.auxiliary_weight * loss_aux
                loss.backward()
                nn.utils.clip_grad_norm_(
                    model.module.parameters() if use_DataParallel else
                    model.parameters(), self.__args.grad_clip)
                optimizer.step()

                prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
                n = x.size(0)
                objs.update(loss.item(), n)
                top1.update(prec1.item(), n)
                top5.update(prec5.item(), n)

                progress.set_postfix_str(f'loss: {objs.avg}, top1: {top1.avg}')

                if step % self.__args.report_freq == 0:
                    logging.info(
                        f'Step:{step:03} loss:{objs.avg} acc1:{top1.avg} acc5:{top5.avg}'
                    )

        return top1.avg, objs.avg

    def __infer_epoch(self, valid_queue, model, criterion, epoch):
        objs = utils.AverageMeter()
        top1 = utils.AverageMeter()
        top5 = utils.AverageMeter()
        model.eval()

        with tqdm(valid_queue) as progress:
            for step, (x, target) in enumerate(progress):
                progress.set_description_str(f'Valid epoch {epoch}')

                x = x.to(device)
                target = target.to(device, non_blocking=True)

                with torch.no_grad():
                    logits, _ = model(x)
                    loss = criterion(logits, target)

                    prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
                    n = x.size(0)
                    objs.update(loss.item(), n)
                    top1.update(prec1.item(), n)
                    top5.update(prec5.item(), n)

                progress.set_postfix_str(f'loss: {objs.avg}, top1: {top1.avg}')

                if step % self.__args.report_freq == 0:
                    logging.info(
                        f'>> Validation: {step:03} {objs.avg} {top1.avg} {top5.avg}'
                    )

        return top1.avg, top5.avg, objs.avg

    def train(self) -> Tuple[float, float, float]:

        best_acc_top1 = 0
        for epoch in tqdm(range(self.__args.epochs), desc='Total Progress'):
            self.__scheduler.step()
            logging.info(f'epoch {epoch} lr {self.__scheduler.get_lr()[0]}')
            self.__module.drop_path_prob = self.__args.drop_path_prob * epoch / self.__args.epochs

            train_acc, train_obj = self.__train_epoch(self.__train_queue,
                                                      self.model,
                                                      self.__criterion,
                                                      self.__optimizer,
                                                      epoch + 1)
            logging.info(f'train_acc: {train_acc}')

            valid_acc_top1, valid_acc_top5, valid_obj = self.__infer_epoch(
                self.__valid_queue, self.model, self.__criterion, epoch + 1)
            logging.info(f'valid_acc: {valid_acc_top1}')
            if self.__dataset == MyDataset.ImageNet:
                logging.info(f'valid_acc_top5 {valid_acc_top5}')

            is_best = False
            if valid_acc_top1 > best_acc_top1:
                best_acc_top1 = valid_acc_top1
                is_best = True

            utils.save(self.model, os.path.join(self.__args.save,
                                                'trained.pt'))
            utils.save_checkpoint(
                {
                    'epoch': epoch + 1 + self.__previous_epochs,
                    'state_dict': self.model.state_dict(),
                    'optimizer': self.__optimizer.state_dict(),
                    'scheduler': self.__scheduler.state_dict()
                },
                is_best=is_best,
                save_path=self.__args.save)
            print('saved to: trained.pt and checkpoint.pth.tar')

        return train_acc, valid_acc_top1, best_acc_top1
Esempio n. 16
0
def main():
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)

  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled = True
  torch.cuda.manual_seed(args.seed)
  logging.info('gpu device = %d' % args.gpu)
  logging.info("args = %s", args)
  #6/28 04:21:06 PM args = Namespace(arch='DARTS', auxiliary=False, auxiliary_weight=0.4, batch_size=128, data='../data/imagenet/', decay_period=1, drop_path_prob=0, epochs=250, gamma=0.97, gpu=0, grad_clip=5.0, init_channels=48, label_smooth=0.1, layers=14, learning_rate=0.1, momentum=0.9, parallel=False, report_freq=100, save='eval-EXP-20190628-162106', seed=0, weight_decay=3e-05)

  genotype = eval("genotypes.%s" % args.arch)
  logging.info('genotype = %s', genotype)
  #06/28 04:21:06 PM genotype = Genotype(normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect', 0), ('skip_connect', 0), ('dil_conv_3x3', 2)], normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('max_pool_3x3', 1)], reduce_concat=[2, 3, 4, 5])

  model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
  # i.e. Network(init_channels=48, num_classes=1000, layers=14, ...) per the args dump above
  if args.parallel:
    model = nn.DataParallel(model).cuda()
  else:
    model = model.cuda()

  logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
  #06/28 04:21:08 PM param size = 4.718752MB

  criterion = nn.CrossEntropyLoss()
  criterion = criterion.cuda()
  criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
  criterion_smooth = criterion_smooth.cuda()

  optimizer = torch.optim.SGD(
    model.parameters(),
    args.learning_rate,
    momentum=args.momentum,
    weight_decay=args.weight_decay
    )

  traindir = os.path.join(args.data, 'train')
  validdir = os.path.join(args.data, 'val')
  normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  train_data = dset.ImageFolder(
    traindir,
    transforms.Compose([
      transforms.RandomResizedCrop(224),
      transforms.RandomHorizontalFlip(),
      transforms.ColorJitter(
        brightness=0.4,
        contrast=0.4,
        saturation=0.4,
        hue=0.2),
      transforms.ToTensor(),
      normalize,
    ]))
  valid_data = dset.ImageFolder(
    validdir,
    transforms.Compose([
      transforms.Resize(256),
      transforms.CenterCrop(224),
      transforms.ToTensor(),
      normalize,
    ]))

  train_queue = torch.utils.data.DataLoader(
    train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=4)

  valid_queue = torch.utils.data.DataLoader(
    valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=4)

  scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_period, gamma=args.gamma)

  best_acc_top1 = 0
  for epoch in range(args.epochs):
    scheduler.step()
    logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
    model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

    train_acc, train_obj = train(train_queue, model, criterion_smooth, optimizer)
    logging.info('train_acc %f', train_acc)

    valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model, criterion)
    logging.info('valid_acc_top1 %f', valid_acc_top1)
    logging.info('valid_acc_top5 %f', valid_acc_top5)

    is_best = False
    if valid_acc_top1 > best_acc_top1:
      best_acc_top1 = valid_acc_top1
      is_best = True

    utils.save_checkpoint({
      'epoch': epoch + 1,
      'state_dict': model.state_dict(),
      'best_acc_top1': best_acc_top1,
      'optimizer' : optimizer.state_dict(),
      }, is_best, args.save)
Esempio n. 17
0
def main():
    global best_top1, args, logger

    args.distributed = False
    if 'WORLD_SIZE' in os.environ:
        args.distributed = int(os.environ['WORLD_SIZE']) > 1

    # commented because it is now set as an argparse param.
    # args.gpu = 0
    args.world_size = 1

    if args.distributed:
        args.gpu = args.local_rank % torch.cuda.device_count()
        torch.cuda.set_device(args.gpu)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')
        args.world_size = torch.distributed.get_world_size()

    # note the gpu is used for directory creation and log files
    # which is needed when run as multiple processes
    args = utils.initialize_files_and_args(args)
    logger = utils.logging_setup(args.log_file_path)

    if args.fp16:
        assert torch.backends.cudnn.enabled, "fp16 mode requires cudnn backend to be enabled."

    if args.static_loss_scale != 1.0 and not args.fp16:
        logger.info(
            "Warning: if --fp16 is not used, static_loss_scale will be ignored."
        )

    # load the correct ops dictionary
    op_dict_to_load = "operations.%s" % args.ops
    logger.info('loading op dict: ' + str(op_dict_to_load))
    op_dict = eval(op_dict_to_load)

    # load the correct primitives list
    primitives_to_load = "genotypes.%s" % args.primitives
    logger.info('loading primitives:' + primitives_to_load)
    primitives = eval(primitives_to_load)
    logger.info('primitives: ' + str(primitives))
    # create model
    genotype = eval("genotypes.%s" % args.arch)
    # get the number of output channels
    classes = dataset.class_dict[args.dataset]
    # create the neural network
    if args.dataset == 'imagenet':
        model = NetworkImageNet(args.init_channels,
                                classes,
                                args.layers,
                                args.auxiliary,
                                genotype,
                                op_dict=op_dict,
                                C_mid=args.mid_channels)
        flops_shape = [1, 3, 224, 224]
    else:
        model = NetworkCIFAR(args.init_channels,
                             classes,
                             args.layers,
                             args.auxiliary,
                             genotype,
                             op_dict=op_dict,
                             C_mid=args.mid_channels)
        flops_shape = [1, 3, 32, 32]
    model.drop_path_prob = 0.0
    # if args.pretrained:
    #     logger.info("=> using pre-trained model '{}'".format(args.arch))
    #     model = models.__dict__[args.arch](pretrained=True)
    # else:
    #     logger.info("=> creating model '{}'".format(args.arch))
    #     model = models.__dict__[args.arch]()

    if args.flops:
        model = model.cuda()
        logger.info("param size = %fMB", utils.count_parameters_in_MB(model))
        logger.info("flops_shape = " + str(flops_shape))
        logger.info("flops = " +
                    utils.count_model_flops(model, data_shape=flops_shape))
        return

    if args.sync_bn:
        import apex
        logger.info("using apex synced BN")
        model = apex.parallel.convert_syncbn_model(model)

    model = model.cuda()
    if args.fp16:
        model = network_to_half(model)
    if args.distributed:
        # By default, apex.parallel.DistributedDataParallel overlaps communication with
        # computation in the backward pass.
        # model = DDP(model)
        # delay_allreduce delays all communication to the end of the backward pass.
        model = DDP(model, delay_allreduce=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    # Scale learning rate based on global batch size
    args.learning_rate = args.learning_rate * float(
        args.batch_size * args.world_size) / 256.
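    # Linear scaling rule (Goyal et al., 2017): LR grows in proportion to the
    # global batch size relative to a base batch of 256.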
    init_lr = args.learning_rate / args.warmup_lr_divisor
    optimizer = torch.optim.SGD(model.parameters(),
                                init_lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # epoch_count = args.epochs - args.start_epoch
    # scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(epoch_count))
    # scheduler = warmup_scheduler.GradualWarmupScheduler(
    #     optimizer, args.warmup_lr_divisor, args.warmup_epochs, scheduler)

    if args.fp16:
        optimizer = FP16_Optimizer(optimizer,
                                   static_loss_scale=args.static_loss_scale,
                                   dynamic_loss_scale=args.dynamic_loss_scale)

    # Optionally resume from a checkpoint
    if args.resume or args.evaluate:
        if args.evaluate:
            args.resume = args.evaluate
        # Use a local scope to avoid dangling references
        def resume():
            global best_top1  # without this, the restored value stayed local to resume()
            if os.path.isfile(args.resume):
                logger.info("=> loading checkpoint '{}'".format(args.resume))
                checkpoint = torch.load(
                    args.resume,
                    map_location=lambda storage, loc: storage.cuda(args.gpu))
                args.start_epoch = checkpoint['epoch']
                if 'best_top1' in checkpoint:
                    best_top1 = checkpoint['best_top1']
                model.load_state_dict(checkpoint['state_dict'])
                # An FP16_Optimizer instance's state dict internally stashes the master params.
                optimizer.load_state_dict(checkpoint['optimizer'])
                # TODO(ahundt) make sure scheduler loading isn't broken:
                # `scheduler` is never constructed here (its creation is
                # commented out above), so the 'lr_scheduler' branch would
                # raise a NameError, and this local `lr_schedule` is
                # recomputed after resume anyway.
                if 'lr_scheduler' in checkpoint:
                    scheduler.load_state_dict(checkpoint['lr_scheduler'])
                elif 'lr_schedule' in checkpoint:
                    lr_schedule = checkpoint['lr_schedule']
                logger.info("=> loaded checkpoint '{}' (epoch {})".format(
                    args.resume, checkpoint['epoch']))
            else:
                logger.info("=> no checkpoint found at '{}'".format(
                    args.resume))

        resume()

    # # Data loading code
    # traindir = os.path.join(args.data, 'train')
    # valdir = os.path.join(args.data, 'val')

    # if(args.arch == "inception_v3"):
    #     crop_size = 299
    #     val_size = 320 # I chose this value arbitrarily, we can adjust.
    # else:
    #     crop_size = 224
    #     val_size = 256

    # train_dataset = datasets.ImageFolder(
    #     traindir,
    #     transforms.Compose([
    #         transforms.RandomResizedCrop(crop_size),
    #         transforms.RandomHorizontalFlip(),
    #         autoaugment.ImageNetPolicy(),
    #         # transforms.ToTensor(),  # Too slow, moved to data_prefetcher()
    #         # normalize,
    #     ]))
    # val_dataset = datasets.ImageFolder(valdir, transforms.Compose([
    #         transforms.Resize(val_size),
    #         transforms.CenterCrop(crop_size)
    #     ]))

    # train_sampler = None
    # val_sampler = None
    # if args.distributed:
    #     train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    #     val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)

    # train_loader = torch.utils.data.DataLoader(
    #     train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
    #     num_workers=args.workers, pin_memory=True, sampler=train_sampler, collate_fn=fast_collate)

    # val_loader = torch.utils.data.DataLoader(
    #     val_dataset,
    #     batch_size=args.batch_size, shuffle=False,
    #     num_workers=args.workers, pin_memory=True,
    #     sampler=val_sampler,
    #     collate_fn=fast_collate)

    # Get preprocessing functions (i.e. transforms) to apply on data
    # normalize_as_tensor = False because we normalize and convert to a
    # tensor in our custom prefetching function, rather than as part of
    # the transform preprocessing list.
    train_transform, valid_transform = utils.get_data_transforms(
        args, normalize_as_tensor=False)
    # Get the training queue, select training and validation from training set
    train_loader, val_loader = dataset.get_training_queues(
        args.dataset,
        train_transform,
        valid_transform,
        args.data,
        args.batch_size,
        train_proportion=1.0,
        collate_fn=fast_collate,
        distributed=args.distributed,
        num_workers=args.workers)

    if args.evaluate:
        if args.dataset == 'cifar10':
            # evaluate best model weights on cifar 10.1
            # https://github.com/modestyachts/CIFAR-10.1
            train_transform, valid_transform = utils.get_data_transforms(args)
            # Get the training queue, select training and validation from training set
            # Get the training queue, use full training and test set
            train_queue, valid_queue = dataset.get_training_queues(
                args.dataset,
                train_transform,
                valid_transform,
                args.data,
                args.batch_size,
                train_proportion=1.0,
                search_architecture=False)
            test_data = cifar10_1.CIFAR10_1(root=args.data,
                                            download=True,
                                            transform=valid_transform)
            test_queue = torch.utils.data.DataLoader(
                test_data,
                batch_size=args.batch_size,
                shuffle=False,
                pin_memory=True,
                num_workers=args.workers)
            eval_stats = evaluate(args,
                                  model,
                                  criterion,
                                  train_queue=train_queue,
                                  valid_queue=valid_queue,
                                  test_queue=test_queue)
            with open(args.stats_file, 'w') as f:
                # TODO(ahundt) fix "TypeError: 1869 is not JSON serializable" to include arg info, see train.py
                # arg_dict = vars(args)
                # arg_dict.update(eval_stats)
                # json.dump(arg_dict, f)
                json.dump(eval_stats, f)
            logger.info("flops = " + utils.count_model_flops(model))
            logger.info(utils.dict_to_log_string(eval_stats))
            logger.info('\nEvaluation of Loaded Model Complete! Save dir: ' +
                        str(args.save))
        else:
            validate(val_loader, model, criterion, args)
        return

    lr_schedule = cosine_power_annealing(
        epochs=args.epochs,
        max_lr=args.learning_rate,
        min_lr=args.learning_rate_min,
        warmup_epochs=args.warmup_epochs,
        exponent_order=args.lr_power_annealing_exponent_order,
        restart_lr=args.restart_lr)
    epochs = np.arange(args.epochs) + args.start_epoch
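    # lr_schedule is a precomputed per-epoch LR array; it is written into the
    # optimizer's param groups manually inside the epoch loop below.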

    stats_csv = args.epoch_stats_file
    stats_csv = stats_csv.replace('.json', '.csv')
    with tqdm(epochs,
              dynamic_ncols=True,
              disable=args.local_rank != 0,
              leave=False) as prog_epoch:
        best_stats = {}
        stats = {}
        epoch_stats = []
        best_epoch = 0
        for epoch, learning_rate in zip(prog_epoch, lr_schedule):
            if args.distributed and train_loader.sampler is not None:
                train_loader.sampler.set_epoch(int(epoch))
            # if args.distributed:
            # train_sampler.set_epoch(epoch)
            # update the learning rate
            for param_group in optimizer.param_groups:
                param_group['lr'] = learning_rate
            # scheduler.step()
            model.drop_path_prob = args.drop_path_prob * float(epoch) / float(
                args.epochs)
            # train for one epoch
            train_stats = train(train_loader, model, criterion, optimizer,
                                int(epoch), args)
            if args.prof:
                break
            # evaluate on validation set
            top1, val_stats = validate(val_loader, model, criterion, args)
            stats.update(train_stats)
            stats.update(val_stats)
            # stats['lr'] = '{0:.5f}'.format(scheduler.get_lr()[0])
            stats['lr'] = '{0:.5f}'.format(learning_rate)
            stats['epoch'] = epoch

            # remember best top1 and save checkpoint
            if args.local_rank == 0:
                is_best = top1 > best_top1
                best_top1 = max(top1, best_top1)
                stats['best_top1'] = '{0:.3f}'.format(best_top1)
                if is_best:
                    best_epoch = epoch
                    best_stats = copy.deepcopy(stats)
                stats['best_epoch'] = best_epoch

                stats_str = utils.dict_to_log_string(stats)
                logger.info(stats_str)
                save_checkpoint(
                    {
                        'epoch': epoch,
                        'arch': args.arch,
                        'state_dict': model.state_dict(),
                        'best_top1': best_top1,
                        'optimizer': optimizer.state_dict(),
                        # 'lr_scheduler': scheduler.state_dict()
                        'lr_schedule': lr_schedule,
                        'stats': best_stats
                    },
                    is_best,
                    path=args.save)
                prog_epoch.set_description(
                    'Overview ***** best_epoch: {0} best_valid_top1: {1:.2f} ***** Progress'
                    .format(best_epoch, best_top1))
            epoch_stats += [copy.deepcopy(stats)]
            with open(args.epoch_stats_file, 'w') as f:
                json.dump(epoch_stats, f, cls=utils.NumpyEncoder)
            utils.list_of_dicts_to_csv(stats_csv, epoch_stats)
        stats_str = utils.dict_to_log_string(best_stats, key_prepend='best_')
        logger.info(stats_str)
        with open(args.stats_file, 'w') as f:
            arg_dict = vars(args)
            arg_dict.update(best_stats)
            json.dump(arg_dict, f, cls=utils.NumpyEncoder)
        with open(args.epoch_stats_file, 'w') as f:
            json.dump(epoch_stats, f, cls=utils.NumpyEncoder)
        utils.list_of_dicts_to_csv(stats_csv, epoch_stats)
        logger.info('Training of Final Model Complete! Save dir: ' +
                    str(args.save))
Esempio n. 18
0
def main():
    if not torch.cuda.is_available():
        logging.info('No GPU device available')
        sys.exit(1)

    num_gpus = torch.cuda.device_count()
    args.gpu = args.local_rank % num_gpus
    torch.cuda.set_device(args.gpu)

    np.random.seed(args.seed)
    cudnn.benchmark = True
    cudnn.deterministic = True

    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info("args = %s", args)
    logging.info("unparsed_args = %s", unparsed)

    torch.distributed.init_process_group(backend='nccl', init_method='env://')
    args.world_size = torch.distributed.get_world_size()
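    # Give each worker an equal share of the global batch.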
    args.batch_size = args.batch_size // args.world_size

    genotype = eval("genotypes.%s" % args.arch)
    logging.info('---------Genotype---------')
    logging.info(genotype)
    logging.info('--------------------------')
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype)
    model = model.cuda(args.gpu)
    model = apex.parallel.DistributedDataParallel(model, delay_allreduce=True)

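    # Profile FLOPs on a separate, unwrapped copy of the network so the DDP
    # wrapper does not interfere with profiling.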
    model_profile = Network(args.init_channels, CLASSES, args.layers,
                            args.auxiliary, genotype)
    model_profile = model_profile.cuda(args.gpu)
    model_input_size_imagenet = (1, 3, 224, 224)
    model_profile.drop_path_prob = 0
    flops, _ = profile(model_profile, model_input_size_imagenet)
    logging.info("flops = %fMB, param size = %fMB", flops,
                 count_parameters_in_MB(model))

    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # Prepare data
    total_iters = per_epoch_iters * args.epochs
    train_loader = get_train_dataloader(args.train_dir, args.batch_size,
                                        args.local_rank, total_iters)
    train_dataprovider = DataIterator(train_loader)
    val_loader = get_val_dataloader(args.test_dir)
    val_dataprovider = DataIterator(val_loader)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))

    start_epoch = 0
    best_acc_top1 = 0
    best_acc_top5 = 0
    checkpoint_tar = os.path.join(args.save, 'checkpoint.pth.tar')
    if os.path.exists(checkpoint_tar):
        logging.info('loading checkpoint {} ..........'.format(checkpoint_tar))
        checkpoint = torch.load(
            checkpoint_tar,
            map_location={'cuda:0': 'cuda:{}'.format(args.local_rank)})
        start_epoch = checkpoint['epoch'] + 1
        model.load_state_dict(checkpoint['state_dict'])
        logging.info("loaded checkpoint {} epoch = {}".format(
            checkpoint_tar, checkpoint['epoch']))

    # evaluation mode
    if args.eval:
        if args.eval_resume is not None:
            checkpoint = torch.load(args.eval_resume)
            model.module.drop_path_prob = 0
            model.load_state_dict(checkpoint['state_dict'])
            valid_acc_top1, valid_acc_top5 = infer(val_dataprovider,
                                                   model.module, val_iters)
            print('valid_acc_top1: {}'.format(valid_acc_top1))
        exit(0)

    for epoch in range(start_epoch, args.epochs):
        if args.lr_scheduler == 'cosine':
            scheduler.step()
            current_lr = scheduler.get_lr()[0]
        elif args.lr_scheduler == 'linear':
            current_lr = adjust_lr(optimizer, epoch)
        else:
            logging.info('Unknown lr_scheduler type %s, exiting', args.lr_scheduler)
            sys.exit(1)

        logging.info('Epoch: %d lr %e', epoch, current_lr)
        if epoch < 5 and args.batch_size > 256:
            for param_group in optimizer.param_groups:
                param_group['lr'] = current_lr * (epoch + 1) / 5.0
            logging.info('Warming-up Epoch: %d, LR: %e', epoch,
                         current_lr * (epoch + 1) / 5.0)
        model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        epoch_start = time.time()
        train_acc, train_obj = train(train_dataprovider, model,
                                     criterion_smooth, optimizer,
                                     per_epoch_iters)

        writer.add_scalar('Train/Loss', train_obj, epoch)
        writer.add_scalar('Train/LR', current_lr, epoch)

        if args.local_rank == 0 and (epoch % 5 == 0
                                     or args.epochs - epoch < 10):
            valid_acc_top1, valid_acc_top5 = infer(val_dataprovider,
                                                   model.module, val_iters)
            is_best = False
            if valid_acc_top5 > best_acc_top5:
                best_acc_top5 = valid_acc_top5
            if valid_acc_top1 > best_acc_top1:
                best_acc_top1 = valid_acc_top1
                is_best = True

            logging.info('Valid_acc_top1: %f', valid_acc_top1)
            logging.info('Valid_acc_top5: %f', valid_acc_top5)
            logging.info('best_acc_top1: %f', best_acc_top1)
            epoch_duration = time.time() - epoch_start
            logging.info('Epoch time: %ds.', epoch_duration)

            save_checkpoint_(
                {
                    'epoch': epoch,
                    'state_dict': model.state_dict(),
                    'best_acc_top1': best_acc_top1,
                    'optimizer': optimizer.state_dict(),
                }, args.save)
Esempio n. 19
0
def main():
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype)
    if args.parallel:
        model = nn.DataParallel(model).cuda()
    else:
        model = model.cuda()

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    traindir = os.path.join(args.data, 'train')
    validdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=0.4,
                                   contrast=0.4,
                                   saturation=0.4,
                                   hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    train_queue = torch.utils.data.DataLoader(train_data,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              pin_memory=True,
                                              num_workers=4)

    valid_queue = torch.utils.data.DataLoader(valid_data,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=4)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                args.decay_period,
                                                gamma=args.gamma)

    best_acc_top1 = 0
    for epoch in range(args.epochs):
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

        train_acc, train_obj = train(train_queue, model, criterion_smooth,
                                     optimizer)
        logging.info('train_acc %f', train_acc)

        valid_acc_top1, valid_acc_top5, valid_obj = infer(
            valid_queue, model, criterion)
        logging.info('valid_acc_top1 %f', valid_acc_top1)
        logging.info('valid_acc_top5 %f', valid_acc_top5)

        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True

        utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc_top1': best_acc_top1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.save)
Esempio n. 20
0
def main():
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    print(torch.cuda.device_count())
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)

    if hvd.rank() == 0:
        logging.info('gpu device = %d' % args.gpu)
        logging.info("args = %s", args)

    best_acc_top1 = 0
    start_epoch = 0
    if args.resume:
        checkpoint = torch.load(os.path.join(args.save, 'checkpoint.pth.tar'))
        best_checkpoint = torch.load(
            os.path.join(args.save, 'model_best.pth.tar'))
        start_epoch = checkpoint['epoch']
        best_acc_top1 = best_checkpoint['best_acc_top1']
        start_epoch = hvd.broadcast(torch.tensor(start_epoch),
                                    root_rank=0,
                                    name='start_epoch').item()
        best_acc_top1 = hvd.broadcast(torch.tensor(best_acc_top1),
                                      root_rank=0,
                                      name='best_acc_top1').item()

    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype)

    if args.parallel:
        model = nn.DataParallel(model).cuda()
    else:
        model = model.cuda()

    if hvd.rank() == 0:
        logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate * hvd.size(),
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # ***************** horovod *******************
    optimizer = hvd.DistributedOptimizer(
        optimizer, named_parameters=model.named_parameters())
    # ***************** horovod *******************
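    # DistributedOptimizer allreduce-averages gradients across workers before
    # each step; the base LR above is scaled by hvd.size() to compensate.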

    traindir = os.path.join(args.data, 'train')
    validdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=0.4,
                                   contrast=0.4,
                                   saturation=0.4,
                                   hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_data, num_replicas=hvd.size(), rank=hvd.rank())
    train_queue = torch.utils.data.DataLoader(train_data,
                                              batch_size=args.batch_size,
                                              pin_memory=True,
                                              num_workers=args.num_workers,
                                              sampler=train_sampler)

    valid_queue = torch.utils.data.DataLoader(valid_data,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=args.num_workers)

    if start_epoch > 0 and hvd.rank() == 0:
        checkpoint = torch.load(os.path.join(args.save, 'checkpoint.pth.tar'))
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("checkpoint {}, model, optimizer was loaded".format(start_epoch))

    hvd.broadcast_parameters(model.state_dict(), root_rank=0)
    hvd.broadcast_optimizer_state(optimizer, root_rank=0)
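    # Make every rank start from rank 0's model weights and optimizer state.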

    if not args.resume:
        set_lr(0, 0, len(train_queue), optimizer, args.scheduler)

    for epoch in range(start_epoch, args.epochs + args.warmup_epochs):
        if hvd.rank() == 0:
            lr = optimizer.param_groups[0]['lr']
            logging.info('epoch %d lr %e', epoch, lr)
            with open(os.path.join(args.save, 'learning_rate.txt'),
                      mode='a') as f:
                f.write(str(lr) + '\n')

        if args.parallel:
            model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        else:
            model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

        hvd.broadcast_parameters(model.state_dict(), root_rank=0)

        train_acc, train_obj = train(train_queue, train_sampler, model,
                                     criterion_smooth, optimizer, epoch)
        if hvd.rank() == 0:
            logging.info('train_acc %f', train_acc)
            with open(os.path.join(args.save, "train_acc.txt"), mode='a') as f:
                f.write(str(train_acc) + '\n')
            with open(os.path.join(args.save, "train_loss.txt"),
                      mode='a') as f:
                f.write(str(train_obj) + '\n')

        valid_acc_top1, valid_acc_top5, valid_obj = infer(
            valid_queue, model, criterion)
        if hvd.rank() == 0:
            logging.info('valid_acc_top1 %f', valid_acc_top1)
            logging.info('valid_acc_top5 %f', valid_acc_top5)
            with open(os.path.join(args.save, "test_acc_1.txt"),
                      mode='a') as f:
                f.write(str(valid_acc_top1) + '\n')
            with open(os.path.join(args.save, "test_acc_5.txt"),
                      mode='a') as f:
                f.write(str(valid_acc_top5) + '\n')
            with open(os.path.join(args.save, "test_loss.txt"), mode='a') as f:
                f.write(str(valid_obj) + '\n')

        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True

        if hvd.rank() == 0:
            utils.save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_acc_top1': best_acc_top1,
                    'optimizer': optimizer.state_dict(),
                }, is_best, args.save)