Example #1
def train(epoch, net_new, trainloader, optimizer, fine=False):
    print('\nEpoch: %d' % epoch)
    net_new.train()
    train_loss = 0
    train_loss1 = 0
    train_loss2 = 0
    correct = 0
    total = 0
    optimizer = adjust_optimizer(optimizer, epoch, regime)
    for batch_idx, (inputs, input_idx, targets) in enumerate(trainloader):
        if fine:
            # keep the original coarse targets for the coarse-label loss,
            # then remap each target to its fine label via the label_f lookup
            targets_c = Variable(targets.cuda())
            for idx in range(len(targets)):
                targets[idx] = int(label_f[input_idx[idx]])
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        optimizer.zero_grad()
        inputs, targets = Variable(inputs), Variable(targets)
        outputs, feats = net_new(inputs)
        # loss1: fine-label loss; loss2: coarse-label loss over averaged logit pairs
        loss1 = criterion(outputs, targets)
        if fine:
            # adjacent pairs of fine logits are averaged back into a coarse logit
            loss2 = criterion((outputs[:, 0:20:2] + outputs[:, 1:20:2]) / 2.,
                              targets_c)
            loss = loss1 * 0.3 + loss2
        else:
            loss2 = loss1 * 0.  # targets_c only exists in fine mode
            loss = loss1
        loss.backward()
        optimizer.step()

        train_loss += loss.data[0]
        train_loss1 += loss1.data[0]
        train_loss2 += loss2.data[0]
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()
        if batch_idx % 20 == 0:
            logging.info('\n Epoch: [{0}][{1}/{2}]\t'
                         'Training Loss {train_loss:.3f} \t'
                         'Training Loss1 {train_loss1:.3f} \t'
                         'Training Loss2 {train_loss2:.3f} \t'
                         'Training Prec@1 {train_prec1:.3f} \t'.format(
                             epoch,
                             batch_idx,
                             len(trainloader),
                             train_loss=train_loss / (batch_idx + 1),
                             train_loss1=train_loss1 / (batch_idx + 1),
                             train_loss2=train_loss2 / (batch_idx + 1),
                             train_prec1=100. * correct / total))
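
Note: every example on this page assumes an adjust_optimizer(optimizer, epoch, regime) helper that is not shown. Based on how it is called here, and on the regime lambda defined in Example #3, a minimal hypothetical sketch could look like this (the real helper in each repository may differ):

# Hypothetical sketch of adjust_optimizer, inferred from its call sites.
# `regime` maps an epoch to a dict of hyperparameters, either as a callable
# (as in Example #3) or as an epoch-keyed dict.
def adjust_optimizer(optimizer, epoch, regime):
    settings = regime(epoch) if callable(regime) else regime.get(epoch, {})
    for param_group in optimizer.param_groups:
        for key, value in settings.items():
            param_group[key] = value
    return optimizer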
Example #2
def train(epoch):
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    global optimizer
    optimizer = adjust_optimizer(optimizer, epoch, regime)
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        optimizer.zero_grad()
        inputs, targets = Variable(inputs), Variable(targets)
        outputs, _ = net(inputs)
        # reconstruction-style loss: the output is compared against the input
        loss = criterion(outputs, inputs)
        loss.backward()
        optimizer.step()

        train_loss += loss.data[0]
        if batch_idx % 10 == 0:
            logging.info('\n Epoch: [{0}][{1}/{2}]\t'
                         'Training Loss {train_loss:.3f} \t'
                         .format(epoch, batch_idx, len(trainloader),
                                 train_loss=train_loss / (batch_idx + 1)))
Example #3
def main():
    global args
    args = parser.parse_args()
    if args.save == '':
        args.save = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    save_path = os.path.join(args.results_dir, args.save)
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    setup_logging(os.path.join(save_path, 'log.txt'))
    checkpoint_file = os.path.join(save_path, 'checkpoint_epoch_%s.pth.tar')

    logging.debug("run arguments: %s", args)
    logging.info("using pretrained cnn %s", args.cnn)
    cnn = resnet.__dict__[args.cnn](pretrained=True)

    vocab = build_vocab()
    model = CaptionModel(cnn, vocab,
                         embedding_size=args.embedding_size,
                         rnn_size=args.rnn_size,
                         num_layers=args.num_layers,
                         share_embedding_weights=args.share_weights)

    train_data = get_iterator(get_coco_data(vocab, train=True),
                              batch_size=args.batch_size,
                              max_length=args.max_length,
                              shuffle=True,
                              num_workers=args.workers)
    val_data = get_iterator(get_coco_data(vocab, train=False),
                            batch_size=args.eval_batch_size,
                            max_length=args.max_length,
                            shuffle=False,
                            num_workers=args.workers)

    if 'cuda' in args.type:
        cudnn.benchmark = True
        model.cuda()

    optimizer = select_optimizer(
        args.optimizer, params=model.parameters(), lr=args.lr)
    regime = lambda e: {'lr': args.lr * (args.lr_decay ** e),
                        'momentum': args.momentum,
                        'weight_decay': args.weight_decay}
    model.finetune_cnn(False)

    def forward(model, data, training=True, optimizer=None):
        use_cuda = 'cuda' in args.type
        loss = nn.CrossEntropyLoss()
        perplexity = AverageMeter()
        batch_time = AverageMeter()
        data_time = AverageMeter()

        if training:
            model.train()
        else:
            model.eval()

        end = time.time()
        for i, (imgs, (captions, lengths)) in enumerate(data):
            data_time.update(time.time() - end)
            if use_cuda:
                imgs = imgs.cuda()
                # `async` was renamed to `non_blocking` in PyTorch >= 0.4
                captions = captions.cuda(async=True)
            imgs = Variable(imgs, volatile=not training)
            captions = Variable(captions, volatile=not training)
            input_captions = captions[:-1]
            target_captions = pack_padded_sequence(captions, lengths)[0]

            pred, _ = model(imgs, input_captions, lengths)
            err = loss(pred, target_captions)
            perplexity.update(math.exp(err.data[0]))

            if training:
                optimizer.zero_grad()
                err.backward()
                clip_grad_norm(model.rnn.parameters(), args.grad_clip)
                optimizer.step()

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                logging.info('{phase} - Epoch: [{0}][{1}/{2}]\t'
                             'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                             'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                             'Perplexity {perp.val:.4f} ({perp.avg:.4f})'.format(
                                 epoch, i, len(data),
                                 phase='TRAINING' if training else 'EVALUATING',
                                 batch_time=batch_time,
                                 data_time=data_time, perp=perplexity))

        return perplexity.avg

    for epoch in range(args.start_epoch, args.epochs):
        if epoch >= args.finetune_epoch:
            model.finetune_cnn(True)
        optimizer = adjust_optimizer(
            optimizer, epoch, regime)
        # Train
        train_perp = forward(
            model, train_data, training=True, optimizer=optimizer)
        # Evaluate
        val_perp = forward(model, val_data, training=False)

        logging.info('\n Epoch: {0}\t'
                     'Training Perplexity {train_perp:.4f} \t'
                     'Validation Perplexity {val_perp:.4f} \n'
                     .format(epoch + 1, train_perp=train_perp, val_perp=val_perp))
        model.save_checkpoint(checkpoint_file % (epoch + 1))
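
Note: Example #3 also depends on an AverageMeter helper for the running statistics (.update(), .val, .avg). A minimal version matching that usage, in the style of the helper from the official PyTorch ImageNet example, is sketched below; the actual class in this repository may differ.

# Minimal AverageMeter matching the .update()/.val/.avg usage above.
class AverageMeter(object):
    def __init__(self):
        self.val = 0.0   # most recent value
        self.sum = 0.0   # running sum
        self.count = 0   # number of samples seen
        self.avg = 0.0   # running average

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count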
Example #4
update_sram_depth(model, args.sram_depth, args.quant_bound, args.prob_table,
                  args.prob_table_9, args.noise_std, args.noise_9_std,
                  args.noise_9_mean)

if args.evaluate:
    if args.sram_depth > 0:
        simplify_BN_parameters(model)
    if args.save_first_layer_out:
        record_input()
    else:
        if args.runs > 1:
            test_acc = torch.zeros((args.runs, )).cuda()
            for i in range(args.runs):
                test_loss, test_acc[i] = test(0)
            print("Average test accuracy: %.2f (%.2f)" %
                  (test_acc.mean(), test_acc.std()))
        else:
            test_loss, test_acc = test(0)
    exit(0)

for epoch in range(start_epoch, args.epochs):
    optimizer = adjust_optimizer(optimizer, epoch, regime)
    if epoch == start_epoch:
        print(optimizer)
    train_loss, train_acc = train(epoch)
    test_loss, test_acc = test(epoch)
    print(
        'Epoch: %d/%d | LR: %.4f | Train Loss: %.3f | Train Acc: %.2f | Test Loss: %.3f | Test Acc: %.2f (%.2f)'
        % (epoch + 1, args.epochs, optimizer.param_groups[0]['lr'], train_loss,
           train_acc, test_loss, test_acc, best_acc))
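
Note: Examples #1, #2, and #4 pass a global regime that is defined outside the snippet. With the adjust_optimizer sketch after Example #1, an epoch-keyed dict such as the hypothetical schedule below would work: each entry lists only the hyperparameters that change at that epoch, and the values persist afterwards because optimizer.param_groups keeps the last applied settings.

# Hypothetical step-decay regime (illustrative values, not from the source).
regime = {
    0:   {'lr': 1e-1, 'momentum': 0.9, 'weight_decay': 1e-4},
    81:  {'lr': 1e-2},
    122: {'lr': 1e-3},
}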
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--batchSize', type=int, default=64)
    parser.add_argument('--Epochs', type=int, default=175)
    parser.add_argument('--no-cuda', action='store_true')
    parser.add_argument('--save')
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--opt',
                        type=str,
                        default='sgd',
                        choices=('sgd', 'adam', 'rmsprop'))
    args = parser.parse_args()

    #Check for cuda
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    args.save = args.save or 'postprocessing'
    setproctitle.setproctitle(args.save)

    #manual seed on CPU or GPU
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    #Path for saving the progress
    if os.path.exists(args.save):
        shutil.rmtree(args.save)
    os.makedirs(args.save, exist_ok=True)

    # mean and std of the Fashion-MNIST train dataset images
    normMean = [0.2860405969887955]
    normStd = [0.35302424451492237]
    normTransform = transforms.Normalize(normMean, normStd)

    # Transforms: random crop, random horizontal flip
    trainTransform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),  # pads 28x28 to 36x36, then crops 32x32
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normTransform,
    ])

    # # Transforms : RandomRotation, RandomVerticalFlip
    # trainTransform = transforms.Compose([
    #     transforms.RandomRotation(90),
    #     transforms.RandomVerticalFlip(),
    #     transforms.ToTensor(),
    #     normTransform
    # ])
    testTransform = transforms.Compose([transforms.ToTensor(), normTransform])

    #Loading the datasets, if not found will be downloaded
    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
    loader_train = DataLoader(dataset.FashionMNIST(root='Fashion-MNIST',
                                                   train=True,
                                                   download=True,
                                                   transform=trainTransform),
                              batch_size=args.batchSize,
                              shuffle=True,
                              **kwargs)
    loader_test = DataLoader(dataset.FashionMNIST(root='Fashion-MNIST',
                                                  train=False,
                                                  download=True,
                                                  transform=testTransform),
                             batch_size=args.batchSize,
                             shuffle=False,
                             **kwargs)

    # Build the DenseNet model
    dense_net = densenet.DenseNet(growthRate=15,
                                  depth=100,
                                  reduction=0.5,
                                  bottleneck=True,
                                  nClasses=10)

    print('  + Number of params: {}'.format(
        sum([p.data.nelement() for p in dense_net.parameters()])))
    if args.cuda:
        dense_net = dense_net.cuda()
    else:
        print("no cuda")

    #Choosing the optimizer
    if args.opt == 'sgd':
        optimizer = optim.SGD(dense_net.parameters(),
                              lr=1e-1,
                              momentum=0.9,
                              weight_decay=1e-4)
    elif args.opt == 'adam':
        optimizer = optim.Adam(dense_net.parameters(), weight_decay=1e-4)
    elif args.opt == 'rmsprop':
        optimizer = optim.RMSprop(dense_net.parameters(), weight_decay=1e-4)

    #Progress being saved to csv files
    pfile_train = open(os.path.join(args.save, 'train.csv'), 'w')
    pfile_test = open(os.path.join(args.save, 'test.csv'), 'w')

    # running the training loop
    for epoch in range(1, args.Epochs + 1):
        adjust_optimizer(args.opt, optimizer, epoch)
        train(args, epoch, dense_net, loader_train, optimizer, pfile_train)
        test(args, epoch, dense_net, loader_test, pfile_test)
        torch.save(dense_net, os.path.join(args.save, 'latest.pth'))
        os.system('./plot.py {} &'.format(args.save))

    pfile_train.close()
    pfile_test.close()
    end = time.time()
    print(end - start)  # `start` is assumed to be recorded at the top of the script
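
Note: Example #5 calls adjust_optimizer(args.opt, optimizer, epoch) with a different signature, passing the optimizer's name instead of a regime. A hypothetical variant compatible with that call, assuming a simple step decay for SGD over the 175-epoch run, might be:

# Hypothetical variant matching adjust_optimizer(args.opt, optimizer, epoch).
# Only SGD is decayed here; Adam and RMSprop keep their initial settings.
def adjust_optimizer(opt_name, optimizer, epoch):
    if opt_name != 'sgd':
        return
    lr = 1e-1          # initial SGD learning rate from Example #5
    if epoch > 88:     # roughly halfway through 175 epochs (assumed milestone)
        lr = 1e-2
    if epoch > 132:    # final quarter (assumed milestone)
        lr = 1e-3
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr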