Example #1
def test(model, standout, epoch):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():  # replaces the deprecated Variable(..., volatile=True)
        for data, target in test_loader:
            if torch.cuda.is_available():
                data, target = data.cuda(), target.cuda()
            output = model(data)
            # sum up batch loss; reduction='sum' replaces the removed size_average=False
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            # index of the max log-probability
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    test_acc = 100. * correct / len(test_loader.dataset)
    print(
        '\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.5f}%)\n'.format(
            test_loss, correct, len(test_loader.dataset), test_acc))
    drop_way = "Standout" if standout else "Dropout"
    saveLog(test_loss, test_acc, correct, drop_way, args, epoch)
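
A minimal sketch of how test() might be wired up. Net, test_dataset, and the batch size are hypothetical stand-ins; the repository defines its own model, args, and test_loader:

import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader

model = Net()                           # hypothetical model class
if torch.cuda.is_available():
    model = model.cuda()
test_loader = DataLoader(test_dataset,  # hypothetical dataset
                         batch_size=64,
                         shuffle=False)
test(model, standout=False, epoch=0)    # logs under the "Dropout" label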
Example #2
def train(model, loss_func, dictionary, epoch, train_data, dev_data,
          identity_mat, stop_counter):
    global best_dev_loss, best_acc
    model.train()
    total_loss = 0
    for texts, labels, masks, bsz in utils.getBatch(data=train_data,
                                                    dictionary=dictionary,
                                                    maxlen=MAX_LEN,
                                                    batch_size=BATCH_SIZE):
        init_state = model.init_hidden(bsz)
        fc, outh, pred, attention = model(sents=texts,
                                          mask=masks,
                                          init_hc=init_state)

        loss = loss_func(pred.view(texts.size(0), -1), labels)
        if USE_ATTENTION:
            attentionT = torch.transpose(attention, 1, 2).contiguous()
            extra_loss = Frobenius(
                torch.bmm(attention, attentionT) -
                identity_mat[:attention.size(0)])
            loss += PENALIZATION_COEFF * extra_loss

        optimizer.zero_grad()
        loss.backward()

        nn.utils.clip_grad_norm_(model.parameters(), CLIP)
        optimizer.step()

        total_loss += loss.item()

    res, dev_loss, acc = evaluate(model, loss_func, dictionary, dev_data)
    print(res)
    utils.saveLog(LOG_PATH, res)

    total_res = 'epoch: %d, dev loss: %f, acc: %f' % (epoch + 1, dev_loss, acc)
    print(total_res)
    utils.saveLog(LOG_PATH, total_res)
    utils.div('-')

    if not best_dev_loss or dev_loss < best_dev_loss:
        with open(MODEL_PATH % (dev_loss, acc), 'wb') as f:
            torch.save(model, f)
        best_dev_loss = dev_loss
        stop_counter = 0
    else:
        for param_group in optimizer.param_groups:
            param_group['lr'] = param_group['lr'] * 0.2
        if EARLY_STOP != 0:
            stop_counter += 1

    return stop_counter
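
The Frobenius(...) helper is not shown in this snippet. A minimal sketch, assuming it implements the penalization term of Lin et al.'s structured self-attention (the Frobenius norm of A·Aᵀ − I, averaged over the batch); the repository's own version may differ:

import torch

def Frobenius(mat):
    # Expect a batch of square matrices: (batch, r, r).
    if mat.dim() != 3:
        raise ValueError('expected a batched matrix (batch, r, r)')
    # Frobenius norm of each matrix, then the batch mean; the small epsilon
    # keeps the gradient of sqrt finite at zero.
    return torch.sqrt((mat ** 2).sum(dim=(1, 2)) + 1e-10).mean()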
Example #3
def pretrain(args, dataloaders, model, criterion, optimizer):

    print('\n Pretrain...\n')

    # Initialize Variables
    dataloader_HAND3D_train = dataloaders['HAND3D']['train']
    dataloader_HAND3D_valid = dataloaders['HAND3D']['valid']
    # dataloader_STEREO_valid = dataloaders['STEREO']['valid']
    loss_valid_best = 1000.0
    loss_valid_delta = 0.0

    # Pretrain the model
    for epoch in range(args.max_epochs_pretrain):
        # Initialize learning rate
        learning_rate = adjustLR(optimizer, epoch, args.lr_base_pretrain,
                                 policy=args.lr_policy_pretrain,
                                 policy_parameter=args.lr_policy_param_pretrain)
        # Initialize variables
        metrics = {'loss': [],
                   'loss_list': {'loss_2d': [], 'loss_3d': [], 'loss_mask': [],
                                 'loss_reg': [], 'loss_camera': [],
                                 'avg_distance_2d': [list() for _ in range(args.n_kps)],
                                 'avg_distance_3d': [list() for _ in range(args.n_kps)]}}
        for i, data in enumerate(dataloader_HAND3D_train):
            # Set CUDA
            image, mask, targets, index = setCUDA(args, data)
            # Initialize optimizer
            optimizer.zero_grad()
            # Get camera_parameters
            predictions = model(image, right=True)
            # Get loss
            loss, loss_list = criterion(epoch, mask, predictions, targets)
            # Optimize the model
            loss.backward()
            optimizer.step()

            # Keep track of metrics
            metrics['loss'].append(loss.item())
            metrics['loss_list'] = convertLossList(metrics['loss_list'], loss_list)

            # Print log
            if (i+1) % 50 == 0:
                saveLog(args, epoch, args.max_epochs_pretrain, i,
                        dataloader_HAND3D_train, learning_rate, loss, metrics,
                        mode='Pretr')

        # Validation
        loss_HAND3D_valid = valid(args, epoch, args.max_epochs_pretrain,
                                  learning_rate, dataloader_HAND3D_valid, model,
                                  criterion, mode='Pretr', display_2D=True,
                                  display_3D=False)
        # loss_STEREO_valid = valid(args, epoch, args.max_epochs_pretrain,
        #                           learning_rate, dataloader_STEREO_valid, model,
        #                           criterion, mode='Pretr', display_2D=True,
        #                           display_3D=False)

        # Save the model checkpoints
        if (epoch + 1) % args.interval_checkpoint == 0:
            saveCheckpoint(args, model, optimizer, pretrain=True)

        # Save the best model
        if loss_HAND3D_valid < (loss_valid_best - loss_valid_delta):
            loss_valid_best = loss_HAND3D_valid
            saveCheckpointBestModel(args, model, optimizer, pretrain=True)
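
adjustLR(...) is defined elsewhere in the repository. A minimal sketch assuming a simple 'step' decay policy, with policy_parameter holding the decay factor and step size (assumed keys); the real helper likely supports more policies:

def adjustLR(optimizer, epoch, lr_base, policy='step', policy_parameter=None):
    # Compute the learning rate for this epoch under the chosen policy.
    if policy == 'step' and policy_parameter is not None:
        gamma = policy_parameter.get('gamma', 0.1)          # assumed key
        step_size = policy_parameter.get('step_size', 10)   # assumed key
        lr = lr_base * (gamma ** (epoch // step_size))
    else:
        lr = lr_base  # fall back to a constant rate
    # Apply the rate to every parameter group and report it back.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr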
Example #4
def valid(args, epoch, max_epochs, learning_rate, dataloader, model, criterion,
          mode='Pretr', display_2D=False, display_3D=False):

    # Set the model in evaluation mode
    model.eval()

    # Initialize variables
    flag_right = True
    dataset_name = type(dataloader.dataset).__name__
    if dataset_name == 'EgoDexter_seq' or dataset_name == 'DexterObject_seq':
        n_kps = 5
    else:
        n_kps = args.n_kps
    metrics = {'loss': [],
               'loss_list': {'loss_2d': [], 'loss_3d': [], 'loss_mask': [],
                             'loss_reg': [], 'loss_camera': [],
                             'avg_distance_2d': [list() for _ in range(n_kps)],
                             'avg_distance_3d': [list() for _ in range(n_kps)]}}
    with torch.no_grad():
        for i, data in enumerate(dataloader):
            # Set CUDA
            image, mask, targets, index = setCUDA(args, data)
            # Get camera_parameters
            predictions = model(image)
            # Get loss
            loss, loss_list = criterion(epoch, mask, predictions, targets, train=False)

            # Keep track of metrics
            metrics['loss'].append(loss.item())
            metrics['loss_list'] = convertLossList(metrics['loss_list'], loss_list)
            # Print log on the last batch
            if (i + 1) == len(dataloader):
                saveLog(args, epoch, max_epochs, i, dataloader, learning_rate,
                        loss, metrics, mode='Valid')
            # Optional visualization hooks (disabled):
            # if display_2D:
            #     displayImage(args, epoch, i, image, predictions, targets, '')
            #     displayMask(args, epoch, i, mask, predictions, '')
            # if display_3D and n_kps == 21:
            #     displayHand(args, epoch, i, predictions, targets, '')
    # Set the model in training mode
    model.train()
    # Report the last batch's loss minus the mean regularization term;
    # `mean` is assumed to be statistics.mean (or numpy.mean).
    ll = loss.item() - mean(metrics['loss_list']['loss_reg'])
    return ll
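
convertLossList(...) is also defined elsewhere. A minimal sketch consistent with how the metrics dict is built above: each scalar loss component is appended to its running list, and the per-keypoint distances are assumed to arrive as one value per keypoint:

def convertLossList(loss_list_metrics, loss_list):
    for key, value in loss_list.items():
        if key in ('avg_distance_2d', 'avg_distance_3d'):
            # One running list per keypoint.
            for kp, dist in enumerate(value):
                loss_list_metrics[key][kp].append(float(dist))
        else:
            loss_list_metrics[key].append(float(value))
    return loss_list_metrics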
Example #5
# Loss function: cross entropy
loss_func = nn.CrossEntropyLoss()

# Optimizer
optimizer = optim.Adam(model.parameters(),
                       lr=LR,
                       betas=(0.9, 0.99),
                       eps=1e-8,
                       weight_decay=0)

print('Begin to load data...')
data_train, data_dev, data_val = utils.divideData(DATA_PATH, N_TRAIN, N_DEV,
                                                  N_VAL)

print('number of train data: %d' % len(data_train))
print('number of dev data: %d' % len(data_dev))
print('number of valid data: %d' % len(data_val))

counter = 0
for epoch in range(EPOCH):
    counter = train(model, loss_func, dictionary, epoch, data_train, data_dev,
                    I, counter)
    if counter == EARLY_STOP:
        break

# evaluate() returns (res, loss, acc), as in the training loop above
_, test_loss, acc = evaluate(model, loss_func, dictionary, data_val)
res = 'testing model, dev loss: %f, acc: %f' % (test_loss, acc)
utils.saveLog(LOG_PATH, res)
print(res)
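
utils.divideData(...) is not shown. A minimal sketch assuming one sample per line in a plain-text file; the real helper may parse a richer format:

import random

def divideData(data_path, n_train, n_dev, n_val):
    with open(data_path, encoding='utf-8') as f:
        samples = [line.strip() for line in f if line.strip()]
    random.shuffle(samples)
    # Slice the shuffled pool into train / dev / validation splits.
    return (samples[:n_train],
            samples[n_train:n_train + n_dev],
            samples[n_train + n_dev:n_train + n_dev + n_val])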
Example #6
loss_func = nn.CrossEntropyLoss()

# Optimizer
optimizer = optim.Adam(model.parameters(),
                       lr=LR,
                       betas=(0.9, 0.99),
                       eps=1e-8,
                       weight_decay=0)

print('Begin to load data...')
data_train, data_dev, data_val = utils.divideData(DATA_PATH, N_TRAIN, N_DEV,
                                                  N_VAL)

print('number of train data: %d' % len(data_train))
print('number of dev data: %d' % len(data_dev))
print('number of valid data: %d' % len(data_val))

counter = 0
for epoch in range(EPOCH):
    counter = train(model, loss_func, dictionary, epoch, data_train, data_dev,
                    I, counter)
    if counter == EARLY_STOP:
        break

res, test_loss, acc = evaluate(model, loss_func, dictionary, data_val)
utils.saveLog(LOG_PATH, res)
print(res)

total_res = 'testing model, dev loss: %f, acc: %f' % (test_loss, acc)
utils.saveLog(LOG_PATH, total_res)
print(total_res)
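
Every example on this page ultimately calls a saveLog helper. For the utils.saveLog(LOG_PATH, text) form used in Examples #2, #5, and #6, a minimal sketch that appends one line per call; the real helper may add timestamps or extra formatting (Examples #1, #3, and #4 use different saveLog signatures from their own repositories):

def saveLog(log_path, text):
    # Append the message as a single line to the log file.
    with open(log_path, 'a', encoding='utf-8') as f:
        f.write(str(text) + '\n')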