Example #1
# Assumed imports -- the exact modules may differ in the original project:
import numpy as np
from torchnet.meter import APMeter  # or a project-local apmeter module
from barbar import Bar              # assumed progress-bar wrapper around the dataloader

# run_network(model, data, gpu, reg_margin=...) is assumed to be defined elsewhere
# in the project and to return (outputs, loss, probs, err) for a single batch.

def val_step(model, gpu, dataloader):
    model.train(False)
    apm = APMeter()
    tot_loss = 0.0
    error = 0.0
    num_iter = 0.
    num_preds = 0

    full_probs = {}

    # Iterate over data.
    for data in dataloader:
        num_iter += 1
        other = data[3]

        outputs, loss, probs, err = run_network(model, data, gpu)
        apm.add(probs.data.cpu().numpy()[0], data[2].numpy()[0])

        error += err.item()
        tot_loss += loss.item()

        # post-process preds
        outputs = outputs.squeeze()
        probs = probs.squeeze()
        fps = outputs.size()[1] / other[1][0]
        full_probs[other[0][0]] = (probs.data.cpu().numpy().T, fps)

    epoch_loss = tot_loss / num_iter
    error = error / num_iter
    print('val-map:', apm.value().mean())
    apm.reset()
    print('val-{} Loss: {:.4f} Acc: {:.4f}'.format(dataloader.root, epoch_loss,
                                                   error))

    return full_probs, epoch_loss
def val_step(model, gpu, dataloader):
    model.train(False)
    apm = APMeter()
    tot_loss = 0.0
    error = 0.0
    num_iter = 0.
    num_preds = 0

    full_probs = {}

    # Iterate over data.
    for data in dataloader:
        num_iter += 1
        other = data[3]

        outputs, loss, probs, err = run_network(model, data, gpu)
        apm.add(probs.data.cpu().numpy()[0], data[2].numpy()[0])

        error += err.item()
        tot_loss += loss.item()

        # post-process preds
        outputs = outputs.squeeze()
        probs = probs.squeeze()
        fps = outputs.size()[1] / other[1][0]
        full_probs[other[0][0]] = (probs.data.cpu().numpy().T, fps)

    epoch_loss = tot_loss / num_iter
    error = error / num_iter
    val_map = apm.value().mean()
    print('val-{} Loss: {:.4f} MAP: {:.4f}'.format(
        dataloader.root.split('/')[-1], epoch_loss, val_map))
    #print('mu_x %f, sigma_x %f, mu_t %.10f, sigma_t %f, rho_xy %f'%(model.module.super_event.mu_x[0].item(), model.module.super_event.sigma_x[0].item(),
    #    model.module.super_event.mu_t[0].item(), model.module.super_event.sigma_t[0].item(), model.module.super_event.rho_xy[0].item()))
    #print ('LR:%f'%lr)
    #print('conv1 %f, fc1 %f'%(model.module.super_event.conv1.weight[0,0,0,0,0].item(), model.module.super_event.fc1.weight[0,0].item()))
    #print('sup_mat %f, per_frame %f'%(model.module.sup_mat[0][0][0].item(), model.module.per_frame.weight[0][0][0][0][0].item()))
    apm.reset()
    return full_probs, epoch_loss, val_map
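The full_probs dictionary returned by val_step maps each video id to a tuple of
(per-class probability array, frames-per-second). A minimal, hypothetical helper
(save_predictions and the default file name are not part of the original code)
showing one way to persist those predictions for offline post-processing:

import pickle

def save_predictions(full_probs, path='val_predictions.pkl'):
    # full_probs: {video_id: (per-class probabilities, fps)} as built in val_step
    with open(path, 'wb') as f:
        pickle.dump(full_probs, f)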
def train_step(model, gpu, optimizer, dataloader, reg_margin):
    model.train(True)
    tot_loss = 0.0
    error = 0.0
    num_iter = 0.

    # Iterate over data.
    tr_apm = APMeter()
    for data in Bar(dataloader):
        optimizer.zero_grad()
        num_iter += 1
        # exponentially decay the regularization margin from reg_margin toward
        # (reg_margin - 0.05), floored at 0.4
        reg = max(0.4,
                  0.05 * np.exp(-1 * num_iter / 500.) + (reg_margin - 0.05))
        #if num_iter<200: continue

        outputs, loss, probs, err = run_network(model,
                                                data,
                                                gpu,
                                                reg_margin=reg)
        #del outputs
        #print(err, loss)

        error += err.item()
        tot_loss += loss.item()

        loss.backward()
        optimizer.step()
        #print(probs.shape, data[2].shape)
        tr_apm.add(
            probs.view(-1, probs.shape[-1]).detach().cpu().numpy(),
            data[2].view(-1, data[2].shape[-1]).cpu().numpy())
    epoch_loss = tot_loss / num_iter
    error = error / num_iter
    print('train-{} Loss: {:.4f} MAP: {:.4f}'.format(
        dataloader.root.split('/')[-1], epoch_loss,
        tr_apm.value().mean()))
    tr_apm.reset()
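
Neither function drives the overall training loop itself. A minimal sketch of how
they could be wired together per epoch is below; the optimizer, learning rate,
epoch count, checkpoint path and reg_margin value are assumptions, not taken from
the original code:

import torch
import torch.optim as optim

def run_training(model, gpu, train_dataloader, val_dataloader, epochs=50):
    # Sketch only: train for a fixed number of epochs, validating after each one
    # and keeping the checkpoint with the best validation MAP.
    optimizer = optim.Adam(model.parameters(), lr=1e-4)  # assumed optimizer / lr
    best_map = 0.0
    for epoch in range(epochs):
        train_step(model, gpu, optimizer, train_dataloader, reg_margin=1.0)
        full_probs, val_loss, val_map = val_step(model, gpu, val_dataloader)
        if val_map > best_map:
            best_map = val_map
            torch.save(model.state_dict(), 'best_model.pt')
    return best_map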