Example No. 1
def val_step(model, gpu, dataloader):
    model.train(False)
    apm = APMeter()
    tot_loss = 0.0
    error = 0.0
    num_iter = 0.
    num_preds = 0

    full_probs = {}

    # Iterate over data.
    for data in dataloader:
        num_iter += 1
        other = data[3]

        outputs, loss, probs, err = run_network(model, data, gpu)
        apm.add(probs.data.cpu().numpy()[0], data[2].numpy()[0])

        error += err.item()  #data[0]
        tot_loss += loss.item()  #data[0]

        # post-process preds
        outputs = outputs.squeeze()
        probs = probs.squeeze()
        fps = outputs.size()[1] / other[1][0]
        full_probs[other[0][0]] = (probs.data.cpu().numpy().T, fps)

    epoch_loss = tot_loss / num_iter
    error = error / num_iter
    print('val-map:', apm.value().mean())
    apm.reset()
    print('val-{} Loss: {:.4f} Err: {:.4f}'.format(dataloader.root, epoch_loss,
                                                   error))

    return full_probs, epoch_loss
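
All of these examples score predictions with APMeter from the torchnet package. Below is a minimal, self-contained usage sketch (assuming torchnet is installed; the random tensors are synthetic placeholders, not data from the snippets):

import torch
from torchnet.meter import APMeter

# Accumulate average precision over batches of multi-label predictions.
apm = APMeter()
scores = torch.rand(8, 5)                     # 8 samples, 5 classes: predicted probabilities
targets = (torch.rand(8, 5) > 0.5).float()    # binary multi-label ground truth
apm.add(scores, targets)                      # add one batch
print('mAP:', apm.value().mean().item())      # mean over per-class average precision
apm.reset()                                   # clear state between epochs
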
def val_step(model, gpu, dataloader):
    model.train(False)
    apm = APMeter()
    tot_loss = 0.0
    error = 0.0
    num_iter = 0.
    num_preds = 0

    full_probs = {}

    # Iterate over data.
    for data in dataloader:
        num_iter += 1
        other = data[3]

        outputs, loss, probs, err = run_network(model, data, gpu)
        apm.add(probs.data.cpu().numpy()[0], data[2].numpy()[0])

        error += err.item()  #data[0]
        tot_loss += loss.item()  #data[0]

        # post-process preds
        outputs = outputs.squeeze()
        probs = probs.squeeze()
        fps = outputs.size()[1] / other[1][0]
        full_probs[other[0][0]] = (probs.data.cpu().numpy().T, fps)

    epoch_loss = tot_loss / num_iter
    error = error / num_iter
    #print ('val-map:', apm.value().mean())
    #apm.reset()
    val_map = apm.value().mean()
    print('val-{} Loss: {:.4f} MAP: {:.4f}'.format(
        dataloader.root.split('/')[-1], epoch_loss, val_map))  #error
    #print('mu_x %f, sigma_x %f, mu_t %.10f, sigma_t %f, rho_xy %f'%(model.module.super_event.mu_x[0].item(), model.module.super_event.sigma_x[0].item(),
    #    model.module.super_event.mu_t[0].item(), model.module.super_event.sigma_t[0].item(), model.module.super_event.rho_xy[0].item()))
    #print ('LR:%f'%lr)
    #print('conv1 %f, fc1 %f'%(model.module.super_event.conv1.weight[0,0,0,0,0].item(), model.module.super_event.fc1.weight[0,0].item()))
    #print('sup_mat %f, per_frame %f'%(model.module.sup_mat[0][0][0].item(), model.module.per_frame.weight[0][0][0][0][0].item()))
    apm.reset()
    return full_probs, epoch_loss, val_map
def train_step(model, gpu, optimizer, dataloader, reg_margin):
    model.train(True)
    tot_loss = 0.0
    error = 0.0
    num_iter = 0.

    # Iterate over data.
    tr_apm = APMeter()
    for data in Bar(dataloader):
        optimizer.zero_grad()
        num_iter += 1
        reg = max(0.4,
                  0.05 * np.exp(-1 * num_iter / 500.) + (reg_margin - 0.05))
        #if num_iter<200: continue

        outputs, loss, probs, err = run_network(model,
                                                data,
                                                gpu,
                                                reg_margin=reg)
        #del outputs
        #print(err, loss)

        error += err.item()  #data[0]
        tot_loss += loss.item()  #data[0]

        loss.backward()
        optimizer.step()
        #print(probs.shape, data[2].shape)
        tr_apm.add(
            probs.view(-1, probs.shape[-1]).detach().cpu().numpy(),
            data[2].view(-1, data[2].shape[-1]).cpu().numpy())
    epoch_loss = tot_loss / num_iter
    error = error / num_iter
    print('train-{} Loss: {:.4f} MAP: {:.4f}'.format(
        dataloader.root.split('/')[-1], epoch_loss,
        tr_apm.value().mean()))  #error
    tr_apm.reset()
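
For context, here is a hedged sketch of how train_step and the second val_step variant above (the one returning val_map) could be driven by an outer epoch loop. The model, optimizer, train_loader, val_loader and the reg_margin value are assumptions for illustration, not taken from the original script:

import torch

# Hypothetical driver loop for the train_step/val_step helpers above (sketch only).
best_map = 0.0
for epoch in range(50):
    train_step(model, 0, optimizer, train_loader, reg_margin=0.6)  # assumed margin value
    probs, val_loss, val_map = val_step(model, 0, val_loader)
    if val_map > best_map:                       # keep the checkpoint with the best val mAP
        best_map = val_map
        torch.save(model.state_dict(), 'best_model.pt')
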
Example No. 4
def train_model(model, dset_loaders, criterion, optimizer, batch_size,
                get_probs=None, lr_scheduler=None, num_epochs=25,
                verbose=False, validation=True):
    since = time.time()

    best_model = model
    best_loss = 1e12
    trainlen = len(dset_loaders['train'])
    train_loss_history = []
    validation_loss_history = []
    best_ap = 0
    
    try:
        for epoch in range(num_epochs):
            print('Epoch {}/{}'.format(epoch, num_epochs))
            print('-' * 10)

            # Each epoch has a training and validation phase
            phases = ['train', 'val'] if validation else ['train']

            for phase in phases:
                ap_total = {p:APMeter() for p in phases}
                if phase == 'train':
                    if lr_scheduler is not None:
                        optimizer = lr_scheduler(optimizer, epoch)
                    model.train(True)
                else:
                    model.train(False)

                running_loss = 0.0
                running_corrects = 0

                # Iterate over data.
                for i, data in enumerate(dset_loaders[phase]):
                    # get the inputs
                    inputs, labels = data

                    # wrap them in Variable
                    if torch.cuda.is_available():
                        inputs, labels = Variable(inputs.cuda()), \
                            Variable(labels.cuda())
                    else:
                        inputs, labels = Variable(inputs), Variable(labels)
                    labels = labels.float()

                    # zero the parameter gradients
                    optimizer.zero_grad()

                    # forward
                    outputs = model(inputs)
                    #_, preds = torch.max(outputs.data, 1)
                    #print (labels.size())
                    #labels have size (batch, 1,1)?
                    #weights = 0.75 * labels.data + 0.25 * (1 - labels.data)
                    #weights = weights.view(1,1).float()
                    #crit = nn.BCELoss(weight=weights)
                    loss = criterion(outputs, labels)
                    if phase == 'train':
                        train_loss_history += [loss.data.cpu()]
                    else:
                        validation_loss_history += [loss.data.cpu()]

                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                    # statistics
                    if get_probs:
                        outputs = get_probs(outputs)
                    average_precision = metrics.average_precision_score(labels.data.cpu().numpy(), outputs.data.cpu().numpy())
                    ap_total[phase].add(outputs.data, labels.data)
                    running_loss += loss.item()
                    running_corrects += torch.sum((outputs.data > .5) == (labels.data > .5))
                    if phase == 'train' and verbose and i % 25 == 0:
                        print ("tr loss: {}".format(running_loss / (i + 1)))


                # Note: BCE loss already divides by batchsize
                epoch_loss = running_loss / (len(dset_loaders[phase]))
                epoch_acc = float(running_corrects) / (len(dset_loaders[phase]) * batch_size)
                ap = ap_total[phase].value()[0]

                print('{} Loss: {:.4f} Acc: {:.4f} AP {:.4f}'.format(phase, epoch_loss, epoch_acc, ap))


                # deep copy the model
                if phase == 'val' and epoch_loss < best_loss:
                    best_loss = epoch_loss
                    best_ap = ap
                    best_model = copy.deepcopy(model)

            #flat_weights = []
            #for param in model.parameters():
            #    flat_weights += [param.data.view(-1).cpu().numpy()]
            #flat_weights = np.concatenate(flat_weights)
            #plt.hist(flat_weights, 50)
            #plt.savefig('../models/weights_hist_{}'.format(epoch))

            time_elapsed = time.time() - since
            print('Time spent so far: {:.0f}m {:.0f}s'.format(
                time_elapsed // 60, time_elapsed % 60))
    except KeyboardInterrupt:
        pass

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val AP: {:4f}'.format(best_ap))
    print('Best val Loss: {:4f}'.format(best_loss))
    return best_model, train_loss_history, validation_loss_history
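
A possible call site for this train_model helper; the toy tensors, stand-in model and hyper-parameters below are assumptions made only to show the expected argument shapes, and the original script's own imports (Variable, APMeter, metrics, copy, time) are assumed to be in scope:

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset

# Toy binary-classification data standing in for a real dataset.
batch_size = 16
x = torch.randn(128, 64)
y = (torch.rand(128, 1) > 0.5).float()
dset_loaders = {p: DataLoader(TensorDataset(x, y), batch_size=batch_size,
                              shuffle=(p == 'train'))
                for p in ['train', 'val']}

model = nn.Linear(64, 1)                       # stand-in model producing one logit per sample
if torch.cuda.is_available():
    model = model.cuda()
criterion = nn.BCEWithLogitsLoss()             # operates on raw logits
optimizer = optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)

best_model, tr_hist, val_hist = train_model(
    model, dset_loaders, criterion, optimizer, batch_size,
    get_probs=torch.sigmoid, num_epochs=2, verbose=True)
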
Example No. 5
def train_model(model, criterion, optimizer, num_epochs=50):
    since = time.time()
    val_res = {}
    best_acc = 0

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            apm = APMeter()
            if phase == 'train':
                model.train(True)
            else:
                model.train(False)  # Set model to evaluate mode

            tot_loss = 0.0
            error = 0.0
            num_iter = 0.

            tr = {}
            # Iterate over data.
            for data in dataloaders[phase]:
                num_iter += 1
                # get the inputs
                features, mask, labels, name = data

                # wrap them in Variable
                features = Variable(features.cuda())
                labels = Variable(labels.float().cuda())
                mask = Variable(mask.cuda())  #.unsqueeze(1)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward

                #un-comment for max-pooling
                #features = torch.max(features, dim=2)[0].unsqueeze(2)
                #outputs = model(features)

                # un-comment for pyramid
                #b,c,t,h,w = features.size()
                #features = [pool(features,0,t), pool(features,0,t/2),pool(features,t/2,t),pool(features,0,t/4),pool(features,t/4,t/2),pool(features,t/2,3*t/4),pool(features,3*t/4,t)]
                #features = torch.cat(features, dim=1)
                #outputs = model(features)

                # sub-event learning
                outputs = model([features, torch.sum(mask, dim=1)])

                outputs = outputs.squeeze()  # remove spatial dims
                if features.size(0) == 1:
                    outputs = outputs.unsqueeze(0)
                #outputs = outputs.permute(0,2,1)

                # action-prediction loss
                loss = criterion(outputs, labels)

                probs = torch.sigmoid(outputs)
                apm.add(probs.data.cpu().numpy(),
                        (labels > 0.5).float().data.cpu().numpy())

                # backward + optimize only if in training phase
                if phase == 'train':
                    loss.backward()
                    optimizer.step()
                # statistics
                tot_loss += loss.item()  #data[0]

            epoch_loss = tot_loss / num_iter
            if phase == 'val' and apm.value().mean() > best_acc:
                best_acc = apm.value().mean()
                val_res = tr

            print('{} Loss: {:.4f} mAP: {:.4f}'.format(phase, epoch_loss,
                                                       apm.value().mean()))
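
The commented-out pyramid branch in this example calls a pool(features, start, end) helper that is not shown; one plausible reading is mean-pooling over a temporal slice, sketched below as a purely hypothetical helper:

import torch

# Hypothetical temporal pooling helper matching the commented-out pyramid branch above:
# mean-pool features over the temporal slice [start, end) while keeping a singleton time
# dimension, so the pooled segments can be concatenated along the channel dimension.
def pool(features, start, end):
    # features: (batch, channels, time, height, width)
    start, end = int(start), max(int(end), int(start) + 1)
    return torch.mean(features[:, :, start:end], dim=2, keepdim=True)
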
Example No. 6
        run([(model, 0, dataloaders, optimizer, lr_sched, args.model_file)], num_epochs=60)

    else:
        print('Evaluating...')
        rgb_model = nn.DataParallel(torch.load(args.rgb_model_file))
        rgb_model.train(False)
        
        dataloaders, datasets = load_data('', test_split, rgb_root)  # rgb features for the rgb stream

        rgb_results = eval_model(rgb_model, dataloaders['val'])

        flow_model = nn.DataParallel(torch.load(args.flow_model_files))
        flow_model.train(False)
            
        dataloaders, datasets = load_data('', test_split, flow_root)
            
        flow_results = eval_model(flow_model, dataloaders['val'])

        apm = APMeter()


        for vid in rgb_results.keys():
            o, p, l, fps = rgb_results[vid]

            if vid in flow_results:
                o2, p2, l2, fps = flow_results[vid]
                o = (o[:o2.shape[0]] * .5 + o2 * .5)
                p = (p[:p2.shape[0]] * .5 + p2 * .5)
            apm.add(sigmoid(o), l)
        print('MAP:', apm.value().mean())
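
This snippet (and Example No. 7 below) pass the accumulated logits through a sigmoid helper before scoring them with APMeter; the helper itself is not shown, but a NumPy version consistent with this usage would be (assumed, not from the original):

import numpy as np

# Assumed element-wise sigmoid for numpy arrays of logits.
def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))
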
Example No. 7
    else:
        print('Evaluating...')
        rgb_model = torch.load(args.rgb_model_file)
        rgb_model.cuda()
        dataloaders, datasets = load_data('', test_split, rgb_root)
        rgb_results = eval_model(rgb_model, dataloaders['val'], baseline=True)

        flow_model = torch.load(args.flow_model_file)
        flow_model.cuda()
        dataloaders, datasets = load_data('', test_split, flow_root)
        flow_results = eval_model(flow_model,
                                  dataloaders['val'],
                                  baseline=True)

        rapm = APMeter()
        fapm = APMeter()
        tapm = APMeter()

        for vid in rgb_results.keys():
            o, p, l, fps = rgb_results[vid]
            rapm.add(sigmoid(o), l)
            fapm.add(sigmoid(flow_results[vid][0]), l)
            if vid in flow_results:
                o2, p2, l2, fps = flow_results[vid]
                o = (o[:o2.shape[0]] * .5 + o2 * .5)
                p = (p[:p2.shape[0]] * .5 + p2 * .5)
            tapm.add(sigmoid(o), l)
        print('rgb MAP:', rapm.value().mean())
        print('flow MAP:', fapm.value().mean())
        print('two-stream MAP:', tapm.value().mean())
def run(init_lr=INIT_LR,
        warmup_steps=0,
        max_epochs=100,
        root=CHARADES_ROOT,
        train_split=CHARADES_ANNO,
        batch_size=BS * BS_UPSCALE,
        frames=80,
        save_dir=FINE_SAVE_DIR):

    crop_size = {'S': 160, 'M': 224, 'XL': 312}[X3D_VERSION]
    resize_size = {
        'S': [180., 225.],
        'M': [256., 256.],
        'XL': [360., 450.]
    }[X3D_VERSION]  #[256.,320.]
    gamma_tau = {'S': 6, 'M': 5 * 1, 'XL': 5}[X3D_VERSION]  # 5

    load_steps = st_steps = steps = 0
    epochs = 0
    num_steps_per_update = 1
    cur_iterations = steps * num_steps_per_update
    iterations_per_epoch = CHARADES_TR_SIZE // (batch_size * 1)
    val_iterations_per_epoch = CHARADES_VAL_SIZE // (batch_size)
    max_steps = iterations_per_epoch * max_epochs

    val_spatial_transforms = Compose([
        CenterCropScaled(crop_size),
        ToTensor(255),
        Normalize(CHARADES_MEAN, CHARADES_STD)
    ])

    # SET 'TESTING' FOR BOTH, TO EXTRACT
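    # NOTE: train_spatial_transforms (used for the 'train' dataset below) is defined
    # elsewhere in the original script and is not included in this snippet.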
    dataset = Charades(train_split,
                       'testing',
                       root,
                       train_spatial_transforms,
                       task='loc',
                       frames=frames,
                       gamma_tau=gamma_tau,
                       crops=1)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             shuffle=True,
                                             num_workers=8,
                                             pin_memory=True,
                                             collate_fn=collate_fn)

    val_dataset = Charades(train_split,
                           'testing',
                           root,
                           val_spatial_transforms,
                           task='loc',
                           frames=frames,
                           gamma_tau=gamma_tau,
                           crops=1)
    val_dataloader = torch.utils.data.DataLoader(val_dataset,
                                                 batch_size=batch_size,
                                                 shuffle=False,
                                                 num_workers=8,
                                                 pin_memory=True,
                                                 collate_fn=collate_fn)

    dataloaders = {'train': dataloader, 'val': val_dataloader}
    datasets = {'train': dataset, 'val': val_dataset}
    print('train', len(datasets['train']), 'val', len(datasets['val']))
    print('Total iterations:', max_steps, 'Total epochs:', max_epochs)
    print('datasets created')

    fine_net = x3d_fine.generate_model(x3d_version=X3D_VERSION,
                                       n_classes=400,
                                       n_input_channels=3,
                                       task='loc',
                                       dropout=0.5,
                                       base_bn_splits=1,
                                       global_tower=True)

    fine_net.replace_logits(157)

    load_ckpt = torch.load('models/fine_charades_039000_SAVE.pt')
    state = fine_net.state_dict()
    state.update(load_ckpt['model_state_dict'])
    fine_net.load_state_dict(state)

    fine_net.cuda()
    fine_net = nn.DataParallel(fine_net)
    print('model loaded')

    lr = init_lr
    print('LR:%f' % lr)

    optimizer = optim.SGD(fine_net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=1e-5)
    lr_sched = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                    mode='min',
                                                    patience=3,
                                                    factor=0.1,
                                                    verbose=True)
    if steps > 0:
        optimizer.load_state_dict(load_ckpt['optimizer_state_dict'])
        lr_sched.load_state_dict(load_ckpt['scheduler_state_dict'])

    criterion = nn.BCEWithLogitsLoss()

    val_apm = APMeter()
    tr_apm = APMeter()

    while epochs < max_epochs:
        print('Step {} Epoch {}'.format(steps, epochs))
        print('-' * 10)

        # Each epoch has a training and validation phase
        for phase in ['train'] + ['val']:
            bar_st = iterations_per_epoch if phase == 'train' else val_iterations_per_epoch
            bar = pkbar.Pbar(name='update: ', target=bar_st)

            fine_net.train(False)  # Set model to evaluate mode
            # FOR EVAL AGGREGATE BN STATS
            _ = fine_net.module.aggregate_sub_bn_stats()
            torch.autograd.set_grad_enabled(False)

            tot_loss = 0.0
            tot_loc_loss = 0.0
            tot_cls_loss = 0.0
            tot_dis_loss = 0.0
            tot_acc = 0.0
            tot_corr = 0.0
            tot_dat = 0.0
            num_iter = 0
            optimizer.zero_grad()

            # Iterate over data.
            print(phase)
            for i, data in enumerate(dataloaders[phase]):
                #for data in dataloaders[phase]:
                num_iter += 1
                bar.update(i)

                inputs, labels, masks, meta, name = data
                b, n, c, t, h, w = inputs.shape
                inputs = inputs.view(b * n, c, t, h, w)

                inputs = inputs.cuda()  # B 3 T W H
                tl = labels.size(2)
                labels = labels.cuda()  # B C TL
                masks = masks.cuda()  # B TL
                valid_t = torch.sum(masks, dim=1).int()

                feat, _ = fine_net([inputs, masks])  # N C T 1 1
                keys = list(feat.keys())
                print(i, name[0], feat[keys[0]].cpu().numpy().shape,
                      feat[keys[1]].cpu().numpy().shape,
                      feat[keys[2]].cpu().numpy().shape,
                      feat[keys[3]].cpu().numpy().shape,
                      feat[keys[4]].cpu().numpy().shape)
                for k in feat:
                    torch.save(feat[k].data.cpu(),
                               os.path.join(save_dir, k, name[0]))
        break
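
The extraction loop above writes one tensor per feature key and video name via torch.save; below is a hedged sketch of reading such a dump back for downstream use (the key and video_name arguments are placeholders):

import os
import torch

# Hypothetical reader for the feature dumps written above.
def load_saved_feature(save_dir, key, video_name):
    # Each file was saved with torch.save(feat[k].data.cpu(), os.path.join(save_dir, k, name[0])).
    return torch.load(os.path.join(save_dir, key, video_name), map_location='cpu')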