Example #1
    args = args.parse_args()  # `args` is assumed to be the ArgumentParser built earlier; the snippet starts mid-script
    # load model
    cc = Configs()
    print("Loading stored model")
    model = SegnetConvLSTM(cc.hidden_dims,
                           decoder_out_channels=2,
                           lstm_nlayers=len(cc.hidden_dims),
                           vgg_decoder_config=cc.decoder_config)
    model = model.to(device)
    tu.load_model_checkpoint(model,
                             args.model_path,
                             inference=False,
                             map_location=device)
    print("Model loaded")
    # create dataloader
    tu_test_dataset = TUSimpleDataset(config.ts_root, config.ts_subdirs,
                                      config.ts_flabels)
    tu_dataloader = DataLoader(tu_test_dataset, batch_size=2, shuffle=True)
    model.eval()  # eval mode so dropout/batchnorm behave correctly at inference
    with torch.no_grad():

        for batchno, (frames, targets) in enumerate(tu_dataloader):
            frames = [f.to(device) for f in frames]

            output = model(frames)
            targets_ = targets.squeeze(1).long().to(device)

            print(
                "Loss:",
                nn.CrossEntropyLoss(
                    weight=torch.FloatTensor(cc.loss_weights).to(device))(
                        output, targets_))
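
Since the model returns raw two-channel logits (softmax is folded into the loss, as the forward method in Example #3 notes), a minimal sketch of turning a batch into a binary lane mask, reusing the `output` and `targets_` names from the loop above, could be appended inside that loop:

            # softmax over the channel dim gives per-pixel class probabilities
            probs = torch.softmax(output, dim=1)   # (N, 2, H, W)
            lane_mask = probs.argmax(dim=1)        # (N, H, W); 0 = background, 1 = lane
            # simple pixel accuracy against the ground-truth mask
            print("Pixel accuracy:", (lane_mask == targets_).float().mean().item())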
Example #2
def pixel_accuracy(out, target):  # hypothetical signature; the snippet begins mid-function
    # fraction of pixels whose predicted class matches the target mask
    return (out == target).float().mean().item()

def f1_score(output, target, epsilon=1e-7):
    # binarize by thresholding the raw lane-channel logit at 0
    probas = (output[:, 1, :, :] > 0.).float()

    # sum over both spatial dimensions to get per-image counts
    TP = (probas * target).sum(dim=(1, 2))
    precision = TP / (probas.sum(dim=(1, 2)) + epsilon)
    recall = TP / (target.sum(dim=(1, 2)) + epsilon)
    f1 = 2 * precision * recall / (precision + recall + epsilon)
    f1 = f1.clamp(min=epsilon, max=1 - epsilon)
    return f1.mean().item(), (precision.mean().item(), recall.mean().item())
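
A quick smoke test of f1_score on dummy tensors (shapes assumed from its usage: logits of shape (N, 2, H, W) and a binary target of shape (N, H, W)):

logits = torch.randn(2, 2, 128, 256)
target = (torch.rand(2, 128, 256) > 0.9).float()  # sparse "lane" pixels
f1, (prec, rec) = f1_score(logits, target)
print(f"F1={f1:.3f}  precision={prec:.3f}  recall={rec:.3f}")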


cc = Configs()
print("Loading stored model")
model = SegnetConvLSTM(cc.hidden_dims, decoder_out_channels=2, lstm_nlayers=len(cc.hidden_dims),
                       vgg_decoder_config=cc.decoder_config)
tu.load_model_checkpoint(model, '../train-results/model-fixed.torch', inference=False, map_location=device)
model.to(device)
print("Model loaded")

tu_test_dataset = TUSimpleDataset(config.ts_root, config.ts_subdirs, config.ts_flabels,
                                  shuffle=False)  # optionally: shuffle_seed=9

# build data loader
tu_test_dataloader = DataLoader(tu_test_dataset, batch_size=cc.test_batch, shuffle=True, num_workers=2)

# weighted cross-entropy: down-weight the dominant background class (0.02) relative to lane pixels (1.02)
criterion = nn.CrossEntropyLoss(weight=torch.FloatTensor([0.02, 1.02])).to(device)

validate(tu_test_dataloader, model, criterion, log_every=1)
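
The validate helper is not shown in this example; a hypothetical version consistent with the call above (the project's real implementation may differ) could look like:

def validate(loader, model, criterion, log_every=1):
    model.eval()
    with torch.no_grad():
        for i, (frames, targets) in enumerate(loader):
            frames = [f.to(device) for f in frames]
            targets = targets.squeeze(1).long().to(device)
            output = model(frames)
            loss = criterion(output, targets)
            if i % log_every == 0:
                print(f"batch {i}: loss={loss.item():.4f}")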
Example #3

        decoded = self.decoder(output, unpool_indices, unpool_sizes)

        # return per-pixel logits with the same spatial size as each input frame
        return decoded  # (NOTE: softmax is applied inside the loss for efficiency)
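
The NOTE above relies on nn.CrossEntropyLoss combining log-softmax and NLL in a single call, which is numerically more stable than softmaxing first; a quick equivalence check:

import torch
import torch.nn.functional as F

logits = torch.randn(1, 2, 4, 4)
target = torch.randint(0, 2, (1, 4, 4))
a = F.cross_entropy(logits, target)
b = F.nll_loss(F.log_softmax(logits, dim=1), target)
assert torch.allclose(a, b)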


# NOTE: this test won't work unless run from the parent directory
if __name__ == '__main__':
    root = '/Users/nick/Desktop/train_set/clips/'
    subdirs = ['0601', '0531', '0313-1', '0313-2']
    flabels = [
        '/Users/nick/Desktop/train_set/label_data_0601.json',
        '/Users/nick/Desktop/train_set/label_data_0531.json',
        '/Users/nick/Desktop/train_set/label_data_0313.json'
    ]

    tu_dataset = TUSimpleDataset(root, subdirs, flabels, shuffle_seed=9)

    # build data loader
    tu_dataloader = DataLoader(tu_dataset,
                               batch_size=3,
                               shuffle=True,
                               num_workers=2)
    model = SegnetConvLSTM()
    for batch_no, (list_batched_samples,
                   batched_targets) in enumerate(tu_dataloader):
        with torch.no_grad():
            out = model(list_batched_samples)
            print(out.size())
        if batch_no == 1:
            break
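
The model consumes a list of frame batches rather than a single stacked tensor; a dummy batch with the same structure (frame count and resolution here are assumptions, not values from the dataset) can exercise the forward pass without the TuSimple data:

    dummy_frames = [torch.randn(3, 3, 128, 256) for _ in range(5)]  # 5 timesteps, batch of 3 RGB frames
    with torch.no_grad():
        out = model(dummy_frames)
    print(out.size())  # expected (3, 2, H, W) if decoder_out_channels defaults to 2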