# Example #1
# 0
    return (out == target).float().mean().item()

def f1_score(output, target, epsilon=1e-7):
    """Compute the mean F1 (Dice) score for binary segmentation output.

    Args:
        output: raw model scores of shape (B, 2, H, W); channel 1 is the
            foreground (lane) score. A pixel is predicted foreground when
            its channel-1 score is > 0.
        target: binary ground-truth map of shape (B, H, W) — TODO confirm
            shape against the dataset's mask format.
        epsilon: small constant that avoids division by zero and keeps the
            clamped f1 strictly inside (0, 1).

    Returns:
        (mean_f1, (mean_precision, mean_recall)) as Python floats,
        averaged over the batch.
    """
    # turn output into 0-1 map
    probas = (output[:, 1, :, :] > 0.).float()

    # BUG FIX: statistics must be accumulated over BOTH spatial dims of the
    # (B, H, W) maps. The original sum(dim=1) reduced only the row dimension,
    # yielding per-column precision/recall instead of per-image values.
    TP = (probas * target).sum(dim=(1, 2))
    precision = TP / (probas.sum(dim=(1, 2)) + epsilon)
    recall = TP / (target.sum(dim=(1, 2)) + epsilon)
    f1 = 2 * precision * recall / (precision + recall + epsilon)
    # Keep f1 away from exact 0/1 (e.g. for downstream log-based use).
    f1 = f1.clamp(min=epsilon, max=1 - epsilon)
    return f1.mean().item(), (precision.mean().item(), recall.mean().item())


# Evaluation driver: restore a trained SegnetConvLSTM checkpoint and run
# validation on the TUSimple test split with a class-weighted loss.
cc = Configs()
print("Loading stored model")
# decoder_out_channels=2 -> two output classes (background / lane, presumably).
model = SegnetConvLSTM(cc.hidden_dims, decoder_out_channels=2, lstm_nlayers=len(cc.hidden_dims),
                       vgg_decoder_config=cc.decoder_config)
# map_location moves loaded tensors onto `device`; inference=False keeps the
# full (non-inference-stripped) checkpoint state.
tu.load_model_checkpoint(model, '../train-results/model-fixed.torch', inference=False, map_location=device)
model.to(device)
print("Model loaded")

# Dataset itself iterates deterministically (shuffle=False); shuffling is
# instead delegated to the DataLoader below.
tu_test_dataset = TUSimpleDataset(config.ts_root, config.ts_subdirs, config.ts_flabels, shuffle=False)#, shuffle_seed=9)

# build data loader
tu_test_dataloader = DataLoader(tu_test_dataset, batch_size=cc.test_batch, shuffle=True, num_workers=2)

# using crossentropy for weighted loss
# The 0.02 / 1.02 weights heavily down-weight class 0, countering the
# background/lane pixel imbalance of lane masks.
criterion = nn.CrossEntropyLoss(weight=torch.FloatTensor([0.02, 1.02])).to(device)

validate(tu_test_dataloader, model, criterion, log_every=1)
# Example #2
# 0
    # Command-line interface: require a path to a pre-trained checkpoint.
    args.add_argument("-m",
                      '--model-path',
                      required=True,
                      type=str,
                      help='Pre-trained model filepath')
    args = args.parse_args()
    # load model
    cc = Configs()
    print("Loading stored model")
    # Two decoder output channels: background / lane scores (presumably).
    model = SegnetConvLSTM(cc.hidden_dims,
                           decoder_out_channels=2,
                           lstm_nlayers=len(cc.hidden_dims),
                           vgg_decoder_config=cc.decoder_config)
    model = model.to(device)
    tu.load_model_checkpoint(model,
                             args.model_path,
                             inference=False,
                             map_location=device)
    print("Model loaded")
    # create dataloader
    tu_test_dataset = TUSimpleDataset(config.ts_root, config.ts_subdirs,
                                      config.ts_flabels)
    tu_dataloader = DataLoader(tu_test_dataset, batch_size=2, shuffle=True)
    # NOTE(review): model.train() keeps dropout/batch-norm in training mode
    # even though gradients are disabled below — confirm this is intentional.
    model.train()
    with torch.no_grad():

        for batchno, (frames, targets) in enumerate(tu_dataloader):
            # Move each frame of the input sequence to the target device.
            frames = [f.to(device) for f in frames]

            output = model(frames)
            # Drop the channel dim; long dtype for class-index targets.
            targets_ = targets.squeeze(1).long().to(device)
    recall = TP / (target.sum(dim=1) + epsilon)
    f1 = 2 * precision * recall / (precision + recall + epsilon)
    f1 = f1.clamp(min=epsilon, max=1 - epsilon)
    return f1.mean().item(), (precision.mean().item(), recall.mean().item())


if __name__ == '__main__':

    # Load configuration and restore the trained SegnetConvLSTM checkpoint.
    cc = Configs()
    print("Loading stored model")
    model = SegnetConvLSTM(cc.hidden_dims,
                           decoder_out_channels=2,
                           lstm_nlayers=len(cc.hidden_dims),
                           vgg_decoder_config=cc.decoder_config)
    # NOTE(review): hard-coded absolute Windows path — consider making this a
    # CLI argument as in the argparse variant of this script.
    tu.load_model_checkpoint(model,
                             'C:/Users/arind/Downloads/model.torch',
                             inference=False,
                             map_location=device)
    model.to(device)
    print("Model loaded")

    # Deterministic dataset order; shuffling is delegated to the DataLoader.
    tu_test_dataset = TUSimpleDataset(config.ts_root,
                                      config.ts_subdirs,
                                      config.ts_flabels,
                                      shuffle=False)  #, shuffle_seed=9)

    # build data loader
    tu_test_dataloader = DataLoader(tu_test_dataset,
                                    batch_size=cc.test_batch,
                                    shuffle=True,
                                    num_workers=1)
# Example #4
# 0
import numpy as np
"""
    Assess model output quality visually by plotting
    inputs, targets, and predictions.
"""
if __name__ == '__main__':
    # load model
    cc = Configs()
    print("Loading stored model")
    model = SegnetConvLSTM(cc.hidden_dims,
                           decoder_out_channels=2,
                           lstm_nlayers=len(cc.hidden_dims),
                           vgg_decoder_config=cc.decoder_config)
    # NOTE(review): hard-coded absolute checkpoint path; no .to(device) call
    # here — presumably the model stays wherever map_location put it. Confirm.
    tu.load_model_checkpoint(
        model,
        '/Volumes/Samsung128/projects/ispr-project/train-results/model-fixed.torch',
        inference=False,
        map_location=device)
    print("Model loaded")
    # create dataloader
    tu_test_dataset = TUSimpleDataset(config.ts_root, config.ts_subdirs,
                                      config.ts_flabels)
    tu_dataloader = DataLoader(tu_test_dataset, batch_size=2, shuffle=True)
    # NOTE(review): train() mode under no_grad keeps dropout/batch-norm in
    # training behaviour while disabling autograd — confirm intentional.
    model.train()
    with torch.no_grad():

        for batchno, (frames, targets) in enumerate(tu_dataloader):
            output = model(frames)
            # Drop the channel dim; long dtype for class-index targets.
            targets_ = targets.squeeze(1).long()

            print(
# Example #5
# 0
    # Training DataLoader: reshuffles samples each epoch across `workers` processes.
    tu_train_dataloader = DataLoader(tu_tr_dataset,
                                     batch_size=batch_size,
                                     shuffle=True,
                                     num_workers=workers)
    # tu_test_dataloader = DataLoader(tu_test_dataset, batch_size=cc.test_batch, shuffle=False, num_workers=4)

    # **MODEL**
    # output size must have dimension (B, C..), where C = number of classes
    model = SegnetConvLSTM(hidden_dims,
                           decoder_out_channels=2,
                           lstm_nlayers=len(hidden_dims),
                           vgg_decoder_config=decoder_config)
    # Optionally resume training from a stored checkpoint
    # (Google Drive path — presumably a Colab setup).
    if cc.load_model:
        trainu.load_model_checkpoint(
            model,
            '/content/drive/My Drive/model-fixed.torch',
            inference=False,
            map_location=device)

    model.to(device)

    # define loss function (criterion) and optimizer
    # using crossentropy for weighted loss on background and lane classes
    # The 0.02 / 1.02 weights counteract the background/lane pixel imbalance.
    criterion = nn.CrossEntropyLoss(
        weight=torch.FloatTensor([0.02, 1.02])).to(device)
    # criterion = nn.BCEWithLogitsLoss(pos_weight=torch.FloatTensor([17.])).to(device)

    # optimizer = torch.optim.SGD(model.parameters(), lr, momentum=momentum, weight_decay=weight_decay)
    optimizer = torch.optim.Adam(model.parameters(),
                                 init_lr)  #, weight_decay=weight_decay)
import matplotlib.pyplot as plt
import numpy as np
"""
    Assess model output quality visually by plotting
    inputs, targets, and predictions.
"""
if __name__ == '__main__':
    # load model
    cc = Configs()
    print("Loading stored model")
    model = SegnetConvLSTM(cc.hidden_dims,
                           decoder_out_channels=2,
                           lstm_nlayers=len(cc.hidden_dims),
                           vgg_decoder_config=cc.decoder_config)
    # Restore weights from a checkpoint in the current working directory.
    tu.load_model_checkpoint(model,
                             'model.torch',
                             inference=False,
                             map_location=device)
    print("Model loaded")
    # create dataloader
    tu_test_dataset = TUSimpleDataset(config.ts_root, config.ts_subdirs,
                                      config.ts_flabels)
    tu_dataloader = DataLoader(tu_test_dataset, batch_size=2, shuffle=True)
    # NOTE(review): train() mode under no_grad keeps dropout/batch-norm in
    # training behaviour while disabling autograd — confirm intentional.
    model.train()
    with torch.no_grad():

        for batchno, (frames, targets) in enumerate(tu_dataloader):
            output = model(frames)
            # Drop the channel dim; long dtype class-index targets for the
            # loss / visualisation code that follows (truncated below).
            targets_ = targets.squeeze(1).long()

            print(
                "Loss:",