Example #1
def test_one_image(args, dt_config, dataset_class):
    input_size = (475, 475)
    model_path = args.snapshot
    dataset_instance = dataset_class(data_path=dt_config.DATA_PATH)
    num_classes = dataset_instance.num_classes
    model = PSPNet(num_classes=num_classes)
    model.load_state_dict(torch.load(model_path, map_location="cpu")["state_dict"])  # map_location lets the checkpoint load on CPU-only machines
    model.eval()

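    # Read the input image, resize it to the network's expected input size, and keep a copy for the colour overlay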
    img = cv2.imread(args.image_path)
    processed_img = cv2.resize(img, input_size)
    overlay = np.copy(processed_img)
    processed_img = processed_img / 255.0
    processed_img = torch.tensor(
        processed_img.transpose(2, 0, 1)[np.newaxis, :]).float()
    if torch.cuda.is_available():
        model = model.cuda()
        processed_img = processed_img.cuda()
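    # Forward pass; the predicted mask is the per-pixel argmax over the class scores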
    output = model(processed_img)[0]
    mask = output.data.max(1)[1].cpu().numpy().reshape(475, 475)
    color_mask = np.array(dataset_instance.colors)[mask]
    alpha = args.alpha
    overlay = (((1 - alpha) * overlay) + (alpha * color_mask)).astype("uint8")
    overlay = cv2.resize(overlay, (img.shape[1], img.shape[0]))
    cv2.imwrite("result.jpg", overlay)
Example #2
def main():
    net = PSPNet(num_classes=num_classes)

    if len(args['snapshot']) == 0:
        # net.load_state_dict(torch.load(os.path.join(ckpt_path, 'cityscapes (coarse)-psp_net', 'xx.pth')))
        curr_epoch = 1
        args['best_record'] = {'epoch': 0, 'iter': 0, 'val_loss': 1e10, 'acc': 0, 'acc_cls': 0, 'mean_iu': 0,
                               'fwavacc': 0}
    else:
        print('training resumes from ' + args['snapshot'])
        net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'])))
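        # The snapshot filename encodes epoch, iteration and validation metrics separated by underscores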
        split_snapshot = args['snapshot'].split('_')
        curr_epoch = int(split_snapshot[1]) + 1
        args['best_record'] = {'epoch': int(split_snapshot[1]), 'iter': int(split_snapshot[3]),
                               'val_loss': float(split_snapshot[5]), 'acc': float(split_snapshot[7]),
                               'acc_cls': float(split_snapshot[9]), 'mean_iu': float(split_snapshot[11]),
                               'fwavacc': float(split_snapshot[13])}
    net.cuda().train()

    mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

    train_joint_transform = joint_transforms.Compose([
        joint_transforms.Scale(args['longer_size']),
        joint_transforms.RandomRotate(10),
        joint_transforms.RandomHorizontallyFlip()
    ])
    sliding_crop = joint_transforms.SlidingCrop(args['crop_size'], args['stride_rate'], ignore_label)
    train_input_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    val_input_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    target_transform = extended_transforms.MaskToTensor()
    visualize = standard_transforms.Compose([
        standard_transforms.Scale(args['val_img_display_size']),
        standard_transforms.ToTensor()
    ])

    train_set = Retinaimages('training', joint_transform=train_joint_transform, sliding_crop=sliding_crop,
                             transform=train_input_transform, target_transform=target_transform)
    train_loader = DataLoader(train_set, batch_size=args['train_batch_size'], num_workers=2, shuffle=True)
    val_set = Retinaimages('validate', transform=val_input_transform, sliding_crop=sliding_crop,
                           target_transform=target_transform)
    val_loader = DataLoader(val_set, batch_size=1, num_workers=2, shuffle=False)

    criterion = CrossEntropyLoss2d(size_average=True).cuda()

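    # Bias parameters get twice the base learning rate and no weight decay; all other parameters use the base settings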
    optimizer = optim.SGD([
        {'params': [param for name, param in net.named_parameters() if name[-4:] == 'bias'],
         'lr': 2 * args['lr']},
        {'params': [param for name, param in net.named_parameters() if name[-4:] != 'bias'],
         'lr': args['lr'], 'weight_decay': args['weight_decay']}
    ], momentum=args['momentum'], nesterov=True)

    if len(args['snapshot']) > 0:
        optimizer.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, 'opt_' + args['snapshot'])))
        optimizer.param_groups[0]['lr'] = 2 * args['lr']
        optimizer.param_groups[1]['lr'] = args['lr']

    check_mkdir(ckpt_path)
    check_mkdir(os.path.join(ckpt_path, exp_name))
    open(os.path.join(ckpt_path, exp_name, "_1" + '.txt'), 'w').write(str(args) + '\n\n')

    train(train_loader, net, criterion, optimizer, curr_epoch, args, val_loader, visualize, val_set)
Example #3
#     num_classes=2,
#     pretrained_backbone=None,
# )
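# NOTE: the model construction is truncated in this excerpt; `model` and `args` are assumed to be defined earlier in the script.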

#------------------------------------------------------------------------------
#   Summary network
#------------------------------------------------------------------------------
model.train()
model.summary(input_shape=(3, args.input_sz, args.input_sz), device='cpu')

#------------------------------------------------------------------------------
#   Measure time
#------------------------------------------------------------------------------
input = torch.randn([1, 3, args.input_sz, args.input_sz], dtype=torch.float)
if args.use_cuda:
    model.cuda()
    input = input.cuda()

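# Warm-up runs so that one-off costs (CUDA context and kernel setup, caches) do not skew the measurement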
for _ in range(10):
    model(input)

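# Note: CUDA kernels launch asynchronously; calling torch.cuda.synchronize() before reading the clock would give a more accurate GPU timing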
start_time = time()
for _ in range(args.n_measures):
    model(input)
finish_time = time()

if args.use_cuda:
    print("Inference time on cuda: %.2f [ms]" %
          ((finish_time - start_time) * 1000 / args.n_measures))
    print("Inference fps on cuda: %.2f [fps]" %
          (1 / ((finish_time - start_time) / args.n_measures)))