Example #1
# Create data loaders (return data in batches)
trainset_loader, valset_loader = \
    data.get_train_val_loaders(train_dir=args.train_dir,
                               max_trainset_size=args.max_trainset_size,
                               collate_fn=csv_collator,
                               height=args.height,
                               width=args.width,
                               seed=args.seed,
                               batch_size=args.batch_size,
                               drop_last_batch=args.drop_last_batch,
                               num_workers=args.nThreads,
                               val_dir=args.val_dir,
                               max_valset_size=args.max_valset_size)

# Model
with peter('Building network'):
    model = unet_model.UNet(3,   # RGB input channels
                            1,   # single-channel output (probability map)
                            height=args.height,
                            width=args.width,
                            known_n_points=args.n_points,
                            device=device)
    num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f" with {ballpark(num_params)} trainable parameters. ", end='')
model = nn.DataParallel(model)
model.to(device)

# Loss functions
loss_regress = nn.SmoothL1Loss()
loss_loc = losses.WeightedHausdorffDistance(resized_height=args.height,
                                            resized_width=args.width,
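
Example #1 is cut off while the localization loss is still being constructed. Purely as an illustration of how the two losses above are typically combined, here is a minimal training-step sketch; the batch structure, the dictionary keys, the forward outputs, and the call signature of the custom loss are assumptions rather than code from this example (return_2_terms=True mirrors Example #2 below).

import torch

# Minimal sketch of one training step; batch structure, dictionary keys, forward
# outputs and the custom-loss call signature are assumptions, not original code.
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)  # placeholder learning rate
lambda_count = 1.0                                          # weight of the count term

model.train()
for imgs, dictionaries in trainset_loader:
    imgs = imgs.to(device)
    gt_points = [d['locations'] for d in dictionaries]                       # assumed key
    gt_counts = torch.stack([d['count'] for d in dictionaries]).to(device)   # assumed key

    est_maps, est_counts = model(imgs)               # assumed: probability map + count

    term1, term2 = loss_loc(est_maps, gt_points)     # assumes return_2_terms=True
    loss = term1 + term2 + lambda_count * loss_regress(est_counts, gt_counts)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()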
Example #2
testset_loader = data.DataLoader(testset,
                                 batch_size=1,
                                 num_workers=args.nThreads,
                                 collate_fn=csv_collator)

# Array with [height, width] of the new size
resized_size = np.array([args.height, args.width])

# Loss function
criterion_training = losses.WeightedHausdorffDistance(resized_height=args.height,
                                                      resized_width=args.width,
                                                      return_2_terms=True,
                                                      device=device)

# Restore saved checkpoint (model weights)
with peter("Loading checkpoint"):

    # Pretrained models that come with this package
    if args.model == 'unet_256x256_sorghum':
        args.model = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                  'checkpoints',
                                  'unet_256x256_sorghum.ckpt')
    if os.path.isfile(args.model):
        if args.cuda:
            checkpoint = torch.load(args.model)
        else:
            checkpoint = torch.load(
                args.model, map_location=lambda storage, loc: storage)
        # Model
        if args.n_points is None:
            if 'n_points' not in checkpoint:
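
The checkpoint-loading block stops while inspecting the stored keys. As a sketch only of how such a restore usually concludes, the UNet constructor call and the 'model' checkpoint key below are assumptions, not taken from this example.

# Sketch only: build the network, wrap it, and load the stored weights.
model = unet_model.UNet(3, 1,
                        height=args.height,
                        width=args.width,
                        known_n_points=args.n_points,
                        device=device)
model = nn.DataParallel(model)
model.to(device)
model.load_state_dict(checkpoint['model'])  # assumed checkpoint key
model.eval()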
Example #3
    training_transforms += [RandomVerticalFlipImageAndLabel(p=0.5)]
training_transforms += [ScaleImageAndLabel(size=(args.height, args.width))]
training_transforms += [transforms.ToTensor()]
training_transforms += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
trainset = CSVDataset(args.train_dir,
                      transforms=transforms.Compose(training_transforms),
                      max_dataset_size=args.max_trainset_size)
trainset_loader = DataLoader(trainset,
                             batch_size=args.batch_size,
                             drop_last=args.drop_last_batch,
                             shuffle=True,
                             num_workers=args.nThreads,
                             collate_fn=csv_collator)

# Model
with peter('Building network'):
    model = unet_model.UNet(3,
                            1,
                            height=args.height,
                            width=args.width,
                            known_n_points=args.n_points)
    num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f" with {ballpark(num_params)} trainable parameters. ", end='')
model = nn.DataParallel(model)
model.to(device)

# Loss function
loss_regress = nn.SmoothL1Loss()
loss_loc = losses.WeightedHausdorffDistance(resized_height=args.height,
                                            resized_width=args.width,
                                            p=args.p,
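
csv_collator is handed to every DataLoader in these examples but never shown. Because each image carries a variable number of annotated points, the default collate cannot batch the labels; the function below is only a sketch of what such a collator could look like, assuming each dataset item is an (image, label-dictionary) pair.

import torch

def csv_collator_sketch(batch):
    """Illustrative only: stack the images into one tensor and keep the
    per-image label dictionaries (variable-length point lists) in a list."""
    imgs = torch.stack([img for img, _ in batch], dim=0)
    dictionaries = [d for _, d in batch]
    return imgs, dictionaries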
Example #4
    config = utils.config_parser(config_path, experiment_type="training")

    torch.set_default_dtype(torch.float32)
    device_cpu = torch.device('cpu')
    device = torch.device('cuda:0') if config['use_cuda'] else device_cpu

    # data_dictionary,batch_size,num_workers,instance_seg = False):
    test_loader = cranberry_dataset.build_single_loader(
        data_dictionary=config['data']['eval_dir'],
        batch_size=config['testing']['batch_size'],
        num_workers=config['testing']['num_workers'],
        instance_seg=config['data']['instance_seg'],
        test=config['testing']['img_only'],
        has_mask=config['data']['has_mask'])

    with peter('Building Network'):
        model = unet_refined.UNetRefined(n_channels=3, n_classes=2)
        # model = unet_regres.Unet(in_channels=3,classes=2,decoder_channels= (512,256,128),encoder_depth=3)
        num_params = sum(p.numel() for p in model.parameters()
                         if p.requires_grad)
        print("model has {} trainable parameters".format(num_params))
    # model = nn.DataParallel(model)
    # Moving the model to `device` is sufficient; an unconditional .cuda()
    # would fail on CPU-only runs despite the use_cuda switch above.
    model.to(device)

    class_weights = torch.Tensor((1, 1)).float()
    class_weights = class_weights.to(device)
    loss_segmentation = nn.CrossEntropyLoss(class_weights)
    # loss_convexity = loss.ConvexShapeLoss(height=456,width=608,device=device)
    optimizer = optim.Adam(model.parameters(),
                           lr=config['testing']['learning_rate'],
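
Example #4 is cut off while the optimizer is being built. To show how the loader, model, and loss above fit together, here is a sketch of a plain evaluation pass; the assumption that test_loader yields (image, mask) pairs and that the model returns raw class logits is not taken from the original code.

    # Sketch of an evaluation pass; batch structure and model output are assumed.
    model.eval()
    total_loss = 0.0
    with torch.no_grad():
        for images, masks in test_loader:
            images = images.to(device)
            masks = masks.to(device)              # assumed (B, H, W) class indices
            logits = model(images)                # assumed raw scores, (B, 2, H, W)
            total_loss += loss_segmentation(logits, masks).item()
    print("mean segmentation loss: {:.4f}".format(total_loss / len(test_loader)))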
Example #5
                                 batch_size=1,
                                 num_workers=args.nThreads,
                                 collate_fn=csv_collator)

# Array with [height, width] of the new size
resized_size = np.array([args.height, args.width])

# Loss function
criterion_training = losses.WeightedHausdorffDistance(
    resized_height=args.height,
    resized_width=args.width,
    return_2_terms=True,
    device=device)

# Restore saved checkpoint (model weights)
with peter("Loading checkpoint"):

    # Pretrained models that come with this package
    if args.model == 'unet_256x256_sorghum':
        args.model = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                  'checkpoints', 'unet_256x256_sorghum.ckpt')
    if os.path.isfile(args.model):
        if args.cuda:
            checkpoint = torch.load(args.model)
        else:
            checkpoint = torch.load(args.model,
                                    map_location=lambda storage, loc: storage)
        # Model
        if args.n_points is None:
            if 'n_points' not in checkpoint:
                # Model will also estimate # of points
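
resized_size is computed near the top of this example but not used in the lines shown. It is normally needed to map point estimates made on the resized input back to the original image resolution; the helper below only illustrates that rescaling, and every name in it is chosen here rather than taken from the snippet.

import numpy as np

def points_to_original_resolution(points_resized, orig_size, resized_size):
    """Illustrative helper: rescale (row, col) point estimates from the
    resized image back to the original image size."""
    scale = np.asarray(orig_size, dtype=float) / np.asarray(resized_size, dtype=float)
    return np.asarray(points_resized, dtype=float) * scale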