Code example #1
File: gen_results.py  Project: carrenD/MF-TAPNet
def main():
    # check cuda available
    assert torch.cuda.is_available()

    for fold in params.folds:
        num_classes = config.problem_class[params.problem_type]

        model = get_model(params.model_dir / 'model_{fold}.pt'.format(fold=fold),
                          num_classes=num_classes)
        _, filenames = trainval_split(fold)
        print('fold {}, number of files: {}'.format(fold, len(filenames)))
        eval(model, filenames, num_classes)
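The snippet above relies on project-local helpers (get_model, trainval_split, eval) that are not shown here. As a rough idea of what the checkpoint loader could look like, below is a minimal sketch that assumes the saved file holds a plain state_dict and that the network constructor accepts in_channels and num_classes; neither assumption is confirmed by the excerpt, and the real get_model in the repository may differ.

import torch

def get_model(model_path, num_classes, model_cls=None):
    # Hypothetical loader: build the network and restore weights from a checkpoint.
    # model_cls is an assumed constructor; the project may instead pickle the whole model.
    model = model_cls(in_channels=3, num_classes=num_classes)
    state = torch.load(str(model_path), map_location='cpu')
    model.load_state_dict(state)
    return model.cuda().eval()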
Code example #2
File: train.py  Project: carrenD/MF-TAPNet
def main(fold):
    # check cuda available
    assert torch.cuda.is_available()

    # when the input dimensions do not change, enable this flag to speed up training
    cudnn.benchmark = True

    num_classes = config.problem_class[params.problem_type]
    # inputs are RGB images of size 3 * h * w
    # outputs have num_classes channels (binary when num_classes == 2)
    model = params.model(in_channels=3, num_classes=num_classes)
    # data parallel
    model = nn.DataParallel(model, device_ids=params.device_ids).cuda()
    # loss function
    if num_classes == 2:
        loss = LossBinary(jaccard_weight=params.jaccard_weight)
        valid_metric = validation_binary
    else:
        loss = LossMulti(num_classes=num_classes, jaccard_weight=params.jaccard_weight)
        valid_metric = validation_multi


    # trainset transform
    train_transform = Compose([
        Resize(height=params.train_height, width=params.train_width, p=1),
        Normalize(p=1)
    ], p=1)

    # validset transform
    valid_transform = Compose([
        Resize(height=params.valid_height, width=params.valid_width, p=1),
        Normalize(p=1)
    ], p=1)

    # train/valid filenames
    train_filenames, valid_filenames = trainval_split(fold)
    print('num of train / validation files = {} / {}'.format(len(train_filenames), len(valid_filenames)))

    # train dataloader
    train_loader = DataLoader(
        dataset=RobotSegDataset(train_filenames, transform=train_transform),
        shuffle=True,
        num_workers=params.num_workers,
        batch_size=params.batch_size,
        pin_memory=True
    )
    # valid dataloader
    valid_loader = DataLoader(
        dataset=RobotSegDataset(valid_filenames, transform=valid_transform),
        shuffle=True,
        num_workers=params.num_workers,
        batch_size=len(params.device_ids), # at validation time, one image per GPU device
        pin_memory=True
    )

    train(
        model=model,
        loss_func=loss,
        train_loader=train_loader,
        valid_loader=valid_loader,
        valid_metric=valid_metric,
        fold=fold,
        num_classes=num_classes
    )
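Compose, Resize, and Normalize here match the albumentations API. Assuming that is the library in use, the dataset's __getitem__ would typically apply the composed transform to an image/mask pair as sketched below; the file names and the 512x640 target size are purely illustrative and not taken from the project.

import cv2
from albumentations import Compose, Resize, Normalize

transform = Compose([
    Resize(height=512, width=640, p=1),  # illustrative size; the project reads it from params
    Normalize(p=1),
], p=1)

image = cv2.cvtColor(cv2.imread('frame.png'), cv2.COLOR_BGR2RGB)
mask = cv2.imread('mask.png', cv2.IMREAD_GRAYSCALE)
augmented = transform(image=image, mask=mask)  # image and mask are resized together; only the image is normalized
image, mask = augmented['image'], augmented['mask']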
Code example #3
def main(fold):
    # check cuda available
    assert torch.cuda.is_available()

    # when the input dimensions do not change, enable this flag to speed up training
    cudnn.benchmark = True

    num_classes = config.problem_class[params.problem_type]
    # inputs are RGB images of size 3 * h * w
    # outputs have num_classes channels (binary when num_classes == 2)
    model = params.model(in_channels=3, num_classes=num_classes)
    # data parallel
    model = nn.DataParallel(model, device_ids=params.device_ids).cuda()
    # loss function
    if num_classes == 2:
        loss = LossBinary(jaccard_weight=params.jaccard_weight)
        valid_metric = validation_binary
    else:
        loss = LossMulti(num_classes=num_classes,
                         jaccard_weight=params.jaccard_weight)
        valid_metric = validation_multi

    # trainset transform
    train_transform = Compose([
        Resize(height=params.train_height, width=params.train_width, p=1),
        Normalize(p=1),
        PadIfNeeded(
            min_height=params.train_height, min_width=params.train_width, p=1),
    ], p=1)

    # validset transform
    valid_transform = Compose([
        PadIfNeeded(
            min_height=params.valid_height, min_width=params.valid_width, p=1),
        Resize(height=params.train_height, width=params.train_width, p=1),
        Normalize(p=1),
    ], p=1)

    # train/valid filenames
    train_filenames, valid_filenames = trainval_split(fold)
    print('fold {}, {} train / {} validation files'.format(
        fold, len(train_filenames), len(valid_filenames)))

    # train dataloader
    train_loader = DataLoader(
        dataset=RobotSegDataset(train_filenames,
                                transform=train_transform,
                                schedule="ordered",
                                batch_size=params.batch_size,
                                problem_type=params.problem_type,
                                semi_percentage=params.semi_percentage),
        shuffle=False, # set to false to disable pytorch dataset shuffle
        num_workers=params.num_workers,
        batch_size=params.batch_size,
        pin_memory=True
    )
    # valid dataloader
    valid_loader = DataLoader(
        dataset=RobotSegDataset(valid_filenames,
                                transform=valid_transform,
                                problem_type=params.problem_type,
                                mode='valid'),
        shuffle=False,  # set to false to disable pytorch dataset shuffle
        num_workers=0,  # params.num_workers,
        batch_size=1,  # at validation time, process images one at a time
        pin_memory=True)

    train(model=model,
          loss_func=loss,
          train_loader=train_loader,
          valid_loader=valid_loader,
          valid_metric=valid_metric,
          fold=fold,
          num_classes=num_classes)
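main takes the fold index as an argument, so it is presumably driven from a small entry point. A minimal sketch of such a driver follows; the --fold flag name and its default value are assumptions, not part of the excerpt.

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='train one cross-validation fold')
    parser.add_argument('--fold', type=int, default=0,
                        help='cross-validation fold index (hypothetical flag)')
    args = parser.parse_args()
    main(args.fold)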