コード例 #1
0
    def __init__(self, content=None):
        """Load the ONNX model and build the image preprocessing pipelines.

        Args:
            content: resource provider exposing ``get_resource(path)`` and
                ``model_path``. Required despite the ``None`` default.

        Raises:
            ValueError: if ``content`` is None (previously this surfaced as
                an opaque AttributeError on ``content.get_resource``).
        """
        # Bug fix: the None default was dereferenced unconditionally below.
        if content is None:
            raise ValueError("content must provide get_resource and model_path")

        get = content.get_resource

        self.__model = onnxruntime.InferenceSession(
            get(content.model_path))

        # 512x512 line art, scaled to [-1, 1], with a leading batch axis.
        self.line_transform = Compose([
            Resize((512, 512)),
            ToTensor(),
            Normalize([0.5], [0.5]),
            Lambda(lambda img: np.expand_dims(img, 0))
        ])
        # Color hints: nearest-neighbour downscale, RGBA -> RGB, [-1, 1].
        self.hint_transform = Compose([
            # input must RGBA !
            Resize((128, 128), Image.NEAREST),
            Lambda(lambda img: img.convert(mode='RGB')),
            ToTensor(),
            Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
            Lambda(lambda img: np.expand_dims(img, 0))
        ])
        # Low-resolution draft of the line art.
        self.line_draft_transform = Compose([
            Resize((128, 128)),
            ToTensor(),
            Normalize([0.5], [0.5]),
            Lambda(lambda img: np.expand_dims(img, 0))
        ])
        # Alpha channel extraction is delegated to self.get_alpha.
        self.alpha_transform = Compose([
            Lambda(lambda img: self.get_alpha(img)),
        ])
def test_augment(img, mask=None, model='scale'):
    """Test-time preprocessing for image (and optional mask).

    Args:
        img: input image.
        mask: optional segmentation mask, transformed jointly with the image.
        model: 'scale' to rescale to 128, anything else to center-pad to
            128x128.

    Returns:
        The (image, mask) output of the DualCompose pipeline.

    Bug fix: ``mask`` was accepted but discarded — both calls hard-coded
    ``mask=None``. It is now forwarded; callers that never passed a mask
    are unaffected.
    """
    normalize = ImageOnly(
        Normalize(mean=(0.485, 0.456, 0.406),
                  std=(0.229, 0.224, 0.225)))
    if model == 'scale':
        pipeline = DualCompose([Scale(size=128), normalize])
    else:
        pipeline = DualCompose([Centerpad(size=(128, 128)), normalize])
    return pipeline(img, mask=mask)
コード例 #3
0
ファイル: val_transforms.py プロジェクト: surmenok/ssd
def get_transforms():
    """Return the evaluation preprocessing pipeline.

    Order matters: tensor conversion, then per-channel normalization with
    the dataset statistics, then resize to the model's input size.
    """
    return torchvision.transforms.Compose([
        ToTensor(),
        Normalize(mean=constants.DATA_MEAN, std=constants.DATA_STD),
        Resize(constants.TRANSFORMED_IMAGE_SIZE),
    ])
コード例 #4
0
ファイル: inference.py プロジェクト: flo-he/CRC-Segmentation
def main():
    """Run inference on the CRC test set and print Dice/accuracy metrics.

    Relies on module-level ``args``, ``use_cuda``, ``chkpt`` and ``device``.
    """

    # get test set and test set loader
    # Bug fix: "data\\test" only worked on Windows; os.path.join picks the
    # correct separator on every platform (still data\test on Windows).
    test_set = CRC_Dataset(root_dir=os.path.join(os.getcwd(), "data", "test"),
                           transforms=[
                               MirrorPad(((6, ), (6, ), (0, ))),
                               ToTensor(),
                               Normalize(means=(0.7942, 0.6693, 0.7722),
                                         stds=(0.1998, 0.3008, 0.2037))
                           ])

    test_loader = torch.utils.data.DataLoader(
        test_set,
        batch_size=args.batch_size if args.batch_size else 8,
        num_workers=args.workers if args.workers else 1,
        pin_memory=use_cuda,
    )

    # 512x512 input, 500x500 output, widening feature channels per level.
    model = UNet((512, 512), (500, 500),
                 32,
                 64,
                 128,
                 256,
                 512,
                 droprate=0.5,
                 Norm=torch.nn.BatchNorm2d)
    #model = UNet((512, 512), (500, 500), 32, 64, 128, 256, Norm=torch.nn.BatchNorm2d)
    model.load_state_dict(torch.load(chkpt))
    model.to(device)

    dice, acc = compute_metrics_on_test_set(model, test_loader)
    print(dice, acc)
コード例 #5
0
def main():
    """Visual smoke test: dump augmented training samples to disk.

    Builds the training augmentation pipeline, swaps in a collate function
    that writes every image of a batch to datatest/sev_img/, and drains the
    loader once in a child process.
    """
    class imshowCollate(object):
        """Collate that saves each image of the batch instead of stacking."""

        def __call__(self, batch):
            images, labels = zip(*batch)
            # enumerate replaces the manual idx counter of the original.
            for idx, img in enumerate(images):
                # ToTensor produced [0, 1] floats; scale back for imwrite.
                img = img.cpu().numpy().transpose((1, 2, 0)) * 255  #totensor
                cv2.imwrite(
                    'datatest/sev_img/img' + str(idx) + '——' +
                    str(labels[idx]) + '.jpg', img)
            return images, labels

    from transforms import  Compose, Normalize, RandomResizedCrop, RandomHorizontalFlip, \
        ColorJitter, ToTensor,Lighting

    # Kept for the (currently commented-out) normalize step in the pipeline.
    normalize = Normalize(mean=[0.485, 0.456, 0.406],
                          std=[0.229, 0.224, 0.225])

    dataset = FileListLabeledDataset(
        '/workspace/mnt/group/algo/yangdecheng/work/multi_task/pytorch-train/datatest/test.txt',
        '/workspace/mnt/group/algo/yangdecheng/work/multi_task/pytorch-train/datatest/pic',
        Compose([
            RandomResizedCrop((112),
                              scale=(0.7, 1.2),
                              ratio=(1. / 1., 4. / 1.)),
            RandomHorizontalFlip(),
            ColorJitter(brightness=[0.5, 1.5],
                        contrast=[0.5, 1.5],
                        saturation=[0.5, 1.5],
                        hue=0),
            ToTensor(),
            Lighting(1, [0.2175, 0.0188, 0.0045],
                     [[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140],
                      [-0.5836, -0.6948, 0.4203]]),  #0.1
            # 				normalize,
        ]))

    train_loader = torch.utils.data.DataLoader(dataset,
                                               batch_size=100,
                                               shuffle=True,
                                               num_workers=10,
                                               pin_memory=True,
                                               sampler=None,
                                               collate_fn=imshowCollate())

    # Drain the loader once in a subprocess (iter_f is module-level).
    from multiprocessing import Process
    p_list = []
    for i in range(1):
        p_list.append(Process(target=iter_f, args=(train_loader, )))
    for p in p_list:
        p.start()
    for p in p_list:
        p.join()
コード例 #6
0
def init_seg(input_sizes,
             std,
             mean,
             dataset,
             test_base=None,
             test_label_id_map=None,
             city_aug=0):
    """Build the validation loader for semantic segmentation.

    Args:
        input_sizes: target spatial size for the test transform.
        std, mean: normalization statistics.
        dataset: 'voc', 'city', 'gtav' or 'synthia'.
        test_base: dataset root directory.
        test_label_id_map: label-id remap used by city-style datasets.
        city_aug: 1 (City big) or 2 (ERFNet/ENet) for city-style datasets.

    Raises:
        ValueError: for an unknown dataset, or for a city-style dataset with
            a ``city_aug`` value that has no transform defined (previously
            this fell through and crashed later with UnboundLocalError).
    """
    if dataset == 'voc':
        transform_test = Compose([
            ToTensor(),
            ZeroPad(size=input_sizes),
            Normalize(mean=mean, std=std)
        ])
    elif dataset == 'city' or dataset == 'gtav' or dataset == 'synthia':  # All the same size
        if city_aug == 2:  # ERFNet and ENet
            transform_test = Compose([
                ToTensor(),
                Resize(size_image=input_sizes, size_label=input_sizes),
                LabelMap(test_label_id_map)
            ])
        elif city_aug == 1:  # City big
            transform_test = Compose([
                ToTensor(),
                Resize(size_image=input_sizes, size_label=input_sizes),
                Normalize(mean=mean, std=std),
                LabelMap(test_label_id_map)
            ])
        else:
            # Bug fix: any other city_aug (including the default 0) left
            # transform_test unassigned -> UnboundLocalError. Fail fast.
            raise ValueError(
                'unsupported city_aug for {}: {}'.format(dataset, city_aug))
    else:
        raise ValueError

    # Not the actual test set (i.e. validation set)
    test_set = StandardSegmentationDataset(
        root=test_base,
        image_set='val',
        transforms=transform_test,
        data_set='city'
        if dataset == 'gtav' or dataset == 'synthia' else dataset)

    val_loader = torch.utils.data.DataLoader(dataset=test_set,
                                             batch_size=1,
                                             num_workers=0,
                                             shuffle=False)

    # Testing
    return val_loader
コード例 #7
0
def init(batch_size, state, input_sizes, dataset, mean, std, base, workers=10):
    """Build lane-detection data loaders.

    state selects the split(s):
      0 -> (train loader, validation loader)
      1 -> 'valfast' loader (fast mean-IoU validation)
      2 -> 'test' loader
      3 -> 'val' loader
    Any other value raises ValueError.
    """

    # ! Can't use torchvision.Transforms.Compose — this custom Compose
    # transforms image and label together.
    def _pipeline(extra):
        steps = [Resize(size_image=input_sizes[0], size_label=input_sizes[0])]
        steps.extend(extra)
        steps.append(ToTensor())
        steps.append(Normalize(mean=mean, std=std))
        return Compose(steps)

    transforms_test = _pipeline([])
    transforms_train = _pipeline([RandomRotation(degrees=3)])

    if state == 0:
        train_set = StandardLaneDetectionDataset(root=base, image_set='train',
                                                 transforms=transforms_train,
                                                 data_set=dataset)
        train_loader = torch.utils.data.DataLoader(dataset=train_set,
                                                   batch_size=batch_size,
                                                   num_workers=workers,
                                                   shuffle=True)
        val_set = StandardLaneDetectionDataset(root=base, image_set='val',
                                               transforms=transforms_test,
                                               data_set=dataset)
        # Validation runs without gradients, so a larger batch fits.
        val_loader = torch.utils.data.DataLoader(dataset=val_set,
                                                 batch_size=batch_size * 4,
                                                 num_workers=workers,
                                                 shuffle=False)
        return train_loader, val_loader

    if state in (1, 2, 3):
        split = ('valfast', 'test', 'val')[state - 1]
        eval_set = StandardLaneDetectionDataset(root=base, image_set=split,
                                                transforms=transforms_test,
                                                data_set=dataset)
        return torch.utils.data.DataLoader(dataset=eval_set,
                                           batch_size=batch_size,
                                           num_workers=workers,
                                           shuffle=False)

    raise ValueError
コード例 #8
0
def get_transform(train=True,fixsize=False,img_size=416,min_size=800,max_size=1333,
                  image_mean=None,image_std=None,advanced=False):
    """Build the detection transform pipeline.

    Training adds augmentation and random horizontal flipping; evaluation is
    deterministic. ``fixsize`` selects fixed-size resizing over min/max
    resizing. Means/stds default to the ImageNet statistics.
    """
    mean = [0.485, 0.456, 0.406] if image_mean is None else image_mean
    std = [0.229, 0.224, 0.225] if image_std is None else image_std

    resize = ResizeFixSize(img_size) if fixsize else ResizeMinMax(min_size, max_size)

    if train:
        steps = [
            Augment(advanced),
            ToTensor(),
            resize,
            RandomHorizontalFlip(0.5),
            Normalize(mean, std),
        ]
    else:
        steps = [
            ToTensor(),
            resize,
            Normalize(mean, std),
        ]
    return Compose(steps)
コード例 #9
0
def calculate_SDR(music, model, n_fft=2048, hop_length=512, slice_duration=2):
    """Evaluate vocal-separation quality slice by slice.

    Splits the track into ``slice_duration``-second windows, runs the mask
    model on each mixture window's magnitude spectrogram, reconstructs the
    predicted vocal audio and scores it with BSS-eval.

    Returns:
        List of per-slice SDR scores (was previously collected but never
        returned).
    """
    model.eval()
    scores = []
    sr = music.rate
    ind = 0
    # NOTE(review): assumes music.audio supports .transpose() and that
    # torch.mean accepts it — confirm upstream that these are tensors.
    mixture = torch.mean(music.audio.transpose(), dim=0)
    vocal = torch.mean(music.targets['vocals'].audio.transpose(), dim=0)
    for i in range(0, len(music.audio), slice_duration * sr):
        ind += 1
        # Bug fix: the original reassigned mixture/vocal to their own slices,
        # so from the second iteration the absolute index i pointed into an
        # already-shrunk tensor. Slice into fresh locals instead.
        mixture_seg = mixture[i:i + slice_duration * sr]
        vocal_seg = vocal[i:i + slice_duration * sr]

        if np.all(vocal_seg == 0):
            # all 0s, skipping
            continue

        if i + 2 * sr >= len(music.audio):
            break
        mixture_stft = torch.stft(mixture_seg,
                                  n_fft=n_fft,
                                  hop_length=hop_length,
                                  window=torch.hann_window(n_fft),
                                  center=True)
        magnitude_mixture_stft, mixture_phase = torchaudio.functional.magphase(
            mixture_stft)
        normalized_magnitude_mixture_stft = torch.Tensor(Normalize().forward(
            [magnitude_mixture_stft])[0])

        with torch.no_grad():
            mask = model.forward(
                normalized_magnitude_mixture_stft.unsqueeze(0)).squeeze(0)
            out = mask * normalized_magnitude_mixture_stft
        # Re-apply the mixture phase, then invert. Bug fix: the original
        # called torch.stft here — a forward transform cannot reconstruct
        # audio; torch.istft is the inverse.
        predicted_vocal_stft = out * mixture_phase
        predicted_vocal_audio = torch.istft(predicted_vocal_stft,
                                            n_fft=n_fft,
                                            hop_length=hop_length,
                                            window=torch.hann_window(n_fft),
                                            center=True)
        try:
            scores.append(
                mir_eval.separation.bss_eval_sources(
                    vocal_seg[:predicted_vocal_audio.shape[0]],
                    predicted_vocal_audio)[0])
        except ValueError:
            print(vocal_seg.all() == 0)
            print(predicted_vocal_stft.all() == 0)
            print('Error but skipping')
    return scores
コード例 #10
0
def init_lane(input_sizes, dataset, mean, std, base, workers=0):
    """Return a deterministic validation loader (batch size 1) for lane
    detection."""
    pipeline = Compose([
        Resize(size_image=input_sizes, size_label=input_sizes),
        ToTensor(),
        Normalize(mean=mean, std=std),
    ])
    val_set = StandardLaneDetectionDataset(root=base,
                                           image_set='val',
                                           transforms=pipeline,
                                           data_set=dataset)
    return torch.utils.data.DataLoader(dataset=val_set,
                                       batch_size=1,
                                       num_workers=workers,
                                       shuffle=False)
コード例 #11
0
def get_transform_fixsize(train=True,img_size=416,
                  image_mean=None,image_std=None,advanced=False):
    """Build the fixed-size detection transform pipeline.

    Pads to square, converts to tensor and resizes to ``img_size``; training
    additionally prepends augmentation and appends random horizontal flips.
    Means/stds default to the ImageNet statistics.
    """
    mean = [0.485, 0.456, 0.406] if image_mean is None else image_mean
    std = [0.229, 0.224, 0.225] if image_std is None else image_std

    steps = [Pad(), ToTensor(), Resize(img_size)]
    if train:
        steps = [Augment(advanced)] + steps + [RandomHorizontalFlip(0.5)]
    steps.append(Normalize(mean, std))
    return Compose(steps)
コード例 #12
0
def train_augment(img, mask, prob=0.5):
    """Training-time joint augmentation for image and mask.

    Args:
        img: input image.
        mask: segmentation mask, transformed jointly with the image.
        prob: probability for each stochastic transform (flip, shift/scale,
            photometric one-of). The median blur keeps its own 0.15 rate.

    Relies on module-level ``pad`` for the output size.

    Bug fix: ``prob`` was accepted but ignored — every transform hard-coded
    0.5. It is now wired through; the default preserves old behavior.
    """
    return DualCompose([
        HorizontalFlip(prob=prob),
        ShiftScale(limit=4, prob=prob),
        OneOf([
            #ImageOnly(CLAHE(clipLimit=2.0, tileGridSize=(8, 8))),
            ImageOnly(Brightness_shift(limit=0.1)),
            ImageOnly(do_Gamma(limit=0.08)),
            ImageOnly(Brightness_multiply(limit=0.08)),
        ], prob=prob),
        ImageOnly(Median_blur(ksize=3, prob=.15)),
        Scale(size=pad),
        #Centerpad(size=(pad,pad)),
        ImageOnly(Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)))
    ])(img, mask)
コード例 #13
0
def load_dataset(name, root='datasets', feature_range=None, sparse=False, device='cpu'):
    """Load a registered graph dataset and prepare it for training.

    Optionally rescales node features into ``feature_range`` and converts
    the adjacency to a sparse tensor, then moves the data object to
    ``device`` (falling back to CPU when CUDA is unavailable).
    """
    dataset = _available_datasets[name](root=os.path.join(root, name))
    data = dataset[0]

    if feature_range is not None:
        data = Normalize(*feature_range)(data)

    if sparse:
        data = ToSparseTensor()(data)

    target_device = device if torch.cuda.is_available() else 'cpu'
    data = data.to(target_device)

    # Attach metadata consumed downstream.
    data.name = name
    data.num_classes = dataset.num_classes
    return data
コード例 #14
0
    def __init__(self, options):
        """Set up evaluation: transforms, dataset, model, and checkpoint restore.

        Args:
            options: parsed configuration; fields read here include
                max_scale, crop_size, heatmap_size, dataset_dir,
                num_keypoints, log_dir, checkpoint_dir, checkpoint,
                detection_thresh and dist_thresh.
        """
        self.options = options
        self.device = torch.device(
            'cuda:0' if torch.cuda.is_available() else 'cpu')

        # Test-time transform chain: optional rescale, crop/resize to the
        # network input size, keypoint locations -> heatmaps, tensor,
        # normalization.
        test_transform_list = []
        if self.options.max_scale > 1:
            test_transform_list.append(
                RandomRescaleBB(1.0, self.options.max_scale))
        test_transform_list.append(
            CropAndResize(out_size=(self.options.crop_size,
                                    self.options.crop_size)))
        test_transform_list.append(
            LocsToHeatmaps(out_size=(self.options.heatmap_size,
                                     self.options.heatmap_size)))
        test_transform_list.append(ToTensor())
        test_transform_list.append(Normalize())

        self.test_ds = RctaDataset(
            root_dir=self.options.dataset_dir,
            is_train=False,
            transform=transforms.Compose(test_transform_list))

        self.model = StackedHourglass(self.options.num_keypoints).to(
            self.device)
        # Only create optimizer because it is required to restore from checkpoint
        self.optimizer = torch.optim.RMSprop(params=self.model.parameters(),
                                             lr=0,
                                             momentum=0,
                                             weight_decay=0)
        self.models_dict = {'stacked_hg': self.model}
        self.optimizers_dict = {'optimizer': self.optimizer}
        print("log dir:", options.log_dir)
        print("checkpoint dir:", options.checkpoint_dir)
        self.saver = CheckpointSaver(save_dir=options.checkpoint_dir)
        print("checkpoint:", self.options.checkpoint)
        # Restores weights (and optimizer state) in place via models_dict /
        # optimizers_dict.
        self.checkpoint = self.saver.load_checkpoint(
            self.models_dict,
            self.optimizers_dict,
            checkpoint_file=self.options.checkpoint)

        self.criterion = nn.MSELoss().to(self.device)
        self.pose = Pose2DEval(detection_thresh=self.options.detection_thresh,
                               dist_thresh=self.options.dist_thresh)
コード例 #15
0
 def get_testing_loader(img_root, label_root, file_list, batch_size,
                        img_size, num_class):
     """Return a deterministic DataLoader over the VOC test split.

     Images are resized, tensorized and normalized with ImageNet statistics;
     no shuffling, no extra workers.
     """
     pipeline = transforms.Compose([
         Resize(img_size),
         ToTensor(),
         Normalize(imagenet_stats['mean'], imagenet_stats['std']),
         # GenOneHotLabel(num_class),
     ])
     dataset = VOCDataset(img_root, label_root, file_list, transform=pipeline)
     return DataLoader(
         dataset,
         batch_size,
         shuffle=False,
         num_workers=0,
         pin_memory=False,
     )
コード例 #16
0
 def get_training_loader(img_root, label_root, file_list, batch_size,
                         img_height, img_width, num_class):
     """Return a shuffled DataLoader over the VOC training split.

     Augments with horizontal flips, resizes slightly larger than the target
     and random-crops back, then tensorizes and normalizes with ImageNet
     statistics.
     """
     pipeline = transforms.Compose([
         RandomHorizontalFlip(),
         # Resize 5px larger so the random crop has room to move.
         Resize((img_height + 5, img_width + 5)),
         RandomCrop((img_height, img_width)),
         ToTensor(),
         Normalize(imagenet_stats['mean'], imagenet_stats['std']),
         # GenOneHotLabel(num_class),
     ])
     dataset = VOCTestDataset(img_root, label_root, file_list,
                              transform=pipeline)
     return DataLoader(
         dataset,
         batch_size,
         shuffle=True,
         num_workers=0,
         pin_memory=False,
     )
コード例 #17
0
ファイル: datasets.py プロジェクト: sisaman/LPGNN
def load_dataset(
    dataset: dict(help='name of the dataset',
                  option='-d',
                  choices=supported_datasets) = 'cora',
    data_dir: dict(help='directory to store the dataset') = './datasets',
    data_range: dict(help='min and max feature value', nargs=2,
                     type=float) = (0, 1),
    val_ratio: dict(help='fraction of nodes used for validation') = .25,
    test_ratio: dict(help='fraction of nodes used for test') = .25,
):
    """Load a supported graph dataset, split nodes, sparsify and normalize.

    The dict-valued annotations double as CLI argument metadata.
    """
    raw = supported_datasets[dataset](root=os.path.join(data_dir, dataset))
    # Random train/val/test node split over the first (only) graph.
    data = AddTrainValTestMask(split='train_rest',
                               num_val=val_ratio,
                               num_test=test_ratio)(raw[0])
    data = ToSparseTensor()(data)
    data.name = dataset
    data.num_classes = int(data.y.max().item()) + 1

    if data_range is not None:
        data = Normalize(*data_range)(data)

    return data
コード例 #18
0
ファイル: main.py プロジェクト: zeta1999/audio
def main(rank, args):
    """Train Wav2Letter on LibriSpeech in one (possibly distributed) process.

    Args:
        rank: process rank when args.distributed is set (0 = main process).
        args: parsed CLI namespace with model/optimizer/scheduler/data
            options; args.start_epoch is mutated when resuming.
    """

    # Distributed setup

    if args.distributed:
        setup_distributed(rank, args.world_size)

    not_main_rank = args.distributed and rank != 0

    logging.info("Start time: %s", datetime.now())

    # Explicitly set seed to make sure models created in separate processes
    # start from same random weights and biases
    torch.manual_seed(args.seed)

    # Empty CUDA cache
    torch.cuda.empty_cache()

    # Change backend for flac files
    torchaudio.set_audio_backend("soundfile")

    # Transforms

    melkwargs = {
        "n_fft": args.win_length,
        "n_mels": args.n_bins,
        "hop_length": args.hop_length,
    }

    sample_rate_original = 16000

    if args.type == "mfcc":
        transforms = torch.nn.Sequential(
            torchaudio.transforms.MFCC(
                sample_rate=sample_rate_original,
                n_mfcc=args.n_bins,
                melkwargs=melkwargs,
            ), )
        num_features = args.n_bins
    elif args.type == "waveform":
        transforms = torch.nn.Sequential(UnsqueezeFirst())
        num_features = 1
    else:
        raise ValueError("Model type not supported")

    if args.normalize:
        transforms = torch.nn.Sequential(transforms, Normalize())

    # Spectrogram masking augmentations are applied at collate time only on
    # the training loader.
    augmentations = torch.nn.Sequential()
    if args.freq_mask:
        augmentations = torch.nn.Sequential(
            augmentations,
            torchaudio.transforms.FrequencyMasking(
                freq_mask_param=args.freq_mask),
        )
    if args.time_mask:
        augmentations = torch.nn.Sequential(
            augmentations,
            torchaudio.transforms.TimeMasking(time_mask_param=args.time_mask),
        )

    # Text preprocessing

    char_blank = "*"
    char_space = " "
    char_apostrophe = "'"
    labels = char_blank + char_space + char_apostrophe + string.ascii_lowercase
    language_model = LanguageModel(labels, char_blank, char_space)

    # Dataset

    training, validation = split_process_librispeech(
        [args.dataset_train, args.dataset_valid],
        [transforms, transforms],
        language_model,
        root=args.dataset_root,
        folder_in_archive=args.dataset_folder_in_archive,
    )

    # Decoder

    if args.decoder == "greedy":
        decoder = GreedyDecoder()
    else:
        raise ValueError("Selected decoder not supported")

    # Model

    model = Wav2Letter(
        num_classes=language_model.length,
        input_type=args.type,
        num_features=num_features,
    )

    if args.jit:
        model = torch.jit.script(model)

    if args.distributed:
        n = torch.cuda.device_count() // args.world_size
        devices = list(range(rank * n, (rank + 1) * n))
        model = model.to(devices[0])
        model = torch.nn.parallel.DistributedDataParallel(model,
                                                          device_ids=devices)
    else:
        devices = ["cuda" if torch.cuda.is_available() else "cpu"]
        model = model.to(devices[0], non_blocking=True)
        model = torch.nn.DataParallel(model)

    n = count_parameters(model)
    logging.info("Number of parameters: %s", n)

    # Optimizer

    if args.optimizer == "adadelta":
        optimizer = Adadelta(
            model.parameters(),
            lr=args.learning_rate,
            weight_decay=args.weight_decay,
            eps=args.eps,
            rho=args.rho,
        )
    elif args.optimizer == "sgd":
        optimizer = SGD(
            model.parameters(),
            lr=args.learning_rate,
            momentum=args.momentum,
            weight_decay=args.weight_decay,
        )
    elif args.optimizer == "adam":
        # Bug fix: torch.optim.Adam (and AdamW below) accept no `momentum`
        # keyword — passing it raised TypeError at construction time.
        optimizer = Adam(
            model.parameters(),
            lr=args.learning_rate,
            weight_decay=args.weight_decay,
        )
    elif args.optimizer == "adamw":
        optimizer = AdamW(
            model.parameters(),
            lr=args.learning_rate,
            weight_decay=args.weight_decay,
        )
    else:
        raise ValueError("Selected optimizer not supported")

    if args.scheduler == "exponential":
        scheduler = ExponentialLR(optimizer, gamma=args.gamma)
    elif args.scheduler == "reduceonplateau":
        scheduler = ReduceLROnPlateau(optimizer, patience=10, threshold=1e-3)
    else:
        raise ValueError("Selected scheduler not supported")

    criterion = torch.nn.CTCLoss(blank=language_model.mapping[char_blank],
                                 zero_infinity=False)

    # Data Loader

    collate_fn_train = collate_factory(model_length_function, augmentations)
    collate_fn_valid = collate_factory(model_length_function)

    loader_training_params = {
        "num_workers": args.workers,
        "pin_memory": True,
        "shuffle": True,
        "drop_last": True,
    }
    loader_validation_params = loader_training_params.copy()
    loader_validation_params["shuffle"] = False

    loader_training = DataLoader(
        training,
        batch_size=args.batch_size,
        collate_fn=collate_fn_train,
        **loader_training_params,
    )
    loader_validation = DataLoader(
        validation,
        batch_size=args.batch_size,
        collate_fn=collate_fn_valid,
        **loader_validation_params,
    )

    # Setup checkpoint

    # NOTE(review): starting at 1.0 means epochs whose validation loss never
    # drops below 1.0 are never flagged "best" — confirm this is intended.
    best_loss = 1.0

    load_checkpoint = args.checkpoint and os.path.isfile(args.checkpoint)

    if args.distributed:
        torch.distributed.barrier()

    if load_checkpoint:
        logging.info("Checkpoint: loading %s", args.checkpoint)
        checkpoint = torch.load(args.checkpoint)

        args.start_epoch = checkpoint["epoch"]
        best_loss = checkpoint["best_loss"]

        model.load_state_dict(checkpoint["state_dict"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        scheduler.load_state_dict(checkpoint["scheduler"])

        logging.info("Checkpoint: loaded '%s' at epoch %s", args.checkpoint,
                     checkpoint["epoch"])
    else:
        logging.info("Checkpoint: not found")

        # Write an initial checkpoint so resuming always has a file to read.
        save_checkpoint(
            {
                "epoch": args.start_epoch,
                "state_dict": model.state_dict(),
                "best_loss": best_loss,
                "optimizer": optimizer.state_dict(),
                "scheduler": scheduler.state_dict(),
            },
            False,
            args.checkpoint,
            not_main_rank,
        )

    if args.distributed:
        torch.distributed.barrier()

    torch.autograd.set_detect_anomaly(False)

    for epoch in range(args.start_epoch, args.epochs):

        logging.info("Epoch: %s", epoch)

        train_one_epoch(
            model,
            criterion,
            optimizer,
            scheduler,
            loader_training,
            decoder,
            language_model,
            devices[0],
            epoch,
            args.clip_grad,
            not_main_rank,
            not args.reduce_lr_valid,
        )

        loss = evaluate(
            model,
            criterion,
            loader_validation,
            decoder,
            language_model,
            devices[0],
            epoch,
            not_main_rank,
        )

        if args.reduce_lr_valid and isinstance(scheduler, ReduceLROnPlateau):
            scheduler.step(loss)

        is_best = loss < best_loss
        best_loss = min(loss, best_loss)
        save_checkpoint(
            {
                "epoch": epoch + 1,
                "state_dict": model.state_dict(),
                "best_loss": best_loss,
                "optimizer": optimizer.state_dict(),
                "scheduler": scheduler.state_dict(),
            },
            is_best,
            args.checkpoint,
            not_main_rank,
        )

    logging.info("End time: %s", datetime.now())

    if args.distributed:
        torch.distributed.destroy_process_group()
コード例 #19
0
            showlegend=True, title='DG_z2', ytickmin=0, ytinkmax=2.0)),
        D_x = Scalar(vis, 'D_x', opts=dict(
            showlegend=True, title='D_x', ytickmin=0, ytinkmax=2.0)),
        inputs0 = Image3D(vis, 'inputs0'),
        inputs1 = Image3D(vis, 'inputs1'),
        fake0 = Image3D(vis, 'fake0'),
        fake1 = Image3D(vis, 'fake1'),
        outputs0 = Image3D(vis, 'outputs0'),
        outputs1 = Image3D(vis, 'outputs1'))

    # dataset setting
    x, y = Trainset(FG)
    # x, y, train_idx, test_idx, ratio = fold_split(FG)
    # transform = Compose([ToFloatTensor(), Normalize(0.5,0.5)])
    # trainset = ADNIDataset2D(FG, x, y, transform=transform)
    transform=Compose([ToWoldCoordinateSystem(), ToTensor(), Pad(1,0,1,0,1,0), Normalize(0.5,0.5)])
    trainset = ADNIDataset(FG, x, y, transform=transform)
    trainloader = DataLoader(trainset, batch_size=FG.batch_size,
                             shuffle=True, pin_memory=True)
    # trainset = ADNIDataset2D(FG, x[train_idx], y[train_idx], transform=transform)
    # testset = ADNIDataset2D(FG, x[test_idx], y[test_idx], transform=transform)
    # trainloader = DataLoader(trainset, batch_size=FG.batch_size, shuffle=True,
    #                          pin_memory=True, num_workers=4)
    # testloader = DataLoader(testset, batch_size=FG.batch_size, shuffle=True,
    #                         num_workers=4, pin_memory=True)

    # models
    D = infoDiscriminator3D(FG.c_code).to('cuda:{}'.format(FG.devices[0]))
    G = infoGenerator3D(FG.z_dim, FG.c_code).to('cuda:{}'.format(FG.devices[0]))

    if len(FG.devices) != 1:
コード例 #20
0
                    fake=Image3D(vis, 'fake'),
                    valid=Image3D(vis, 'valid'),
                    outputs=Image3D(vis, 'outputs'),
                    outputs2=Image3D(vis, 'outputs2'))

    # x, y = Trainset(FG)      # x = image, y=target
    x, y, train_idx, test_idx, ratio = fold_split(FG)
    # transform=Compose([ToFloatTensor(), Normalize(0.5,0.5)])
    # trainset = ADNIDataset2D(FG, x[train_idx], y[train_idx], transform=transform)
    # testset = ADNIDataset2D(FG, x[test_idx], y[test_idx], transform=transform)

    transform = Compose([
        ToWoldCoordinateSystem(),
        ToTensor(),
        Pad(1, 0, 1, 0, 1, 0),
        Normalize(0.5, 0.5)
    ])
    trainset = ADNIDataset(FG, x[train_idx], y[train_idx], transform=transform)
    testset = ADNIDataset(FG, x[test_idx], y[test_idx], transform=transform)

    trainloader = DataLoader(trainset,
                             batch_size=FG.batch_size,
                             shuffle=True,
                             pin_memory=True,
                             num_workers=4)
    testloader = DataLoader(testset,
                            batch_size=FG.batch_size,
                            shuffle=True,
                            num_workers=4,
                            pin_memory=True)
    # trainset = ADNIDataset2D(FG, x, y, transform=transform)
コード例 #21
0
from torch.utils.data import DataLoader
from torch.nn import functional as F
# from prepare_data import (original_height,
#                           original_width,
#                           h_start, w_start
#                           )
# from crop_utils import join_mask
# import crowdai

from validation import convert_bin_coco
from transforms import (ImageOnly, Normalize, RandomCrop, DualCompose, Rescale)

# Validation-time transform: cropping/rescaling steps are disabled; only the
# image (not the mask) is normalized.
# NOTE(review): mean=(0) and std=(1) are plain ints, not tuples — presumably
# Normalize broadcasts scalars; confirm against transforms.Normalize.
img_transform = DualCompose([
    # RandomCrop([128, 128]),
    # Rescale([256, 256]),
    ImageOnly(Normalize(mean=(0), std=(1)))
])

# Padding (left, right, top, bottom) applied around the input image.
PAD = (13, 13, 14, 14)


def get_model(model_path, model_type='unet11', problem_type='parts'):
    """

    :param model_path:
    :param model_type: 'UNet', 'UNet16', 'UNet11', 'LinkNet34'
    :param problem_type: 'binary', 'parts', 'instruments'
    :return:
    """
    num_classes = 1
コード例 #22
0
def main():
    """Train a binary segmentation model (Angyodysplasia dataset).

    Parses hyper-parameters from the command line, builds the requested
    model (wrapped in DataParallel when CUDA is available), constructs
    train/validation loaders for the requested fold, dumps the run
    configuration to ``<root>/params.json`` and hands off to
    ``utils.train``.
    """
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--jaccard-weight', default=0.3, type=float)
    arg('--device-ids',
        type=str,
        default='0',
        help='For example 0,1 to run on two GPUs')
    arg('--fold', type=int, help='fold', default=0)
    arg('--root', default='runs/debug', help='checkpoint root')
    arg('--batch-size', type=int, default=1)
    arg('--limit', type=int, default=10000, help='number of images in epoch')
    arg('--n-epochs', type=int, default=100)
    arg('--lr', type=float, default=0.0001)
    arg('--workers', type=int, default=12)
    arg('--model',
        type=str,
        default='UNet',
        choices=['UNet', 'UNet11', 'UNet16', 'AlbuNet34'])

    args = parser.parse_args()

    root = Path(args.root)
    root.mkdir(exist_ok=True, parents=True)

    # Binary segmentation: a single foreground logit per pixel.
    num_classes = 1
    if args.model == 'UNet':
        model = UNet(num_classes=num_classes)
    elif args.model == 'UNet11':
        model = UNet11(num_classes=num_classes, pretrained=True)
    elif args.model == 'UNet16':
        model = UNet16(num_classes=num_classes, pretrained=True)
    elif args.model == 'LinkNet34':
        model = LinkNet34(num_classes=num_classes, pretrained=True)
    elif args.model == 'AlbuNet34':
        # BUG FIX: this branch previously tested for 'AlbuNet', which is not
        # a valid --model choice, so selecting 'AlbuNet34' silently fell
        # through to the default 3-channel UNet below.
        model = AlbuNet34(num_classes=num_classes, pretrained=True)
    else:
        model = UNet(num_classes=num_classes, input_channels=3)

    if torch.cuda.is_available():
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None  # DataParallel then uses all visible GPUs
        model = nn.DataParallel(model, device_ids=device_ids).cuda()

    loss = LossBinary(jaccard_weight=args.jaccard_weight)

    cudnn.benchmark = True

    def make_loader(file_names, shuffle=False, transform=None, limit=None):
        # pin_memory only helps (and is only valid) when a GPU is present.
        return DataLoader(dataset=AngyodysplasiaDataset(file_names,
                                                        transform=transform,
                                                        limit=limit),
                          shuffle=shuffle,
                          num_workers=args.workers,
                          batch_size=args.batch_size,
                          pin_memory=torch.cuda.is_available())

    train_file_names, val_file_names = get_split(args.fold)

    print('num train = {}, num_val = {}'.format(len(train_file_names),
                                                len(val_file_names)))

    # Training uses geometric + color augmentation; validation is
    # deterministic (pad + center crop + normalize only).
    train_transform = DualCompose([
        SquarePaddingTraining(),
        CenterCrop([574, 574]),
        HorizontalFlip(),
        VerticalFlip(),
        Rotate(),
        ImageOnly(RandomHueSaturationValue()),
        ImageOnly(Normalize())
    ])

    val_transform = DualCompose([
        SquarePaddingTraining(),
        CenterCrop([574, 574]),
        ImageOnly(Normalize())
    ])

    train_loader = make_loader(train_file_names,
                               shuffle=True,
                               transform=train_transform,
                               limit=args.limit)
    valid_loader = make_loader(val_file_names, transform=val_transform)

    # Persist the exact run configuration next to the checkpoints.
    root.joinpath('params.json').write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))

    utils.train(init_optimizer=lambda lr: Adam(model.parameters(), lr=lr),
                args=args,
                model=model,
                criterion=loss,
                train_loader=train_loader,
                valid_loader=valid_loader,
                validation=validation_binary,
                fold=args.fold)
コード例 #23
0
def main():
    """Train a polyp segmentation model selected from the command line.

    Builds the requested network, wraps it in DataParallel when CUDA is
    available, constructs an augmented training loader for the requested
    fold, saves the run configuration and delegates to ``utils.train``.
    """
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--jaccard-weight', default=0.3, type=float)
    arg('--device-ids', type=str, default='0', help='For example 0,1 to run on two GPUs')
    arg('--fold', type=int, help='fold', default=0)
    arg('--root', default='runs/debug', help='checkpoint root')
    arg('--batch-size', type=int, default=1)
    arg('--limit', type=int, default=10000, help='number of images in epoch')
    arg('--n-epochs', type=int, default=100)
    arg('--lr', type=float, default=0.001)
    arg('--workers', type=int, default=12)
    # NOTE(review): 'LinkNet34' is an accepted choice but has no matching
    # branch below, so it silently falls through to the default 3-channel
    # UNet; conversely the 'GAN' branch below can never be selected here.
    arg('--model', type=str, default='UNet', choices=['UNet', 'UNet11', 'LinkNet34', 'UNet16', 'AlbuNet34', 'MDeNet', 'EncDec', 'hourglass', 'MDeNetplus'])

    args = parser.parse_args()
    root = Path(args.root)
    root.mkdir(exist_ok=True, parents=True)

    # Binary segmentation: one foreground logit per pixel.
    num_classes = 1
    if args.model == 'UNet':
        model = UNet(num_classes=num_classes)
    elif args.model == 'UNet11':
        model = UNet11(num_classes=num_classes, pretrained=True)
    elif args.model == 'UNet16':
        model = UNet16(num_classes=num_classes, pretrained=True)
    elif args.model == 'MDeNet':
        print('Mine MDeNet..................')
        model = MDeNet(num_classes=num_classes, pretrained=True)
    elif args.model == 'MDeNetplus':
        print('load MDeNetplus..................')
        model = MDeNetplus(num_classes=num_classes, pretrained=True)
    elif args.model == 'EncDec':
        print('Mine EncDec..................')
        model = EncDec(num_classes=num_classes, pretrained=True)
    elif args.model == 'GAN':
        model = GAN(num_classes=num_classes, pretrained=True)
    elif args.model == 'AlbuNet34':
        model = AlbuNet34(num_classes=num_classes, pretrained=False)
    elif args.model == 'hourglass':
        model = hourglass(num_classes=num_classes, pretrained=True) 
    else:
        model = UNet(num_classes=num_classes, input_channels=3)

    if torch.cuda.is_available():
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None
        # NOTE(review): device_ids is computed but deliberately not passed
        # (see trailing comment) — all visible GPUs are used regardless of
        # --device-ids; confirm that this is intended.
        model = nn.DataParallel(model).cuda()   #  nn.DataParallel(model, device_ids=device_ids).cuda()
    
    cudnn.benchmark = True
    
    def make_loader(file_names, shuffle=False, transform=None, limit=None):
        # pin_memory only helps (and is only valid) when a GPU is present.
        return DataLoader(
            dataset=Polyp(file_names, transform=transform, limit=limit),
            shuffle=shuffle,
            num_workers=args.workers,
            batch_size=args.batch_size,
            pin_memory=torch.cuda.is_available()
        )

    train_file_names, val_file_names = get_split(args.fold)

    print('num train = {}, num_val = {}'.format(len(train_file_names), len(val_file_names)))
    
    # Aggressive train-time augmentation: crop, resize, flips, rotation,
    # rescale/zoom, then color jitter + normalization on the image only.
    train_transform = DualCompose([
        CropCVC612(),
        img_resize(512),
        HorizontalFlip(),
        VerticalFlip(),
        Rotate(),
        Rescale(), 
        Zoomin(),
        ImageOnly(RandomHueSaturationValue()),
        ImageOnly(Normalize())
    ])

    train_loader = make_loader(train_file_names, shuffle=True, transform=train_transform, limit=args.limit)

    # Persist the exact run configuration next to the checkpoints.
    root.joinpath('params.json').write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))

    # NOTE(review): no validation loader is passed here, unlike the sibling
    # training scripts — confirm utils.train handles its absence.
    utils.train(
        args=args,
        model=model,
        train_loader=train_loader,
        fold=args.fold
    )
コード例 #24
0
#                           h_start, w_start
#                           )
# from crop_utils import join_mask
import crowdai

from validation import convert_bin_coco
from transforms import (ImageOnly,
                        Normalize,
                        RandomCrop,
                        DualCompose,
                        Rescale)

# Inference-time transform: rescale image and mask to 256x256 together,
# then normalize the image only (ImageOnly leaves the mask untouched).
# Normalize() relies on its project-defined defaults, not shown here.
img_transform = DualCompose([
    # RandomCrop([128, 128]),
    Rescale([256, 256]),
    ImageOnly(Normalize())
])


def get_model(model_path, model_type='unet11', problem_type='parts'):
    """

    :param model_path:
    :param model_type: 'UNet', 'UNet16', 'UNet11', 'LinkNet34'
    :param problem_type: 'binary', 'parts', 'instruments'
    :return:
    """
    num_classes = 1

    # if model_type == 'UNet16':
    #     model = UNet16(num_classes=num_classes)
コード例 #25
0
def init(batch_size,
         state,
         input_sizes,
         dataset,
         mean,
         std,
         base,
         workers=10,
         method='baseline'):
    # Build and return the data loader(s) for lane detection,
    # depending on whether the state is
    # 0: training -> (train_loader, validation_loader)
    # 1: fast validation by mean IoU (validation set) -> single loader
    # 2: just testing (test set) -> single loader
    # 3: just testing (validation set) -> single loader

    # Transformations
    # ! Can't use torchvision.Transforms.Compose
    # (these are project-local joint image+label transforms)
    transforms_test = Compose([
        Resize(size_image=input_sizes[0], size_label=input_sizes[0]),
        ToTensor(),
        Normalize(mean=mean, std=std)
    ])
    transforms_train = Compose([
        Resize(size_image=input_sizes[0], size_label=input_sizes[0]),
        RandomRotation(degrees=3),
        ToTensor(),
        # LSTR regresses curve points, so its targets are normalized too.
        Normalize(mean=mean,
                  std=std,
                  normalize_target=True if method == 'lstr' else False)
    ])

    # Batch builder: LSTR needs a dict-aware collate; others use default.
    if method == 'lstr':
        collate_fn = dict_collate_fn
    else:
        collate_fn = None

    if state == 0:
        if method == 'lstr':
            if dataset == 'tusimple':
                data_set = TuSimple(root=base,
                                    image_set='train',
                                    transforms=transforms_train,
                                    padding_mask=True,
                                    process_points=True)
            elif dataset == 'culane':
                data_set = CULane(root=base,
                                  image_set='train',
                                  transforms=transforms_train,
                                  padding_mask=True,
                                  process_points=True)
            else:
                raise ValueError
        else:
            data_set = StandardLaneDetectionDataset(
                root=base,
                image_set='train',
                transforms=transforms_train,
                data_set=dataset)

        data_loader = torch.utils.data.DataLoader(dataset=data_set,
                                                  batch_size=batch_size,
                                                  collate_fn=collate_fn,
                                                  num_workers=workers,
                                                  shuffle=True)
        # Validation always goes through the standard dataset, even for LSTR.
        validation_set = StandardLaneDetectionDataset(
            root=base,
            image_set='val',
            transforms=transforms_test,
            data_set=dataset)
        # No gradients at validation time, so a 4x larger batch fits in memory.
        validation_loader = torch.utils.data.DataLoader(dataset=validation_set,
                                                        batch_size=batch_size *
                                                        4,
                                                        num_workers=workers,
                                                        shuffle=False,
                                                        collate_fn=collate_fn)
        return data_loader, validation_loader

    elif state == 1 or state == 2 or state == 3:
        # Map state 1/2/3 -> image set 'valfast'/'test'/'val' via state - 1.
        image_sets = ['valfast', 'test', 'val']
        if method == 'lstr':
            if dataset == 'tusimple':
                data_set = TuSimple(root=base,
                                    image_set=image_sets[state - 1],
                                    transforms=transforms_test,
                                    padding_mask=False,
                                    process_points=False)
            elif dataset == 'culane':
                data_set = CULane(root=base,
                                  image_set=image_sets[state - 1],
                                  transforms=transforms_test,
                                  padding_mask=False,
                                  process_points=False)
            else:
                raise ValueError
        else:
            data_set = StandardLaneDetectionDataset(
                root=base,
                image_set=image_sets[state - 1],
                transforms=transforms_test,
                data_set=dataset)
        data_loader = torch.utils.data.DataLoader(dataset=data_set,
                                                  batch_size=batch_size,
                                                  collate_fn=collate_fn,
                                                  num_workers=workers,
                                                  shuffle=False)
        return data_loader
    else:
        raise ValueError
コード例 #26
0
def main():
    """Train a robotic-instrument segmentation model.

    Parses hyper-parameters, picks the number of output classes from the
    problem type ('binary'/'parts'/'instruments'), builds the requested
    network, constructs train/validation loaders and delegates to
    ``utils.train``.
    """
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--jaccard-weight', default=1, type=float)
    arg('--device-ids',
        type=str,
        default='0',
        help='For example 0,1 to run on two GPUs')
    arg('--fold', type=int, help='fold', default=0)
    arg('--root', default='runs/debug', help='checkpoint root')
    arg('--batch-size', type=int, default=1)
    arg('--n-epochs', type=int, default=10)
    arg('--lr', type=float, default=0.0002)
    arg('--workers', type=int, default=10)
    arg('--type',
        type=str,
        default='binary',
        choices=['binary', 'parts', 'instruments'])
    # BUG FIX: 'UNet16' was handled in the branches below but missing from
    # the accepted choices, making that branch unreachable; adding it is a
    # backward-compatible CLI extension.
    arg('--model',
        type=str,
        default='DLinkNet',
        choices=['UNet', 'UNet11', 'UNet16', 'LinkNet34', 'DLinkNet'])

    args = parser.parse_args()

    root = Path(args.root)
    root.mkdir(exist_ok=True, parents=True)

    # Class count follows the problem type: binary -> 1 logit,
    # parts -> 4 classes, instruments -> 8 classes.
    if args.type == 'parts':
        num_classes = 4
    elif args.type == 'instruments':
        num_classes = 8
    else:
        num_classes = 1

    if args.model == 'UNet':
        model = UNet(num_classes=num_classes)
    elif args.model == 'UNet11':
        model = UNet11(num_classes=num_classes, pretrained='vgg')
    elif args.model == 'UNet16':
        model = UNet16(num_classes=num_classes, pretrained='vgg')
    elif args.model == 'LinkNet34':
        model = LinkNet34(num_classes=num_classes, pretrained=True)
    elif args.model == 'DLinkNet':
        model = D_LinkNet34(num_classes=num_classes, pretrained=True)
    else:
        model = UNet(num_classes=num_classes, input_channels=3)

    if torch.cuda.is_available():
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None  # DataParallel then uses all visible GPUs
        model = nn.DataParallel(model, device_ids=device_ids).cuda()

    if args.type == 'binary':
        # loss = LossBinary(jaccard_weight=args.jaccard_weight)
        loss = LossBCE_DICE()
    else:
        loss = LossMulti(num_classes=num_classes,
                         jaccard_weight=args.jaccard_weight)

    cudnn.benchmark = True

    def make_loader(file_names,
                    shuffle=False,
                    transform=None,
                    problem_type='binary'):
        # pin_memory only helps (and is only valid) when a GPU is present.
        return DataLoader(dataset=RoboticsDataset(file_names,
                                                  transform=transform,
                                                  problem_type=problem_type),
                          shuffle=shuffle,
                          num_workers=args.workers,
                          batch_size=args.batch_size,
                          pin_memory=torch.cuda.is_available())

    # train_file_names, val_file_names = get_split(args.fold)
    train_file_names, val_file_names = get_train_val_files()

    print('num train = {}, num_val = {}'.format(len(train_file_names),
                                                len(val_file_names)))

    # Flips for training; validation is normalize-only.
    train_transform = DualCompose(
        [HorizontalFlip(),
         VerticalFlip(),
         ImageOnly(Normalize())])

    val_transform = DualCompose([ImageOnly(Normalize())])

    train_loader = make_loader(train_file_names,
                               shuffle=True,
                               transform=train_transform,
                               problem_type=args.type)
    valid_loader = make_loader(val_file_names,
                               transform=val_transform,
                               problem_type=args.type)

    # Persist the exact run configuration next to the checkpoints.
    root.joinpath('params.json').write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))

    if args.type == 'binary':
        valid = validation_binary
    else:
        valid = validation_multi

    utils.train(init_optimizer=lambda lr: Adam(model.parameters(), lr=lr),
                args=args,
                model=model,
                criterion=loss,
                train_loader=train_loader,
                valid_loader=valid_loader,
                validation=valid,
                fold=args.fold,
                num_classes=num_classes)
コード例 #27
0
ファイル: all_utils_ost.py プロジェクト: lilujunai/DST-CBC
def init(batch_size, state, split, input_sizes, sets_id, std, mean, keep_scale, reverse_channels, data_set,
         valtiny, no_aug):
    # Return data_loaders/data_loader
    # depending on whether the state is
    # 1: semi-supervised training ->
    #    (labeled, pseudo_labeled, unlabeled, val, reference) loaders
    # 2: fully-supervised training -> (labeled, val) loaders
    # 3: just testing -> val loader only
    # Raises ValueError for an unknown data_set or state.

    # Transformations (compatible with unlabeled data/pseudo labeled data)
    # ! Can't use torchvision.Transforms.Compose
    if data_set == 'voc':
        base = base_voc
        workers = 4
        transform_train = Compose(
            [ToTensor(keep_scale=keep_scale, reverse_channels=reverse_channels),
             RandomResize(min_size=input_sizes[0], max_size=input_sizes[1]),
             RandomCrop(size=input_sizes[0]),
             RandomHorizontalFlip(flip_prob=0.5),
             Normalize(mean=mean, std=std)])
        # Pseudo-labeled data is either augmented like real labels or, with
        # --no-aug, only resized deterministically.
        if no_aug:
            transform_train_pseudo = Compose(
                [ToTensor(keep_scale=keep_scale, reverse_channels=reverse_channels),
                 Resize(size_image=input_sizes[0], size_label=input_sizes[0]),
                 Normalize(mean=mean, std=std)])
        else:
            transform_train_pseudo = Compose(
                [ToTensor(keep_scale=keep_scale, reverse_channels=reverse_channels),
                 RandomResize(min_size=input_sizes[0], max_size=input_sizes[1]),
                 RandomCrop(size=input_sizes[0]),
                 RandomHorizontalFlip(flip_prob=0.5),
                 Normalize(mean=mean, std=std)])
        transform_pseudo = Compose(
            [ToTensor(keep_scale=keep_scale, reverse_channels=reverse_channels),
             Resize(size_image=input_sizes[0], size_label=input_sizes[0]),
             Normalize(mean=mean, std=std)])
        transform_test = Compose(
            [ToTensor(keep_scale=keep_scale, reverse_channels=reverse_channels),
             ZeroPad(size=input_sizes[2]),
             Normalize(mean=mean, std=std)])
    elif data_set == 'city':  # All the same size (whole set is down-sampled by 2)
        base = base_city
        workers = 8
        # Cityscapes label ids must be remapped to train ids (LabelMap).
        transform_train = Compose(
            [ToTensor(keep_scale=keep_scale, reverse_channels=reverse_channels),
             RandomResize(min_size=input_sizes[0], max_size=input_sizes[1]),
             RandomCrop(size=input_sizes[0]),
             RandomHorizontalFlip(flip_prob=0.5),
             Normalize(mean=mean, std=std),
             LabelMap(label_id_map_city)])
        if no_aug:
            transform_train_pseudo = Compose(
                [ToTensor(keep_scale=keep_scale, reverse_channels=reverse_channels),
                 Resize(size_image=input_sizes[0], size_label=input_sizes[0]),
                 Normalize(mean=mean, std=std)])
        else:
            transform_train_pseudo = Compose(
                [ToTensor(keep_scale=keep_scale, reverse_channels=reverse_channels),
                 RandomResize(min_size=input_sizes[0], max_size=input_sizes[1]),
                 RandomCrop(size=input_sizes[0]),
                 RandomHorizontalFlip(flip_prob=0.5),
                 Normalize(mean=mean, std=std)])
        transform_pseudo = Compose(
            [ToTensor(keep_scale=keep_scale, reverse_channels=reverse_channels),
             Resize(size_image=input_sizes[0], size_label=input_sizes[0]),
             Normalize(mean=mean, std=std),
             LabelMap(label_id_map_city)])
        transform_test = Compose(
            [ToTensor(keep_scale=keep_scale, reverse_channels=reverse_channels),
             Resize(size_image=input_sizes[2], size_label=input_sizes[2]),
             Normalize(mean=mean, std=std),
             LabelMap(label_id_map_city)])
    else:
        # BUG FIX: previously fell through with base = '' while leaving
        # `workers` and every transform undefined, so an invalid data_set
        # surfaced later as a confusing NameError. Fail fast instead.
        raise ValueError('Unsupported data_set: {}'.format(data_set))

    # Not the actual test set (i.e.validation set)
    test_set = StandardSegmentationDataset(root=base, image_set='valtiny' if valtiny else 'val',
                                           transforms=transform_test, label_state=0, data_set=data_set)
    val_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=batch_size, num_workers=workers, shuffle=False)

    # Testing
    if state == 3:
        return val_loader
    else:
        # Fully-supervised training
        if state == 2:
            labeled_set = StandardSegmentationDataset(root=base, image_set=(str(split) + '_labeled_' + str(sets_id)),
                                                      transforms=transform_train, label_state=0, data_set=data_set)
            labeled_loader = torch.utils.data.DataLoader(dataset=labeled_set, batch_size=batch_size,
                                                         num_workers=workers, shuffle=True)
            return labeled_loader, val_loader

        # Semi-supervised training
        elif state == 1:
            pseudo_labeled_set = StandardSegmentationDataset(root=base, data_set=data_set,
                                                             image_set=(str(split) + '_unlabeled_' + str(sets_id)),
                                                             transforms=transform_train_pseudo, label_state=1)
            reference_set = SegmentationLabelsDataset(root=base, image_set=(str(split) + '_unlabeled_' + str(sets_id)),
                                                      data_set=data_set)
            reference_loader = torch.utils.data.DataLoader(dataset=reference_set, batch_size=batch_size,
                                                           num_workers=workers, shuffle=False)
            unlabeled_set = StandardSegmentationDataset(root=base, data_set=data_set,
                                                        image_set=(str(split) + '_unlabeled_' + str(sets_id)),
                                                        transforms=transform_pseudo, label_state=2)
            labeled_set = StandardSegmentationDataset(root=base, data_set=data_set,
                                                      image_set=(str(split) + '_labeled_' + str(sets_id)),
                                                      transforms=transform_train, label_state=0)

            unlabeled_loader = torch.utils.data.DataLoader(dataset=unlabeled_set, batch_size=batch_size,
                                                           num_workers=workers, shuffle=False)

            # Labeled and pseudo-labeled batches each take half the budget so
            # a combined step still fits the requested batch_size.
            pseudo_labeled_loader = torch.utils.data.DataLoader(dataset=pseudo_labeled_set,
                                                                batch_size=int(batch_size / 2),
                                                                num_workers=workers, shuffle=True)
            labeled_loader = torch.utils.data.DataLoader(dataset=labeled_set,
                                                         batch_size=int(batch_size / 2),
                                                         num_workers=workers, shuffle=True)
            return labeled_loader, pseudo_labeled_loader, unlabeled_loader, val_loader, reference_loader

        else:
            # Support unsupervised learning here if that's what you want
            raise ValueError
コード例 #28
0
import utils

# Collect mask and color image file lists from the CAD model directory.
mask_file_names = dataset.get_file_names('/home/xingtong/CAD_models/knife1',
                                         'knife_mask')
print(mask_file_names)

color_file_names = dataset.get_file_names('/home/xingtong/CAD_models/knife1',
                                          'color_')
print(color_file_names)

# Training transform: joint resize/flips, then normalization (masks too,
# per normalize_mask=True), then mask-only geometric jitter.
img_size = 128
train_transform = DualCompose([
    Resize(size=img_size),
    HorizontalFlip(),
    VerticalFlip(),
    Normalize(normalize_mask=True),
    MaskOnly([MaskShiftScaleRotate(scale_upper=4.0),
              MaskShift(limit=50)])
])

num_workers = 4
batch_size = 6
train_loader = DataLoader(dataset=dataset.CADDataset(
    color_file_names=color_file_names,
    mask_file_names=mask_file_names,
    transform=train_transform),
                          shuffle=True,
                          num_workers=num_workers,
                          batch_size=batch_size)

# Run/checkpoint output directory for this debugging session.
root = Path('/home/xingtong/ToolTrackingData/runs/debug_CAD_knife')
コード例 #29
0
ファイル: test.py プロジェクト: lppllppl920/LumiPath
    # Evaluation configuration (fragment of a larger function; the enclosing
    # scope is not visible here).
    use_previous_model = True
    batch_size = 8
    num_workers = 8
    n_epochs = 1500
    gamma = 0.99  # presumably an LR/weight decay factor — confirm at use site

    # Network input resolution.
    img_width = 768
    img_height = 768

    # Resolution used only for on-screen visualization.
    display_img_height = 300
    display_img_width = 300

    # Deterministic test-time transform applied jointly to input and label.
    test_transform = DualCompose(
        [MaskLabel(),
         Resize(w=img_width, h=img_height),
         Normalize()])

    input_path = "../datasets/lumi/A/test"
    label_path = "../datasets/lumi/B/test"

    input_file_names = utils.read_lumi_filenames(input_path)
    label_file_names = utils.read_lumi_filenames(label_path)

    # NOTE(review): rebinding `dataset` here shadows the imported module of
    # the same name for the rest of this scope.
    dataset = dataset.LumiDataset(input_filenames=input_file_names,
                                  label_filenames=label_file_names,
                                  transform=test_transform)
    # No shuffling at test time so outputs align with the file order.
    loader = DataLoader(dataset=dataset,
                        batch_size=batch_size,
                        shuffle=False,
                        num_workers=num_workers)
コード例 #30
0
ファイル: main_dcgan.py プロジェクト: christy4526/mri_biGAN
                                           ytickmin=0,
                                           ytinkmax=2.0)),
                    D_x=Scalar(vis,
                               'D_x',
                               opts=dict(showlegend=True,
                                         title='D_x',
                                         ytickmin=0,
                                         ytinkmax=2.0)),
                    inputs=Image3D(vis, 'inputs'),
                    fake=Image3D(vis, 'fake'),
                    valid=Image3D(vis, 'valid'),
                    outputs=Image3D(vis, 'outputs'),
                    outputs2=Image3D(vis, 'outputs2'))

    x, y = Trainset(FG)  # x = image, y=target
    transform = Compose([ToFloatTensor(), Normalize(0.5, 0.5)])
    trainset = ADNIDataset2D(FG, x, y, transform=transform)
    trainloader = DataLoader(trainset,
                             batch_size=FG.batch_size,
                             shuffle=True,
                             pin_memory=True)

    D = dcDiscriminator2D(FG).to('cuda:{}'.format(
        FG.devices[0]))  # discriminator net D(x, z)
    G = dcGenerator2D(FG.z_dim).to('cuda:{}'.format(
        FG.devices[0]))  # generator net (decoder) G(x|z)

    if FG.load_ckpt:
        D.load_state_dict(
            torch.load(os.path.join(FG.checkpoint_root, save_dir, 'D.pth')))
        G.load_state_dict(