Example #1
 def predict(self, fold, val_indexes):
     prefix = ('fold' + str(fold) + "_") if self.test else ""
     val_dataset = SequentialDataset(self.ds, val_indexes, stage='test', config=self.config, transforms=self.val_transforms)
     val_dl = PytorchDataLoader(val_dataset, batch_size=self.config.predict_batch_size, num_workers=self.num_workers, drop_last=False)
     model = read_model(self.folder, fold)
     pbar = tqdm.tqdm(val_dl, total=len(val_dl))
     for data in pbar:
         samples = data['image']
         # predicted = predict(model, samples, flips=self.flips)
         predicted = predict8tta(model, samples, self.config.sigmoid)
         self.process_batch(predicted, model, data, prefix=prefix)
     self.post_predict_action(prefix=prefix)
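Example #1 delegates the actual inference to a predict8tta helper that is not shown in the snippet. The sketch below is a hedged guess at what such an 8-way test-time-augmentation predictor typically does (average over the four 90-degree rotations, each with and without a horizontal flip); the name predict8tta_sketch, the exact transform set, and the NCHW in/out layout are assumptions, not the repository's confirmed implementation.

import torch

def predict8tta_sketch(model, samples, use_sigmoid=True):
    # samples: NCHW tensor; predictions are un-flipped/un-rotated before averaging
    model.eval()
    preds = []
    with torch.no_grad():
        for rot in range(4):                  # 0, 90, 180, 270 degree rotations
            for flip in (False, True):        # with and without a horizontal flip
                x = torch.rot90(samples, rot, dims=(2, 3))
                if flip:
                    x = torch.flip(x, dims=(3,))
                y = model(x)
                if use_sigmoid:
                    y = torch.sigmoid(y)
                if flip:                      # undo the flip
                    y = torch.flip(y, dims=(3,))
                y = torch.rot90(y, -rot, dims=(2, 3))  # undo the rotation
                preds.append(y)
    return torch.stack(preds).mean(dim=0)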
Example #2
    def predict(self, fold, val_indexes, device=None, num_tiles=None):

        if num_tiles is None:
            num_tiles = 1

        f = factors(num_tiles)
        X_TILE_SIZE = 512 * f[1]
        Y_TILE_SIZE = 512 * f[0]

        prefix = ('fold' + str(fold) + "_") if self.test else ""
        val_dataset = SequentialDataset(self.ds,
                                        val_indexes,
                                        stage='test',
                                        config=self.config,
                                        transforms=self.val_transforms)
        val_dl = PytorchDataLoader(val_dataset,
                                   batch_size=self.config.predict_batch_size,
                                   num_workers=self.num_workers,
                                   drop_last=False)
        model = read_model(self.folder, fold)
        pbar = tqdm.tqdm(val_dl, total=len(val_dl))

        for data in pbar:
            samples = data['image']

            predicted = np.zeros((samples.shape[0], ) + samples.shape[-2:] +
                                 (samples.shape[1], ),
                                 dtype=np.float32)

            for x in range(0, samples.shape[3], X_TILE_SIZE):
                x_max = min([samples.shape[3], x + X_TILE_SIZE])

                for y in range(0, samples.shape[2], Y_TILE_SIZE):
                    y_max = min([samples.shape[2], y + Y_TILE_SIZE])

                    p = predict8tta(model, samples[0:1, :, y:y_max, x:x_max],
                                    self.config.sigmoid)

                    predicted[0, y:y_max, x:x_max, :] = p

            self.process_batch(predicted, model, data, prefix=prefix)

        self.post_predict_action(prefix=prefix)
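Example #2 tiles large images before prediction, but the factors(num_tiles) helper it relies on is not defined in the snippet. Below is a minimal sketch under the assumption that it returns a (rows, cols) pair whose product is num_tiles and which is as close to square as possible, so X_TILE_SIZE and Y_TILE_SIZE stay balanced; the real helper may behave differently.

import math

def factors(num_tiles):
    # Hypothetical: split num_tiles into the most square (rows, cols) grid,
    # e.g. factors(1) -> (1, 1), factors(4) -> (2, 2), factors(6) -> (2, 3).
    rows = int(math.sqrt(num_tiles))
    while num_tiles % rows != 0:   # walk down to the nearest divisor
        rows -= 1
    return rows, num_tiles // rows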
Example #3
    def predict(self, fold, val_indexes, weight_dir, verbose=False):
        print("run eval.Evaluator.predict()...")
        prefix = "" if not fold or not self.test else ('fold' + str(fold) +
                                                       "_")
        print("prefix:", prefix)

        print("Creating datasets within pytorch_utils/eval.py()...")
        val_dataset = SequentialDataset(self.ds,
                                        val_indexes,
                                        stage='test',
                                        config=self.config,
                                        transforms=self.val_transforms)
        val_dl = PytorchDataLoader(val_dataset,
                                   batch_size=self.config.predict_batch_size,
                                   num_workers=self.num_workers,
                                   drop_last=False)
        print("len val_dl:", len(val_dl))
        #print ("weights_dir:", self.weights_dir)
        model = read_model(weight_dir)
        #model = read_model(self.config, fold)

        pbar = tqdm.tqdm(val_dl, total=len(val_dl))

        for data in pbar:
            #print ("pbar data:", data)
            samples = torch.autograd.Variable(data['image'],
                                              volatile=True).cuda()
            predicted = predict(model, samples, flips=self.flips)
            if verbose:
                print("  eval.py - Evaluator - predict() - len samples:",
                      len(samples))
                print("  eval.py - Evaluator - predict()- samples.shape:",
                      samples.shape)
                print("  eval.py - Evaluator - predict() - predicted.shape:",
                      predicted.shape)
                print("  eval.py - Evaluator - predict() - predicted.mean:",
                      predicted.mean())
                print("  eval.py - Evaluator - predict() - predicted.max:",
                      predicted.max())

            self.process_batch(predicted, model, data, prefix=prefix)
        self.post_predict_action(prefix=prefix)
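Example #3 above, and several of the examples that follow, wrap the batch in torch.autograd.Variable(..., volatile=True). That was the pre-0.4 way of disabling autograd during inference; in current PyTorch, Variable is a no-op wrapper and volatile only triggers a deprecation warning. A hedged sketch of the equivalent loop with torch.no_grad() follows (the predict callable and the batch layout are taken from the examples above, not re-verified).

import torch

def run_inference(model, loader, predict_fn, flips=None, device="cuda"):
    # Same loop without the deprecated Variable/volatile API: torch.no_grad()
    # disables autograd bookkeeping for everything inside the block, which is
    # what volatile=True used to do per tensor.
    model.eval()
    outputs = []
    with torch.no_grad():
        for data in loader:
            samples = data['image'].to(device, non_blocking=True)
            outputs.append(predict_fn(model, samples, flips=flips))
    return outputs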
Example #4
 def predict(self, fold, val_indexes):
     prefix = ('fold' + str(fold) +
               "_") if (self.test and fold is not None) else ""
     val_dataset = SequentialDataset(self.ds,
                                     val_indexes,
                                     stage='test',
                                     config=self.config,
                                     transforms=self.val_transforms)
     val_dl = PytorchDataLoader(val_dataset,
                                batch_size=self.config.predict_batch_size,
                                num_workers=self.num_workers,
                                drop_last=False)
     model = read_model(self.config, fold)
     pbar = tqdm.tqdm(val_dl, total=len(val_dl))
     for data in pbar:
         samples = torch.autograd.Variable(data['image'],
                                           volatile=True).cuda()
         predicted = predict(model, samples, flips=self.flips)
         self.process_batch(predicted, model, data, prefix=prefix)
     self.post_predict_action(prefix=prefix)
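The examples call read_model with different argument combinations (a folder plus fold, a bare weight_dir, a config plus fold, and later a weight_dir plus fold), but the loader itself is never shown. Purely as an illustration of what such a checkpoint loader could look like, here is a hypothetical stand-in; the file naming scheme and the assumption that the whole model object was saved with torch.save are guesses, not the library's actual API.

import os
import torch

def read_model_sketch(weights_dir, fold=None):
    # Hypothetical loader: pick a per-fold checkpoint file and return the
    # model in eval mode, mapped to CPU when no GPU is available.
    name = "best.pth" if fold is None else "fold{}_best.pth".format(fold)
    path = os.path.join(weights_dir, name)
    map_location = None if torch.cuda.is_available() else "cpu"
    model = torch.load(path, map_location=map_location)
    model.eval()
    return model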
Example #5
 def predict(self, fold, val_indexes, weight_dir):
     print ("run eval.Evaluator.predict()...")
     prefix = ('fold' + str(fold) + "_") if (self.test and fold is not None) else ""
     print ("prefix:", prefix)
     print ("Creating datasets within pytorch_utils.py eval()...")
     val_dataset = SequentialDataset(self.ds, val_indexes, stage='test', config=self.config, transforms=self.val_transforms)
     val_dl = PytorchDataLoader(val_dataset, batch_size=self.config.predict_batch_size, num_workers=self.num_workers, drop_last=False)
     print ("len val_dl:", len(val_dl))
     #print ("weights_dir:", self.weights_dir)
     model = read_model(weight_dir, fold)
     #model = read_model(self.config, fold)
     #print ("glurp")
     pbar = tqdm.tqdm(val_dl, total=len(val_dl))
     #print ("tqdm pbar:", pbar)
     for data in pbar:
         #print ("pbar data:", data)
         samples = torch.autograd.Variable(data['image'], volatile=True).cuda()
         #print ("samples:", samples)
         predicted = predict(model, samples, flips=self.flips)
         self.process_batch(predicted, model, data, prefix=prefix)
     self.post_predict_action(prefix=prefix)
Example #6
    def predict(self, fold, val_indexes, weight_dir, verbose=False):
        print("run eval.Evaluator.predict()...")
        prefix = ('fold' + str(fold) +
                  "_") if (self.test and fold is not None) else ""
        print("prefix:", prefix)
        print("Creating datasets within pytorch_utils/eval.py()...")
        val_dataset = SequentialDataset(self.ds,
                                        val_indexes,
                                        stage='test',
                                        config=self.config,
                                        transforms=self.val_transforms)
        val_dl = PytorchDataLoader(val_dataset,
                                   batch_size=self.config.predict_batch_size,
                                   num_workers=self.num_workers,
                                   drop_last=False)
        print("len val_dl:", len(val_dl))
        # print ("weights_dir:", self.weights_dir)
        model = read_model(weight_dir, fold)
        # model = read_model(self.config, fold)
        # print ("glurp")
        pbar = tqdm.tqdm(val_dl, total=len(val_dl))
        # print ("tqdm pbar:", pbar)
        for data in pbar:
            # print ("pbar data:", data)
            print("data['image'].shape:", data['image'].shape)
            samples = data['image'].cuda()
            predicted = predict(model, samples, flips=self.flips)
            if verbose:
                print("  eval.py -  - Evaluator - predict() - len samples:",
                      len(samples))
                print("  eval.py - Evaluator - predict()- samples.shape:",
                      samples.shape)
                print("  eval.py - Evaluator - predict() - predicted.shape:",
                      predicted.shape)
                # print("  eval.py - Evaluator - predict() - torchsummary.summary(model, (4, 3, 1344, 1344):", torchsummary.summary(model, samples.shape))

            self.process_batch(predicted, model, data, prefix=prefix)
        self.post_predict_action(prefix=prefix)
Example #7
    def predict(self, fold, val_indexes, weight_dir, verbose=False):
        global MOD
        global FLIPS
        global BORDER
        global PREFIX
        global SAVE_DIR
        n_threads_cpu = 12

        print("run eval.Evaluator.predict()...")
        prefix = ('fold' + str(fold) +
                  "_") if (self.test and fold is not None) else ""
        print("prefix:", prefix)
        print("Creating datasets within pytorch_utils/eval.py()...")
        if not torch.cuda.is_available():
            self.num_workers = n_threads_cpu

        val_dataset = SequentialDataset(self.ds,
                                        val_indexes,
                                        stage='test',
                                        config=self.config,
                                        transforms=self.val_transforms)
        val_dl = PytorchDataLoader(val_dataset,
                                   batch_size=self.config.predict_batch_size,
                                   num_workers=self.num_workers,
                                   drop_last=False)
        print("len val_dl:", len(val_dl))
        print("self.num_workers", self.num_workers)
        #print ("weights_dir:", self.weights_dir)
        model = read_model(weight_dir, fold)

        # set global variables
        FLIPS = self.flips
        BORDER = self.border
        SAVE_DIR = self.save_dir
        PREFIX = prefix
        MOD = model

        pbar = tqdm.tqdm(val_dl, total=len(val_dl))
        if torch.cuda.is_available():
            for data in pbar:
                samples = torch.autograd.Variable(data['image'],
                                                  volatile=True).cuda()
                predicted = predict(model, samples, flips=self.flips)
                if verbose:
                    print(
                        "  eval.py -  - Evaluator - predict() - len samples:",
                        len(samples))
                    print("  eval.py - Evaluator - predict()- samples.shape:",
                          samples.shape)
                    print(
                        "  eval.py - Evaluator - predict() - predicted.shape:",
                        predicted.shape)
                    print(
                        "  eval.py - Evaluator - predict() - data['image'].shape:",
                        data['image'].shape)
                self.process_batch(predicted, model, data, prefix=prefix)
        else:
            for data in pbar:
                samples = torch.autograd.Variable(data['image'], volatile=True)
                predicted = predict(model, samples, flips=self.flips)
                if verbose:
                    print(
                        "  eval.py -  - Evaluator - predict() - len samples:",
                        len(samples))
                    print("  eval.py - Evaluator - predict()- samples.shape:",
                          samples.shape)
                    print(
                        "  eval.py - Evaluator - predict() - predicted.shape:",
                        predicted.shape)
                    print(
                        "  eval.py - Evaluator - predict() - data['image'].shape:",
                        data['image'].shape)
                self.process_batch(predicted, model, data, prefix=prefix)
        self.post_predict_action(prefix=prefix)
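Example #7 duplicates the whole inference loop just to toggle the .cuda() call. As a design note, the same behaviour can usually be obtained with a single loop by resolving the device once up front; the sketch below assumes the predict and process_batch callables behave as in the examples above, and is not the repository's actual refactoring.

import torch

def run_predict_loop(model, loader, predict_fn, process_batch, flips=None, prefix=""):
    # One loop for both GPU and CPU: pick the device once, move each batch to it.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    model.eval()
    with torch.no_grad():
        for data in loader:
            samples = data['image'].to(device, non_blocking=True)
            predicted = predict_fn(model, samples, flips=flips)
            process_batch(predicted, model, data, prefix=prefix)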