Example #1
File: test.py Project: ufownl/toy_gan
def test(batch_size, seed_size, filters, context):
    mx.random.seed(int(time.time()))

    net_g = Generator(filters)
    net_g.load_parameters("model/toy_gan.generator.params", ctx=context)

    seeds = mx.nd.random_normal(shape=(batch_size, seed_size, 1, 1), ctx=context)
    imgs = net_g(seeds)
    for i in range(imgs.shape[0]):
        plt.subplot(imgs.shape[0] // 8 + 1, 8, i + 1)
        visualize(imgs[i])
    plt.show()
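None of these snippets include the `visualize` helper they call; each project defines its own. A minimal sketch for Example #1, assuming the helper only needs to rescale the generator's CHW output to [0, 1] and show it with Matplotlib (the actual toy_gan implementation may differ):

import matplotlib.pyplot as plt

def visualize(img):
    # Hypothetical helper: display one CHW image tensor (e.g. an MXNet NDArray) with Matplotlib.
    arr = img.asnumpy().transpose(1, 2, 0)                              # CHW -> HWC
    arr = (arr - arr.min()) / max(float(arr.max() - arr.min()), 1e-8)   # rescale to [0, 1]
    plt.imshow(arr)
    plt.axis("off")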
Example #2
def view_dataset(self, mode='train'):
    train_pairs, valid_pairs = dataset.prepare_data_CANCER()
    if mode == 'train':
        train_augmentors = self.train_augmentors()
        ds = dataset.DatasetSerial(train_pairs,
                                   shape_augs=iaa.Sequential(train_augmentors[0]),
                                   input_augs=iaa.Sequential(train_augmentors[1]))
    else:
        infer_augmentors = self.infer_augmentors()
        ds = dataset.DatasetSerial(valid_pairs,
                                   shape_augs=iaa.Sequential(infer_augmentors))
    dataset.visualize(ds, 1)
    return
Example #3
def view_dataset(self, mode='train'):
    train_pairs, valid_pairs = getattr(dataset, ('prepare_%s_data' % self.dataset))()
    if mode == 'train':
        train_augmentors = self.train_augmentors()
        ds = dataset.DatasetSerial(train_pairs, has_aux=False,
                                   shape_augs=iaa.Sequential(train_augmentors[0]),
                                   input_augs=iaa.Sequential(train_augmentors[1]))
    else:
        infer_augmentors = self.infer_augmentors()  # HACK
        ds = dataset.DatasetSerial(valid_pairs, has_aux=False,
                                   shape_augs=iaa.Sequential(infer_augmentors[0]))
    dataset.visualize(ds, 4)
    return
Example #4
def view_dataset(self, mode='train'):
    train_pairs, valid_pairs = dataset.prepare_PAIP2020_PANDA(
        self.fold_idx)
    if mode == 'train':
        train_augmentors = self.train_augmentors()
        ds = dataset.DatasetSerial(train_pairs, self.tile_size,
                                   self.num_tile, True)
    else:
        infer_augmentors = self.infer_augmentors()  # HACK
        ds = dataset.DatasetSerial(valid_pairs, self.tile_size,
                                   self.num_tile, False)
    dataset.visualize(ds, 1)
    return
Example #5
def detect_plate(wpod, vocab, ocr, raw, dims, threshold, plt_hw, beam,
                 beam_size, context):
    h = raw.shape[0]
    w = raw.shape[1]
    # scale factor: the short side of the image becomes min(288 * aspect_ratio, 608)
    f = min(288 * max(h, w) / min(h, w), 608) / min(h, w)
    ts = time.time()
    img = mx.image.imresize(raw,
                            int(w * f) + (0 if w % 16 == 0 else 16 - w % 16),
                            int(h * f) + (0 if h % 16 == 0 else 16 - h % 16))
    x = color_normalize(img).transpose((2, 0, 1)).expand_dims(0)
    y = wpod(x.as_in_context(context))
    probs = y[0, :, :, 0]
    affines = y[0, :, :, 2:]
    labels = plate_labels(img, probs, affines, dims, 16, threshold)
    plates = reconstruct_plates(raw, [pts for pts, _ in labels],
                                (plt_hw[1], plt_hw[0]))
    print("wpod profiling: %f" % (time.time() - ts))
    plt.subplot(math.ceil((len(plates) + 2) / 2), 2, 1)
    visualize(img, [(pts.reshape((-1)).asnumpy().tolist(), str(prob))
                    for pts, prob in labels])
    plt.subplot(math.ceil((len(plates) + 2) / 2), 2, 2)
    visualize(probs > threshold)
    for i, plate in enumerate(plates):
        plt.subplot(math.ceil((len(plates) + 2) / 2), 2, i + 3)
        visualize(plate)
        print("plate[%d]:" % i)
        recognize_plate(vocab, ocr, plate, beam, beam_size, context)
    plt.show()
Example #6
def test(dataset, batch_size, filters, context):
    datasets = {
        "facades": True,
        "cityscapes": False,
        "maps": False,
        "edges2shoes": False,
        "edges2handbags": False
    }
    mx.random.seed(int(time.time()))

    print("Loading dataset...", flush=True)
    validating_set = load_dataset(dataset,
                                  "val",
                                  batch_size,
                                  is_reversed=datasets[dataset])

    net_g = UnetGenerator(3, filters)
    net_g.load_parameters("model/{}.generator.params".format(dataset),
                          ctx=context)

    print("Testing...", flush=True)
    for batch in validating_set:
        real_in = batch.data[0].as_in_context(context)
        real_out = batch.data[1].as_in_context(context)
        fake_out = net_g(real_in)

        for i in range(batch_size):
            plt.subplot(3, batch_size, i + 1)
            visualize(real_in[i])
            plt.subplot(3, batch_size, i + batch_size + 1)
            visualize(real_out[i])
            plt.subplot(3, batch_size, i + batch_size * 2 + 1)
            visualize(fake_out[i])
        plt.show()
Example #7
        return A.Compose(
            train_transform,
            p=base_proba,
        )

    def get_validation_augmentation(self):
        test_transform = []
        return A.Compose(test_transform)

    def get_preprocessing(self, preprocessing_fn):
        _transform = [A.Lambda(image=preprocessing_fn)]
        return A.Compose(_transform)


if __name__ == "__main__":
    from dataset import Dataset, visualize

    data_path = "data"
    dataset = Dataset(
        images_dir=os.path.join(data_path, "test"),
        masks_dir=os.path.join(data_path, "train_annotations"),
        classes=["bulk"],
        augmentations=Augmentor.get_training_augmentation(),
        preprocessing=None,
    )

    image, mask = dataset[5]

    visualize(image=image, bulk_mask=mask[..., 0].squeeze())
Example #8
    def predict_dataset(
        self,
        dataset: Dataset,
        output_folder: str = None,
        if_blobs: bool = False,
        review: bool = False,
        binary_threshold: float = None,
    ):
        elements_count = len(dataset)
        for i in range(elements_count):
            image, mask_true = dataset[i]

            # mask_pred is an np array of shape (256, 256, 1)
            # with values in range(0,1)
            mask_pred = (self.predict(image) * 255).astype(np.uint8)

            # binary mask if needed
            if binary_threshold:
                mask_pred = ((mask_pred > binary_threshold) * 255).astype(
                    np.uint8)

            # find blobs on prediction
            if if_blobs:
                detector = SkimageBlobDetector(images=None)
                # detector = OpenCVBlobDetector(images=None)
                try:
                    with open(f"best_blob_params_{detector.name}.pickle",
                              "rb") as f:
                        ext_params = pickle.load(f)

                except Exception as e:
                    print(e)
                    ext_params = {}

                try:
                    noise_threshold = ext_params.pop("noise_threshold")
                    # filter little dark gray noise
                    mask_pred[mask_pred < noise_threshold] = 0

                    min_radius = ext_params.pop("min_radius")
                    max_radius = ext_params.pop("max_radius")
                except Exception as e:
                    print("Params not found", e)
                    min_radius = 0
                    max_radius = np.inf

                # invert colors only for detector
                keypoints = self.detect_blobs(
                    image=(255 - mask_pred) if detector.name == "opencv" else mask_pred,
                    detector=detector,
                    ext_params=ext_params,
                )

                keypoints_filtered = detector.filter_keypoints(
                    keypoints=keypoints,
                    min_radius=min_radius,
                    max_radius=max_radius,
                )

                mask_pred = detector.draw_blobs(
                    image=mask_pred,
                    keypoints=keypoints_filtered,
                )

            # obtaining filename for saving if needed
            (
                base_name,
                file_format,
            ) = os.path.split(dataset.masks_fps[i])[-1].split(".")

            save_name = os.path.join(
                output_folder if output_folder else "",
                f"{base_name}"
                f"{'_' + str(i) if self.if_crop else ''}"
                f"{'_' + str(len(keypoints_filtered)) if if_blobs else ''}"
                f"{'_blobs' if if_blobs else ''}."
                f"{file_format}",
            )

            print(save_name)

            # mode for saving image
            if review:
                visualize(
                    save_name=save_name,
                    image=denormalize(image.squeeze()),
                    mask_true=mask_true,
                    mask_pred=mask_pred,
                )
            else:
                self.save_image(
                    mask_pred,
                    filename=save_name,
                )
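A hypothetical call for the method above (the predictor object and dataset names are assumptions, not shown on this page):

predictor.predict_dataset(
    dataset=test_dataset,        # any Dataset yielding (image, mask_true) pairs
    output_folder="predictions",
    if_blobs=True,               # run blob detection on the predicted mask
    review=True,                 # pass image / true mask / prediction to visualize instead of saving the raw mask
    binary_threshold=127,        # binarize the 0-255 prediction before blob detection
)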
Example #9
testset = NuclieTestDataset(test_data_dir)
test_loader = DataLoader(dataset=testset,
                         drop_last=False,
                         sampler=SequentialSampler(testset),
                         batch_size=1)

model = model.cuda()
with torch.no_grad():
    for step, image in enumerate(test_loader):
        print(image.shape)
        imgshow = image.clone()
        imgshow = imgshow.squeeze()
        imgshow = imgshow.permute(1, 2, 0).numpy()
        imgshow = imgshow.astype(int)
        visualize(imgshow)
        image = image.cuda()
        pred_mask_1 = model.forward(image)
        print(pred_mask_1.shape)

        mask_img_1 = mask_convert(pred_mask_1)
        ### normalize values to 0-1 ###
        mask_img_1 = cv.normalize(mask_img_1, None, 0, 1, cv.NORM_MINMAX)
        #print(mask_img_1)
        ### round values ###
        #mask_img_1 = np.round(mask_img_1)
        #mask_img_2 = np.round(mask_img_2)
        mask_img_1 = np.round(mask_img_1)
        visualize(mask_img_1)
        break
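`mask_convert` is not defined in this excerpt; a minimal sketch, assuming the model returns a single-channel mask tensor of shape (1, 1, H, W) that just needs to become a NumPy array for OpenCV and `visualize`:

def mask_convert(pred):
    # Hypothetical helper: (1, 1, H, W) CUDA tensor -> H x W NumPy array
    return pred.detach().squeeze().cpu().numpy()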
Example #10
linear_svm_model = svm.LinearSVC()
linear_svm_model.fit(features, labels)

test_data = dataset.get_test_data()
features, labels = dataset.get_imgs_feature(test_data, hog, hog_params)
results = linear_svm_model.predict(features)

util.precision_and_recall_evaluate(results, labels)

pos, neg = dataset.get_predict_data()
for i in range(5):
    rects = util.detect_person(pos[i], hog, hog_params, linear_svm_model)
    rects = util.non_max_suppression_fast(np.array(rects), 0.5)
    print(pos[i])
    print(rects)
    dataset.visualize(pos[i], rects)

# # imgs = []
# # img_paths = []
# rects = util.detect_person(
#     'dataset/test/pos/crop_000001.jpg', hog, hog_params, linear_svm_model)
# print(np.array(rects))
# rect = util.non_max_suppression_fast(np.array(rects), 0.5)
# print(rect)
# # images = glob.glob('inria/sliding/*.jpg')
# # for image in images:
# #     feature = hog.compute(cv2.imread(image),winStride,padding,locations)
# #     feature = feature.reshape(-1)
# #     imgs.append(feature)
# #     img_paths.append(image)
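`util.non_max_suppression_fast` is not shown either; a sketch of the standard greedy NMS it presumably implements, assuming `rects` is an array of (x1, y1, x2, y2) boxes and the second argument is the overlap threshold:

import numpy as np

def non_max_suppression_fast(boxes, overlap_thresh):
    # Hypothetical sketch: greedy non-max suppression over (x1, y1, x2, y2) boxes.
    if len(boxes) == 0:
        return boxes
    boxes = boxes.astype(float)
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = np.argsort(y2)
    keep = []
    while len(order) > 0:
        i = order[-1]              # keep the box with the largest y2 still in play
        keep.append(i)
        # overlap of every remaining box with box i
        xx1 = np.maximum(x1[i], x1[order[:-1]])
        yy1 = np.maximum(y1[i], y1[order[:-1]])
        xx2 = np.minimum(x2[i], x2[order[:-1]])
        yy2 = np.minimum(y2[i], y2[order[:-1]])
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)
        overlap = (w * h) / area[order[:-1]]
        # drop box i and every box that overlaps it more than the threshold
        order = np.delete(order, np.concatenate(
            ([len(order) - 1], np.where(overlap > overlap_thresh)[0])))
    return boxes[keep].astype(int)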