Example #1
 def get_minibatch(self,
                   list_used,
                   drawing_per_char=20,
                   mb_dim=32):
     mb_x_i = np.zeros((mb_dim, self.n_samples, self.x_dim, self.x_dim, 1))
     mb_y_i = np.zeros((mb_dim, self.n_samples), dtype=int)  # np.int was removed in NumPy 1.24
     mb_x_hat = np.zeros((mb_dim, self.x_dim, self.x_dim, 1))
     mb_y_hat = np.zeros((mb_dim,), dtype=int)
     for i in range(mb_dim):
         ind = 0
         pinds = np.random.permutation(self.n_samples)
         class_indexes = np.random.choice(list_used.shape[0], self.y_dim, False)
         x_hat_class = np.random.randint(self.y_dim)
         for j, cur_class in enumerate(class_indexes): #each class
             example_inds = np.random.choice(drawing_per_char, self.n_samples_per_class, False)
             drawing_list = np.sort(glob.glob(os.path.join(list_used[cur_class][0], '*.png')))
             for eind in example_inds:
                 cur_data = transform(get_image(drawing_list[eind]),
                                      105, 105, resize_height=self.x_dim, resize_width=self.x_dim, crop=False)
                 mb_x_i[i, pinds[ind], :, :, 0] = np.rot90(cur_data, np.random.randint(4))
                 mb_y_i[i, pinds[ind]] = j
                 ind += 1
             if j == x_hat_class:
                 eval_idx = np.random.choice(drawing_per_char, 1, False)[0]
                 cur_data = transform(get_image(drawing_list[eval_idx]),
                                      105, 105, resize_height=self.x_dim, resize_width=self.x_dim, crop=False)
                 mb_x_hat[i, :, :, 0] = np.rot90(cur_data, np.random.randint(4))
                 mb_y_hat[i] = j
     return mb_x_i, mb_y_i, mb_x_hat, mb_y_hat
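The snippet leans on two helpers it does not define, get_image and transform. A minimal sketch of both, inferred from the call sites above (105x105 Omniglot-style drawings resized to x_dim); the bodies are assumptions, not the original implementations:

import numpy as np
import skimage.io
import skimage.transform

def get_image(path):
    # Load a drawing as a 2-D float array in [0, 1].
    return skimage.io.imread(path, as_gray=True)

def transform(image, input_height, input_width,
              resize_height=28, resize_width=28, crop=False):
    # Optionally center-crop to (input_height, input_width), then resize.
    if crop:
        h, w = image.shape[:2]
        top, left = (h - input_height) // 2, (w - input_width) // 2
        image = image[top:top + input_height, left:left + input_width]
    return skimage.transform.resize(image, (resize_height, resize_width))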
Example #2
    def __getitem__(self, index):

        key = self.samples[self.idx[index]]
        image, seg = self.dataset[key[0]]["slices"][str(key[1])]
        label = self.labels[index]

        image = Image.fromarray(image)

        # Make the segmentation the entire image if it isn't in masks_selector.
        if not self.masks_selector[index]:
            seg = np.ones(seg.shape)

        # Make the segmentation the entire image if it is the negative class.
        if int(label) == 0:
            seg = np.ones(seg.shape)

        # If there is a segmentation, blur it a bit.
        if (self.blur > 0) and (seg.max() != 0):
            seg = skimage.filters.gaussian(seg, self.blur)
            seg = seg / seg.max()

        seg = (seg > 0) * 1.
        seg = Image.fromarray(seg)

        if self.mode == "train":
            image, seg = transform(image, seg, True, self.new_size)
        else:
            image, seg = transform(image, seg, False, self.new_size)

        # Control condition where we mask all data. Used to see if traditional
        # training works.
        if self.mask_all:
            image *= seg

        return (image, seg), int(label), self.masks_selector[index]
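The dataset assumes a module-level transform(image, seg, train, new_size) that applies the same geometric ops to the image and its mask. A minimal sketch under that assumption, using torchvision's functional API; the body is illustrative, not the original:

import random
import torchvision.transforms.functional as TF
from torchvision.transforms.functional import InterpolationMode

def transform(image, seg, train, new_size):
    # Resize both; nearest-neighbor keeps the mask binary.
    image = TF.resize(image, [new_size, new_size])
    seg = TF.resize(seg, [new_size, new_size],
                    interpolation=InterpolationMode.NEAREST)
    if train and random.random() > 0.5:
        # Apply the same random flip to image and mask.
        image, seg = TF.hflip(image), TF.hflip(seg)
    return TF.to_tensor(image), TF.to_tensor(seg)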
Example #3
def crop(img, center, scale, res, rot=0):
    img = im_to_numpy(img)

    # Preprocessing for efficient cropping
    ht, wd = img.shape[0], img.shape[1]
    sf = scale * 200.0 / res[0]
    if sf < 2:
        sf = 1
    else:
        # np.math was removed in NumPy 2.0; np.floor works everywhere.
        new_size = int(np.floor(max(ht, wd) / sf))
        new_ht = int(np.floor(ht / sf))
        new_wd = int(np.floor(wd / sf))
        if new_size < 2:
            return torch.zeros(res[0], res[1], img.shape[2]) \
                if len(img.shape) > 2 else torch.zeros(res[0], res[1])
        else:
            img = skimage.transform.resize(img, [new_ht, new_wd])
            center = center * 1.0 / sf
            scale = scale / sf

    # print(scale)

    # Upper left point
    ul = np.array(transform([0, 0], center, scale, res, invert=1))
    # Bottom right point
    br = np.array(transform(res, center, scale, res, invert=1))

    # print([ul, br])

    # Padding so that when rotated proper amount of context is included
    pad = int(np.linalg.norm(br - ul) / 2 - float(br[1] - ul[1]) / 2)
    if rot != 0:
        ul -= pad
        br += pad

    new_shape = [br[1] - ul[1], br[0] - ul[0]]
    if len(img.shape) > 2:
        new_shape += [img.shape[2]]
    new_img = np.zeros(new_shape)

    # Range to fill new array
    new_x = max(0, -ul[0]), min(br[0], len(img[0])) - ul[0]
    new_y = max(0, -ul[1]), min(br[1], len(img)) - ul[1]
    # Range to sample from original image
    old_x = max(0, ul[0]), min(len(img[0]), br[0])
    old_y = max(0, ul[1]), min(len(img), br[1])
    new_img[new_y[0]:new_y[1], new_x[0]:new_x[1]] = img[old_y[0]:old_y[1], old_x[0]:old_x[1]]

    if rot != 0:
        # scipy.misc.imrotate was removed in SciPy 1.3; skimage's rotate
        # returns a float image, which the resize below handles fine.
        new_img = skimage.transform.rotate(new_img, rot)
        # Remove padding
        new_img = new_img[pad:-pad, pad:-pad]

    new_img = im_to_torch(skimage.transform.resize(new_img, res))
    return new_img
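crop also relies on a point-level transform(pt, center, scale, res, invert) helper, called at the "Upper left point" / "Bottom right point" steps. A minimal sketch following the stacked-hourglass convention, assuming scale * 200 pixels is the reference box size; the original's exact off-by-one handling is omitted:

import numpy as np

def get_transform(center, scale, res):
    # Map original-image coordinates into the res[0] x res[1] crop.
    h = 200.0 * scale
    t = np.eye(3)
    t[0, 0] = res[1] / h
    t[1, 1] = res[0] / h
    t[0, 2] = res[1] * (-center[0] / h + 0.5)
    t[1, 2] = res[0] * (-center[1] / h + 0.5)
    return t

def transform(pt, center, scale, res, invert=0):
    t = get_transform(center, scale, res)
    if invert:
        t = np.linalg.inv(t)
    new_pt = np.array([pt[0], pt[1], 1.0])
    return (t @ new_pt)[:2].astype(int)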
Example #4
 def __getitem__(self, idx):
     # str.split(' ') splits the string on spaces
     image_path = self.root_dir + self.files_list[idx].split(' ')[0]
     if not os.path.isfile(image_path):
         print(image_path + ' does not exist!')
         return None
     image = io.imread(image_path)  # use skimage
     label = int(self.files_list[idx].split(' ')[1])
     if self.transform:
         image = self.transform(image)
     return image, label
Example #5
def pad_to_bounding_box(image, offset_height, offset_width, target_height,
                        target_width):
    transform = torchvision.transforms.Pad(
        (offset_width, offset_height, 0, 0))  # (left, top, right, bottom)
    image = transform(image)
    (x, y) = image.size
    if target_height > y:
        transform = torchvision.transforms.Pad((0, 0, 0, target_height - y))
        image = transform(image)
    if target_width > x:
        transform = torchvision.transforms.Pad((0, 0, target_width - x, 0))
        image = transform(image)
    return image
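A usage sketch of the function above, mirroring tf.image.pad_to_bounding_box semantics (pad to the offset, then pad out to the target size); assumes torchvision is imported:

from PIL import Image

img = Image.new('RGB', (100, 100), 'white')   # (width, height)
padded = pad_to_bounding_box(img, offset_height=8, offset_width=12,
                             target_height=224, target_width=224)
print(padded.size)  # (224, 224)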
Example #6
 def get_picture(self, pic_name, transform):
     img = skimage.io.imread(pic_name)
     img = skimage.transform.resize(img, (256, 256))
     img = np.asarray(img, dtype=np.float32)
     # skimage.io.imshow(img)
     # plt.show()
     return transform(img)
Example #7
    def __getitem__(self, index):

        video = os.path.join(self.video_root, self.num_video[index])
        frames_list = sorted(listdir(video))
        # print(len(frames_list))

        frames = []
        labels = []
        for img in frames_list:

            # image = np.load(os.path.join(video,img))
            # image = torch.from_numpy(image)
            image = io.imread(os.path.join(video, img))
            image = transform(image)
            image = image.unsqueeze(dim=0)
            image = vgg_feature(image.cuda()).view(-1)
            # print(image.size())
            frames.append(image)

        frames = torch.stack(frames).cuda()
        # frames = vgg_feature(frames)
        # print("frames",frames.size())
        print("num_video", self.num_video[index])
        return self.num_video[index], frames
Example #8
def main():
    # initialise runner
    logging.info('==> Initializing ModelInferRunner ...')
    runner = ModelInferRunner(args)

    # load model
    logging.info('==> Loading model ...')
    model = runner.load_model(
        update_model_fn=create_update_model_fn(),
        update_state_dict_fn=create_update_state_dict_fn())
    model.eval()

    logging.info('==> Loading an image from {} ...'.format(args.image))
    image = Image.open(args.image)

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])

    x = transform(image)
    y = model(x.unsqueeze(0))  # add a batch dimension; calling the module also runs hooks
    label = torch.argmax(F.softmax(y, dim=1), dim=1)[0]

    print('Predicted label index: {}'.format(label))
Example #9
def pad_to_bounding_box(image, offset_y, offset_x, target_y, target_x):
    tensor2PIL = transforms.ToPILImage()
    PIL2tensor = transforms.ToTensor()
    transform = torchvision.transforms.Pad((offset_x, offset_y, 0, 0),
                                           fill=0)  #left,top,right,bottom
    image = tensor2PIL(image)
    image = transform(image)
    (x, y) = image.size
    if target_y > y:
        transform = torchvision.transforms.Pad((0, 0, 0, target_y - y))
        image = transform(image)
    if target_x > x:
        transform = torchvision.transforms.Pad((0, 0, target_x - x, 0))
        image = transform(image)
    image = PIL2tensor(image)
    return image
Example #10
def get_patch(path, size):
    patch = Image.open(path).convert('RGB')
    #patch.show()

    patch = transform(patch, size)

    return patch
Example #11
def stretch_img(data, contrast=0.25):
    """ Apply z-scale stretch to image """

    transform = ZScaleInterval(contrast=contrast)
    data_stretched = transform(data)

    return data_stretched
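Here transform is astropy's ZScaleInterval, which is callable and maps pixel values into [0, 1] with the z-scale algorithm. A usage sketch on synthetic data; the import is what the snippet assumes:

import numpy as np
from astropy.visualization import ZScaleInterval

data = np.random.normal(1000.0, 50.0, size=(64, 64))  # fake sky image
stretched = stretch_img(data, contrast=0.25)
print(stretched.min(), stretched.max())  # clipped to the [0, 1] interval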
Example #12
def getImageTensor(rad_jimg):
    paths = rad_jimg['file_paths']  # radiology

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform = transforms.Compose([normalize])

    # IMAGE
    image_tensor = torch.zeros(len(paths), 3, 224, 224).to(device)

    for i, path in enumerate(paths):
        #image = Image.open(path).convert('RGB')
        # Read image and process
        img = imread(path)
        if len(img.shape) == 2:
            img = img[:, :, np.newaxis]
            img = np.concatenate([img, img, img], axis=2)
        # preserve_range keeps 0-255 values; resize() would otherwise
        # rescale to [0, 1] and the / 255. below would double-normalize.
        img = resize(img, (224, 224), preserve_range=True)
        img = img.transpose(2, 0, 1)
        img = img / 255.
        img = torch.FloatTensor(img).to(device)
        if transform is not None:
            img = transform(img)
        image_tensor[i] = img
    return image_tensor
Example #13
    def __getitem__(self, i):
        # Read image
        image = Image.open(self.images[i], mode='r')
        image = image.convert('RGB')

        # Read objects in this image (bounding boxes, labels, difficulties)
        objects = self.objects[i]
        boxes = torch.FloatTensor(objects['boxes'])  # (n_objects, 4)
        labels = torch.LongTensor(objects['labels'])  # (n_objects)
        difficulties = torch.BoolTensor(objects['difficulties'])  # (n_objects); bool so it can mask

        # Discard difficult objects, if desired
        if not self.keep_difficult:
            boxes = boxes[~difficulties]
            labels = labels[~difficulties]
            difficulties = difficulties[~difficulties]

        # Apply transformations
        image, boxes, labels, difficulties = transform(image,
                                                       boxes,
                                                       labels,
                                                       difficulties,
                                                       split=self.split)

        return image, boxes, labels, difficulties
Example #14
    def __getitem__(self, index):
        ellipse_name = re.search(self.ELLIPSE_MATCH_PATTERN,
                                 self._images[index])
        if ellipse_name is None:
            return None
        ellipse_name = ellipse_name.group()
        ellipse_image = skimage.io.imread(
            os.path.join(self._root_dir, 'pics', self._images[index]))
        ellipse_metadata_match = [
            metadata_entry for metadata_entry in self._metadata
            if re.search(ellipse_name, metadata_entry) is not None
        ]
        assert len(ellipse_metadata_match) == 1
        ellipse_metadata_strings = open(
            os.path.join(self._root_dir, 'T',
                         ellipse_metadata_match[0])).readlines()
        ellipse_metadata = torch.Tensor(
            [float(line) for line in ellipse_metadata_strings])
        print("Loading photo . . . ")
        print(ellipse_name)
        transform = torchvision.transforms.ToTensor()
        ellipse_image = transform(ellipse_image)

        return {
            'image_tensor': ellipse_image,
            'metadata_tensor': ellipse_metadata,
        }
Example #15
def loadjson(path, root_dir='./', transform=None):
    with open(path, 'r') as load_f:
        load_dict = json.load(load_f)
    img_name = load_dict['imgName']
    img_name = os.path.join(root_dir, img_name)
    image = io.imread(img_name)
    if transform:
        image = transform(image)
    d1, d2, d3 = image.shape
    image = image.reshape([1, d1, d2, d3])
    objs = load_dict['objs']
    annotations = {'labels': np.empty((0, )), 'bboxes': np.empty((0, 4))}
    for obj in objs:
        annotations['labels'] = np.concatenate(
            [annotations['labels'], [label_dic[obj['label']]]], axis=0)
        annotations['bboxes'] = np.concatenate(
            [annotations['bboxes'],
             [[obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax']]]],
            axis=0)
    sample = {'image': image, 'annotations': annotations}
    return sample
Example #16
    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        
        # get name of current image
        img_name = self.mandelbrot_frame.iloc[idx, 0].split('_')
        full_name = os.path.join(
            self.root_dir,
            img_name[0],
            img_name[1] + '.jpg'
            )
        
        # load current image
        image = io.imread(full_name)
        coords = self.mandelbrot_frame.iloc[idx, 1:]
        frame = np.asarray([coords])
        frame = frame.astype('float').reshape(-1, 2)
        
        sample = {'image': image, 'coords': coords}
        
        if self.transform:
            sample = self.transform(sample)

        #print(f'TYPE of sample: {type(sample)}')
        #print(f'TYPE of frame: {type(frame)}')
            
        return sample
Example #17
def augmented_sliding_window(patches, flip=False, mirror=False, rotations=()):
    transformed_patches = []

    for patch in patches:
        transformed_patches.extend(transform(patch, flip, mirror, rotations))

    return transformed_patches
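A minimal sketch of the transform(patch, flip, mirror, rotations) helper the snippet assumes: it returns the patch plus the requested augmented copies. The name and signature come from the call site; the body is an assumption:

import numpy as np

def transform(patch, flip=False, mirror=False, rotations=()):
    out = [patch]
    if flip:
        out.append(np.flipud(patch))   # vertical flip
    if mirror:
        out.append(np.fliplr(patch))   # horizontal flip
    for angle in rotations:            # angles in multiples of 90 degrees
        out.append(np.rot90(patch, k=angle // 90))
    return out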
Example #18
def preprocess_image(image):
    '''
	Preprocesses the image to load into the prebuilt network.

	[input]
	* image: numpy.ndarray of shape (H,W,3)

	[output]
	* image_processed: torch.Tensor of shape (3,H,W)
	'''
    image = skimage.transform.resize(image, (224, 224))
    if image.ndim == 2:  # greyscale: replicate to three channels
        image = np.stack([image] * 3, axis=-1)
    elif image.shape[2] == 4:  # RGBA: drop the alpha channel
        image = image[:, :, :3]
    transform = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize((0.485, 0.456, 0.406),
                                         (0.229, 0.224, 0.225))
    ])
    image = transform(image)
    image = image.unsqueeze(0)
    return image
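A usage sketch on a synthetic RGB array; the function above returns a batched (1, 3, 224, 224) tensor ready for an ImageNet-pretrained network:

import numpy as np

x = np.random.rand(300, 400, 3)  # H x W x 3 float image
batch = preprocess_image(x)
print(batch.shape)  # torch.Size([1, 3, 224, 224])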
Example #19
def get_imgs(img_path, imsize, bbox=None, transform=None, normalize=None):
    img = Image.open(img_path).convert('RGB')
    width, height = img.size
    if bbox is not None:
        r = int(np.maximum(bbox[2], bbox[3]) * 0.75)
        center_x = int((2 * bbox[0] + bbox[2]) / 2)
        center_y = int((2 * bbox[1] + bbox[3]) / 2)
        y1 = np.maximum(0, center_y - r)
        y2 = np.minimum(height, center_y + r)
        x1 = np.maximum(0, center_x - r)
        x2 = np.minimum(width, center_x + r)
        img = img.crop([x1, y1, x2, y2])

    if transform is not None:
        img = transform(img)

    ret = []
    if cfg.GAN.B_DCGAN:
        ret = [normalize(img)]
    else:
        for i in range(cfg.TREE.BRANCH_NUM):
            # print(imsize[i])
            if i < (cfg.TREE.BRANCH_NUM - 1):
                re_img = transforms.Resize(imsize[i])(img)  # Scale was renamed Resize in torchvision
            else:
                re_img = img
            ret.append(normalize(re_img))

    return ret
Example #20
    def __getitem__(self, idx):
        row = self.df.iloc[idx].to_numpy()  # get row out of big csv

        # getting image
        img_name = row[0]
        image = io.imread(img_name)

        # getting ground truth label
        index = row[1]
        label_name = img_name.replace('image_2',
                                      'label_2').replace('png', 'txt')
        label = ''
        with open(label_name, 'r') as f:
            lines = f.readlines()
            if index >= len(lines):
                # Bad label: YOLO found more objects than the ground truth has,
                # so fall back to an all-zero dummy row.
                label = "Pedestrian 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00"
            else:
                label = lines[index]
        label = label.split()
        label[0] = self.types[label[0]]
        label = [float(x) for x in label]
        label = torch.Tensor(label)

        # getting yolo outputs
        yolo_outputs = row[2]
        yolo_outputs = yolo_outputs.split(',')
        yolo_outputs = [float(x) for x in yolo_outputs]
        yolo_outputs = yolo_outputs[:-1]
        yolo_outputs = torch.Tensor(yolo_outputs)

        if self.transform:
            image = self.transform(image)

        return image, yolo_outputs, label
Example #21
File: detect.py Project: zino974/rembg
def preprocess(image):
    label_3 = np.zeros(image.shape)
    label = np.zeros(label_3.shape[0:2])

    if 3 == len(label_3.shape):
        label = label_3[:, :, 0]
    elif 2 == len(label_3.shape):
        label = label_3

    if 3 == len(image.shape) and 2 == len(label.shape):
        label = label[:, :, np.newaxis]
    elif 2 == len(image.shape) and 2 == len(label.shape):
        image = image[:, :, np.newaxis]
        label = label[:, :, np.newaxis]

    transform = transforms.Compose(
        [data_loader.RescaleT(320),
         data_loader.ToTensorLab(flag=0)])
    sample = transform({
        "imidx": np.array([0]),
        "image": image,
        "label": label
    })

    return sample
Example #22
def applyTransforms(img):
    img = np.array(img)
    allTransforms = [
        [colorPrecisionReduction], [jpegNoise], [swirl], [fftPerturbation],
        [alterHSV, alterXYZ, alterLAB, alterYUV],
        [
            greyScaleMix, greyScalePartialMix, greyScaleMixTwoThirds,
            oneChannelPartialGrey
        ], [gaussianBlur, chambolleDenoising, nonlocalMeansDenoising]
    ]
    numTransforms = random.randint(0, 5)

    id = str(img[0][0])
    # savedImage = Image.fromarray(np.uint8(img), 'RGB')
    # savedImage.save("sample_data/transform" + str(id) + str(0) + ".png")

    #print("Original.")
    img = img / 255.0

    for i in range(numTransforms):
        transformGroup = random.choice(allTransforms)
        transform = random.choice(transformGroup)
        #transform = alterHSV
        # print(img)

        img = transform(img)

        # savedImage = Image.fromarray(np.uint8(img * 255.0), 'RGB')
        # savedImage.save("sample_data/transform" + str(id) + str(i + 1) + str(transform) + ".png")
        allTransforms.remove(transformGroup)

    return torch.from_numpy(np.swapaxes(img, 0, 2)).float()
Example #23
	def __getitem__(self, idx):

		if self.train:
			s = str(int(idx)+4000).zfill(5)
			img_name = os.path.join(self.root_dir,
			                        'train_images_128x128/train_{}.png'.format(s))
			image = pil_loader(img_name)
			img_name = os.path.join(self.root_dir,
			                        'train_images_64x64/train_{}.png'.format(s))
			image64 = pil_loader(img_name)
			if self.transform:
				if random.random() > 0.5:
					image = TF.hflip(image)
					image64 = TF.hflip(image64)
				if random.random() > 0.5:
					image = TF.vflip(image)
					image64 = TF.vflip(image64)
				image = self.transform(image)
				image64 = self.transform(image64)
			sample = {'img128': image, 'img64': image64, 'img_name':s}

		else:
			s = str(int(idx)+1).zfill(5)
			img_name = os.path.join(self.root_dir,
			                        'test_images_64x64/test_{}.png'.format(s))
			image64 = pil_loader(img_name)
			transform = transforms.Compose([
					transforms.Grayscale(),
					transforms.ToTensor()
			])
			image64 = transform(image64)  # test images always get Grayscale + ToTensor
			sample = {'img64': image64, 'img_name':s}

		return sample
Example #24
def preprocess(image):
    #print('Start image preprocess')
    label_3 = np.zeros(image.shape)
    label = np.zeros(label_3.shape[0:2])

    if (3 == len(label_3.shape)):
        label = label_3[:, :, 0]
    elif (2 == len(label_3.shape)):
        label = label_3

    if (3 == len(image.shape) and 2 == len(label.shape)):
        label = label[:, :, np.newaxis]
    elif (2 == len(image.shape) and 2 == len(label.shape)):
        image = image[:, :, np.newaxis]
        label = label[:, :, np.newaxis]

    transform = transforms.Compose([RescaleT(320), ToTensorLab(flag=0)])
    sample = transform({
        'imidx': np.array([0]),
        'image': image,
        'label': label
    })
    #print('Preprocess completed')

    return sample
Example #25
def CNN_features_1(encoder, image_path):
    img = imread(image_path)
    if len(img.shape) == 2:
        img = img[:, :, np.newaxis]
        img = np.concatenate([img, img, img], axis=2)
    # scipy.misc.imresize is gone; skimage's resize with preserve_range=True
    # keeps the 0-255 range so the / 255. below still applies.
    img = skimage.transform.resize(img, (256, 256), preserve_range=True)
    img = img.transpose(2, 0, 1)
    img = img / 255.
    img = torch.FloatTensor(img).to(device)
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform = transforms.Compose([normalize])
    image = transform(img)  # (3, 256, 256)

    # Encode
    image = image.unsqueeze(0)  # (1, 3, 256, 256)
    encoder_out = encoder(
        image)  # (1, enc_image_size, enc_image_size, encoder_dim)
    enc_image_size = encoder_out.size(1)
    encoder_dim = encoder_out.size(3)

    # Flatten encoding
    encoder_out = encoder_out.view(1, -1,
                                   encoder_dim)  # (1, num_pixels, encoder_dim)
    num_pixels = encoder_out.size(1)

    return encoder_out[0, :, :]
Example #26
def image_to_tensor(image, image_size=64):

    transform = torchvision.transforms.Compose([
        torchvision.transforms.Resize(image_size),
        torchvision.transforms.ToTensor()
    ])
    return transform(image).unsqueeze(0)
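A usage sketch; note that Resize with a single int scales the shorter side, so a square input comes out exactly image_size x image_size:

from PIL import Image

img = Image.new('RGB', (128, 128), 'gray')
batch = image_to_tensor(img, image_size=64)
print(batch.shape)  # torch.Size([1, 3, 64, 64])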
Example #27
    def __getitem__(self, index):
        img_name = self.data[index]['name']
        bbox = self.data[index]['bbox']
        image = Image.open(os.path.join(self.img_dir, img_name))

        if self.crop:
            region = crop_data_of_img(bbox)
            image = image.crop(tuple(region[0:4]))

        if self.mode == 'test':
            trans_crop = transforms.CenterCrop([54, 54])
        else:
            trans_crop = transforms.RandomCrop([54, 54])

        transform = transforms.Compose([
            transforms.Resize([64, 64]), trans_crop,
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ])
        image = transform(image)
        length = len(bbox)
        digits = [10] * 5
        # print('img_name : {}'.format(os.path.join(self.img_dir,img_name)))
        for i in range(length):  # don't shadow the __getitem__ index argument
            if i >= 5:
                break
            digits[i] = bbox[i][-1]
        return image, length, digits
Example #28
def preprocess_image(image):
    '''
	Preprocesses the image to load into the prebuilt network.

	[input]
	* image: numpy.ndarray of shape (H,W,3)

	[output]
	* image_processed: torch.Tensor of shape (3,H,W)
	'''

    image = skimage.transform.resize(image, (224, 224))
    mean = (0.485, 0.456, 0.406)
    std = (0.229, 0.224, 0.225)

    if image.ndim == 2:  # greyscale: replicate to three channels
        image = np.stack([image] * 3, axis=-1)
    elif image.shape[2] == 4:  # RGBA: drop the alpha channel
        image = image[:, :, :3]

    transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor(),
                                                torchvision.transforms.Normalize(mean=mean, std=std)])
    image = transform(image)
    image = image.unsqueeze(0)

    return image
Example #29
 def translate_coords(self, bias, y, x):
     pattern_shape = self._raw_pattern.shape
     ysize, xsize = pattern_shape
     transform, lyscale, lxscale = bias
     lyscale *= ysize
     lxscale *= xsize
     x, y = transform([[x / lxscale, y / lyscale]])
     return y * lyscale, x * lxscale
Example #30
    def __getitem__(self, index):
        
#         start = time.time()
        
        with h5py.File(self.filename, "r") as f:
    
            key = self.samples[self.idx[index]] # self.samples holds entries like ('liver_0.nii.gz', '123'): the patient file and which of its slices
            image, seg = [], []
            for num in range(self.depth):
    #             print(str(key[1]+num))
                image_tmp, seg_tmp = f[key[0]]["slices"][str(key[1]+num)] # self.dataset is the same as the f used to read the hdf5
                image.append(image_tmp)
                seg.append(seg_tmp)
            image = np.stack(image, axis=2)
            seg = np.stack(seg, axis=2).mean(2, keepdims=True)
            label = self.labels[index]
            use_mask = key[2]

    #         print(image.shape, seg.shape)

            # Make the segmentation the entire image if it isn't in masks_selector.
    #         if not self.masks_selector[index]:
    #             seg = np.zeros(seg.shape)

            # Make the segmentation the entire image if it is the negative class.
    #         if int(label) == 0:
    #             seg = np.ones(seg.shape)

            # If there is a segmentation, blur it a bit.
    #         if (self.blur > 0) and (seg.max() != 0):
    #             seg = skimage.filters.gaussian(seg, self.blur)
    #             seg = seg / seg.max()

    #         seg = (seg > 0) * 1.

    #         print(time.time() - start, '1')
    #         start = time.time()
            
            # Synthetic data must not be flipped, so is_train is set to False
            if self.mode == "train" and 'synthetic' not in self.dataroot:
                image, seg = transform(image, seg, True, self.new_size, self.mask_size, self.aug)
            else:
                image, seg = transform(image, seg, False, self.new_size, self.mask_size, self.aug)

    #         print(time.time() - start, '2')
        return (image, seg), int(label), use_mask
Example #31
def throw_party(out_file, img):
    """Make a party img. Might overwrite `img`!"""
    offsets = [0, 2, 4, 2, 0, -2, -4, -2]
    images = []
    NUM_CHANNELS = 3

    if len(img.shape) == 3 and img.shape[2] > 3:
        img = img[:, :, :3]

    for i in range(16):
        offset = offsets[i % len(offsets)]
        c = i % NUM_CHANNELS
        images.append(transform(img, emphasize_channel=c, offset=offset))
    writeGif(out_file, images, loops=float('inf'))
Example #32
def throw_party_in_memory(img: np.ndarray) -> bytes:
    """Make `img` party and return it as bytes."""

    offsets_x = OFFSET_MULTIPLIER * np.sin(np.arange(-np.pi, np.pi, OFFSET_STEP))
    offsets_y = OFFSET_MULTIPLIER * np.cos(np.arange(-np.pi, np.pi, OFFSET_STEP))

    images = []
    NUM_CHANNELS = 3

    if len(img.shape) == 3 and img.shape[2] > 3:
        img = img[:, :, :3]

    for i, (offset_x, offset_y) in enumerate(zip(offsets_x, offsets_y)):
        c = i % NUM_CHANNELS
        images.append(
            transform(img, emphasize_channel=c, offset_x=offset_x, offset_y=offset_y)
        )
    return imageio.mimwrite(imageio.RETURN_BYTES, images, format="gif", fps=FPS)
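Examples #31 and #32 call a transform(img, emphasize_channel=..., offset=...) / (offset_x=..., offset_y=...) helper that is not shown. A minimal sketch covering both call sites; the boost-one-channel-and-roll behavior is an assumption:

import numpy as np

def transform(img, emphasize_channel=0, offset=0, offset_x=None, offset_y=None):
    out = img.astype(np.float32)
    # Boost one channel so the frames cycle through color emphasis.
    out[..., emphasize_channel] = np.clip(out[..., emphasize_channel] * 1.5, 0, 255)
    # Shift the frame; #31 passes a single offset, #32 passes x/y offsets.
    dx = int(offset if offset_x is None else offset_x)
    dy = int(0 if offset_y is None else offset_y)
    return np.roll(np.roll(out, dx, axis=1), dy, axis=0).astype(np.uint8)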
Example #33
ctswarp_names = ['Reference', 'Rotate 90', 'Rotate 45', 'Dilation',
                 'Erosion', 'Y Reflection', 'X Shift', 'Row Shuffle',
                 'Sin()', 'Cosine', 'Rand.', 'Gauss',
                 'Rand. Affine', 'Gauss Noise','More Noise']

warp_continuous( sine )

# Call Metrics on list of test patterns

structural_sim( ctswarp_data )
reg_mse( ctswarp_data )
procrustes_analysis( ctswarp_data )
make_quadrants( ctswarp_data )
imse(ctswarp_data)
cw_ssim_value(ctswarp_data, 30)
transform( ctswarp_data )
disccost( ctswarp_data )

# Zip names, data, metrics, quadrants into a mega list!
# Generally this is inadvisable because it relies on index locations...in the next cell we will make a dictionary.
cts_zip = zip(ctswarp_names, ctswarp_data, mse_vals, ssim_vals, disp_vals, top_lefts, top_rights, low_lefts, low_rights, 
               imse_vals, imse_maps, mse_maps, ssim_maps, cw_ssim_vals, cw_ssim_maps, mag_maps, freqs, dct_maps, dct_curves )


# In[15]:

continuous_dict = defaultdict(dict)

# Making a look up dictionary from all the patterns and their comparison scores.
# zipped list [i][0] is name, 1 is full array, 2 is mse val, 3 is SSIM, 4 is PD,
# 5 through 8 are quadrants
Example #34
# Subplot Titles and Dictionary Keys
binwarp_names = ['Original', 'Half Phase Shift', 'Rotate 90','Rotate 45',
                 'Dilation', 'Erosion','Y - Reflection', 'X Shift',
                 'Row Shuffle', 'Random Affine', 'Gauss', 'Random','Edges']

warp_binary(stripes)

# Call Metrics on list of test patterns

structural_sim( binwarp_data )
reg_mse( binwarp_data )
make_quadrants( binwarp_data )
imse(binwarp_data)
cw_ssim_value(binwarp_data, 30)
transform( binwarp_data )
disccost( binwarp_data )

# Match names and arrays
binary_zip = zip(binwarp_names,binwarp_data, mse_vals, ssim_vals, top_lefts,
                 top_rights, low_lefts, low_rights, imse_vals, imse_maps, mse_maps,
                 ssim_maps, cw_ssim_vals, cw_ssim_maps, mag_maps, freqs, dct_maps, dct_curves)

binary_dict = defaultdict(dict)

# Making a look up dictionary from all the patterns and their comparison scores.
# zipped list [i][0] is name, 1 is full array, 2 is mse val, 3 is SSIM, 4 is PD,
# 5 through 8 are quadrants
# 9 is IMSE, 10 is IMSE Map...

def to_dict_w_hists( data_dict, keys, data_zip ):
Example #35
# Call It.
warp_snow( snow_test )

# Call Metrics on list of test snows

structural_sim( snow_data )
reg_mse( snow_data )
procrustes_analysis( snow_data )
make_quadrants( snow_data )

imse(snow_data)

cw_ssim_value(snow_data, 30)

transform( snow_data )
disccost( snow_data )

# Zip names, data, metrics, quadrants into a mega list!
# Generally this is inadvisable because it relies on indexing...in the next cell we will make a dictionary.
snow_zip = zip(snow_names,snow_data, mse_vals, ssim_vals, disp_vals, top_lefts, top_rights, low_lefts, low_rights, 
               imse_vals, imse_maps, mse_maps, ssim_maps, cw_ssim_vals, cw_ssim_maps, mag_maps, freqs, dct_maps, dct_curves )


# In[5]:

snow_dict = defaultdict(dict)

'''
# Making a look up dictionary from all the patterns and their comparison scores.
# zipped list [i][0] is name, 1 is full array, 2 is mse val, 3 is SSIM, 4 is PD,