Example #1
    def test_transcoding(self):
        dct_y, dct_c, dct_r = load(self.jpeg_file_411)
        self.assertEqual(dct_y.shape, (50, 75, 64), "wrong dct shape")
        self.assertEqual(dct_c.shape, (25, 38, 64), "wrong dct shape")
        self.assertEqual(dct_r.shape, (25, 38, 64), "wrong dct shape")

        dct_y, dct_c, dct_r = load(self.jpeg_file_420)
        self.assertEqual(dct_y.shape, (50, 75, 64), "wrong dct shape")
        self.assertEqual(dct_c.shape, (25, 38, 64), "wrong dct shape")
        self.assertEqual(dct_r.shape, (25, 38, 64), "wrong dct shape")

        dct_y, dct_c, dct_r = load(self.jpeg_file_422)
        self.assertEqual(dct_y.shape, (50, 75, 64), "wrong dct shape")
        self.assertEqual(dct_c.shape, (25, 38, 64), "wrong dct shape")
        self.assertEqual(dct_r.shape, (25, 38, 64), "wrong dct shape")

        dct_y, dct_c, dct_r = load(self.jpeg_file_440)
        self.assertEqual(dct_y.shape, (50, 75, 64), "wrong dct shape")
        self.assertEqual(dct_c.shape, (25, 38, 64), "wrong dct shape")
        self.assertEqual(dct_r.shape, (25, 38, 64), "wrong dct shape")

        dct_y, dct_c, dct_r = load(self.jpeg_file_444)
        self.assertEqual(dct_y.shape, (50, 75, 64), "wrong dct shape")
        self.assertEqual(dct_c.shape, (25, 38, 64), "wrong dct shape")
        self.assertEqual(dct_r.shape, (25, 38, 64), "wrong dct shape")
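The shapes above follow from the JPEG block structure: every spatial position in the returned arrays holds the 64 DCT coefficients of one 8x8 block. A minimal sketch, assuming jpeg2dct is installed and using 'sample.jpg' as a placeholder path:

# minimal sketch: the first two dimensions of each array are roughly
# (height // 8, width // 8); chroma planes shrink further under subsampling
from jpeg2dct.numpy import load

dct_y, dct_cb, dct_cr = load('sample.jpg')  # 'sample.jpg' is a placeholder path
print(dct_y.shape)   # e.g. (50, 75, 64) for an image of roughly 400x600 pixels
print(dct_cb.shape)  # e.g. (25, 38, 64) for subsampled chroma
print(dct_cr.shape)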
Example #2
def get_image_info(path_to_file):
    """Returns image info as a dictionary with elements, type, width, height, channels"""
    assert (fs.file_exists(path_to_file))
    img_type = get_image_type(path_to_file)
    im = Image.open(path_to_file)
    width, height = im.size
    channels = im.mode

    info_dict = collections.OrderedDict()
    info_dict[lookup.file_path] = path_to_file
    info_dict[lookup.image_type] = img_type
    info_dict[lookup.image_width] = width
    info_dict[lookup.image_height] = height
    info_dict[lookup.image_channels] = channels

    if img_type in lookup.lossy_encoding_types():
        # lossy (JPEG) images: the maximum embedding size is derived from the DCT coefficients
        coefficients = np.array(load(path_to_file))

        ac_dct_y, ac_dct_all = process_coefficient(coefficients)
        info_dict[lookup.embedding_max] = ac_dct_y
    else:
        # lossless images: the maximum embedding size is width * height * number of channels
        info_dict[lookup.embedding_max] = width * height * convert_channels_to_int(channels)

    return info_dict
Example #3
    def test_load(self):
        dct_y, dct_c, dct_r = load(self.jpeg_file)
        self.assertEqual(dct_y.shape, (205, 205, 64), "wrong dct shape")
        self.assertEqual(dct_c.shape, (103, 103, 64), "wrong dct shape")
        self.assertEqual(dct_r.shape, (103, 103, 64), "wrong dct shape")

        dct_y_nonormalized, dct_c_nonormalized, dct_r_nonormalized = load(
            self.jpeg_file, normalized=False)
        self.assertEqual(dct_y_nonormalized.shape, (205, 205, 64),
                         "wrong dct shape")
        self.assertEqual(dct_c_nonormalized.shape, (103, 103, 64),
                         "wrong dct shape")
        self.assertEqual(dct_r_nonormalized.shape, (103, 103, 64),
                         "wrong dct shape")

        # per frequency channel, the unnormalized range should sit inside the
        # normalized range, since normalization scales the coefficients up
        for c in range(dct_y.shape[-1]):
            normalized_range = dct_y[:, :, c].min(), dct_y[:, :, c].max()
            unnormalized_range = (dct_y_nonormalized[:, :, c].min(),
                                  dct_y_nonormalized[:, :, c].max())
            self.assertTrue(
                unnormalized_range[0] >= normalized_range[0]
                and unnormalized_range[1] <= normalized_range[1],
                "normalized load should produce a larger range of values")
Example #4
    def __getitem__(self, idx):
        img, target = self.test_data[idx], self.test_labels[idx]
        #img = center_crop(img, (56, 56))
        dct_y, dct_cb, dct_cr = load(img)
        y_mean, cb_mean, cr_mean = np.load('/home/michal5/cs445/avgs.npy')
        y_std, cb_std, cr_std = np.load('/home/michal5/cs445/stds.npy')
        dct_cb = upscale(dct_cb)
        dct_cr = upscale(dct_cr)
        dct_y = np.divide(np.subtract(dct_y, y_mean), y_std)
        dct_cb = np.divide(np.subtract(dct_cb, cb_mean), cb_std)
        dct_cr = np.divide(np.subtract(dct_cr, cr_mean), cr_std)
        dct_y_t = torch.from_numpy(dct_y).float()
        dct_cr_t = torch.from_numpy(dct_cr).float()
        dct_cb_t = torch.from_numpy(dct_cb).float()
        val = torch.cat((dct_y_t, dct_cb_t, dct_cr_t), dim=1)
        return val, target
Example #5
    def __getitem__(self, idx):
        if self.datatype == 'val':
            img, target = self.val_data[idx], self.val_labels[idx]
        else:
            img, target = self.train_data[idx], self.train_labels[idx]
        img = center_crop(img, (320, 320))
        #print(img)
        dct_y, dct_cb, dct_cr = load(img)
        y_mean, cb_mean, cr_mean = np.load(
            '/home/michal5/cs445/avgs_imagenette_320.npy')
        y_std, cb_std, cr_std = np.load(
            '/home/michal5/cs445/stds_imagenette_320.npy')
        dct_cb = upscale(dct_cb)
        dct_cr = upscale(dct_cr)
        dct_y = np.divide(np.subtract(dct_y, y_mean), y_std)
        dct_cb = np.divide(np.subtract(dct_cb, cb_mean), cb_std)
        dct_cr = np.divide(np.subtract(dct_cr, cr_mean), cr_std)
        dct_y_t = torch.from_numpy(dct_y).float()
        dct_cr_t = torch.from_numpy(dct_cr).float()
        dct_cb_t = torch.from_numpy(dct_cb).float()

        val = torch.cat((dct_y_t, dct_cb_t, dct_cr_t), dim=2)
        return val, target
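Examples #4 and #5 share the same preprocessing pattern: upscale the chroma coefficient planes to the luma plane's spatial size, standardize each plane, and concatenate along the coefficient axis. A hedged sketch with synthetic arrays, where np.repeat stands in for the upscale() helper used above and the per-plane mean and std are placeholders:

import numpy as np
import torch

dct_y  = np.random.randn(40, 40, 64).astype(np.float32)   # luma blocks
dct_cb = np.random.randn(20, 20, 64).astype(np.float32)   # subsampled chroma
dct_cr = np.random.randn(20, 20, 64).astype(np.float32)

# stand-in for upscale(): repeat each chroma block 2x along both spatial axes
dct_cb = dct_cb.repeat(2, axis=0).repeat(2, axis=1)
dct_cr = dct_cr.repeat(2, axis=0).repeat(2, axis=1)

# per-plane standardization (real means/stds would come from the dataset, as above)
mean, std = 0.0, 1.0
dct_y, dct_cb, dct_cr = [(x - mean) / std for x in (dct_y, dct_cb, dct_cr)]

# concatenate along the coefficient axis, as in Example #5 (dim=2): 3 x 64 = 192 channels
val = torch.cat([torch.from_numpy(x) for x in (dct_y, dct_cb, dct_cr)], dim=2)
print(val.shape)  # torch.Size([40, 40, 192])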
Example #6
def convert_to_dct(img_location):
    # load() reads the DCT coefficients straight from a file path
    dct_y, dct_cb, dct_cr = load(img_location)
    # loads() does the same from an in-memory buffer; it overwrites the result of
    # the call above, so the two calls are interchangeable ways to get the arrays
    with open(img_location, 'rb') as src:
        buffer = src.read()
    dct_y, dct_cb, dct_cr = loads(buffer)
    return [dct_y, dct_cb, dct_cr]