def __getitem__(
    self, index: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, torch.Tensor]:
    kind, file, label = (
        self.kinds[index],
        self.files[index],
        self.labels[index],
    )
    # Extract the DCT coefficient arrays for the Y, Cb and Cr channels
    dct_y, dct_cb, dct_cr = dct_from_jpeg_imageio(file)
    dct_y = dct_y.astype(np.float32)
    dct_cb = dct_cb.astype(np.float32)
    dct_cr = dct_cr.astype(np.float32)
    # Move the coefficient axis to the front: (H, W, 64) -> (64, H, W)
    dct_y = np.rollaxis(dct_y, 2, 0)
    dct_cb = np.rollaxis(dct_cb, 2, 0)
    dct_cr = np.rollaxis(dct_cr, 2, 0)
    # Scale the coefficients into a small range for training
    dct_y = dct_y / 1024
    dct_cb = dct_cb / 1024
    dct_cr = dct_cr / 1024
    target = one_hot(10, label)
    return dct_y, dct_cb, dct_cr, target
def __getitem__(
    self, index: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, torch.Tensor]:
    kind, image_name, label = (
        self.kinds[index],
        self.image_names[index],
        self.labels[index],
    )
    dct_y, dct_cb, dct_cr = dct_from_jpeg_imageio(
        f"data/{kind}/{image_name}"
    )
    dct_y = dct_y.astype(np.float32)
    dct_cb = dct_cb.astype(np.float32)
    dct_cr = dct_cr.astype(np.float32)
    dct_y = np.rollaxis(dct_y, 2, 0)
    dct_cb = np.rollaxis(dct_cb, 2, 0)
    dct_cr = np.rollaxis(dct_cr, 2, 0)
    dct_y = dct_y / 1024
    dct_cb = dct_cb / 1024
    dct_cr = dct_cr / 1024
    target = one_hot(self.n_classes, label)
    return dct_y, dct_cb, dct_cr, target
def __getitem__(self, index):
    kind, image_name, label = (
        self.kinds[index],
        self.image_names[index],
        self.labels[index],
    )
    dct_y, dct_cb, dct_cr = dct_from_jpeg_imageio(
        f"data/{kind}/{image_name}"
    )
    dct_y = dct_y.astype(np.float32)
    dct_cb = dct_cb.astype(np.float32)
    dct_cr = dct_cr.astype(np.float32)
    # dct_y = np.rollaxis(dct_y, 2, 0)
    # dct_cb = np.rollaxis(dct_cb, 2, 0)
    # dct_cr = np.rollaxis(dct_cr, 2, 0)
    dct_y = dct_y / 1024
    dct_cb = dct_cb / 1024
    dct_cr = dct_cr / 1024
    # Flatten each array from shape (64, 64, 64) to (4096, 64)
    dct_y = dct_y.reshape((4096, 64))
    dct_cb = dct_cb.reshape((4096, 64))
    dct_cr = dct_cr.reshape((4096, 64))
    # Concatenate the arrays
    input_data = np.concatenate((dct_y, dct_cb, dct_cr), axis=0)
    target = one_hot(4, label)
    return input_data, target
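# All three __getitem__ variants above call a one_hot(n_classes, label) helper
# that is defined elsewhere in the codebase. A minimal sketch of what it is
# assumed to do (build a float target vector for the given class index):
def one_hot(n_classes: int, label: int) -> torch.Tensor:
    # Zero vector of length n_classes with a 1.0 at the label position
    target = torch.zeros(n_classes, dtype=torch.float32)
    target[label] = 1.0
    return target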
def pre_process_and_save_dct_data():
    for kind in tqdm(["Cover", "JMiPOD", "JUNIWARD", "UERD"], desc=""):
        for file in tqdm(glob.glob1("data/Cover/", "*.jpg"), desc=""):
            save_dir = f"data/dct/{kind}"
            load_dir = f"data/{kind}"
            save_file = f"{save_dir}/{file}".replace(".jpg", ".npz")
            load_file = f"{load_dir}/{file}"
            make_dir_if_not_exists(save_dir)
            # Load the DCT arrays
            dct_y, dct_cb, dct_cr = dct_from_jpeg_imageio(load_file)
            # Save the arrays without any normalisation
            np.savez_compressed(save_file, dct_y, dct_cb, dct_cr)
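# Because np.savez_compressed is called with positional arguments, the arrays
# are stored under numpy's default keys "arr_0", "arr_1" and "arr_2". A minimal
# sketch (not part of the original code) of reading a pre-processed file back:
def load_dct_npz(path: str):
    with np.load(path) as data:
        # Arrays come back in the order they were saved: Y, Cb, Cr
        return data["arr_0"], data["arr_1"], data["arr_2"]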
def test_dct_methods() -> None:
    test_path = "data/UERD/00001.jpg"
    uber_dct_y, uber_dct_cb, uber_dct_cr = dct_from_jpeg(test_path)
    imageio_dct_y, imageio_dct_cb, imageio_dct_cr = dct_from_jpeg_imageio(
        test_path
    )
    assert uber_dct_y.shape == (64, 64, 64)
    assert uber_dct_cb.shape == (32, 32, 64)
    assert uber_dct_cr.shape == (32, 32, 64)
    assert imageio_dct_y.shape == (64, 64, 64)
    assert imageio_dct_cb.shape == (64, 64, 64)
    assert imageio_dct_cr.shape == (64, 64, 64)
def __getitem__(self, index):
    image_name = self.image_names[index]
    img_path = f"data/Test/{image_name}"
    dct_y, dct_cb, dct_cr = dct_from_jpeg_imageio(img_path)
    dct_y = dct_y.astype(np.float32)
    dct_cb = dct_cb.astype(np.float32)
    dct_cr = dct_cr.astype(np.float32)
    dct_y = np.rollaxis(dct_y, 2, 0)
    dct_cb = np.rollaxis(dct_cb, 2, 0)
    dct_cr = np.rollaxis(dct_cr, 2, 0)
    dct_y = dct_y / 1024
    dct_cb = dct_cb / 1024
    dct_cr = dct_cr / 1024
    return image_name, [dct_y, dct_cb, dct_cr]
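# A minimal sketch of how the inference __getitem__ above could be consumed.
# The dataset argument and batch size are illustrative assumptions; shuffling
# is disabled so predictions stay aligned with the returned image names.
def make_test_loader(test_dataset, batch_size: int = 8):
    from torch.utils.data import DataLoader
    return DataLoader(test_dataset, batch_size=batch_size, shuffle=False)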
def load_dct_values_from_pre_processed_imagenet_image(image_path, is_training=True):
    # Read the image with CV2 (loads as BGR) and convert to RGB for PIL
    img = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if is_training:
        try:
            img = get_random_crop(img, image_size=IMAGE_SIZE)
        except ValueError:
            print("Invalid random crop, reverting to center crop.")
            img = get_center_crop(img, image_size=IMAGE_SIZE)
    else:
        img = get_center_crop(img, image_size=IMAGE_SIZE)
    with tempfile.TemporaryDirectory() as tmp_dir:
        tmp_file = f"{tmp_dir}/img.jpg"
        # Convert to PIL and save as JPEG without chroma subsampling
        img = Image.fromarray(img)
        img.save(tmp_file, subsampling=0, format="JPEG")
        return dct_from_jpeg_imageio(tmp_file)
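# get_random_crop and get_center_crop are helpers from the surrounding codebase
# and are not shown here. A minimal sketch of a centre crop, assuming the input
# is an (H, W, C) array and image_size is the square crop side in pixels:
def get_center_crop(img: np.ndarray, image_size: int) -> np.ndarray:
    h, w = img.shape[:2]
    top = (h - image_size) // 2
    left = (w - image_size) // 2
    return img[top:top + image_size, left:left + image_size]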
def compare_cover_vs_modified_dct() -> None:
    # Compare the cover image's DCT coefficients against its stego counterpart
    dct_y_0, dct_cb_0, dct_cr_0 = dct_from_jpeg_imageio("data/Cover/00001.jpg")
    dct_y_1, dct_cb_1, dct_cr_1 = dct_from_jpeg_imageio("data/UERD/00001.jpg")
    print(dct_y_0[0])
    print(dct_y_1[0])
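# A possible extension of the comparison above (illustrative, not from the
# original code): count how many luminance DCT coefficients the embedding
# changed between the cover and the stego image.
def count_changed_y_coefficients(cover_path: str, stego_path: str) -> int:
    dct_y_cover, _, _ = dct_from_jpeg_imageio(cover_path)
    dct_y_stego, _, _ = dct_from_jpeg_imageio(stego_path)
    return int(np.sum(dct_y_cover != dct_y_stego))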