Example #1
    def process_files(self):
        def write_example(_image_bytes, _label, _writer):
            example = tf.train.Example(features=tf.train.Features(
                feature={
                    'image_bytes': bytes_feature(_image_bytes),
                    'label': int64_feature(int(_label))
                }))
            _writer.write(example.SerializeToString())

        fns = sorted([
            fn for fn in os.listdir(self._val_data_dir) if fn.endswith('.JPEG')
        ])
        total_files = len(fns)

        records_file = os.path.join(self._target_dir, 'val_data.tfrecords')
        writer = tf.python_io.TFRecordWriter(records_file)
        self._logger.info('writing image data to: {}'.format(records_file))

        self._logger.info('number of files: {}'.format(total_files))

        for i, (fn, label) in enumerate(zip(fns, self._labels)):
            image_file = os.path.join(self._val_data_dir, fn)

            with open(image_file, 'rb') as img_f:
                image_bytes = img_f.read()

            write_example(tf.compat.as_bytes(image_bytes), label, writer)
            progress(
                i + 1, total_files,
                '{}/{} validation images processed...'.format(
                    i + 1, total_files))

        writer.flush()
        writer.close()
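
Note: `bytes_feature` and `int64_feature` are module-level helpers not shown in the snippet. A minimal sketch of the canonical TensorFlow 1.x definitions they presumably follow:

import tensorflow as tf

def bytes_feature(value):
    # wrap a raw byte string in a tf.train.Feature
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

def int64_feature(value):
    # wrap a Python int in a tf.train.Feature
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))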
Example #2
    def process_files(self):
        def write_example(_image_bytes, _label, _writer):
            example = tf.train.Example(features=tf.train.Features(feature={
                'image_bytes': bytes_feature(_image_bytes),
                'label': int64_feature(int(_label))
            }))
            _writer.write(example.SerializeToString())

        # get image files from .mat
        val_list = scipy_io.loadmat(self._val_list_mat)
        file_list = [os.path.join(self._images_dir, fn[0][0]) for fn in val_list['file_list']]
        label_list = [label[0] for label in val_list['labels']]
        num_files = len(file_list)

        self._logger.info('data_dir={}'.format(self._data_dir))
        self._logger.info('target_dir={}'.format(self._target_dir))
        self._logger.info('val_list_mat={}'.format(self._val_list_mat))
        self._logger.info('num_files={}'.format(num_files))
        self._logger.info('unique_labels={}'.format(np.unique(label_list)))

        # create writer
        records_file = os.path.join(self._target_dir, 'val_data.tfrecords')
        writer = tf.python_io.TFRecordWriter(records_file)

        i = 0  # keeps the final count defined even if file_list is empty
        for i, (image_file, label) in enumerate(zip(file_list, label_list)):
            with open(image_file, 'rb') as img_f:
                image_bytes = img_f.read()

            label += LABEL_OFFSET

            write_example(tf.compat.as_bytes(image_bytes), label, writer)
            progress(i + 1, num_files, '{}/{} validation images processed...'.format(i + 1, num_files))

        writer.flush()
        writer.close()

        self._logger.info('num_files processed={}'.format(i + 1))
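
For reference, the records written above can be read back with a feature spec that mirrors the keys in `write_example`; a minimal sketch (the decode step assumes JPEG-encoded image bytes, as in these datasets):

import tensorflow as tf

def parse_example(serialized):
    # feature spec mirrors the 'image_bytes'/'label' keys written above
    features = tf.parse_single_example(
        serialized,
        features={
            'image_bytes': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
        })
    image = tf.image.decode_jpeg(features['image_bytes'], channels=3)
    return image, features['label']

dataset = tf.data.TFRecordDataset('val_data.tfrecords').map(parse_example)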
Example #3
    def process_files(self):
        def write_example(_image_bytes, _label, _writer):
            example = tf.train.Example(features=tf.train.Features(
                feature={
                    'image_bytes': bytes_feature(_image_bytes),
                    'label': int64_feature(int(_label))
                }))
            _writer.write(example.SerializeToString())

        records_file = os.path.join(self._target_dir, 'train_data.tfrecords')
        writer = tf.python_io.TFRecordWriter(records_file)
        self._logger.info('writing image data to: {}'.format(records_file))

        synsets = os.listdir(self._train_data_dir)
        num_synsets = len(synsets)
        self._logger.info('number of synsets found: {}'.format(num_synsets))
        print('')

        num_images, n = 0, 0
        for i, s in enumerate(synsets):

            keras_label = self._label_converter.synset_to_keras_index(s)
            synset_dir = os.path.join(self._train_data_dir, s)
            image_files_in_synset = [
                os.path.join(synset_dir, f) for f in os.listdir(synset_dir)
            ]

            for n, image_file in enumerate(image_files_in_synset, 1):
                with open(image_file, 'rb') as img_f:
                    image_bytes = img_f.read()

                write_example(tf.compat.as_bytes(image_bytes), keras_label,
                              writer)

            progress(i + 1, num_synsets,
                     '{}/{} synsets processed'.format(i + 1, num_synsets))
            num_images += n

        print('')
        self._logger.info('number of train images: {}'.format(num_images))

        writer.flush()
        writer.close()
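
All examples report progress through a `progress(count, total, status)` helper that is not part of the snippets. A minimal stand-in with the same call signature:

import sys

def progress(count, total, status=''):
    # single-line, carriage-return progress bar
    bar_len = 40
    filled = int(round(bar_len * count / float(max(total, 1))))
    bar = '=' * filled + '-' * (bar_len - filled)
    sys.stdout.write('[{}] {}\r'.format(bar, status))
    sys.stdout.flush()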
Example #4
    def run0(self, ae_config, pc_config, cpm_checkpoint):

        tf.reset_default_graph()

        print('')
        self.logger.info('===== building graph start =====')

        with tf.Graph().as_default() as graph:
            # datafeed
            self.logger.info('* datafeed')

            input_pipeline = InputPipeline(
                records=self.records_file,
                records_type=RecordsParser.RECORDS_UNLABELLED,
                shuffle_buffer_size=self._batch_size *
                self.NUM_PREPROCESSING_THREADS,
                batch_size=self._batch_size,
                num_preprocessing_threads=self.NUM_PREPROCESSING_THREADS,
                num_repeat=1,
                preprocessing_fn=self._get_resize_function(
                    self._image_height, self._image_width),
                preprocessing_kwargs={},
                drop_remainder=True,
                compute_bpp=False,
                shuffle=False,
                dtype_out=tf.float32)

            images = input_pipeline.next_batch()[0]

            # compression + inference op
            self.logger.info('* compression')
            with tf.name_scope('compression'):
                self.logger.info('images shape: {}'.format(
                    images.get_shape().as_list()))
                images = nhwc_to_nchw(images)

                # create networks
                ae_cls = autoencoder.get_network_cls(ae_config)
                pc_cls = probclass.get_network_cls(pc_config)

                # instantiate models
                ae = ae_cls(ae_config)
                pc = pc_cls(pc_config, num_centers=ae_config.num_centers)

                enc_out_val = ae.encode(images, is_training=False)
                images_compressed = ae.decode(enc_out_val.qhard,
                                              is_training=False)

                bitcost_val = pc.bitcost(enc_out_val.qbar,
                                         enc_out_val.symbols,
                                         is_training=False,
                                         pad_value=pc.auto_pad_value(ae))
                avg_bits_per_pixel = bitcost_to_bpp(bitcost_val, images)
                images = nchw_to_nhwc(images)
                images_compressed = nchw_to_nhwc(images_compressed)

            # compute distortions
            self.logger.info('* distortions')
            with tf.name_scope('distortions'):
                distortions_obj = Distortions(
                    reconstructed_images=images_compressed,
                    original_images=tf.cast(images, tf.float32),
                    lambda_ms_ssim=1.0,
                    lambda_psnr=1.0,
                    lambda_feature_loss=1.0,
                    data_format=self.DATA_FORMAT,
                    loss_net_kwargs=None)

                distortions_ops = {
                    'ms_ssim': distortions_obj.compute_ms_ssim(),
                    'mse': distortions_obj.compute_mse(),
                    'psnr': distortions_obj.compute_psnr()
                }

            # cpm saver
            cpm_saver = Saver(
                cpm_checkpoint,
                var_list=Saver.get_var_list_of_ckpt_dir(cpm_checkpoint))
            ckpt_itr, cpm_ckpt_path = Saver.all_ckpts_with_iterations(
                cpm_checkpoint)[-1]
            self.logger.info('ckpt_itr={}'.format(ckpt_itr))
            self.logger.info('ckpt_path={}'.format(cpm_ckpt_path))

        graph.finalize()

        with tf.Session(config=get_sess_config(allow_growth=False),
                        graph=graph) as sess:

            cpm_saver.restore_ckpt(sess, cpm_ckpt_path)

            distortions_values = {key: list() for key in self.DISTORTION_KEYS}
            bpp_values = []
            n_images_processed = 0
            n_images_processed_per_second = deque(10 * [0.0], 10)
            progress(
                n_images_processed, self._dataset.NUM_VAL,
                '{}/{} images processed'.format(n_images_processed,
                                                self._dataset.NUM_VAL))

            try:
                while True:
                    batch_start_time = time.time()

                    # compute distortions and bpp
                    batch_bpp_mean_values, batch_distortions_values = sess.run(
                        [avg_bits_per_pixel, distortions_ops])

                    # collect values
                    bpp_values.append(batch_bpp_mean_values)
                    for key in self.DISTORTION_KEYS:
                        distortions_values[key].append(
                            batch_distortions_values[key])

                    n_images_processed += self._batch_size
                    n_images_processed_per_second.append(
                        self._batch_size / (time.time() - batch_start_time))

                    progress(n_images_processed,
                             self._dataset.NUM_VAL,
                             status='{}/{} images processed ({} img/s)'.format(
                                 n_images_processed, self._dataset.NUM_VAL,
                                 np.mean(n_images_processed_per_second)))

            except tf.errors.OutOfRangeError:
                self.logger.info(
                    'reached end of dataset; processed {} images'.format(
                        n_images_processed))

            except KeyboardInterrupt:
                self.logger.info(
                    'manual interrupt; processed {}/{} images'.format(
                        n_images_processed, self._dataset.NUM_VAL))

                mean_bpp_values = np.mean(bpp_values)
                mean_dist_values = {
                    key: np.mean(arr)
                    for key, arr in distortions_values.items()
                }

                print('*** intermediate results:')
                print('bits per pixel: {}'.format(mean_bpp_values))
                for key in self.DISTORTION_KEYS:
                    print('{}: {}'.format(key, mean_dist_values[key]))

                return {key: np.nan for key in self.DISTORTION_KEYS}, np.nan

        mean_bpp_values = np.mean(bpp_values)
        mean_dist_values = {
            key: np.mean(arr)
            for key, arr in distortions_values.items()
        }

        return mean_dist_values, mean_bpp_values
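
`bitcost_to_bpp` converts the probability model's bit cost into bits per pixel; its definition is not shown. A sketch of the assumed computation (total bits divided by the number of pixels in the NCHW input batch):

import tensorflow as tf

def bitcost_to_bpp(bit_cost, images):
    # assumed formula: sum of bit costs / (N * H * W) for an NCHW batch
    shape = tf.shape(images)
    num_pixels = tf.cast(shape[0] * shape[2] * shape[3], tf.float32)
    return tf.reduce_sum(tf.cast(bit_cost, tf.float32)) / num_pixels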
Example #5
    # `Union` assumes `from typing import Union` at module level; the
    # original `any([...])` annotation evaluated to a bool, not a type.
    def eval_classifier_model(self,
                              cnn_model: Union[ImagenetClassifier,
                                               FGVCClassifier],
                              ckpt_path=None):

        tf.reset_default_graph()

        print('')
        self.logger.info('===== building graph start: {} ====='.format(
            cnn_model.NAME))

        # assertions
        assert self._dataset.NUM_CLASSES == cnn_model.num_classes, 'inconsistent number of classes ({} != {})'.format(
            self._dataset.NUM_CLASSES, cnn_model.num_classes)

        # image shapes
        image_shape_classification = cnn_model.INPUT_SHAPE
        image_shape_compression = CompressionPreprocessing.pad_image_shape(
            image_shape=image_shape_classification,
            size_multiple_of=self.SIZE_MULTIPLE_OF,
            extra_padding_multiples=2)

        # log image sizes
        self.logger.info(
            'image_shape_classification={}'.format(image_shape_classification))
        self.logger.info(
            'image_shape_compression={}'.format(image_shape_compression))

        with tf.Graph().as_default() as graph:
            # datafeed
            self.logger.info('* datafeed')

            rnn_model = self._get_rnn_model(image_shape_compression[0],
                                            image_shape_compression[1])

            input_pipeline = InputPipeline(
                records=self.records_file,
                records_type=RecordsParser.RECORDS_LABELLED,
                shuffle_buffer_size=self.BATCH_SIZE *
                self.NUM_PREPROCESSING_THREADS,
                batch_size=self.BATCH_SIZE,
                num_preprocessing_threads=self.NUM_PREPROCESSING_THREADS,
                num_repeat=1,
                preprocessing_fn=CompressionPreprocessing.preprocess_image,
                preprocessing_kwargs={
                    'height': image_shape_compression[0],
                    'width': image_shape_compression[1],
                    'resize_side_min': min(image_shape_compression[:2]),
                    'is_training': False,
                    'dtype_out': tf.uint8
                },
                drop_remainder=False,
                compute_bpp=False,
                shuffle=False)

            images, labels = input_pipeline.next_batch()

            # compression + inference op
            self.logger.info('* compression')
            with tf.name_scope('rnn_compression'):
                image_batch_compressed = rnn_model.build_model(
                    images=images,
                    is_training=tf.cast(False, tf.bool),
                    reuse=tf.get_variable_scope().reuse)

            # inference kwargs
            self.logger.info('* inference')
            if self._dataset_name == Imagenet.NAME:

                def inference_kwargs(**kwargs):
                    return dict(graph=kwargs['graph'])
            else:

                def inference_kwargs(**kwargs):
                    return dict(
                        arg_scope=cnn_model.arg_scope(weight_decay=float(0)),
                        is_training=False,
                        return_predictions=True,
                        reuse=True if kwargs['j'] > 0 else False)

            predictions_per_compression = []

            with tf.name_scope('inference_rnn'):

                for rnn_iteration in range(self._num_iterations):
                    with tf.name_scope('iteration_{}'.format(rnn_iteration)):
                        image_batch_compressed_iteration = tf.cast(
                            image_batch_compressed[rnn_iteration], tf.float32)

                        # take central crop of images in batch
                        image_batch_compressed_iteration = tf.image.resize_image_with_crop_or_pad(
                            image=image_batch_compressed_iteration,
                            target_height=image_shape_classification[0],
                            target_width=image_shape_classification[1])

                        # standardize appropriately
                        image_batch_compressed_iteration = cnn_model.standardize_tensor(
                            image_batch_compressed_iteration)

                        # predict
                        preds = cnn_model.inference(
                            image_batch_compressed_iteration,
                            **inference_kwargs(graph=graph, j=rnn_iteration))
                        predictions_per_compression.append(preds)

                # aggregate
                predictions_per_compression_op = tf.stack(
                    predictions_per_compression, axis=0)
                self.logger.info('predictions_shape: {}'.format(
                    predictions_per_compression_op.get_shape().as_list()))

            # restorers
            if self._dataset_name == Imagenet.NAME:
                classifier_saver = None
            else:
                classifier_saver = tf.train.Saver(
                    var_list=cnn_model.model_variables())

            # rnn saver
            saver = tf.train.Saver(var_list=rnn_model.model_variables)

        graph.finalize()

        with tf.Session(config=get_sess_config(allow_growth=False),
                        graph=graph) as sess:

            if classifier_saver is not None:
                classifier_saver.restore(sess, ckpt_path)

            saver.restore(sess, self._rnn_checkpoint)

            labels_values = []
            predictions_all_iters_values = [
                list() for _ in range(self._num_iterations)
            ]
            n_images_processed = 0
            n_images_processed_per_second = deque(10 * [0.0], 10)
            progress(
                n_images_processed, self._dataset.NUM_VAL,
                '{}/{} images processed'.format(n_images_processed,
                                                self._dataset.NUM_VAL))

            try:
                while True:
                    batch_start_time = time.time()

                    # run inference
                    batch_predictions_all_iters_values, batch_label_values = sess.run(
                        [predictions_per_compression_op, labels])

                    # collect predictions
                    for rnn_itr, preds_itr in enumerate(
                            batch_predictions_all_iters_values):
                        predictions_all_iters_values[rnn_itr].append(preds_itr)

                    # collect labels and bpp
                    labels_values.append(
                        self.to_categorical(batch_label_values,
                                            cnn_model.num_classes))

                    n_images_processed += len(batch_label_values)
                    n_images_processed_per_second.append(
                        len(batch_label_values) /
                        (time.time() - batch_start_time))

                    progress(n_images_processed,
                             self._dataset.NUM_VAL,
                             status='{}/{} images processed ({} img/s)'.format(
                                 n_images_processed, self._dataset.NUM_VAL,
                                 np.mean(n_images_processed_per_second)))

            except tf.errors.OutOfRangeError:
                self.logger.info(
                    'reached end of dataset; processed {} images'.format(
                        n_images_processed))

            except KeyboardInterrupt:
                self.logger.info(
                    'manual interrupt; processed {}/{} images'.format(
                        n_images_processed, self._dataset.NUM_VAL))
                return ([(np.nan, np.nan) for _ in range(self._num_iterations)],
                        [np.nan for _ in range(self._num_iterations)])

        labels_values = np.concatenate(labels_values, axis=0)
        predictions_all_iters_values = [
            np.concatenate(preds_iter_values, axis=0)
            for preds_iter_values in predictions_all_iters_values
        ]

        accuracies = [(self.top_k_accuracy(labels_values, preds_iter_values,
                                           1),
                       self.top_k_accuracy(labels_values, preds_iter_values,
                                           5))
                      for preds_iter_values in predictions_all_iters_values]

        return accuracies
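
`to_categorical` and `top_k_accuracy` are class helpers whose bodies are not shown; minimal NumPy stand-ins consistent with how they are called here:

import numpy as np

def to_categorical(labels, num_classes):
    # one-hot encode a batch of integer labels
    one_hot = np.zeros((len(labels), num_classes), dtype=np.float32)
    one_hot[np.arange(len(labels)), np.asarray(labels, dtype=np.int64)] = 1.0
    return one_hot

def top_k_accuracy(labels_one_hot, predictions, k):
    # fraction of samples whose true class is among the k highest scores
    true_idx = np.argmax(labels_one_hot, axis=1)
    top_k = np.argsort(predictions, axis=1)[:, -k:]
    return np.mean([t in row for t, row in zip(true_idx, top_k)])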
Example #6
File: eval_hvs_rnn.py  Project: DS3Lab/odlc
    def run(self):

        tf.reset_default_graph()

        print('')
        self.logger.info('===== building graph start =====')

        # datafeed
        self.logger.info('* datafeed')

        with tf.Graph().as_default() as graph:

            image_height_compression, image_width_compression, _ = RNNCompressionModel.pad_image_shape(
                image_shape=[self._image_height, self._image_width, 3])

            rnn_model = self._get_rnn_model(image_height_compression,
                                            image_width_compression)

            input_pipeline = InputPipeline(
                records=self._records_file,
                records_type=RecordsParser.RECORDS_UNLABELLED,
                shuffle_buffer_size=0,
                batch_size=self._batch_size,
                num_preprocessing_threads=self.NUM_PREPROCESSING_THREADS,
                num_repeat=1,
                preprocessing_fn=self._get_resize_function(
                    self._image_height, self._image_width),
                preprocessing_kwargs={},
                drop_remainder=True,
                compute_bpp=False,
                shuffle=False)

            images = input_pipeline.next_batch()[0]
            if image_height_compression != self._image_height or image_width_compression != self._image_width:
                images = tf.image.resize_image_with_crop_or_pad(
                    images, image_height_compression, image_width_compression)

            num_images_in_batch_op = tf.shape(images)[0]
            self.logger.info('images shape for compression: {}'.format(
                images.get_shape().as_list()))

            # compress images
            self.logger.info('* compression')
            images_compressed = rnn_model.build_model(
                images=images,
                is_training=tf.cast(False, tf.bool),
                reuse=tf.get_variable_scope().reuse)
            images_compressed.set_shape([
                self._num_iterations, self._batch_size,
                image_height_compression, image_width_compression, 3
            ])
            self.logger.info('compressed images shape: {}'.format(
                images_compressed.get_shape().as_list()))

            # compute distortions
            self.logger.info('* distortions')
            distortions_obj_per_compression = [
                Distortions(reconstructed_images=tf.image.
                            resize_image_with_crop_or_pad(
                                image=images_compressed[ii],
                                target_width=self._image_width,
                                target_height=self._image_height),
                            original_images=tf.cast(images, tf.float32),
                            lambda_ms_ssim=1.0,
                            lambda_psnr=1.0,
                            lambda_feature_loss=1.0,
                            data_format=self.DATA_FORMAT,
                            loss_net_kwargs=None)
                for ii in range(self._num_iterations)
            ]

            distortions_ops_per_compression = [{
                'ms_ssim': d.compute_ms_ssim()
            } for d in distortions_obj_per_compression]

            # savers
            rnn_saver = tf.train.Saver(var_list=rnn_model.model_variables)

        graph.finalize()

        with tf.Session(config=get_sess_config(allow_growth=True),
                        graph=graph) as sess:

            rnn_saver.restore(sess, self._rnn_checkpoint)

            distortions_values_per_compression = [{
                key: list()
                for key in self.DISTORTION_KEYS
            } for _ in range(self._num_iterations)]
            bpp_values_per_compression = [
                list() for _ in range(self._num_iterations)
            ]
            n_images_processed = 0
            n_images_processed_per_second = deque(10 * [0.0], 10)
            progress(
                n_images_processed, self._dataset.NUM_VAL,
                '{}/{} images processed'.format(n_images_processed,
                                                self._dataset.NUM_VAL))

            try:
                while True:
                    batch_start_time = time.time()

                    # compute distortions and bpp
                    batch_distortions_values_per_compression, num_images_in_batch = sess.run(
                        [
                            distortions_ops_per_compression,
                            num_images_in_batch_op
                        ])

                    # collect values
                    for comp_level, dist_comp in enumerate(
                            batch_distortions_values_per_compression):
                        # each RNN iteration contributes a fixed 0.125 bpp
                        bpp_values_per_compression[comp_level].append(
                            0.125 * (comp_level + 1))
                        for key in self.DISTORTION_KEYS:
                            distortions_values_per_compression[comp_level][
                                key].append(dist_comp[key])

                    n_images_processed += num_images_in_batch
                    n_images_processed_per_second.append(
                        num_images_in_batch / (time.time() - batch_start_time))

                    progress(n_images_processed,
                             self._dataset.NUM_VAL,
                             status='{}/{} images processed ({} img/s)'.format(
                                 n_images_processed, self._dataset.NUM_VAL,
                                 np.mean(n_images_processed_per_second)))

            except tf.errors.OutOfRangeError:
                self.logger.info(
                    'reached end of dataset; processed {} images'.format(
                        n_images_processed))

            except KeyboardInterrupt:
                self.logger.info(
                    'manual interrupt; processed {}/{} images'.format(
                        n_images_processed, self._dataset.NUM_VAL))
                return

            mean_bpp_values_per_compression = [
                np.mean(bpp_vals) for bpp_vals in bpp_values_per_compression
            ]
            mean_dist_values_per_compression = [{
                key: np.mean(arr)
                for key, arr in dist_dict.items()
            } for dist_dict in distortions_values_per_compression]

            self._save_results(mean_bpp_values_per_compression,
                               mean_dist_values_per_compression,
                               self._rnn_unit + '_' + self._loss_name,
                               [q + 1 for q in range(self._num_iterations)])
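
`RNNCompressionModel.pad_image_shape` pads the input shape so the codec's strided downsampling divides it evenly (each RNN iteration then contributes the fixed 0.125 bpp hard-coded above). A sketch of the assumed rounding, with the multiple as a hypothetical parameter:

import math

def pad_image_shape(image_shape, size_multiple_of=32):
    # round height and width up to the nearest multiple of the codec stride
    height, width, channels = image_shape
    return [int(math.ceil(height / float(size_multiple_of))) * size_multiple_of,
            int(math.ceil(width / float(size_multiple_of))) * size_multiple_of,
            channels]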
Example #7
    # `Union` assumes `from typing import Union` at module level
    def eval_classifier_model(self, ae_config, pc_config, cpm_checkpoint,
                              cnn_model: Union[ImagenetClassifier, FGVCClassifier], fgvc_ckpt_path=None):

        tf.reset_default_graph()

        print('')
        self.logger.info('===== building graph start: {} ====='.format(cnn_model.NAME))

        # assertions
        assert self._dataset.NUM_CLASSES == cnn_model.num_classes, 'inconsistent number of classes ({} != {})'.format(
            self._dataset.NUM_CLASSES, cnn_model.num_classes)

        # image shapes
        image_shape_classification = cnn_model.INPUT_SHAPE
        image_shape_compression = CompressionPreprocessing.pad_image_shape(image_shape=image_shape_classification,
                                                                           size_multiple_of=self.SIZE_MULTIPLE_OF,
                                                                           extra_padding_multiples=2)

        # log image sizes
        self.logger.info('image_shape_classification={}'.format(image_shape_classification))
        self.logger.info('image_shape_compression={}'.format(image_shape_compression))

        with tf.Graph().as_default() as graph:
            # datafeed
            self.logger.info('* datafeed')

            input_pipeline = InputPipeline(records=self.records_file,
                                           records_type=RecordsParser.RECORDS_LABELLED,
                                           shuffle_buffer_size=self.BATCH_SIZE * self.NUM_PREPROCESSING_THREADS,
                                           batch_size=self.BATCH_SIZE,
                                           num_preprocessing_threads=self.NUM_PREPROCESSING_THREADS,
                                           num_repeat=1,
                                           preprocessing_fn=CompressionPreprocessing.preprocess_image,
                                           preprocessing_kwargs={'height': image_shape_compression[0],
                                                                 'width': image_shape_compression[1],
                                                                 'resize_side_min': min(image_shape_compression[:2]),
                                                                 'is_training': False,
                                                                 'dtype_out': tf.uint8},
                                           drop_remainder=False,
                                           compute_bpp=False,
                                           shuffle=False, dtype_out=tf.float32)

            images, labels = input_pipeline.next_batch()

            # compression + inference op
            self.logger.info('* compression')
            with tf.name_scope('compression'):

                images = nhwc_to_nchw(images)

                # create networks
                ae_cls = autoencoder.get_network_cls(ae_config)
                pc_cls = probclass.get_network_cls(pc_config)

                # instantiate models
                ae = ae_cls(ae_config)
                pc = pc_cls(pc_config, num_centers=ae_config.num_centers)

                enc_out_val = ae.encode(images, is_training=False)
                images_compressed = ae.decode(enc_out_val.qhard, is_training=False)

                bitcost_val = pc.bitcost(enc_out_val.qbar, enc_out_val.symbols, is_training=False,
                                         pad_value=pc.auto_pad_value(ae))
                avg_bits_per_pixel = bitcost_to_bpp(bitcost_val, images)
                images_compressed = nchw_to_nhwc(images_compressed)

            # inference kwargs
            self.logger.info('* inference')
            if self._dataset_name == Imagenet.NAME:
                def inference_kwargs(**kwargs):
                    return dict(graph=kwargs['graph'])
            else:
                def inference_kwargs(**kwargs):
                    return dict(arg_scope=cnn_model.arg_scope(weight_decay=float(0)),
                                is_training=False,
                                return_predictions=True,
                                reuse=None)

            with tf.name_scope('inference_rnn'):

                # take central crop of images in batch
                images_compressed = tf.image.resize_image_with_crop_or_pad(
                    image=images_compressed,
                    target_height=image_shape_classification[0],
                    target_width=image_shape_classification[1])

                # standardize appropriately
                images_compressed = cnn_model.standardize_tensor(
                    images_compressed)

                # predict
                predictions = cnn_model.inference(images_compressed, **inference_kwargs(graph=graph))

                # aggregate
                self.logger.info('predictions_shape: {}'.format(predictions.get_shape().as_list()))

            # restorers
            if self._dataset_name == Imagenet.NAME:
                classifier_saver = None
            else:
                classifier_saver = tf.train.Saver(var_list=cnn_model.model_variables())

            # cpm saver
            cpm_saver = Saver(cpm_checkpoint, var_list=Saver.get_var_list_of_ckpt_dir(cpm_checkpoint))
            ckpt_itr, cpm_ckpt_path = Saver.all_ckpts_with_iterations(cpm_checkpoint)[-1]
            self.logger.info('ckpt_itr={}'.format(ckpt_itr))
            self.logger.info('ckpt_path={}'.format(cpm_ckpt_path))

        graph.finalize()

        with tf.Session(config=get_sess_config(allow_growth=False), graph=graph) as sess:

            cpm_saver.restore_ckpt(sess, cpm_ckpt_path)

            if classifier_saver is not None:
                classifier_saver.restore(sess, fgvc_ckpt_path)

            labels_values = []
            predictions_values = []
            bpp_values = []
            n_images_processed = 0
            n_images_processed_per_second = deque(10 * [0.0], 10)
            progress(n_images_processed, self._dataset.NUM_VAL,
                     '{}/{} images processed'.format(n_images_processed, self._dataset.NUM_VAL))

            try:
                while True:
                    batch_start_time = time.time()

                    # run inference
                    batch_predictions_values, batch_label_values, batch_avg_bpp_values = sess.run(
                        [predictions, labels, avg_bits_per_pixel])

                    # collect predictions
                    predictions_values.append(batch_predictions_values)

                    # collect labels and bpp
                    labels_values.append(self.to_categorical(batch_label_values, cnn_model.num_classes))
                    bpp_values.append(batch_avg_bpp_values)

                    n_images_processed += len(batch_label_values)
                    n_images_processed_per_second.append(len(batch_label_values) / (time.time() - batch_start_time))

                    progress(n_images_processed, self._dataset.NUM_VAL,
                             status='{}/{} images processed ({} img/s)'.format(
                                 n_images_processed, self._dataset.NUM_VAL,
                                 np.mean(n_images_processed_per_second)))

            except tf.errors.OutOfRangeError:
                self.logger.info('reached end of dataset; processed {} images'.format(n_images_processed))

            except KeyboardInterrupt:
                self.logger.info(
                    'manual interrupt; processed {}/{} images'.format(n_images_processed, self._dataset.NUM_VAL))

                labels_values = np.concatenate(labels_values, axis=0)
                predictions_values = np.concatenate(predictions_values, axis=0)
                bpp_values_mean = np.mean(bpp_values)

                accuracies = (self.top_k_accuracy(labels_values, predictions_values, 1),
                              self.top_k_accuracy(labels_values, predictions_values, 5))

                print('*** intermediate results:')
                print('bits per pixel: {}'.format(bpp_values_mean))
                print('Top-1 Accuracy: {}'.format(accuracies[0]))
                print('Top-5 Accuracy: {}'.format(accuracies[1]))

                return (np.nan, np.nan), np.nan

        labels_values = np.concatenate(labels_values, axis=0)
        predictions_values = np.concatenate(predictions_values, axis=0)
        bpp_values_mean = np.mean(bpp_values)

        accuracies = (self.top_k_accuracy(labels_values, predictions_values, 1),
                      self.top_k_accuracy(labels_values, predictions_values, 5))

        return accuracies, bpp_values_mean
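
`nhwc_to_nchw` / `nchw_to_nhwc` swap the tensor layout between TensorFlow's default NHWC and the NCHW layout the autoencoder expects; a minimal sketch using `tf.transpose`:

import tensorflow as tf

def nhwc_to_nchw(images):
    # [batch, height, width, channels] -> [batch, channels, height, width]
    return tf.transpose(images, [0, 3, 1, 2])

def nchw_to_nhwc(images):
    # [batch, channels, height, width] -> [batch, height, width, channels]
    return tf.transpose(images, [0, 2, 3, 1])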
Example #8
    def run(self):

        tf.reset_default_graph()

        print('')
        self.logger.info('===== building graph start =====')

        with tf.Graph().as_default() as graph:
            # datafeed
            self.logger.info('* datafeed')

            input_pipeline = InputPipeline(
                records=self._records_file,
                records_type=RecordsParser.RECORDS_UNLABELLED,
                shuffle_buffer_size=0,
                batch_size=self._batch_size,
                num_preprocessing_threads=self.NUM_PREPROCESSING_THREADS,
                num_repeat=1,
                preprocessing_fn=self._get_resize_function(
                    self._image_height, self._image_width),
                preprocessing_kwargs={},
                drop_remainder=True,
                compute_bpp=False,
                shuffle=False)

            images = input_pipeline.next_batch()[0]

            image_shape = images.get_shape().as_list()
            self.logger.info('image_shape: {}'.format(image_shape))

            # compression op
            self.logger.info('* compression')

            images_per_compression = []
            bpp_op_per_compression = []
            for j, compression_level in enumerate(self.COMPRESSION_LEVELS):
                # compress batch
                with tf.name_scope(
                        'compression_webp_{}'.format(compression_level)):
                    with tf.device(CPU_DEVICE):  # -> webp compression on cpu
                        img_batch_compressed, _bpp = TFWebp.tf_encode_decode_image_batch(
                            image_batch=tf.cast(images, tf.uint8),
                            quality=compression_level)

                    img_batch_compressed.set_shape(
                        images.get_shape().as_list())
                    images_per_compression.append(
                        tf.cast(img_batch_compressed, tf.float32))
                    bpp_op_per_compression.append(_bpp)

            # compute distortions
            self.logger.info('* distortions')
            distortions_obj_per_compression = [
                Distortions(reconstructed_images=c_img_batch,
                            original_images=tf.cast(images, tf.float32),
                            lambda_ms_ssim=1.0,
                            lambda_psnr=1.0,
                            lambda_feature_loss=1.0,
                            data_format=self.DATA_FORMAT,
                            loss_net_kwargs=None)
                for c_img_batch in images_per_compression
            ]

            distortions_ops_per_compression = [{
                'ms_ssim': d.compute_ms_ssim()
            } for d in distortions_obj_per_compression]

        graph.finalize()

        with tf.Session(config=get_sess_config(allow_growth=True),
                        graph=graph) as sess:

            distortions_values_per_compression = [{
                key: list()
                for key in self.DISTORTION_KEYS
            } for _ in self.COMPRESSION_LEVELS]
            bpp_values_per_compression = [
                list() for _ in self.COMPRESSION_LEVELS
            ]
            n_images_processed = 0
            n_images_processed_per_second = deque(10 * [0.0], 10)
            progress(
                n_images_processed, self._dataset.NUM_VAL,
                '{}/{} images processed'.format(n_images_processed,
                                                self._dataset.NUM_VAL))

            try:
                while True:
                    batch_start_time = time.time()

                    # compute distortions and bpp
                    batch_bpp_values_per_compression, batch_distortions_values_per_compression = sess.run(
                        [
                            bpp_op_per_compression,
                            distortions_ops_per_compression
                        ])

                    # collect values
                    for comp_level, (dist_comp, bpp_comp) in enumerate(
                            zip(batch_distortions_values_per_compression,
                                batch_bpp_values_per_compression)):
                        bpp_values_per_compression[comp_level].extend(bpp_comp)
                        for key in self.DISTORTION_KEYS:
                            distortions_values_per_compression[comp_level][
                                key].append(dist_comp[key])

                    n_images_processed += len(
                        batch_bpp_values_per_compression[0])
                    n_images_processed_per_second.append(
                        len(batch_bpp_values_per_compression[0]) /
                        (time.time() - batch_start_time))

                    progress(n_images_processed,
                             self._dataset.NUM_VAL,
                             status='{}/{} images processed ({} img/s)'.format(
                                 n_images_processed, self._dataset.NUM_VAL,
                                 np.mean(n_images_processed_per_second)))

            except tf.errors.OutOfRangeError:
                self.logger.info(
                    'reached end of dataset; processed {} images'.format(
                        n_images_processed))

            except KeyboardInterrupt:
                self.logger.info(
                    'manual interrupt; processed {}/{} images'.format(
                        n_images_processed, self._dataset.NUM_VAL))
                return [(np.nan, np.nan) for _ in self.COMPRESSION_LEVELS]

            mean_bpp_values_per_compression = [
                np.mean(bpp_vals) for bpp_vals in bpp_values_per_compression
            ]
            mean_dist_values_per_compression = [{
                key: np.mean(arr)
                for key, arr in dist_dict.items()
            } for dist_dict in distortions_values_per_compression]

            self._save_results(mean_bpp_values_per_compression,
                               mean_dist_values_per_compression, 'webp',
                               self.COMPRESSION_LEVELS)
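
`TFWebp.tf_encode_decode_image_batch` is project code; conceptually it round-trips each image through the WebP codec and reports the bitrate of the encoded payload. A hypothetical per-image sketch using Pillow (which must be built with WebP support); wiring it into the graph would go through `tf.py_func`:

import io
import numpy as np
from PIL import Image

def webp_roundtrip(image, quality):
    # encode one uint8 HWC image to WebP in memory, then decode it back
    buf = io.BytesIO()
    Image.fromarray(image).save(buf, format='WEBP', quality=int(quality))
    bpp = 8.0 * len(buf.getvalue()) / float(image.shape[0] * image.shape[1])
    buf.seek(0)
    decoded = np.asarray(Image.open(buf).convert('RGB'), dtype=np.uint8)
    return decoded, np.float32(bpp)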
Example #9
File: eval_hvs_bpg.py  Project: DS3Lab/odlc
    def run(self):

        tf.reset_default_graph()

        print('')
        self.logger.info('===== building graph start =====')

        with tf.Graph().as_default() as graph:

            # datafeed
            self.logger.info('* datafeed')

            ip0 = InputPipeline(
                records=self._records_file,
                records_type=RecordsParser.RECORDS_UNLABELLED,
                shuffle_buffer_size=0,
                batch_size=self._batch_size,
                num_preprocessing_threads=self.NUM_PREPROCESSING_THREADS,
                num_repeat=1,
                preprocessing_fn=CompressionPreprocessing.
                preprocess_image_for_eval,
                preprocessing_kwargs={
                    'height': self._image_height,
                    'width': self._image_width,
                    'resize_side_min': min(self._image_height,
                                           self._image_width)
                },
                drop_remainder=True,
                compute_bpp=False,
                shuffle=False)

            original_images = ip0.next_batch()[0]

            image_batches, bpp_op_per_compression = [], []
            for records in self._bpg_records_files:
                ip = InputPipeline(
                    records=records,
                    records_type=RecordsParser.RECORDS_BPP,
                    shuffle_buffer_size=0,
                    batch_size=self._batch_size,
                    num_preprocessing_threads=self.NUM_PREPROCESSING_THREADS,
                    num_repeat=1,
                    preprocessing_fn=CompressionPreprocessing.
                    preprocess_image_with_identity,
                    preprocessing_kwargs={
                        'height': self._image_height,
                        'width': self._image_width,
                        'dtype_out': tf.uint8
                    },
                    drop_remainder=True,
                    compute_bpp=False,
                    shuffle=False)

                images, bpp = ip.next_batch()

                image_batches.append(images)
                bpp_op_per_compression.append(bpp)

            # compute distortions
            self.logger.info('* distortions')
            distortions_obj_per_compression = [
                Distortions(reconstructed_images=c_img_batch,
                            original_images=original_images,
                            lambda_ms_ssim=1.0,
                            lambda_psnr=1.0,
                            lambda_feature_loss=1.0,
                            data_format=self.DATA_FORMAT,
                            loss_net_kwargs=None)
                for c_img_batch in image_batches
            ]

            distortions_ops_per_compression = [{
                'ms_ssim': d.compute_ms_ssim()
            } for d in distortions_obj_per_compression]

        graph.finalize()

        with tf.Session(config=get_sess_config(allow_growth=True),
                        graph=graph) as sess:

            distortions_values_per_compression = [{
                key: list()
                for key in self.DISTORTION_KEYS
            } for _ in range(self._num_compression_levels)]
            bpp_values_per_compression = [
                list() for _ in range(self._num_compression_levels)
            ]
            n_images_processed = 0
            n_images_processed_per_second = deque(10 * [0.0], 10)
            progress(
                n_images_processed, self._dataset.NUM_VAL,
                '{}/{} images processed'.format(n_images_processed,
                                                self._dataset.NUM_VAL))

            try:
                while True:
                    batch_start_time = time.time()

                    # compute distortions and bpp
                    batch_bpp_values_per_compression, batch_distortions_values_per_compression = sess.run(
                        [
                            bpp_op_per_compression,
                            distortions_ops_per_compression
                        ])

                    # collect values
                    for comp_level, (dist_comp, bpp_comp) in enumerate(
                            zip(batch_distortions_values_per_compression,
                                batch_bpp_values_per_compression)):
                        bpp_values_per_compression[comp_level].extend(bpp_comp)
                        for key in self.DISTORTION_KEYS:
                            distortions_values_per_compression[comp_level][
                                key].append(dist_comp[key])

                    n_images_processed += len(
                        batch_bpp_values_per_compression[0])
                    n_images_processed_per_second.append(
                        len(batch_bpp_values_per_compression[0]) /
                        (time.time() - batch_start_time))

                    progress(n_images_processed,
                             self._dataset.NUM_VAL,
                             status='{}/{} images processed ({} img/s)'.format(
                                 n_images_processed, self._dataset.NUM_VAL,
                                 np.mean(n_images_processed_per_second)))

            except tf.errors.OutOfRangeError:
                self.logger.info(
                    'reached end of dataset; processed {} images'.format(
                        n_images_processed))

            except KeyboardInterrupt:
                self.logger.info(
                    'manual interrupt; processed {}/{} images'.format(
                        n_images_processed, self._dataset.NUM_VAL))
                return [(np.nan, np.nan)
                        for _ in range(self._num_compression_levels)]

            mean_bpp_values_per_compression = [
                np.mean(bpp_vals) for bpp_vals in bpp_values_per_compression
            ]
            mean_dist_values_per_compression = [{
                key: np.mean(arr)
                for key, arr in dist_dict.items()
            } for dist_dict in distortions_values_per_compression]

            self._save_results(mean_bpp_values_per_compression,
                               mean_dist_values_per_compression, 'bpg', None)
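
`get_sess_config` appears in every session here; a minimal stand-in that only toggles incremental GPU memory allocation:

import tensorflow as tf

def get_sess_config(allow_growth):
    # allow_growth=True lets TF claim GPU memory on demand instead of all at once
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = allow_growth
    return config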
Example #10
    # `Union` assumes `from typing import Union` at module level
    def eval_classifier_model(self,
                              cnn_model: Union[ImagenetClassifier,
                                               FGVCClassifier],
                              ckpt_path=None):

        tf.reset_default_graph()

        print('')
        self.logger.info('===== building graph start: {} ====='.format(
            cnn_model.NAME))

        # assertions
        assert self._dataset.NUM_CLASSES == cnn_model.num_classes, 'inconsistent number of classes ({} != {})'.format(
            self._dataset.NUM_CLASSES, cnn_model.num_classes)

        # image shapes
        image_shape_classification = cnn_model.INPUT_SHAPE
        image_shape_compression = CompressionPreprocessing.pad_image_shape(
            image_shape=image_shape_classification,
            size_multiple_of=self.SIZE_MULTIPLE_OF,
            extra_padding_multiples=2)

        # log image sizes
        self.logger.info(
            'image_shape_classification={}'.format(image_shape_classification))
        self.logger.info(
            'image_shape_compression={}'.format(image_shape_compression))

        # records files depending on inference resolution
        if image_shape_classification[0] < 256:
            bpg_records_files = list(self._bpg_records_files256)
        else:
            bpg_records_files = list(self._bpg_records_files336)

        self.logger.info('bpg_records_files: {}'.format(bpg_records_files))

        with tf.Graph().as_default() as graph:

            # datafeed
            self.logger.info('* datafeed')
            image_batches, labels_batches, bpp_batches = [], [], []
            for records in bpg_records_files:
                ip = InputPipeline(
                    records=records,
                    records_type=RecordsParser.RECORDS_LABELLED_BPP,
                    shuffle_buffer_size=1,
                    batch_size=self.BATCH_SIZE,
                    num_preprocessing_threads=self.NUM_PREPROCESSING_THREADS,
                    num_repeat=1,
                    preprocessing_fn=CompressionPreprocessing.
                    preprocess_image_with_identity,
                    preprocessing_kwargs={
                        'height': image_shape_compression[0],
                        'width': image_shape_compression[1],
                        'dtype_out': tf.uint8
                    },
                    drop_remainder=True,
                    compute_bpp=False,
                    shuffle=False,
                    dtype_out=tf.uint8)

                images, labels, bpp = ip.next_batch()

                image_batches.append(images)
                labels_batches.append(labels)
                bpp_batches.append(bpp)

            # compression + inference op
            self.logger.info('* inference')

            predictions_per_compression = []

            # inference kwargs
            if self._dataset_name == Imagenet.NAME:

                def inference_kwargs(**kwargs):
                    return dict(graph=kwargs['graph'])
            else:

                def inference_kwargs(**kwargs):
                    return dict(
                        arg_scope=cnn_model.arg_scope(weight_decay=float(0)),
                        is_training=False,
                        return_predictions=True,
                        reuse=True if kwargs['j'] > 0 else False)

            for j, image_batch_compressed in enumerate(image_batches):
                with tf.name_scope('inference_bpg{}'.format(j)):
                    # crop center
                    image_batch_compressed = tf.image.resize_image_with_crop_or_pad(
                        image_batch_compressed, image_shape_classification[0],
                        image_shape_classification[1])

                    # standardize appropriately
                    image_batch_for_inference = cnn_model.standardize_tensor(
                        image_batch_compressed)

                    # predict
                    preds = cnn_model.inference(
                        image_batch_for_inference,
                        **inference_kwargs(graph=graph, j=j))
                    predictions_per_compression.append(preds)

            # aggregate
            predictions_per_compression_op = tf.stack(
                predictions_per_compression, axis=0)
            self.logger.info('predictions_shape: {}'.format(
                predictions_per_compression_op.get_shape().as_list()))

            # restore
            if self._dataset_name == Imagenet.NAME:
                classifier_saver = None
            else:
                classifier_saver = tf.train.Saver(
                    var_list=cnn_model.model_variables())

        graph.finalize()

        with tf.Session(config=get_sess_config(allow_growth=False),
                        graph=graph) as sess:

            if classifier_saver is not None:
                classifier_saver.restore(sess, ckpt_path)

            labels_all_comp_values = [list() for _ in range(self._num_records)]
            predictions_all_comp_values = [
                list() for _ in range(self._num_records)
            ]
            bpp_all_comp_values = [list() for _ in range(self._num_records)]
            n_images_processed = 0
            n_images_processed_per_second = deque(10 * [0.0], 10)
            progress(
                n_images_processed, self._dataset.NUM_VAL,
                '{}/{} images processed'.format(n_images_processed,
                                                self._dataset.NUM_VAL))

            try:
                while True:
                    batch_start_time = time.time()

                    # run inference
                    (batch_predictions_all_comp_values,
                     batch_label_all_comp_values,
                     batch_bpp_all_comp_values) = sess.run([
                         predictions_per_compression_op, labels_batches,
                         bpp_batches
                     ])

                    # collect predictions
                    for comp_level, (preds_comp, bpp_comp,
                                     labels_comp) in enumerate(
                                         zip(batch_predictions_all_comp_values,
                                             batch_bpp_all_comp_values,
                                             batch_label_all_comp_values)):
                        predictions_all_comp_values[comp_level].append(
                            preds_comp)
                        bpp_all_comp_values[comp_level].append(bpp_comp)
                        labels_all_comp_values[comp_level].append(
                            self.to_categorical(labels_comp,
                                                cnn_model.num_classes))

                    n_images_processed += len(batch_label_all_comp_values[0])
                    n_images_processed_per_second.append(
                        len(batch_label_all_comp_values[0]) /
                        (time.time() - batch_start_time))

                    progress(n_images_processed,
                             self._dataset.NUM_VAL,
                             status='{}/{} images processed ({} img/s)'.format(
                                 n_images_processed, self._dataset.NUM_VAL,
                                 np.mean(n_images_processed_per_second)))

            except tf.errors.OutOfRangeError:
                self.logger.info(
                    'reached end of dataset; processed {} images'.format(
                        n_images_processed))

            except KeyboardInterrupt:
                self.logger.info(
                    'manual interrupt; processed {}/{} images'.format(
                        n_images_processed, self._dataset.NUM_VAL))
                return ([(np.nan, np.nan) for _ in range(self._num_records)],
                        [np.nan for _ in range(self._num_records)])

        labels_all_comp_values = [
            np.concatenate(labels_comp_values, axis=0)
            for labels_comp_values in labels_all_comp_values
        ]
        bpp_all_comp_values = [
            np.mean(np.concatenate(bpp_values, 0))
            for bpp_values in bpp_all_comp_values
        ]
        predictions_all_comp_values = [
            np.concatenate(preds_comp_values, axis=0)
            for preds_comp_values in predictions_all_comp_values
        ]

        accuracies = [(self.top_k_accuracy(labels_values, preds_comp_values,
                                           1),
                       self.top_k_accuracy(labels_values, preds_comp_values,
                                           5))
                      for preds_comp_values, labels_values in zip(
                          predictions_all_comp_values, labels_all_comp_values)]

        return accuracies, bpp_all_comp_values