Code example #1
def _build_detection_graph(input_type, detection_model, input_shape,
                           output_collection_name, graph_hook_fn, quantize):
    """Build the detection graph."""
    if input_type not in input_placeholder_fn_map:
        raise ValueError('Unknown input type: {}'.format(input_type))
    placeholder_args = {}
    if input_shape is not None:
        if input_type != 'image_tensor':
            raise ValueError('Can only specify input shape for `image_tensor` '
                             'inputs.')
        placeholder_args['input_shape'] = input_shape
    placeholder_tensor, input_tensors = input_placeholder_fn_map[input_type](
        **placeholder_args)
    outputs = _get_outputs_from_inputs(
        input_tensors=input_tensors,
        detection_model=detection_model,
        output_collection_name=output_collection_name)

    # Add global step to the graph.
    slim.get_or_create_global_step()

    if graph_hook_fn: graph_hook_fn()

    if quantize:
        from tensorflow.contrib.quantize import experimental_create_eval_graph
        experimental_create_eval_graph()
        # g = tf.get_default_graph()
        # print(g.get_operations())

    return outputs, placeholder_tensor
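For reference, a minimal self-contained sketch of the same eval-rewrite pattern (assuming a TF 1.x environment where tf.contrib.quantize is available; the toy network, node names, and checkpoint path below are illustrative, not taken from the example above):

import tensorflow as tf
from tensorflow.contrib import quantize as contrib_quantize

graph = tf.Graph()
with graph.as_default():
    # A toy inference graph standing in for the detection model.
    images = tf.placeholder(tf.float32, [1, 224, 224, 3], name='input')
    net = tf.layers.conv2d(images, 16, 3, activation=tf.nn.relu)
    logits = tf.layers.dense(tf.layers.flatten(net), 10)
    outputs = tf.identity(logits, name='output')

    # Rewrite the graph in place: fake-quantization ops are inserted after
    # weights and activations, mirroring the training-time rewrite done by
    # experimental_create_training_graph.
    contrib_quantize.experimental_create_eval_graph(input_graph=graph)

    saver = tf.train.Saver()

with tf.Session(graph=graph) as sess:
    # Hypothetical checkpoint from a quantization-aware training run.
    saver.restore(sess, '/tmp/quant_model/model.ckpt')
    frozen = tf.graph_util.convert_variables_to_constants(
        sess, graph.as_graph_def(), ['output'])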
Code example #2
    def _check_quantization(self, model_fn):
        checkpoint_dir = self.get_temp_dir()

        with tf.Graph().as_default() as training_graph:
            model_fn(is_training=True)
            contrib_quantize.experimental_create_training_graph(training_graph)
            with self.session(graph=training_graph) as sess:
                sess.run(tf.global_variables_initializer())
                tf.train.Saver().save(sess, checkpoint_dir)

        with tf.Graph().as_default() as eval_graph:
            model_fn(is_training=False)
            contrib_quantize.experimental_create_eval_graph(eval_graph)
            with self.session(graph=eval_graph) as sess:
                tf.train.Saver().restore(sess, checkpoint_dir)
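The model_fn passed to a helper like this only needs to build its layers on the current default graph, because both rewriters modify that graph in place. A minimal sketch of such a function (the layer stack is an assumption for illustration, not taken from the test):

import tensorflow as tf

def _toy_model_fn(is_training):
    # Conv -> batch norm -> ReLU is the canonical pattern the quantize
    # rewriters fold and instrument with fake-quant ops.
    images = tf.placeholder(tf.float32, [1, 32, 32, 3], name='input')
    net = tf.layers.conv2d(images, 8, 3, use_bias=False)
    net = tf.layers.batch_normalization(net, training=is_training)
    net = tf.nn.relu(net)
    return tf.layers.dense(tf.layers.flatten(net), 10)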
Code example #3
    def graph_rewrite_fn():
        """Function to quantize weights and activation of the default graph."""
        if (graph_rewriter_config.quantization.weight_bits != 8
                or graph_rewriter_config.quantization.activation_bits != 8):
            raise ValueError('Only 8bit quantization is supported')

        # Quantize the graph by inserting quantize ops for weights and activations
        if is_training:
            contrib_quantize.experimental_create_training_graph(
                input_graph=tf.get_default_graph(),
                quant_delay=graph_rewriter_config.quantization.delay)
        else:
            contrib_quantize.experimental_create_eval_graph(
                input_graph=tf.get_default_graph())
        slim.summarize_collection('quant_vars')
Code example #4
def model_func(image):
    K.set_learning_phase(mode == "train")
    with tf_compat.forward_compatibility_horizon(2019, 6, 5):
        m = tf.keras.models.clone_model(model, input_tensors=image)
        if quant_type != "none":
            with tf.variable_scope('quants', reuse=tf.AUTO_REUSE):
                if mode == "train":
                    tf_quantize.experimental_create_training_graph(
                        tf.get_default_graph(),
                        quant_delay=quant_delay,
                        quant_type=quant_type)
                else:
                    tf_quantize.experimental_create_eval_graph(
                        tf.get_default_graph(), quant_type=quant_type)
    return m
Code example #5
def main(_):
    if not FLAGS.output_file:
        raise ValueError(
            'You must supply the path to save to with --output_file')
    if FLAGS.is_video_model and not FLAGS.num_frames:
        raise ValueError(
            'Number of frames must be specified for video models with --num_frames'
        )
    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default() as graph:
        dataset = dataset_factory.get_dataset(FLAGS.dataset_name, 'train',
                                              FLAGS.dataset_dir)
        network_fn = nets_factory.get_network_fn(
            FLAGS.model_name,
            num_classes=(dataset.num_classes - FLAGS.labels_offset),
            is_training=FLAGS.is_training)
        image_size = FLAGS.image_size or network_fn.default_image_size
        num_channels = 1 if FLAGS.use_grayscale else 3
        if FLAGS.is_video_model:
            input_shape = [
                FLAGS.batch_size, FLAGS.num_frames, image_size, image_size,
                num_channels
            ]
        else:
            input_shape = [
                FLAGS.batch_size, image_size, image_size, num_channels
            ]
        placeholder = tf.placeholder(name='input',
                                     dtype=tf.float32,
                                     shape=input_shape)
        network_fn(placeholder)

        if FLAGS.quantize:
            #contrib_quantize.create_eval_graph()
            contrib_quantize.experimental_create_eval_graph(symmetric=True,
                                                            weight_bits=8,
                                                            activation_bits=8)

        graph_def = graph.as_graph_def()
        if FLAGS.write_text_graphdef:
            tf.io.write_graph(graph_def,
                              os.path.dirname(FLAGS.output_file),
                              os.path.basename(FLAGS.output_file),
                              as_text=True)
        else:
            with gfile.GFile(FLAGS.output_file, 'wb') as f:
                f.write(graph_def.SerializeToString())
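Downstream, a GraphDef exported this way is typically frozen against the quantization-aware training checkpoint and then fed to the TFLite converter. A rough TF 1.x sketch (the file names, node names, and input stats are assumptions, not part of the script above):

import tensorflow as tf

# Hypothetical frozen graph produced by the freeze_graph tool from the
# exported GraphDef plus the training checkpoint.
frozen_pb = '/tmp/frozen_eval_graph.pb'
converter = tf.lite.TFLiteConverter.from_frozen_graph(
    frozen_pb,
    input_arrays=['input'],
    output_arrays=['MobilenetV1/Predictions/Reshape_1'])  # assumed output node
converter.inference_type = tf.uint8
# Mean/std that map float inputs to uint8; must match training preprocessing.
converter.quantized_input_stats = {'input': (128.0, 127.0)}
tflite_model = converter.convert()
with open('/tmp/model_quant.tflite', 'wb') as f:
    f.write(tflite_model)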
Code example #6
    def graph_rewrite_fn():
        """Function to quantize weights and activation of the default graph."""
        if (graph_rewriter_config.quantization.weight_bits != 8
                or graph_rewriter_config.quantization.activation_bits != 8):
            raise ValueError('Only 8bit quantization is supported')

        graph = tf.get_default_graph()

        # Insert custom quant ops.
        if quant_overrides_config is not None:
            input_to_ops_map = input_to_ops.InputToOps(graph)
            for q in quant_overrides_config.quant_configs:
                producer = graph.get_operation_by_name(q.op_name)
                if producer is None:
                    raise ValueError('Op name does not exist in graph.')
                context = _get_context_from_op(producer)
                consumers = input_to_ops_map.ConsumerOperations(producer)
                if q.fixed_range:
                    _insert_fixed_quant_op(
                        context,
                        q.quant_op_name,
                        producer,
                        consumers,
                        init_min=q.min,
                        init_max=q.max,
                        quant_delay=q.delay if is_training else 0)
                else:
                    raise ValueError('Learned ranges are not yet supported.')

        # Quantize the graph by inserting quantize ops for weights and activations
        if is_training:
            contrib_quantize.experimental_create_training_graph(
                input_graph=graph,
                quant_delay=graph_rewriter_config.quantization.delay,
                freeze_bn_delay=graph_rewriter_config.quantization.delay)
        else:
            contrib_quantize.experimental_create_eval_graph(
                input_graph=graph,
                quant_delay=graph_rewriter_config.quantization.delay
                if not is_export else 0)

        contrib_layers.summarize_collection('quant_vars')
Code example #7
def _extract_anchors_and_losses(model,
                                create_input_dict_fn,
                                quantize=False,
                                ignore_groundtruth=False):
    """Constructs tensorflow detection graph and returns output tensors.

  Args:
    model: model to perform predictions with.
    create_input_dict_fn: function to create input tensor dictionaries.
    ignore_groundtruth: whether groundtruth should be ignored.

  Returns:
    prediction_groundtruth_dict: A dictionary with postprocessed tensors (keyed
      by standard_fields.DetectionResultsFields) and optional groundtruth
      tensors (keyed by standard_fields.InputDataFields).
    losses_dict: A dictionary containing detection losses. This is empty when
      ignore_groundtruth is true.
  """
    input_dict = create_input_dict_fn()
    prefetch_queue = prefetcher.prefetch(input_dict, capacity=500)
    input_dict = prefetch_queue.dequeue()
    original_image = tf.expand_dims(input_dict[fields.InputDataFields.image],
                                    0)
    preprocessed_image, true_image_shapes = model.preprocess(
        tf.to_float(original_image))
    prediction_dict = model.predict(preprocessed_image, true_image_shapes)
    detections = model.postprocess(prediction_dict, true_image_shapes)

    if quantize:
        from tensorflow.contrib.quantize import experimental_create_eval_graph
        experimental_create_eval_graph()
        # g = tf.get_default_graph()
        # print(g.get_operations())

    groundtruth = None
    losses_dict = {}
    if not ignore_groundtruth:
        groundtruth = {
            fields.InputDataFields.groundtruth_boxes:
            input_dict[fields.InputDataFields.groundtruth_boxes],
            fields.InputDataFields.groundtruth_classes:
            input_dict[fields.InputDataFields.groundtruth_classes],
            fields.InputDataFields.groundtruth_area:
            input_dict[fields.InputDataFields.groundtruth_area],
            fields.InputDataFields.groundtruth_is_crowd:
            input_dict[fields.InputDataFields.groundtruth_is_crowd],
            fields.InputDataFields.groundtruth_difficult:
            input_dict[fields.InputDataFields.groundtruth_difficult]
        }
        if fields.InputDataFields.groundtruth_group_of in input_dict:
            groundtruth[fields.InputDataFields.groundtruth_group_of] = (
                input_dict[fields.InputDataFields.groundtruth_group_of])
        if fields.DetectionResultFields.detection_masks in detections:
            groundtruth[fields.InputDataFields.groundtruth_instance_masks] = (
                input_dict[fields.InputDataFields.groundtruth_instance_masks])
        label_id_offset = 1
        model.provide_groundtruth(
            [input_dict[fields.InputDataFields.groundtruth_boxes]], [
                tf.one_hot(
                    input_dict[fields.InputDataFields.groundtruth_classes] -
                    label_id_offset,
                    depth=model.num_classes)
            ])
        losses_dict.update(model.loss(prediction_dict, true_image_shapes))

    result_dict = eval_util.result_dict_for_single_example(
        original_image,
        input_dict[fields.InputDataFields.source_id],
        detections,
        groundtruth,
        class_agnostic=(fields.DetectionResultFields.detection_classes
                        not in detections),
        scale_to_absolute=True)

    # model.preprocess
    result_dict['preprocessed_image'] = preprocessed_image
    result_dict['true_image_shapes'] = true_image_shapes

    # model.predict
    result_dict['class_predictions_with_background'] = (
        prediction_dict['class_predictions_with_background'])
    result_dict['feature_maps'] = prediction_dict['feature_maps']
    result_dict['preprocessed_inputs'] = prediction_dict['preprocessed_inputs']
    result_dict['box_encodings'] = prediction_dict['box_encodings']
    result_dict['anchors'] = prediction_dict['anchors']

    # model.detections DEBUG ONLY
    # result_dict['detection_boxes_all'] = detections['detection_boxes_all']

    return result_dict, losses_dict
Code example #8
if args.train:
    quantize.experimental_create_training_graph(weight_bits=8,
                                                activation_bits=8,
                                                quant_delay=5000)

    model.compile(optimizer=tf.keras.optimizers.Adam(),
                  loss=tf.keras.losses.binary_crossentropy,
                  metrics=['accuracy'])
    sess.run(tf.global_variables_initializer())
    model.fit(data, labels, epochs=10000)
    tf.train.Saver().save(sess, checkpoint_prefix)

    test_loss, test_acc = model.evaluate(data, labels)

    print('Test accuracy:', test_acc)
else:
    quantize.experimental_create_eval_graph(weight_bits=8, activation_bits=8)
    tf.train.Saver().restore(sess, checkpoint_prefix)
    print('Prediction:', model.predict(data))

if args.dump_variables:
    variables = {}
    for tf_variable in tf.global_variables():
        variables[tf_variable.name] = sess.run(tf_variable)
    with open(args.dump_variables, 'w') as f:
        json.dump(variables,
                  f,
                  ensure_ascii=False,
                  default=serialization.get_json_type)
Code example #9
File: freeze.py  Project: 1opc/USTC_SSE_AI
x = ReLU(name='relu1')(x)
x = Conv2D(64, (3, 3), name='conv2')(x)
x = ReLU(name='relu2')(x)
x = MaxPooling2D(pool_size=(2, 2), name='maxpool')(x)
x = Flatten(name='flatten')(x)
x = Dense(128)(x)
x = ReLU(name='relu3')(x)
output_tensor = Dense(10, name='output_tensor')(x)

model = Model(inputs=input_tensor, outputs=output_tensor)

sess = K.get_session()

# add fake-quantize ops to the eval graph
experimental_create_eval_graph(input_graph=sess.graph,
                               weight_bits=8,
                               activation_bits=8)

# check that the fake-quantize ops were added successfully
for node in sess.graph.as_graph_def().node:
    if 'AssignMaxLast' in node.name or 'AssignMinLast' in node.name:
        print('node name: {}'.format(node.name))

# load the quantize-aware trained model weights
saver = tf.train.Saver()
saver.restore(sess, './models/quant_aware_trained/model.ckpt')

# freeze the graph: convert variables to constants
const_graph = tf.graph_util.convert_variables_to_constants(
    sess=sess,