Example #1
def _run_function_for_calibration_graph_mode(
        sess: session.Session, signature_def: meta_graph_pb2.SignatureDef,
        representative_dataset: repr_dataset.RepresentativeDataset) -> None:
    """Runs the representative dataset through a function for calibration.

    NOTE: This is intended to be run in graph mode (TF1).

    The function is identified by the SignatureDef.

    Args:
      sess: The Session object to run the function in.
      signature_def: A SignatureDef that identifies a function by specifying
        the inputs and outputs.
      representative_dataset: The representative dataset to run through the
        function.
    """
    output_tensor_names = [
        output_tensor_info.name
        for output_tensor_info in signature_def.outputs.values()
    ]

    sample_validator = _create_sample_validator(
        expected_input_keys=signature_def.inputs.keys())
    for sample in map(sample_validator, representative_dataset):
        # Create a mapping from input tensor name to the input tensor value.
        # ex) "Placeholder:0" -> [0, 1, 2]
        feed_dict = _create_feed_dict_from_input_data(sample, signature_def)
        sess.run(output_tensor_names, feed_dict=feed_dict)
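For context, a minimal sketch of the mapping `_create_feed_dict_from_input_data` builds from a sample (illustrative only; the real helper lives alongside this function in TF's quantization code):

def _sketch_feed_dict(sample, signature_def):
    # Map each sample key to its input tensor name via the SignatureDef,
    # e.g. {"x": [0, 1, 2]} -> {"Placeholder:0": [0, 1, 2]}.
    return {
        signature_def.inputs[key].name: value
        for key, value in sample.items()
    }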
Example #2
def copy_variable_to_graph(org_instance, to_graph, scope=""):
    """Given a `Variable` instance from one `Graph`, initializes and returns
    a copy of it from another `Graph`, under the specified scope
    (default `""`).

    Args:
        org_instance: A `Variable` from some `Graph`.
        to_graph: The `Graph` to copy the `Variable` to.
        scope: A scope for the new `Variable` (default `""`).

    Returns:
        The copied `Variable` from `to_graph`.

    Raises:
        TypeError: If `org_instance` is not a `Variable`.
    """

    if not isinstance(org_instance, Variable):
        raise TypeError(str(org_instance) + " is not a Variable")

    # The name of the new variable
    if scope != "":
        new_name = (scope + '/' +
                    org_instance.name[:org_instance.name.index(':')])
    else:
        new_name = org_instance.name[:org_instance.name.index(':')]

    # Get the collections that the new instance needs to be added to.
    # The new collections will also be a part of the given scope,
    # except the special ones required for variable initialization and
    # training.
    collections = []
    for name, collection in org_instance.graph._collections.items():
        if org_instance in collection:
            if (name == ops.GraphKeys.VARIABLES
                    or name == ops.GraphKeys.TRAINABLE_VARIABLES
                    or scope == ''):
                collections.append(name)
            else:
                collections.append(scope + '/' + name)

    # See if it's trainable.
    trainable = (org_instance in org_instance.graph.get_collection(
        ops.GraphKeys.TRAINABLE_VARIABLES))
    # Get the initial value
    with org_instance.graph.as_default():
        temp_session = Session()
        init_value = temp_session.run(org_instance.initialized_value())

    # Initialize the new variable
    with to_graph.as_default():
        new_var = Variable(init_value,
                           trainable,
                           name=new_name,
                           collections=collections,
                           validate_shape=False)

    return new_var
Example #3
def copy_variable_to_graph(org_instance, to_graph, scope=''):
  """Given a `Variable` instance from one `Graph`, initializes and returns
  a copy of it from another `Graph`, under the specified scope
  (default `""`).

  Args:
    org_instance: A `Variable` from some `Graph`.
    to_graph: The `Graph` to copy the `Variable` to.
    scope: A scope for the new `Variable` (default `""`).

  Returns:
    The copied `Variable` from `to_graph`.

  Raises:
    TypeError: If `org_instance` is not a `Variable`.
  """

  if not isinstance(org_instance, Variable):
    raise TypeError(str(org_instance) + ' is not a Variable')

  # The name of the new variable
  if scope != '':
    new_name = (scope + '/' + org_instance.name[:org_instance.name.index(':')])
  else:
    new_name = org_instance.name[:org_instance.name.index(':')]

  # Get the collections that the new instance needs to be added to.
  # The new collections will also be a part of the given scope,
  # except the special ones required for variable initialization and
  # training.
  collections = []
  for name, collection in org_instance.graph._collections.items():
    if org_instance in collection:
      if (name == ops.GraphKeys.GLOBAL_VARIABLES or
          name == ops.GraphKeys.TRAINABLE_VARIABLES or scope == ''):
        collections.append(name)
      else:
        collections.append(scope + '/' + name)

  # See if it's trainable.
  trainable = (
      org_instance in org_instance.graph.get_collection(
          ops.GraphKeys.TRAINABLE_VARIABLES))
  # Get the initial value
  with org_instance.graph.as_default():
    temp_session = Session()
    init_value = temp_session.run(org_instance.initialized_value())

  # Initialize the new variable
  with to_graph.as_default():
    new_var = Variable(
        init_value,
        trainable,
        name=new_name,
        collections=collections,
        validate_shape=False)

  return new_var
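A minimal usage sketch for `copy_variable_to_graph`, assuming the TF1-era imports the examples above rely on (`ops`, `Variable`, `Session`); graph and variable names are illustrative:

source_graph = ops.Graph()
with source_graph.as_default():
    weights = Variable([1.0, 2.0], name='weights')

dest_graph = ops.Graph()
copied = copy_variable_to_graph(weights, dest_graph, scope='copied')

with dest_graph.as_default():
    with Session() as sess:
        sess.run(copied.initializer)
        print(sess.run(copied))  # [1.0, 2.0], named 'copied/weights:0'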
Example #4
def create_resource_split_graph(
        sess: Session,
        input_value: Any,
        input_dtype: Any,
        num_outputs: int,
        num_splits: List[int],
        paddings: Optional[List[int]] = None) -> List[Tensor]:
    variable = resource_variable_ops.ResourceVariable(
        initial_value=input_value, dtype=input_dtype)
    sess.run(variables.variables_initializer([variable]))
    return gen_tpu_ops.read_variable_xla_split_nd(variable.handle,
                                                  input_dtype,
                                                  num_outputs,
                                                  num_splits,
                                                  paddings=paddings)
Example #5
    def __init__(self, keras_model_path, inputshape, in_nodes, dest_nodes):
        if LooseVersion(tensorflow.__version__) < LooseVersion('1.8.0'):
            raise ImportError(
                'Your TensorFlow version %s is outdated. '
                'MMdnn requires tensorflow>=1.8.0' % tensorflow.__version__)

        super(TensorflowParser2, self).__init__()
        self.weight_loaded = True

        import tensorflow as tf
        from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
        model = tf.keras.models.load_model(keras_model_path, compile=False)
        full_model = tf.function(lambda x: model(x))
        full_model = full_model.get_concrete_function(tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype))
        frozen_func = convert_variables_to_constants_v2(full_model)
        frozen_func.graph.as_graph_def()
        g = frozen_func.graph

        from tensorflow.python.client.session import Session
        from tensorflow.python.training.saver import export_meta_graph
        with Session(graph=g) as sess:
            tempdir = tempfile.mkdtemp()
            meta_graph_def = export_meta_graph(filename=os.path.join(tempdir, 'my-model.meta'))
            model = meta_graph_def.graph_def
            shutil.rmtree(tempdir)

        self.tf_graph = TensorflowGraph(model)
        self.tf_graph.build()
Example #6
def config_tensorflow():
    config = ConfigProto(
        gpu_options=GPUOptions(per_process_gpu_memory_fraction=0.8)
    )
    config.gpu_options.allow_growth = True
    session = Session(config=config)
    set_session(session)
Example #7
def test_run_cond_tf(self):
  true_fn = lambda: (constant(2),)
  false_fn = lambda: (constant(3),)
  with Session() as sess:
    out = multiple_dispatch.run_cond(constant(True), true_fn, false_fn)
    self.assertEqual(sess.run(out), 2)
    out = multiple_dispatch.run_cond(constant(False), true_fn, false_fn)
    self.assertEqual(sess.run(out), 3)
Example #8
def create_resource_roundtrip_graph(
        sess: Session,
        value: Any,
        dtype: Any,
        num_partitions: List[int],
        paddings: Optional[List[int]] = None) -> Tensor:
    variable = resource_variable_ops.ResourceVariable(initial_value=value,
                                                      dtype=dtype)
    sess.run(variables.variables_initializer([variable]))
    split = gen_tpu_ops.read_variable_xla_split_nd(variable.handle,
                                                   dtype,
                                                   np.prod(num_partitions),
                                                   num_partitions,
                                                   paddings=paddings)
    concat = gen_tpu_ops.assign_variable_xla_concat_nd(variable.handle, split,
                                                       num_partitions,
                                                       paddings)
    with control_dependencies([concat]):
        return math_ops.equal(variable.read_value(),
                              constant_op.constant(value, dtype=dtype))
Example #9
def create_resource_concat_graph(
        sess: Session,
        input_values: List[Any],
        input_dtype: Any,
        num_concats: List[int],
        paddings: Optional[List[int]] = None,
        output_shape: Optional[List[int]] = None) -> Tensor:
    variable_shape = [] if output_shape is None else output_shape
    variable = resource_variable_ops.ResourceVariable(
        initial_value=np.zeros(variable_shape, dtype=input_dtype),
        dtype=input_dtype)
    sess.run(variables.variables_initializer([variable]))
    const_input_ops = [
        constant_op.constant(i, dtype=input_dtype) for i in input_values
    ]
    concat = gen_tpu_ops.assign_variable_xla_concat_nd(variable.handle,
                                                       const_input_ops,
                                                       num_concats, paddings)
    with control_dependencies([concat]):
        return variable.read_value()
Example #10
    def test_run_while_tf(self):
        cond_fn = lambda x, t, s: x > t
        body_fn = lambda x, t, s: (x * s, t, s)

        with Session() as sess:
            x, _, _ = multiple_dispatch.run_while(cond_fn, body_fn,
                                                  [constant(3.0), 1.0, 0.5])
            self.assertEqual(sess.run(x), 0.75)

            x, _, _ = multiple_dispatch.run_while(cond_fn, body_fn,
                                                  [constant(3.0), 4.0, 0.5])
            self.assertEqual(sess.run(x), 3.0)
Example #11
    def __init__(self, tarball_path):
        """Creates and loads pretrained deeplab model."""
        self.graph = tf.Graph()
        graph_def = None
        # Extract frozen graph from tar archive.
        tar_file = tarfile.open(tarball_path)
        for tar_info in tar_file.getmembers():
            if self.FROZEN_GRAPH_NAME in os.path.basename(tar_info.name):
                file_handle = tar_file.extractfile(tar_info)
                graph_def = GraphDef.FromString(file_handle.read())
                break

        tar_file.close()

        if graph_def is None:
            raise RuntimeError('Cannot find inference graph in tar archive.')

        with self.graph.as_default():
            tf.import_graph_def(graph_def, name='')

        self.sess = Session(graph=self.graph)
Example #12
def test_hash(hash_func):
    # random_str = generate_random_ascii_str()
    random_str = []
    with open(RANDOM_STRING_FILE, 'rb') as f:
        for line in f:
            random_str.append(line.strip())
    nums = len(random_str)
    num_batches = int(nums / BATCH_SIZE)

    # result = hash_method(input=random_str, num_buckets=HASH_SIZE, name="test_hash")
    print("Num of random string is {}".format(len(random_str)))
    print("Hash size is {}".format(HASH_SIZE))
    print("HASH_SIZE / NUMS_RANDOM_STR is {}".format(HASH_SIZE / nums))

    x_input_str = placeholder(dtype=tf_string,
                              shape=[None],
                              name='input_str')
    result = hash_func(input=x_input_str,
                       num_buckets=HASH_SIZE,
                       name="test_hash")

    RESULT = []
    sess = Session()
    t0 = time.time()
    for i in range(num_batches):
        if i != num_batches - 1:
            batch_i = random_str[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]
        else:
            batch_i = random_str[i * BATCH_SIZE:]
        result_i = sess.run(result, feed_dict={x_input_str: batch_i})
        RESULT += result_i.tolist()

    cost_t = time.time() - t0
    conflict_nums = len(RESULT) - len(set(RESULT))
    print("Calculate time is {}s".format(cost_t))
    print("Conflict nums is {}".format(conflict_nums))
    print("Conflict rate is {}".format(conflict_nums / len(RESULT)))
    sess.close()
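Assuming `hash_func` follows the keyword signature used above (`input`, `num_buckets`, `name`), one concrete choice is TF's fast string-hash op; the selection is an assumption:

from tensorflow.python.ops.string_ops import string_to_hash_bucket_fast

test_hash(string_to_hash_bucket_fast)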
Example #13
class DetectionApiNmsPerformer(NmsPerformerBase):
    def __init__(self):
        self.sess = None
        self.config = ConfigProto()
        self.config.gpu_options.allow_growth = True

    def create_session(self):
        """
        create tensorflow session
        """

        self.sess = Session(config=self.config)

    def nms_single_class(self, boxes, scores, input_metadata):
        """ see NmsPerformerBase """
        # define nms function params
        # This is not supposed to be in the request! we insert a default value for safety.
        score_thresh = SCORE_THRESH
        iou_thresh = input_metadata.get('nmsThresh', 0.6)
        max_output_size = boxes.shape[0]

        # expand to adapt to expected input dimensions
        boxes = boxes[:, np.newaxis, :]
        scores = scores[:, np.newaxis]

        # convert to tensors
        boxes = tf.constant(boxes, tf.float32)
        scores = tf.constant(scores, tf.float32)

        # create nms graph node
        nms_node = multiclass_non_max_suppression(boxes, scores, score_thresh,
                                                  iou_thresh, max_output_size)

        # run nms evaluation
        self.sess.run(nms_node.get())
        nms_boxes = nms_node.data['boxes'].eval(session=self.sess)
        nms_scores = nms_node.data['scores'].eval(session=self.sess)

        return nms_boxes, nms_scores
Example #14
def test_dynamic_is_tf(self):
  with Session().as_default():
    a = constant([2.0])
    also_a = a
    not_actually_a = constant([2.0])
    should_be_true1 = multiple_dispatch.dynamic_is(a, also_a)
    should_be_false1 = multiple_dispatch.dynamic_is_not(a, also_a)
    should_be_true2 = multiple_dispatch.dynamic_is_not(a, not_actually_a)
    should_be_false2 = multiple_dispatch.dynamic_is(a, not_actually_a)
    self.assertTrue(should_be_true1)
    self.assertTrue(should_be_true2)
    self.assertFalse(should_be_false1)
    self.assertFalse(should_be_false2)
Example #15
def _default_session():
    """ Returns the default session or a newly created session

    If no default session is available, creates a new session.

    Returns:
        ``Session``: returns the default session if available or a newly created session otherwise.

    """
    session = ops.get_default_session()
    if session is None:
        session = Session()
    return session
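A quick sanity check of the fallback behavior (a sketch relying only on TF1 default-session semantics):

with Session() as sess:
    assert _default_session() is sess       # reuses the active default session
assert _default_session() is not sess      # no default left, so a new Session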
Example #16
class DeepLabModel(object):
    """Class to load deeplab model and run inference."""

    INPUT_TENSOR_NAME = 'ImageTensor:0'
    OUTPUT_TENSOR_NAME = 'SemanticPredictions:0'
    INPUT_SIZE = 513
    FROZEN_GRAPH_NAME = 'frozen_inference_graph'

    def __init__(self, tarball_path):
        # """Creates and loads pretrained deeplab model."""
        self.graph = tf.Graph()
        graph_def = None
        # Extract frozen graph from tar archive.
        tar_file = tarfile.open(tarball_path)
        for tar_info in tar_file.getmembers():
            if self.FROZEN_GRAPH_NAME in os.path.basename(tar_info.name):
                file_handle = tar_file.extractfile(tar_info)
                graph_def = GraphDef.FromString(file_handle.read())
                break

        tar_file.close()

        if graph_def is None:
            raise RuntimeError('Cannot find inference graph in tar archive.')

        with self.graph.as_default():
            tf.import_graph_def(graph_def, name='')

        self.sess = Session(graph=self.graph)

    def run(self, image):
        """Runs inference on a single image.

        Args:
          image: A PIL.Image object, raw input image.

        Returns:
          resized_image: RGB image resized from original input image.
          seg_map: Segmentation map of `resized_image`.
        """
        width, height = image.size
        resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)
        target_size = (int(resize_ratio * width), int(resize_ratio * height))
        resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)
        batch_seg_map = self.sess.run(
            self.OUTPUT_TENSOR_NAME,
            feed_dict = {self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})
        seg_map = batch_seg_map[0]
        return resized_image, seg_map
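A hypothetical usage of `DeepLabModel` (the tarball and image paths are illustrative):

model = DeepLabModel('deeplab_model.tar.gz')
resized_image, seg_map = model.run(Image.open('street.jpg'))
print(seg_map.shape)  # one class id per pixel of resized_image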
Example #17
    def end(self, session: session_lib.Session):
        if self._save_thread:
            logging.info("Waiting for any pending checkpoints to finish.")
            self._save_thread.join()
        if self._write_graph_thread:
            logging.info("Waiting for any pending write_graph to finish.")
            self._write_graph_thread.join()

        last_step = session.run(self._global_step_tensor)

        if self._last_checkpoint_step != last_step:
            self._save(session, last_step, asynchronous=False)

        for l in self._listeners:
            l.end(session, last_step)
Example #18
def prepare_session(self, master, checkpoint_dir=None, saver=None, config=None, **_):
    logger = get_logger()
    logger.info('prepare_session')
    session = Session(master, graph=self._graph, config=config)
    self._session_init_fn(session)
    if saver and checkpoint_dir:
        ckpt = get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:  # pylint: disable=no-member
            logger.info('restoring from %s',
                        ckpt.model_checkpoint_path)  # pylint: disable=no-member
            saver.restore(session, ckpt.model_checkpoint_path)  # pylint: disable=no-member
            saver.recover_last_checkpoints(
                ckpt.all_model_checkpoint_paths)  # pylint: disable=no-member
        else:
            logger.info('no valid checkpoint in %s', checkpoint_dir)
    return session
Example #19
def test_list_append_tf(self):
  a = constant(3.0)
  l = tl.TensorList(a.shape, a.dtype)
  l.append(a)
  c1 = l.count()
  l.append(a)
  c2 = l.count()
  _ = l.pop()
  c3 = l.count()
  a2 = l.pop()
  c4 = l.count()
  with Session() as sess:
    c1, c2, c3, c4, a, a2 = self.evaluate([c1, c2, c3, c4, a, a2])
    self.assertEqual(c1, 1)
    self.assertEqual(c2, 2)
    self.assertEqual(c3, 1)
    self.assertEqual(c4, 0)
    self.assertEqual(a, a2)
Example #20
    def train(self):
        learning_rate = self._learning_rate
        init_learning_rate = self._init_learning_rate

        logits, labels = self.model()
        print('labels: ', labels)
        print('logits: ', logits)
        equals = tf.cast(tf.equal(tf.argmax(labels, axis=1), tf.argmax(logits, axis=1)), dtype=tf.float32)
        accuracy = tf.reduce_mean(equals)

        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits))

        optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
        train_op = optimizer.minimize(loss)

        with Session() as sess:
            import time
            sess.run(tf.global_variables_initializer())
            tmp_learning_rate = init_learning_rate

            feed_dict = {self._is_training: True, self._batch_size:128}
            sess.run([self._train_init_op.initializer, self._test_init_op.initializer], feed_dict=feed_dict)

            start_time = time.time()
            for iteration in range(100000):
                if iteration % 25000 == 0:  # 50000 and 75000 are multiples of 25000, so one check suffices
                    tmp_learning_rate *= 0.1

                feed_dict = {self._is_training: True, learning_rate: tmp_learning_rate, self._batch_size: 128}
                _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
                train_accuracy_value = sess.run([accuracy], feed_dict=feed_dict)

                if iteration % 1000 == 0:
                    sess.run(self._test_init_op.initializer)

                    feed_dict = {self._is_training: False, learning_rate: tmp_learning_rate, self._batch_size: 10000}

                    test_accuracy_value = sess.run([accuracy], feed_dict=feed_dict)
                    print('iteration: %d, learning rate: %f, loss: %f, train accuracy: %f, test accuracy: %f' %
                          (iteration, tmp_learning_rate, loss_value, train_accuracy_value[0], test_accuracy_value[0]))

            print('total time: %d' % (time.time() - start_time))
Example #21
def check_float_graph(input_graph_def, input_fn, q_config, s_config):
    """Check if float graph and input_fn is validate before quantization"""
    graph = ops.Graph()
    with graph.as_default():
        importer.import_graph_def(input_graph_def, name='')
        print("INFO: Checking Float Graph...")
        input_tensors = [
            op.outputs[0] for op in graph.get_operations()
            if op.type == 'Placeholder'
        ]
        output_tensors = [
            graph.get_tensor_by_name(name + ':0')
            for name in gen_quantized_node_names(graph, q_config.output_nodes)
        ]

        with Session(graph=graph, config=s_config) as sess:
            inputs = input_fn(iter=0)
            feed_dict = gen_feed_dict(input_tensors, inputs)
            sess.run(output_tensors, feed_dict)
    print("INFO: Float Graph Check Done.")
Example #22
def setUp(self):
    super(QuantizeTest, self).setUp()
    with Session(graph=Graph()) as session:
        with session.graph.as_default():
            # initial [-1, 1) random matrix
            x = constant(2 * random((1, 4096)) - 1,
                         dtype=float32,
                         name='c1')
            y = constant(2 * random((4096, 1)) - 1,
                         dtype=float32,
                         name='c2')
            # matmul to scalar
            z = matmul(x, y, name='c3')
        self._desire_z = session.run(z)
        self._quantized_raw = quantize_graph_def(session.graph_def,
                                                 output_nodes=['c3'],
                                                 only='raw')
        self._quantized_simple = quantize_graph_def(session.graph_def,
                                                    only='simple')
        self._quantized_full = quantize_graph_def(session.graph_def)
Example #23
def calibrate_frozen(input_graph_def, input_fn, q_config, s_config):
    """Transform float graph to quantized graph and do calibration"""

    temp_path = os.path.join(q_config.output_dir, "temp")
    if not os.path.exists(temp_path):
        os.makedirs(temp_path)

    # Calibration
    calib_graph_def = CreateQuantizeCalibrationGraphDef(
        input_graph_def, q_config)
    graph = ops.Graph()
    with graph.as_default():
        importer.import_graph_def(calib_graph_def, name='')
        print("INFO: Calibrating for {} iterations...".format(
            q_config.calib_iter))
        input_tensors = [
            op.outputs[0] for op in graph.get_operations()
            if op.type == 'Placeholder'
        ]
        output_tensors = [
            graph.get_tensor_by_name(name + ':0')
            for name in gen_quantized_node_names(graph, q_config.output_nodes)
        ]
        with Session(graph=graph, config=s_config) as sess:
            progress = ProgressBar()
            for i in progress(range(0, q_config.calib_iter)):
                inputs = input_fn(iter=i)
                feed_dict = gen_feed_dict(input_tensors, inputs)
                sess.run(output_tensors, feed_dict)
    print("INFO: Calibration Done.")

    # Quantized Evaluation
    quantize_eval_graph_def = CreateQuantizeEvaluationGraphDef(
        calib_graph_def, q_config)
    save_pb_file(quantize_eval_graph_def,
                 os.path.join(q_config.output_dir, "quantize_eval_model.pb"))
    shutil.rmtree(temp_path)
    return quantize_eval_graph_def
Example #24
def deploy_checkpoint(input_meta_graph_def, input_checkpoint, q_config):

    if not checkpoint_management.checkpoint_exists(input_checkpoint):
        raise ValueError("Input checkpoint '" + input_checkpoint +
                         "' does not exits.")

    if gfile.IsDirectory(input_checkpoint):
        input_checkpoint = checkpoint_management.latest_checkpoint(
            input_checkpoint)

    if not os.path.exists(q_config.output_dir):
        os.makedirs(q_config.output_dir)

    quantize_eval_graph_def = None
    if input_meta_graph_def:
        quantize_eval_graph_def = input_meta_graph_def.graph_def
    else:
        raise ValueError("You need to provide a `MetaGraphDef` for deploy.")

    q_config.output_nodes = get_quantized_nodes(quantize_eval_graph_def,
                                                q_config.output_nodes)
    saver = saver_lib.import_meta_graph(input_meta_graph_def,
                                        clear_devices=True)
    with Session() as sess:
        saver.restore(sess, input_checkpoint)
        frozen_graph_def = graph_util.convert_variables_to_constants(
            sess, quantize_eval_graph_def, q_config.output_nodes)

    if not os.path.exists(os.path.join(q_config.output_dir, "deploy")):
        os.makedirs(os.path.join(q_config.output_dir, "deploy"))
    quantize_deploy_graph_def = CreateQuantizeDeployGraphDef(
        frozen_graph_def, q_config)
    save_pb_file(quantize_deploy_graph_def,
                 os.path.join(q_config.output_dir, "deploy/deploy_model.pb"))

    print("INFO: Quantize deploy graph are generated in: {}".format(
        os.path.join(q_config.output_dir, "deploy")))
    return
Example #25
    def after_create_session(self, session: session_lib.Session, coord: Any):
        global_step = session.run(self._global_step_tensor)

        # We write the graph and saver_def at the first call of before_run.
        # We cannot do this in begin, since we let other hooks change the graph
        # and add variables in begin. The graph is finalized after all begin calls.
        def _write_graph_fn(self):
            training_util.write_graph(
                ops.get_default_graph().as_graph_def(add_shapes=True),
                self._checkpoint_dir, "graph.pbtxt")

        self._write_graph_thread = threading.Thread(target=_write_graph_fn,
                                                    args=[self])
        self._write_graph_thread.start()

        saver_def = self._get_saver().saver_def if self._get_saver() else None
        graph = ops.get_default_graph()
        meta_graph_def = meta_graph.create_meta_graph_def(
            graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def)
        self._summary_writer.add_graph(graph)
        self._summary_writer.add_meta_graph(meta_graph_def)
        # The checkpoint saved here is the state at step "global_step".
        self._save(session, global_step)
        self._timer.update_last_triggered_step(global_step)
Example #26
        batch_size = self._batch_size
        with tf.name_scope(name=name_scope):
            dataset = Dataset.from_tensor_slices((x, y))
            dataset = dataset.shuffle(buffer_size=batch_size * 10) \
                .repeat() \
                .map(map_func=map_fn, num_parallel_calls=8) \
                .batch(batch_size=batch_size) \
                .prefetch(buffer_size=batch_size * 10)

            init_op = dataset.make_initializable_iterator(shared_name='init_op')
            next_op = init_op.get_next(name='next_op')
        return init_op, next_op


if __name__ == '__main__':
    is_training = tf.placeholder(tf.bool, shape=(), name='is_training')
    dataset = Cifar10Dataset(batch_size=128)
    train_init_op, train_next_op = dataset.get_train_dataset()
    test_init_op, test_next_op = dataset.get_test_dataset()
    print(train_next_op)
    print(test_next_op)
    input = cond(is_training, true_fn=lambda: train_next_op, false_fn=lambda: test_next_op, name='input')

    import time
    with Session() as sess:
        sess.run([train_init_op.initializer, test_init_op.initializer])
        start_time = time.time()
        for _ in range(1000):
            input_value = sess.run([input], feed_dict={is_training: True})
            print(input_value[0][0].shape)
        print('total time: %d' % (time.time() - start_time))
Example #27
import numpy as np
from tensorflow.keras.applications.imagenet_utils import preprocess_input, decode_predictions
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
from django.http import JsonResponse
from rest_framework.decorators import api_view
from rest_framework.response import Response
from tensorflow.python.client.session import Session
from tensorflow.python.framework.ops import Graph
from .serializers import FormSubmitSerializer, MedicineDetectSerializer

MODEL_PATH = 'model/model_inception.h5'

model_graph = Graph()
with model_graph.as_default():
    tf_session = Session()
    with tf_session.as_default():
        model = load_model(MODEL_PATH)


@api_view(['POST'])
def formsubmit(request):

    serializer = FormSubmitSerializer(data=request.data)
    if serializer.is_valid():
        serializer.save()

    MedicineName = request.POST['MedicineName']
    Introduction = request.POST['Introduction']
    WhenToTake = request.POST['WhenToTake']
    SideEffects = request.POST['SideEffects']
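The point of the module-level `model_graph`/`tf_session` pair is that predictions inside a request handler must re-enter both contexts; a hedged sketch (the preprocessing, input size, and helper name are assumptions):

def predict(img_path):
    # Input size is a guess; adjust to the model's expected shape.
    img = image.load_img(img_path, target_size=(299, 299))
    x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
    with model_graph.as_default():
        with tf_session.as_default():
            preds = model.predict(x)
    return preds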
Example #28
import gym
import numpy as np
from tensorflow.core.protobuf.config_pb2 import ConfigProto, GPUOptions
from tensorflow.python.client.session import Session
from tensorflow.python.keras.backend import set_session
from tensorflow.python.keras.models import load_model

if __name__ == "__main__":
    config = ConfigProto(gpu_options=GPUOptions(
        per_process_gpu_memory_fraction=0.8))
    config.gpu_options.allow_growth = True
    session = Session(config=config)
    set_session(session)

    model = load_model("working_model")

    env = gym.make("CartPole-v1")

    for t in range(200):

        state = np.reshape(env.reset(), [1, 4])

        step = 0
        while True:
            step += 1
            env.render()

            action = np.argmax(model.predict(state))
            observation, _, done, _ = env.step(action)
            state = np.reshape(observation, [1, 4])
            if done:
                break
Example #29
def CreateQuantizeDeployGraph(graph=None, checkpoint='', config=None):
    """Python wrapper for the decent_q create deploy graph tool.

    Args:
      graph: the graph to be quantized; the default graph is used if None.
      checkpoint: the checkpoint path.
      config: the QuantizeConfig.

    Returns:
      Transformed Graph (as default) for quantize deploy.
    """
    if config is None:
        raise ValueError("Please set the QuantizeConfig.")
    elif not isinstance(config, QuantizeConfig):
        raise ValueError("Config shoulb be a QuantizeConfig object.")

    # Create the output_dir
    if not os.path.exists(config.output_dir):
        try:
            os.makedirs(config.output_dir)
        except Exception as e:
            print(e)

    if graph is None:
        graph = get_default_graph()
    quantize_eval_graph_def = graph.as_graph_def()

    if os.path.isdir(checkpoint):
        checkpoint = checkpoint_management.latest_checkpoint(checkpoint)
    print("INFO: Creating quantize eval model from: {}".format(checkpoint))
    step_in_ckpt = checkpoint.rsplit("-")[-1]

    # Freeze the checkpoint into the graph
    config.output_nodes = get_quantized_nodes(quantize_eval_graph_def,
                                              config.output_nodes)
    saver = saver_lib.Saver()
    with Session() as sess:
        saver.restore(sess, checkpoint)
        frozen_graph_def = graph_util.convert_variables_to_constants(
            sess, quantize_eval_graph_def, config.output_nodes)

    # Convert folded batchnorms
    frozen_quantize_eval_graph_def = ConvertFoldedBatchnorms(
        frozen_graph_def, config)

    # Deploy
    quantize_deploy_graph_def = CreateQuantizeDeployGraphDef(
        frozen_quantize_eval_graph_def, config)

    # Save the model
    # for quantize finetune model, replace input node with placeholder
    # replaced_graph_def = frozen_quantize_eval_graph_def
    for target_node_name, shape in zip(config.input_nodes,
                                       config.input_shapes):
        frozen_quantize_eval_graph_def = SetInputNodesAsPlaceholder(
            frozen_quantize_eval_graph_def, target_node_name, shape)

    frozen_quantize_eval_path = os.path.join(
        config.output_dir, "quantize_eval_model_{}_{}.pb".format(
            step_in_ckpt, time.strftime("%Y%m%d%H%M%S", time.localtime())))
    save_pb_file(frozen_quantize_eval_graph_def, frozen_quantize_eval_path)
    print("INFO: Quantize eval model is generated in: {}".format(
        frozen_quantize_eval_path))

    deploy_path = os.path.join(
        config.output_dir, "deploy_model_{}_{}.pb".format(
            step_in_ckpt, time.strftime("%Y%m%d%H%M%S", time.localtime())))
    save_pb_file(quantize_deploy_graph_def, deploy_path)
    print("INFO: Deploy model is generated in: {}".format(deploy_path))
    return
Example #30
    def create_session(self):
        """
        create tensorflow session
        """

        self.sess = Session(config=self.config)
Example #31
def dump(input_graph_def,
         input_fn,
         output_dir,
         max_dump_batches,
         dump_float,
         s_config,
         dump_input_tensors=''):
    """Dump weights and activation data"""
    w_q_map = dict()
    a_q_map = dict()
    for node in input_graph_def.node:
        if node.op == "FixNeuron":
            if node.name.endswith("wquant"):
                w_q_map[node.name] = int(node.attr['quantize_pos'].i)
            else:
                a_q_map[node.name] = int(node.attr['quantize_pos'].i)

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    graph = ops.Graph()
    with graph.as_default():
        if dump_input_tensors:
            # TODO: Support multi input tensors
            image = array_ops.placeholder(dtypes.float32,
                                          shape=(None, None, None, 3),
                                          name="image_tensor")
            importer.import_graph_def(input_graph_def,
                                      name='',
                                      input_map={dump_input_tensors: image})
        else:
            importer.import_graph_def(input_graph_def, name='')

        # Get fetches
        w_fetch_tensors = []
        w_fetch_names = []
        a_fetch_tensors = []
        a_fetch_names = []
        for op in graph.get_operations():
            if dump_float:
                try:
                    a_fetch_tensors.append(op.outputs[0])
                    a_fetch_names.append(op.name)
                except IndexError:  # ops with no outputs raise IndexError, not KeyError
                    continue
            elif op.type == "FixNeuron":
                if op.name.endswith("wquant"):
                    w_fetch_tensors.append(op.outputs[0])
                    w_fetch_names.append(op.name)
                else:
                    a_fetch_tensors.append(op.outputs[0])
                    a_fetch_names.append(op.name)

        # Dump weights/biases
        print("INFO: Start dumping weights/biases...")
        with Session(config=s_config) as sess:
            dump_folder = os.path.join(output_dir, "dump_results_weights")
            if not os.path.exists(dump_folder):
                os.makedirs(dump_folder)

            print("INFO: Dumping weights/biases...")
            w_fetch_results = sess.run(w_fetch_tensors)

            index = 0
            for name, res in zip(w_fetch_names, w_fetch_results):
                index = index + 1
                filename = os.path.join(dump_folder, name.replace("/", "_"))
                print("INFO: Dumping ({}/{}): {}".format(
                    index, len(w_fetch_names), name))
                res = res.flatten()

                if name in w_q_map:
                    res = res * 2**w_q_map[name]
                    res.astype(np.int8).tofile(filename + ".bin")
                    np.savetxt(filename + ".txt",
                               res.astype(np.int8),
                               fmt="%s",
                               delimiter=",")

        # Build feed_dict
        input_tensors = [
            op.outputs[0] for op in graph.get_operations()
            if op.type == 'Placeholder'
        ]

        # Run inference and dump activations
        print("INFO: Start Dumping for {} batches".format(max_dump_batches))
        with Session(config=s_config) as sess:
            for i in range(max_dump_batches):
                dump_folder = os.path.join(output_dir,
                                           "dump_results_" + str(i))
                if not os.path.exists(dump_folder):
                    os.makedirs(dump_folder)

                print("INFO: Dumping for batch: {}/{} ...".format(
                    i + 1, max_dump_batches))
                inputs = input_fn(iter=i)
                feed_dict = gen_feed_dict(input_tensors, inputs)
                a_fetch_results = sess.run(a_fetch_tensors, feed_dict)

                index = 0
                for name, res in zip(a_fetch_names, a_fetch_results):
                    index = index + 1
                    filename = os.path.join(dump_folder,
                                            name.replace("/", "_"))
                    print("INFO: Dumping ({}/{}): {}".format(
                        index, len(a_fetch_names), name))
                    res = res.flatten()

                    if dump_float:
                        res.tofile(filename + "_float.bin")
                        np.savetxt(filename + "_float.txt",
                                   res,
                                   fmt="%s",
                                   delimiter=",")

                    if name in a_q_map:
                        res = res * 2**a_q_map[name]
                        res.astype(np.int8).tofile(filename + ".bin")
                        np.savetxt(filename + ".txt",
                                   res.astype(np.int8),
                                   fmt="%s",
                                   delimiter=",")
    print("INFO: Dump results are saved in {}.".format(output_dir))
    return
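For reference, the fixed-point conversion the dump applies: a tensor whose FixNeuron records quantize position q is scaled by 2**q before the int8 cast, e.g.:

q = 6
x = np.array([0.5, -0.25], dtype=np.float32)
print((x * 2**q).astype(np.int8))  # [ 32 -16]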