Example #1
    def test_new_graph_in_same_process(self):
        def build_and_run_model():
            def my_net(x):
                return x * x

            v = array_ops.placeholder(dtype=np.float32, shape=(2, ))
            with ipu.scopes.ipu_scope("/device:IPU:0"):
                [result] = ipu.ipu_compiler.compile(my_net, inputs=[v])

            with session.Session() as sess:
                report = ReportJSON(
                    self, sess, set_opts_fn=_use_offline_compilation_if_needed)
                try:
                    res = sess.run(result, {v: [1.0, 2.0]})
                except errors.InvalidArgumentError as e:
                    if offline_compilation_needed and "compilation only" in e.message:
                        res = []
                    else:
                        raise
                events = report.get_event_trace(sess)
                return res, events

        with _temporary_executable_cache():
            # Since each Graph will have its own XLA compilation cache,
            # the cache we test is the last-level Poplar executable cache.

            with ops.Graph().as_default():
                result0, events0 = build_and_run_model()

            with ops.Graph().as_default():
                result1, events1 = build_and_run_model()

            self.assertAllEqual(result0, result1)
            self.assertEqual(1, _count_ipu_compilations(events0))
            self.assertEqual(0, _count_ipu_compilations(events1))
Example #2
def get_graph_def_from_file(graph_filepath):
  tf.compat.v1.reset_default_graph()
  with ops.Graph().as_default():
    with tf.compat.v1.gfile.GFile(graph_filepath, 'rb') as f:
      graph_def = tf.compat.v1.GraphDef()
      graph_def.ParseFromString(f.read())
      return graph_def
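A minimal usage sketch for the helper above, assuming a frozen graph at a hypothetical path 'model.pb':

import tensorflow.compat.v1 as tf
from tensorflow.python.framework import ops

# Parse the frozen GraphDef, then import it into a fresh graph for inspection.
graph_def = get_graph_def_from_file('model.pb')  # hypothetical path
with ops.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name='')
    print([op.name for op in graph.get_operations()[:5]])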
Example #3
def test_serialization(tmpdir):
    g1 = graph_item.GraphItem(graph=ops.Graph())
    with g1.as_default():
        model = model_keras_dense_and_sparse()
        trainable_variables = model.trainable_variables
        optimizer = adagrad.Adagrad()
        grads = optimizer.get_gradients(model.outputs[0], trainable_variables)
        optimizer.apply_gradients(zip(grads, trainable_variables))
        g1.prepare()
    path = tmpdir.join('tmp_item.pb')
    g1.serialize(path)
    g2 = graph_item.GraphItem.deserialize(path)

    def compare(g1, g2):
        gd1 = g1.graph.as_graph_def()
        gd2 = g2.graph.as_graph_def()
        assert gd1 is not gd2
        assert gd1 == gd2
        d1 = {n.name: n for n in gd1.node}
        d2 = {n.name: n for n in gd2.node}
        assert d1 == d2
        assert g1._grad_target_pairs == g2._grad_target_pairs
        assert g1.info == g2.info

    compare(g1, g2)
Example #4
    def assertAllRanksEqual(self, local_value, name=None):
        """Assert that the current rank has the same value as the root rank."""
        with ops.Graph().as_default(), session.Session():
            local_tensor = constant_op.constant(local_value)
            root_tensor = hvd.broadcast(local_tensor, root_rank=0)
            root_value = root_tensor.eval()
            np.testing.assert_equal(local_value, root_value, name)
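A sketch of how a helper like this is typically used, assuming hvd.init() has been called and the test is launched on multiple ranks; the base class and launch command below are hypothetical:

# Launch with e.g.: horovodrun -np 2 python -m pytest test_file.py
import horovod.tensorflow as hvd

hvd.init()

class BroadcastTest(HorovodTestBase):  # hypothetical base class providing the helper
    def test_values_match_across_ranks(self):
        # Each rank passes its local value; the helper broadcasts rank 0's
        # copy and asserts that the local value equals it.
        self.assertAllRanksEqual([1.0, 2.0], name='values_match')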
Example #5
def get_graph_def_from_file(graph_filepath):
    import tensorflow.compat.v1 as tf  # this helper uses the TF1-style API
    from tensorflow.python.framework import ops

    with ops.Graph().as_default():
        with tf.gfile.GFile(graph_filepath, "rb") as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())

            return graph_def
Example #6
def test_graph_item_context_scope():
    g1 = ops.Graph()
    i1 = graph_item.GraphItem(graph=g1)
    assert graph_item._default_graph_item is None
    with i1.as_default() as item:
        assert graph_item._default_graph_item == i1
        assert item._graph == g1
        assert ops.get_default_graph() == g1
        setattr(item, 'new_attr', 'new_value')
    assert graph_item._default_graph_item is None
    assert getattr(i1, 'new_attr') == 'new_value'
Example #7
def test_update_ops_for_optimizers(optimizer_class, kwargs):
    item = graph_item.GraphItem(graph=ops.Graph())
    with item.as_default():
        model = model_keras_dense_and_sparse()
        trainable_variables = model.trainable_variables
        kwargs = kwargs or {}
        optimizer = optimizer_class(**kwargs)
        print(optimizer)
        grads = optimizer.get_gradients(model.outputs[0], trainable_variables)
        optimizer.apply_gradients(zip(grads, trainable_variables))
        assert len(item.var_op_name_to_grad_info) == len(trainable_variables)
Example #8
def test_parse_name_scope():
    with ops.Graph().as_default():
        name_scope = 'name_scope/child_name_scope'
        a = constant_op.constant(5)
        new_name = ops.prepend_name_scope(a.name, name_scope)
        assert new_name == 'name_scope/child_name_scope/Const:0'
        assert name_scope == utils.parse_name_scope(new_name)
        assert '' == utils.parse_name_scope(a.name)

        with ops.control_dependencies([no_op(name='my_op')]):
            b = constant_op.constant(6)
        name_scope = 'name_scope'
        new_name = ops.prepend_name_scope(b.op.node_def.input[0], name_scope)
        assert new_name == '^name_scope/my_op'
        assert name_scope == utils.parse_name_scope(new_name)
Example #9
    def get_graph_def_from_file(self, graph_filepath):
        """Read frozen TF GraphDef file

			Args
			----
				graph_filepath(string)

			Returns
			-------
				tf.GraphDef object
		"""
        with ops.Graph().as_default():
            with tf.gfile.GFile(graph_filepath, "rb") as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())
                return graph_def
Example #10
def convert_graph_def_to_saved_model(export_dir, graph_filepath):
    graph_def = get_graph_def_from_file(graph_filepath)
    sess = tf.Session(graph=ops.Graph())
    with sess as session:
        tf.import_graph_def(graph_def, name='')
        # 'outputs' is assumed to be defined at module scope as a list of
        # output tensor names; the graph is assumed to have one Placeholder.
        tf.saved_model.simple_save(
            session,
            export_dir,
            inputs={
                'input_image': session.graph.get_tensor_by_name('{}:0'.format(node.name))
                for node in graph_def.node if node.op == 'Placeholder'
            },
            outputs={t: session.graph.get_tensor_by_name(t) for t in outputs})
        print('Optimized graph converted to SavedModel!')
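The function reads a module-level 'outputs' list, so that must exist before the call; a hedged usage sketch with hypothetical paths and tensor names:

# 'outputs' lists the output tensor names to expose in the SavedModel.
outputs = ['predictions:0']  # hypothetical tensor name
convert_graph_def_to_saved_model('./saved_model', './optimized_graph.pb')
# The result can then be reloaded with tf.saved_model.loader.load or served as-is.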
Example #11
def model_simple():
    with ops.Graph().as_default() as g:
        _TRUE_W = 3.0
        _TRUE_b = 2.0
        _NUM_EXAMPLES = 1000
        inputs = np.random.randn(_NUM_EXAMPLES)
        noises = np.random.randn(_NUM_EXAMPLES)
        desired_y = inputs * _TRUE_W + _TRUE_b + noises

        W = tf.Variable(5.0, name='W', dtype=tf.float64)
        b = tf.Variable(0.0, name='b', dtype=tf.float64)
        variables = [W, b]
        with tf.GradientTape() as tape:
            predicted_y = W * inputs + b
            loss = tf.reduce_mean(tf.square(predicted_y - desired_y))
            gradients = tape.gradient(loss, variables)
    return g, gradients, variables
Example #12
import tensorflow.compat.v1 as tf
from tensorflow.python.framework import ops
import shutil

graph_filepath = 'dbface.pb'
tf.reset_default_graph()
with ops.Graph().as_default():
    with tf.gfile.GFile(graph_filepath, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

graph = tf.get_default_graph()

with tf.Session(graph=graph) as session:

    # Collect the ops whose data_format is NCHW
    tf.import_graph_def(graph_def, name='')
    target_ops = []
    for op in graph.get_operations():
        if 'data_format' in op.node_def.attr and op.node_def.attr['data_format'].s == b'NCHW':
            target_ops.append(op)

    # for target_op in target_ops:
    #     print(f'op: {target_op.name} ({target_op.type})')

    # ---- NCHW to NHWC conversion targets:
    # 1. Conv2D
    # 2. FusedBatchNormV3
    # 3. DepthwiseConv2dNative
    # 4. BiasAdd
    nodes = []
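The snippet is truncated before the actual rewrite; one way it could continue is sketched below, assuming the conversion only flips the data_format attribute and reorders 4-element strides/dilations (boundary transposes and other layout-sensitive attributes are not handled here):

    # Rewrite each NCHW node in the GraphDef: flip data_format and reorder
    # strides/dilations from (N, C, H, W) to (N, H, W, C).
    for node in graph_def.node:
        if 'data_format' in node.attr and node.attr['data_format'].s == b'NCHW':
            node.attr['data_format'].s = b'NHWC'
            for attr_name in ('strides', 'dilations'):
                if attr_name in node.attr and len(node.attr[attr_name].list.i) == 4:
                    n, c, h, w = node.attr[attr_name].list.i
                    node.attr[attr_name].list.i[:] = [n, h, w, c]
            nodes.append(node)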
Example #13
    def replicate(self, graph_item):
        """
        Replicate the entire graph as many times as num_replica.

        Args:
            graph_item: the original graph item

        Returns: The new graph item
        """
        item = GraphItem(graph=ops.Graph())
        fwd_ctx, bwd_ctx = self._collect_while_context(graph_item.graph)
        with item.graph.as_default():
            gdef = graph_item.graph.as_graph_def()
            for i in range(self._num_local_replicas):
                # Replicate ops
                with ops.device(self._replica_device_placer(replica_id=i)):
                    import_graph_def(gdef, name=replica_prefix(i))

                # Replicate while_loop context (control_flow) if needed.
                # The order matters -- we must replicate the backward context first, then the forward context.
                # TODO(Zeya): To handle cases when there are nested while loops, in which we must replicate
                #  parent context first and then child context. See:
                #  https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/control_flow_ops.py#L938
                if bwd_ctx:
                    for ctx in bwd_ctx:
                        _ = WhileContext(context_def=ctx.to_proto(), grad_state=ctx._grad_state,
                                         import_scope=replica_prefix(i))
                if fwd_ctx:
                    for ctx in fwd_ctx:
                        _ = WhileContext(context_def=ctx.to_proto(), grad_state=ctx._grad_state,
                                         import_scope=replica_prefix(i))

            # update saver
            master_replica = 0
            if graph_item.info.savers:
                item.info.update_savers(
                    [Saver.from_proto(proto, import_scope=replica_prefix(master_replica)).to_proto()
                        for proto in graph_item.info.savers],
                    replace=False
                )

            # update gradient info
            for i in range(self._num_local_replicas):
                for g_name, t_name in graph_item.grad_target_name_pairs.items():
                    if isinstance(g_name, tuple):
                        new_g_name = (
                            ops.prepend_name_scope(g_name[0], replica_prefix(i)),
                            ops.prepend_name_scope(g_name[1], replica_prefix(i)),
                            ops.prepend_name_scope(g_name[2], replica_prefix(i)))
                    else:
                        new_g_name = ops.prepend_name_scope(g_name, replica_prefix(i))
                    new_t_name = ops.prepend_name_scope(t_name, replica_prefix(i))
                    item.extend_gradient_info_by_names(
                        grads=[new_g_name],
                        targets=[new_t_name]
                    )
                item.info.update_variables(
                    [_from_proto_fn(proto, import_scope=replica_prefix(i)).to_proto()
                        for proto in graph_item.info.variables],
                    replace=False
                )
                item.info.update_table_initializers(
                    [ops.prepend_name_scope(tb_init, replica_prefix(i))
                        for tb_init in graph_item.info.table_initializers],
                    replace=False
                )
        return item
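For reference, replica_prefix as used throughout the method returns a per-replica name scope so imported ops and contexts do not collide; a hypothetical stand-in, purely to illustrate the assumed naming scheme:

def replica_prefix(replica_id):
    # Hypothetical: one unique name scope per replica ('replica_0',
    # 'replica_1', ...) under which each copy of the graph is imported.
    return 'replica_{}'.format(replica_id)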