def testMergeShapes(self):
    # All inputs unknown.
    p1 = tf.placeholder(tf.float32)
    p2 = tf.placeholder(tf.float32)
    p3 = tf.placeholder(tf.float32)
    m, index = control_flow_ops.merge([p1, p2, p3])
    self.assertIs(None, m.get_shape().ndims)
    self.assertEqual([], index.get_shape())

    # All inputs known but different.
    p1 = tf.placeholder(tf.float32, shape=[1, 2])
    p2 = tf.placeholder(tf.float32, shape=[2, 1])
    m, index = control_flow_ops.merge([p1, p2])
    self.assertIs(None, m.get_shape().ndims)
    self.assertEqual([], index.get_shape())

    # All inputs known but same.
    p1 = tf.placeholder(tf.float32, shape=[1, 2])
    p2 = tf.placeholder(tf.float32, shape=[1, 2])
    m, index = control_flow_ops.merge([p1, p2])
    self.assertEqual([1, 2], m.get_shape())
    self.assertEqual([], index.get_shape())

    # Possibly the same but not guaranteed.
    p1 = tf.placeholder(tf.float32, shape=[1, 2])
    p2 = tf.placeholder(tf.float32)
    p2.set_shape([None, 2])
    m, index = control_flow_ops.merge([p1, p2])
    self.assertIs(None, m.get_shape().ndims)
    self.assertEqual([], index.get_shape())
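# A minimal standalone sketch (hypothetical, TF 1.x graph mode assumed) of what
# merge returns: the first available input, plus value_index telling which
# input it was.
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops

pred = tf.placeholder(tf.bool)
data = tf.constant([1, 2, 3])
switch_false, switch_true = control_flow_ops.switch(data, pred)
out, idx = control_flow_ops.merge([switch_false, switch_true])
with tf.Session() as sess:
    # only the taken branch is produced; idx reports which one (here 1)
    print(sess.run([out, idx], {pred: True}))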
    def testLoop_1(self):
        with self.test_session():
            zero = tf.convert_to_tensor(0)
            one = tf.convert_to_tensor(1)
            n = tf.constant(10)

            enter_zero = control_flow_ops.enter(zero, "foo_1", False)
            enter_one = control_flow_ops.enter(one, "foo_1", False)
            enter_n = control_flow_ops.enter(n, "foo_1", False)
            merge_zero = control_flow_ops.merge([enter_zero, enter_zero],
                                                name="merge_zero")[0]
            merge_one = control_flow_ops.merge([enter_one, enter_one],
                                               name="merge_one")[0]
            merge_n = control_flow_ops.merge([enter_n, enter_n],
                                             name="merge_n")[0]
            less_op = tf.less(merge_n, merge_n)
            cond_op = control_flow_ops.loop_cond(less_op)
            switch_zero = control_flow_ops.switch(merge_zero, cond_op)
            switch_one = control_flow_ops.switch(merge_one, cond_op)
            switch_n = control_flow_ops.switch(merge_n, cond_op)
            next_zero = control_flow_ops.next_iteration(switch_zero[1])
            next_one = control_flow_ops.next_iteration(switch_one[1])
            next_n = control_flow_ops.next_iteration(switch_n[1])
            merge_zero.op._update_input(1, next_zero)
            merge_one.op._update_input(1, next_one)
            merge_n.op._update_input(1, next_n)
            exit_n = control_flow_ops.exit(switch_n[0])

            result = exit_n.eval()
        self.assertAllEqual(10, result)
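The snippet above builds a while-loop from raw primitives: enter moves a tensor into the loop frame, merge joins the initial value with the next_iteration back edge (initially wired to a dummy second input and patched afterwards via _update_input), loop_cond/switch route the value either into the loop body or to exit, and exit returns it to the outer frame. Since tf.less(merge_n, merge_n) is always false, the body never runs and exit_n yields 10.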
Example #5
def apply_with_random_selector(img_1, img_2, func, num_cases):
    sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
    # Pass the real x only to one of the func calls.
    img_1_results = []
    img_2_results = []
    for case in range(num_cases):
        img_1_results.append(
            func(control_flow_ops.switch(img_1, tf.equal(sel, case))[1], case))
        img_2_results.append(
            func(control_flow_ops.switch(img_2, tf.equal(sel, case))[1], case))

    img_1 = control_flow_ops.merge(img_1_results)[0]
    img_2 = control_flow_ops.merge(img_2_results)[0]

    return img_1, img_2
def apply_with_random_selector(image, func, num_cases):
    """random select a mode case to func(image, case)"""
    # random select a mode
    sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
    return control_flow_ops.merge([
        func(control_flow_ops.switch(image, tf.equal(case, sel))[1], case)
         for case in range(num_cases)])[0]
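A hedged usage sketch (the image tensor and the distortion lambda here are illustrative, not from the original source):

# image is assumed to be a 3-D float32 image tensor
distorted = apply_with_random_selector(
    image,
    lambda img, case: tf.image.adjust_brightness(img, delta=0.1 * case),
    num_cases=2)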
  def testLoop_2(self):
    with self.test_session():
      zero = tf.constant(0)
      one = tf.constant(1)
      n = tf.constant(10)

      enter_i = control_flow_ops.enter(zero, "foo", False)
      enter_one = control_flow_ops.enter(one, "foo", True)
      enter_n = control_flow_ops.enter(n, "foo", True)

      merge_i = control_flow_ops.merge([enter_i, enter_i])[0]

      less_op = tf.less(merge_i, enter_n)
      cond_op = control_flow_ops.loop_cond(less_op)
      switch_i = control_flow_ops.switch(merge_i, cond_op)

      add_i = tf.add(switch_i[1], enter_one)

      with tf.device("/gpu:0"):
        next_i = control_flow_ops.next_iteration(add_i)
      merge_i.op._update_input(1, next_i)

      exit_i = control_flow_ops.exit(switch_i[0])
      result = exit_i.eval()
    self.assertAllEqual(10, result)
Example #10
def random_resize(im, size):
    choice = tf.random_uniform([], maxval=4, dtype=tf.int32)
    im = control_flow_ops.merge([
        tf.image.resize_images(
            control_flow_ops.switch(im, tf.equal(choice, method))[1], size,
            method) for method in range(4)
    ])[0]
    return im
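Note: in TF 1.x the integer method values 0-3 used above map to tf.image.ResizeMethod.BILINEAR, NEAREST_NEIGHBOR, BICUBIC, and AREA respectively, which is why maxval=4 matches range(4).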
Example #11
  def _process_switch(self, switch_op, ops_which_must_run,
                      last_op_using_resource_tensor, merge_for_resource):
    """Processes a switch node for a resource input.

    When TensorFlow creates a cond, it creates a control flow context for each
    branch of the cond. Each external tensor accessed by that branch is routed
    through a switch op, which gets created in the graph _after_ the op which
    uses that tensor is created.

    If the resource comes from another switch op we process that one first.

    _process_switch creates a corresponding merge node for the switch node. This
    merge node is added to the outer control flow context of the switch
    node. We also ensure that:

      1. The switch node executes after the previous op which used the resource
         tensor

      2. Any op which uses a resource output of the switch node executes before
         the merge for the switch node.

      3. The next op which uses the input resource to the switch node (which
         might be another switch node for the other branch of the conditional)
         will execute after the merge node is done.

      4. The merge node is marked as must_run so it will run even if no
         subsequent operation uses the resource.

    Args:
      switch_op: the switch op to be processed
      ops_which_must_run: the set of ops which must run
      last_op_using_resource_tensor: map from resource tensor to last op using
        it
      merge_for_resource: map from resource tensor to merge which must follow
        all usages of it.
    """
    inp = switch_op.inputs[0]
    if inp.dtype == dtypes_module.resource and inp.op.type == "Switch":
      self._process_switch(inp.op, ops_which_must_run,
                           last_op_using_resource_tensor, merge_for_resource)
    if switch_op.outputs[0] in merge_for_resource:
      return
    new_merge = control_flow_ops.merge(switch_op.outputs,
                                       name="artificial_merge")
    new_merge[0].op._control_flow_context = (  # pylint: disable=protected-access
        switch_op._control_flow_context.outer_context)  # pylint: disable=protected-access
    # Ensures the merge always runs
    ops_which_must_run.add(new_merge[0].op)
    if inp in last_op_using_resource_tensor:
      # Ensures the switch executes after the previous op using the resource.
      switch_op._add_control_input(last_op_using_resource_tensor[inp])  # pylint: disable=protected-access
    # Ensure the next op outside the cond happens after the merge.
    last_op_using_resource_tensor[inp] = new_merge[0].op
    if inp in merge_for_resource:
      merge_for_resource[inp]._add_control_input(new_merge[0].op)  # pylint: disable=protected-access
    for o in switch_op.outputs:
      # Ensures the merge will execute after all ops inside the cond
      merge_for_resource[o] = new_merge[0].op
  def _testSwitchMerge_1(self, use_gpu):
    with self.test_session(use_gpu=use_gpu):
      data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
      ports = tf.convert_to_tensor(True, name="ports")
      switch_op = control_flow_ops.switch(data, ports)
      merge_op = control_flow_ops.merge(switch_op)[0]

      result = merge_op.eval()
    self.assertAllEqual(np.arange(1, 7), result)
  def testSwitchMergeIdentity_1(self):
    with self.test_session():
      data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
      ports = tf.convert_to_tensor(True, name="ports")
      switch_op = control_flow_ops.switch(data, ports)
      merge_op = control_flow_ops.merge(switch_op)[0]
      id_op = tf.identity(merge_op)

      result = id_op.eval()
    self.assertAllEqual(np.arange(1, 7), result)
Example #16
def injectFaultMerge(inputs, b):
    "Function to call injectFault on Merge"
    # FIXME: Implement this functionality
    logging.debug("Calling Operator Merge")

    op = control_flow_ops.merge(inputs=[inputs, b])
    sess = tf.Session()
    res = sess.run(op)

    return res
  def testSwitchMergeLess_1(self):
    with self.test_session():
      data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
      zero = tf.convert_to_tensor(0)
      one = tf.convert_to_tensor(1)
      less_op = tf.less(zero, one)
      switch_op = control_flow_ops.switch(data, less_op)
      merge_op = control_flow_ops.merge(switch_op)[0]

      result = merge_op.eval()
    self.assertAllEqual(np.arange(1, 7), result)
  def testSwitchMergeAddIdentity_1(self):
    with self.test_session():
      data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
      ports = tf.convert_to_tensor(True, name="ports")
      switch_op = control_flow_ops.switch(data, ports)
      one = tf.constant(1)
      add_op = tf.add(switch_op[0], one)
      id_op = tf.identity(switch_op[1])
      merge_op = control_flow_ops.merge([add_op, id_op])[0]

      result = merge_op.eval()
    self.assertAllEqual(np.arange(1, 7), result)
  def testSwitchMergeAddMul_1(self):
    with self.test_session():
      data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
      ports = tf.convert_to_tensor(True, name="ports")
      switch_op = control_flow_ops.switch(data, ports)
      one = tf.constant(1)
      add_op = tf.add(switch_op[0], one)
      five = tf.constant(5)
      mul_op = tf.mul(switch_op[1], five)
      merge_op = control_flow_ops.merge([add_op, mul_op])[0]

      result = merge_op.eval()
    self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
Example #22
def random_grayscale(im):
    def convert(im, flag):
        if flag == 0:
            im = tf.image.rgb_to_grayscale(im)
            # use concat, not stack, so the result stays 3-D: [h, w, 1] -> [h, w, 3]
            im = tf.concat([im, im, im], axis=-1)
        return im

    # maxval matches the two cases handled below; with maxval=4, choices 2 and 3
    # would leave the merge with no taken input
    choice = tf.random_uniform([], maxval=2, dtype=tf.int32)
    im = control_flow_ops.merge([
        convert(control_flow_ops.switch(im, tf.equal(choice, flag))[1], flag)
        for flag in range(2)
    ])[0]
    return im
  def testSwitchMergeIndexedSlices(self):
    with self.test_session():
      values = tf.constant([1, 2, 3, 4, 5, 6])
      indices = tf.constant([0, 2, 4, 6, 8, 10])
      data = tf.IndexedSlices(values, indices)
      pred = tf.convert_to_tensor(True)
      switch_op = control_flow_ops.switch(data, pred)
      merge_op = control_flow_ops.merge(switch_op)[0]

      val = merge_op.values.eval()
      ind = merge_op.indices.eval()
    self.assertAllEqual(np.arange(1, 7), val)
    self.assertAllEqual(np.arange(0, 12, 2), ind)
    def testLoop_false(self):
        with self.test_session():
            false = tf.convert_to_tensor(False)
            n = tf.constant(10)

            enter_false = control_flow_ops.enter(false, "foo_1", False)
            enter_n = control_flow_ops.enter(n, "foo_1", False)

            merge_n = control_flow_ops.merge([enter_n], name="merge_n")[0]
            switch_n = control_flow_ops.switch(merge_n, enter_false)
            exit_n = control_flow_ops.exit(switch_n[0])

            result = exit_n.eval()
        self.assertAllEqual(10, result)
def apply_with_random_selector(x, func, num_cases):
  """Computes func(x, sel), with sel sampled from [0...num_cases-test].
      Args:
        x: input Tensor.
        func: Python function to apply.
        num_cases: Python int32, number of cases to sample sel from.
      Returns:
        The result of func(x, sel), where func receives the value of the
        selector as a python integer, but sel is sampled dynamically.
      """
  sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
  # Pass the real x only to one of the func calls.
  return control_flow_ops.merge([
      func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
      for case in range(num_cases)
  ])[0]
  def create_q_net(self, state_inputs,  # NHWC format.
                   action_inputs_training_q,
                   scope, trainable,
                   action_inputs_training_policy=None,  # None for target net.
                   cond_training_q=None  # bool to control switch. can be None for target net.
                   ):
    with tf.variable_scope(scope):
      # input normalization layer
      prev_layer = self.input_normalizer(state_inputs, **self.input_norm_params)

      ##fc layers
      l = 1  # start from fc-1 as 1
      for n_unit, activation, initializer, normalizer, norm_param, regularizer in zip(
              self.n_fc_units, self.fc_activations, self.fc_initializers,
              self.fc_normalizers, self.fc_norm_params, self.fc_regularizers):
        # feed in the action inputs
        if l == DDPG_CFG.include_action_fc_layer:
          if action_inputs_training_policy is None:  # target net
            actions = action_inputs_training_q
          else:  # add logic for selecting online net action inputs
            # switch returns (output_false, output_true)
            (_, sw_action_training_q) = switch(data=action_inputs_training_q,
                                                                   pred=cond_training_q,
                                                                   name='switch_actions_training_q')
            (sw_action_training_policy, _) = switch(data=action_inputs_training_policy,
                                                                        pred=cond_training_q,
                                                                        name='switch_actions_training_policy')
            (actions, _) = merge([sw_action_training_q, sw_action_training_policy])

          prev_layer = tf.concat([prev_layer, actions], axis=1)
        l += 1
        prev_layer = fully_connected(prev_layer, num_outputs=n_unit, activation_fn=activation,
                                     weights_initializer=initializer,
                                     weights_regularizer=regularizer,
                                     normalizer_fn=normalizer,  # when a normalizer is specified, bias is ignored.
                                     normalizer_params=norm_param,
                                     trainable=trainable)

      # Output layer. fully_connected would create a bias, which is not wanted here.
      output_layer = fully_connected(inputs=prev_layer,num_outputs=1,
                                     activation_fn=None,
                                     weights_initializer=self.output_layer_initializer,
                                     weights_regularizer=self.output_layer_regularizer,
                                     biases_initializer=None, # to skip bias in output layer
                                    trainable=trainable)

    return output_layer
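A minimal standalone sketch of the switch/merge selection idiom used above (names hypothetical): route one of two tensors down a single path based on a boolean predicate.

pred = tf.placeholder(tf.bool)
a = tf.constant(1.0)
b = tf.constant(2.0)
(_, a_if_true) = control_flow_ops.switch(a, pred)   # produced only when pred is True
(b_if_false, _) = control_flow_ops.switch(b, pred)  # produced only when pred is False
selected, _ = control_flow_ops.merge([a_if_true, b_if_false])  # whichever is alive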
Example #30
def Test():
    data = tf.constant([1, 2, 3, 4, 5, 6])
    zero = tf.convert_to_tensor(0)
    one = tf.convert_to_tensor(1)
    less_op = tf.less(zero, one)
    switch_op = control_flow_ops.switch(data, less_op)
    merge_op = control_flow_ops.merge(switch_op)[0]
    result = tf.transpose(merge_op)

    tensor_info_result = tf.compat.v1.saved_model.utils.build_tensor_info(
        result)

    signature_def = tf.saved_model.signature_def_utils.build_signature_def(
        inputs=None,
        outputs={'result': tensor_info_result},
        method_name='some_function')

    return {'key': signature_def}
Example #31
def Test():
    data = tf.constant([1, 2, 3, 4, 5, 6])
    # Create placeholders to prevent constant folding.
    x_op = tf.placeholder(dtype=tf.int32)
    y_op = tf.placeholder(dtype=tf.int32)
    less_op = tf.less(x_op, y_op)
    switch_op = control_flow_ops.switch(data, less_op)
    merge_op = control_flow_ops.merge(switch_op)[0]
    result = tf.transpose(merge_op)

    tensor_info_result = tf.compat.v1.saved_model.utils.build_tensor_info(
        result)

    signature_def = tf.saved_model.signature_def_utils.build_signature_def(
        inputs=None,
        outputs={'result': tensor_info_result},
        method_name='some_function')

    return {'key': signature_def}, None, None
    def testSwitchMerge(self):
        with self.cached_session() as sess:
            predicate = array_ops.placeholder(dtypes.bool)
            with self.test_scope():
                false_output, true_output = control_flow_ops.switch(
                    data=constant_op.constant(42.0), pred=predicate)
                with ops.control_dependencies(
                    [array_ops.identity(false_output)]):
                    five = constant_op.constant(5.0)
                with ops.control_dependencies(
                    [array_ops.identity(true_output)]):
                    ten = constant_op.constant(10.0)
                result = control_flow_ops.merge([five, ten])

        with_true = sess.run(result, {predicate: True})
        self.assertEqual(with_true.output, 10.0)
        self.assertEqual(with_true.value_index, 1)

        with_false = sess.run(result, {predicate: False})
        self.assertEqual(with_false.output, 5.0)
        self.assertEqual(with_false.value_index, 0)
Example #33
def apply_with_random_selector(x, func, num_cases):
  """Computes func(x, sel), with sel sampled from [0...num_cases-1].

  TODO(coreylynch): add as a dependency, when slim or tensorflow/models are
  pipfied.
  Source:
  https://raw.githubusercontent.com/tensorflow/models/a9d0e6e8923a4/slim/preprocessing/inception_preprocessing.py

  Args:
    x: input Tensor.
    func: Python function to apply.
    num_cases: Python int32, number of cases to sample sel from.
  Returns:
    The result of func(x, sel), where func receives the value of the
    selector as a python integer, but sel is sampled dynamically.
  """
  sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
  # Pass the real x only to one of the func calls.
  return control_flow_ops.merge([
      func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
      for case in range(num_cases)])[0]
Example #34
def random_distort_color(im):
    def distort(im, order):
        if order == 0:
            im = random_contrast(
                random_hue(random_saturation(random_brightness(im))))
        elif order == 1:
            im = random_hue(
                random_contrast(random_brightness(random_saturation(im))))
        elif order == 2:
            im = random_saturation(
                random_brightness(random_hue(random_contrast(im))))
        else:
            im = random_brightness(
                random_contrast(random_saturation(random_hue(im))))
        return im

    choice = tf.random_uniform([], maxval=4, dtype=tf.int32)
    im = control_flow_ops.merge([
        distort(
            control_flow_ops.switch(im, tf.equal(choice, order))[1], order)
        for order in range(4)
    ])[0]
    return im
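The random_brightness/random_saturation/random_hue/random_contrast helpers are not shown in this snippet; presumably they wrap the stock tf.image ops, e.g. (an assumption, not the original code):

def random_brightness(im):
    # assumed wrapper; max_delta is a guess
    return tf.image.random_brightness(im, max_delta=32.0 / 255.0)

def random_saturation(im):
    # assumed wrapper; bounds are a guess
    return tf.image.random_saturation(im, lower=0.5, upper=1.5)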
  def create_q_net(self, state_inputs,  # NHWC format.
                   action_inputs_training_q,
                   scope, trainable,
                   action_inputs_training_policy=None,  # None for target net.
                   cond_training_q=None  # bool to control switch. can be None for target net.
                   ):
    prev_layer = state_inputs
    conv_layers = []
    fc_layers = []

    with tf.variable_scope(scope):
      ##conv layers
      # TODO add batch_norm to input process.
      for n_maps, kernel_size, stride, padding, activation, initializer, \
          normalizer, norm_param, regularizer in zip(
              self.conv_n_feature_maps, self.conv_kernel_sizes,
              self.conv_strides, self.conv_padding, self.conv_activations, self.conv_initializers,
              self.conv_normalizers, self.conv_norm_params, self.conv_regularizers):
        prev_layer = conv2d(prev_layer, num_outputs=n_maps, kernel_size=kernel_size,
                            stride=stride, padding=padding,
                            activation_fn=activation,
                            data_format='NHWC',
                            normalizer_fn=normalizer,
                            normalizer_params=norm_param,
                            weights_initializer=initializer,
                            weights_regularizer=regularizer,
                            trainable=trainable)
        conv_layers.append(prev_layer)
      # end conv layer

      ##fc layers
      # flat the output of last conv layer to (batch_size, n_fc_in)
      # TODO calc n_fc_in from the prev_layer tensor shape.
      prev_layer = tf.reshape(conv_layers[-1], shape=[-1, self.n_fc_in])
      l = 1  # start from fc-1 as 1
      for n_unit, activation, initializer, normalizer, norm_param,regularizer in zip(
              self.n_fc_units, self.fc_activations, self.fc_initializers,
              self.fc_normalizers, self.fc_norm_params, self.fc_regularizers):
        # include action_inputs
        if l == DDPG_CFG.include_action_fc_layer:
          # prev_layer is the fc-1 output, shape (batch_size, n_fc1_units)
          # action_inputs shape (batch_size, a_dim)
          # NOTE: the online and target Q nets now differ, but the differing
          # part has no parameters, so soft-update and copy-init from online
          # to target still work.
          if action_inputs_training_policy is None:  # target net
            actions = action_inputs_training_q
          else:  # add logic for selecting online net action inputs
            # switch returns (output_false, output_true)
            (_, sw_action_training_q) = switch(data=action_inputs_training_q,
                                                                   pred=cond_training_q,
                                                                   name='switch_actions_training_q')
            (sw_action_training_policy, _) = switch(data=action_inputs_training_policy,
                                                                        pred=cond_training_q,
                                                                        name='switch_actions_training_policy')
            (actions, _) = merge([sw_action_training_q, sw_action_training_policy])

          prev_layer = tf.concat([prev_layer, actions], axis=1)
        l += 1
        prev_layer = fully_connected(prev_layer, num_outputs=n_unit, activation_fn=activation,
                                     weights_initializer=initializer,
                                     normalizer_fn=normalizer,
                                     normalizer_params=norm_param,
                                     weights_regularizer=regularizer,
                                     trainable=trainable)
        fc_layers.append(prev_layer)

      # end fc layers

      ##output layer
      # num_output = 1 , means Q(s,a) is a scalar.
      output_layer = fully_connected(fc_layers[-1], num_outputs=1, activation_fn=None,
                                     weights_initializer=self.output_layer_initializer,
                                     weights_regularizer=self.output_layer_regularizer,
                                     trainable=trainable)
      # # linear layer connected to 1 output representing Q(s,a)
      # # Weights are init to Uniform[-3e-3, 3e-3]
      # w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
      # out = tflearn.fully_connected(net, 1, weights_init=w_init)
      # return inputs, action, out
    # == == end with variable_scope() ==
    return output_layer
Example #36
    def create_q_net(
        self,
        state_inputs,  # NHWC format.
        action_inputs_training_q,
        scope,
        trainable,
        action_inputs_training_policy=None,  # None for target net.
        cond_training_q=None  # bool to control switch. can be None for target net.
    ):
        with tf.variable_scope(scope):
            #input norm layer
            prev_layer = self.input_normalizer(state_inputs,
                                               **self.input_norm_params)

            ##fc layers
            # flat the output of last conv layer to (batch_size, n_fc_in)
            l = 1  # start from fc-1 as 1
            for n_unit, activation, initializer, normalizer, norm_param, regularizer in zip(
                    self.n_fc_units, self.fc_activations, self.fc_initializers,
                    self.fc_normalizers, self.fc_norm_params,
                    self.fc_regularizers):
                # include action_inputs
                if l == DDPG_CFG.include_action_fc_layer:
                    # prev_layer is the fc-1 output, shape (batch_size, n_fc1_units)
                    # action_inputs shape (batch_size, a_dim)
                    # NOTE: the online and target Q nets now differ, but the
                    # differing part has no parameters, so soft-update and
                    # copy-init from online to target still work.
                    if action_inputs_training_policy is None:  # target net
                        actions = action_inputs_training_q
                    else:  # add logic for selecting online net action inputs
                        # switch returns (output_false, output_true)
                        (_, sw_action_training_q) = switch(
                            data=action_inputs_training_q,
                            pred=cond_training_q,
                            name='switch_actions_training_q')
                        (sw_action_training_policy,
                         _) = switch(data=action_inputs_training_policy,
                                     pred=cond_training_q,
                                     name='switch_actions_training_policy')
                        (actions, _) = merge(
                            [sw_action_training_q, sw_action_training_policy])

                    prev_layer = tf.concat([prev_layer, actions], axis=1)
                l += 1
                prev_layer = fully_connected(
                    prev_layer,
                    num_outputs=n_unit,
                    activation_fn=activation,
                    weights_initializer=initializer,
                    weights_regularizer=regularizer,
                    normalizer_fn=normalizer,  # when a normalizer is specified, bias is ignored.
                    normalizer_params=norm_param,
                    trainable=trainable)

            # end fc layers

            ## Output layer. fully_connected would create a bias, which is not wanted here.
            output_layer = fully_connected(
                inputs=prev_layer,
                num_outputs=1,
                activation_fn=None,
                weights_initializer=self.output_layer_initializer,
                weights_regularizer=self.output_layer_regularizer,
                biases_initializer=None,  # to skip bias in output layer
                trainable=trainable)

            # num_output = 1 , means Q(s,a) is a scalar.
            # output_layer = fully_connected(prev_layer, num_outputs=1, activation_fn=None,
            #                                weights_initializer=self.output_layer_initializer,
            #                                weights_regularizer=self.output_layer_regularizer,
            #                                trainable=trainable)

        # == == end with variable_scope() ==
        return output_layer
Example #37
    def __network(self,
                  input,
                  netname,
                  online=True,
                  reg=None,
                  bn_training=False):
        collections = [
            tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, netname + '_train_vars'
        ]

        with tf.variable_scope(netname, reuse=tf.AUTO_REUSE):
            policyLayers = [self.__state_dim, 64, 128, 256, 128, 64]
            QLayers = [
                self.__state_dim + self.__action_dim, 64, 128, 256, 128, 64
            ]
            with tf.variable_scope('policy', reuse=tf.AUTO_REUSE):
                i = input
                #i = tf.layers.batch_normalization(input,training=bn_training)
                for index, layer in enumerate(policyLayers):
                    i = self.__dense(i,
                                     layer,
                                     collections=collections,
                                     name='dense_' + str(index),
                                     reg=reg)

                act_x = self.__dense(i,
                                     self.__action_dim,
                                     collections=collections,
                                     active=False,
                                     name='dense_action',
                                     reg=reg)
                with tf.variable_scope('act'):
                    act = tf.tanh(act_x)

            if online:
                (sw_policy, _) = switch(act,
                                        self.con_training_q,
                                        name='sw_policy')
                (_, sw_action) = switch(self.action,
                                        self.con_training_q,
                                        name='sw_action')
                (act, _) = merge([sw_policy, sw_action], name='merge')

            with tf.variable_scope('Q', reuse=tf.AUTO_REUSE):
                i = tf.concat([act, input], 1)
                #i = tf.layers.batch_normalization(i,training=bn_training)
                for index, layer in enumerate(QLayers):
                    i = self.__dense(i,
                                     layer,
                                     collections=collections,
                                     name='dense_' + str(index),
                                     reg=reg)
                i = self.__dense(i,
                                 1,
                                 collections=collections,
                                 active=False,
                                 name='q_value',
                                 reg=reg)

        if online:
            return [act_x, act, i]
        else:
            return i
Example #38
  """
    num_inputs = len(x)
    rand_sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
    # Pass the real x only to one of the func calls.

    tuples = [list() for t in x]
    for case in range(num_cases):
        new_x = [
            control_flow_ops.switch(t, tf.equal(rand_sel, case))[1] for t in x
        ]
        output = func(tuple(new_x), case)
        for j in range(num_inputs):
            tuples[j].append(output[j])

    for i in range(num_inputs):
        tuples[i] = control_flow_ops.merge(tuples[i])[0]
    return tuple(tuples)
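A hypothetical usage sketch for this tuple variant (image and mask are illustrative): every tensor in the tuple is routed through the same randomly drawn case.

image_out, mask_out = apply_with_random_selector_tuples(
    (image, mask),
    lambda inputs, case: (tf.image.rot90(inputs[0], k=case),
                          tf.image.rot90(inputs[1], k=case)),
    num_cases=4)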


def _random_integer(minval, maxval, seed):
    """Returns a random 0-D tensor between minval and maxval.

  Args:
    minval: minimum value of the random tensor.
    maxval: maximum value of the random tensor.
    seed: random seed.

  Returns:
    A random 0-D tensor between minval and maxval.
  """
    return tf.random_uniform([],
                             minval=minval,
                             maxval=maxval,
                             dtype=tf.int32,
                             seed=seed)
Example #39
def handler(cmd_queue, chan):
    global object_dict
    global object_id
    global callback_stack

    global initialized
    if not initialized:
        callback_stack = []
        object_dict = dict()
        object_id = 1
        # TODO: forward logging or disable it in test
        tf.logging.set_verbosity(tf.logging.INFO)
        initialized = True
        print("handler is initialized")

    while True:
        cmd = cmd_queue.get(block=True)
        cmd_id = cmd.__get_cmd_id()
        print("new command id %d" % cmd_id)

        try:
            if cmd_id == TF_PY_NW_CALLBACK_DONE:
                param.base.done = STATUS_TASK_DONE
                ret = fcntl.ioctl(kvm_fd, IOCTL_KVM_NOTIFY_TASK_FINISHED, task.node_id)
                if ret < 0:
                    print("notify task completion failed: %d\n" % ret);
                if callback_stack and \
                   callback_stack[-1]["callback_id"] == param.base.object_id:
                    print("callback is finished")
                    return STATUS_CALLBACK_DONE
                else:
                    print("callback is error")
                    return STATUS_CALLBACK_ERROR

            if cmd_id == TF_PY_SESSION_INIT:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                sess = tf.Session(param0, param1, param2)

                # assign object_id
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = sess
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()

            #elif cmd_id == TF_PY_SESSION_ENTER:
            #    sess = object_dict[param.base.object_id]
            #    ctx_sess = sess.__enter__()
            #    if sess is ctx_sess:
            #        pass
            #    else: # unlikely
            #        print("unlikely to search for sess")
            #        param.base.object_id = next(obj_id for obj_id, obj in
            #                object_dict.items() if obj is ctx_sess)

            #elif cmd_id == TF_PY_SESSION_EXIT:
            #    param1 = parse_param(vm_id, mm, param, param.param1)
            #    param2 = parse_param(vm_id, mm, param, param.param2)
            #    param3 = parse_param(vm_id, mm, param, param.param3)

            #    sess = object_dict[param.base.object_id]
            #    sess.__exit__(param1, param2, param3)

            #elif cmd_id == TF_PY_SESSION_DEL:
            #    sess = object_dict[param.base.object_id]
            #    sess.__del__()

            # deprecated
            #elif cmd_id == TF_PY_SESSION_RUN:
            #    sess = object_dict[param.base.object_id]
            #    param1 = parse_param(vm_id, mm, param, param.param1)

            #    if type(param1) == NwObject:
            #        print("get NwObject=%d" % param1.object_id())
            #        param1 = object_dict[param1.object_id()]
            #        print(param1)

            #    ret_val = sess.run(param1)
            #    print(ret_val)

            #    writeback_result(vm_id, mm, param, param.ret_val1, ret_val);

            elif cmd_id == TF_PY_TPU_CLUSTER_RESOLVER_INIT:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                print("TPUClusterResolver", param0, param1, param2)
                tpu_grpc = tf.contrib.cluster_resolver.TPUClusterResolver(
                        tpu=param0, zone=param1, project=param2)

                # assign object_id
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = tpu_grpc
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()

            # deprecated
            elif cmd_id == TF_PY_TPU_CLUSTER_RESOLVER_MASTER:
                tpu_grpc = object_dict[cmd.__get_object_id()]
                # FIXED: may have parameters
                tpu_grpc_url = tpu_grpc.master()

                # serialize return value
                dump_ret, len_ret = pickle_arg(tpu_grpc_url)
                total_buffer_size = chan.buffer_size(len_ret)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), total_buffer_size)
                ret_cmd.__set_cmd_base(TENSORFLOW_PY_API, TF_PY_TPU_CLUSTER_RESOLVER_MASTER_RET)
                offset_ret = ret_cmd.attach_buffer(dump_ret, len_ret)
                ret_cmd.__set_tf_args([(len_ret, offset_ret)])
                ret_cmd.send()

            elif cmd_id == TF_PY_TPU_INITIALIZE_SYSTEM:
                # TODO: may have parameters
                ts = tpu.initialize_system()

                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = ts
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()

            elif cmd_id == TF_PY_TPU_SHUTDOWN_SYSTEM:
                # TODO: may have parameters
                ts = tpu.shutdown_system()

                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = ts
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()

            elif cmd_id == TF_PY_GLOBAL_VARIABLES_INITIALIZER:
                # TODO: may have parameters
                ts = tf.global_variables_initializer()

                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = ts
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()

            elif cmd_id == TF_PY_ONES:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                print(param0)
                if param1 is None:
                    param1 = dtypes.float32
                print(param1)
                var = tf.ones(param0, param1)

                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = var
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()

            elif cmd_id == TF_PY_RANDOM_UNIFORM:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param3 = unpickle_arg(cmd, 3)
                param4 = unpickle_arg(cmd, 4)
                param5 = unpickle_arg(cmd, 5)
                if param1 is None:
                    param1 = 0
                if param3 is None:
                    param3 = dtypes.float32
                print(param0, param1, param2, param3)
                var = tf.random_uniform(param0, param1, param2, param3, param4, param5)

                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = var
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()

            elif cmd_id == TF_PY_TRANSPOSE:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param3 = unpickle_arg(cmd, 3)
                param0 = object_dict[param0.object_id()]
                if param2 is None:
                    param2 = "transpose"
                if param3 is None:
                    param3 = False
                print("transpose", param0, param1, param2, param3)
                var = tf.transpose(param0, param1, param2, param3)

                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = var
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()

            elif cmd_id == TF_PY_CAST:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param0 = object_dict[param0.object_id()]
                print("cast", param0, param1, param2)
                var = tf.cast(param0, param1, param2)

                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = var
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()

            elif cmd_id == TF_PY_EXPAND_DIMS:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param3 = unpickle_arg(cmd, 3)
                param0 = object_dict[param0.object_id()]
                print("expand_dims", param0, param1, param2, param3)
                var = tf.expand_dims(param0, param1, param2, param3)

                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = var
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()

            elif cmd_id == TF_PY_CONCAT:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param0 = object_dict[param0.object_id()]
                if param2 is None:
                    param2 = "concat"
                print("concat", param0, param1, param2)
                var = tf.concat(param0, param1, param2)

                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = var
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()

            elif cmd_id == TF_PY_EQUAL:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param0 = object_dict[param0.object_id()]
                print("equal", param0, param1, param2)
                if isinstance(param1, NwObject):
                    param1 = object_dict[param1.object_id()]
                result = tf.equal(param0, param1, param2)
                print(result)

                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = result
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()

            elif cmd_id == TF_PY_FIXED_LEN_FEATURE:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)

                feature = tf.FixedLenFeature(param0, param1, param2)
                print(feature)

                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = feature
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()

            elif cmd_id == TF_PY_VAR_LEN_FEATURE:
                param0 = unpickle_arg(cmd, 0)

                feature = tf.VarLenFeature(param0)
                print(feature)

                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = feature
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()

            elif cmd_id == TF_PY_PARSE_SINGLE_EXAMPLE:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param3 = unpickle_arg(cmd, 3)
                print(param1, param2)

                # expand embedded NwObject
                if isinstance(param0, NwObject):
                    param0 = object_dict[param0.object_id()]
                dict_walker(param1)
                print("after translation", param0, param1)

                result = tf.parse_single_example(param0, param1, param2, param3)
                print(result)
                dict_mapper(result)
                print(result)

                dump_ret, len_ret = pickle_arg(result)
                total_buffer_size = chan.buffer_size(len_ret)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), total_buffer_size)
                ret_cmd.__set_cmd_base(TENSORFLOW_PY_API, TF_PY_PARSE_SINGLE_EXAMPLE_RET)
                offset_ret = ret_cmd.attach_buffer(dump_ret, len_ret)
                ret_cmd.__set_tf_args([(len_ret, offset_ret)])
                ret_cmd.send()

            elif cmd_id == TF_PY_CONTROL_FLOW_OPS_SWITCH:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param3 = unpickle_arg(cmd, 3)
                param0 = object_dict[param0.object_id()]
                param1 = object_dict[param1.object_id()]
                print("switch", param0, param1, param2, param3)
                result = control_flow_ops.switch(param0, param1, param2, param3)
                print(result)

                mapped_tuple = tuple_mapper(result, [0, 1])
                print(mapped_tuple)

                dump_ret, len_ret = pickle_arg(mapped_tuple)
                total_buffer_size = chan.buffer_size(len_ret)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), total_buffer_size)
                ret_cmd.__set_cmd_base(TENSORFLOW_PY_API, TF_PY_CONTROL_FLOW_OPS_SWITCH_RET)
                offset_ret = ret_cmd.attach_buffer(dump_ret, len_ret)
                ret_cmd.__set_tf_args([(len_ret, offset_ret)])
                ret_cmd.send()

            elif cmd_id == TF_PY_CONTROL_FLOW_OPS_MERGE:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param0 = object_dict[param0.object_id()]
                print("merge", param0, param1)
                list_walker(param0)
                print("merge-new", param0, param1)
                result = control_flow_ops.merge(param0, param1)
                print(result)

                mapped_tuple = tuple_mapper(result, [0])
                print(mapped_tuple)
                dump_ret, len_ret = pickle_arg(mapped_tuple)
                total_buffer_size = chan.buffer_size(len_ret)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), total_buffer_size)
                ret_cmd.__set_cmd_base(TENSORFLOW_PY_API, TF_PY_CONTROL_FLOW_OPS_MERGE_RET)
                offset_ret = ret_cmd.attach_buffer(dump_ret, len_ret)
                ret_cmd.__set_tf_args([(len_ret, offset_ret)])
                ret_cmd.send()

            elif cmd_id == TF_PY_TPU_REWRITE:
                # TODO: may have more parameters
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                # default parameter
                if param1 is None:
                    param1 = None
                # expand embedded NwObject
                list_walker(param1)
                func = tpu.rewrite(param0, param1)
                print("Rewrite result:", func, " object id =", object_id)

                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = func
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()

            elif cmd_id == TF_PY_TPU_RUN_CONFIG:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param3 = unpickle_arg(cmd, 3)
                param4 = unpickle_arg(cmd, 4)
                # default parameter
                if param0 is None:
                    param0 = None
                if param1 is None:
                    param1 = None
                if param2 is None:
                    param2 = None
                if param3 is None:
                    param3 = None

                # expand embedded NwObject
                param3 = object_dict[param3.object_id()]
                print(param3, param4)
                func = tpu.RunConfig(param0, param1, param2, param3, **param4)

                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = func
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()

            elif cmd_id == TF_PY_TPU_TPU_ESTIMATOR:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param3 = unpickle_arg(cmd, 3)
                param4 = unpickle_arg(cmd, 4)
                param5 = unpickle_arg(cmd, 5)
                param6 = unpickle_arg(cmd, 6)
                param7 = unpickle_arg(cmd, 7)
                param8 = unpickle_arg(cmd, 8)
                param9 = unpickle_arg(cmd, 9)
                param10 = unpickle_arg(cmd, 10)
                param11 = unpickle_arg(cmd, 11)
                # default parameter
                if param0 is None:
                    param0 = None
                if param1 is None:
                    param1 = None
                if param2 is None:
                    param2 = None
                if param3 is None:
                    param3 = None
                if param4 is None:
                    param4 = True
                if param5 is None:
                    param5 = None
                if param6 is None:
                    param6 = None
                if param7 is None:
                    param7 = None
                if param8 is None:
                    param8 = None
                if param9 is None:
                    param9 = True
                if param10 is None:
                    param10 = True
                if param11 is None:
                    param11 = None

                # expand embedded NwObject
                param2 = object_dict[param2.object_id()]
                print(param2)
                func = tpu.TPUEstimator(param0, param1, param2, param3, param4,
                                        param5, param6, param7, param8, param9,
                                        param10, param11)

                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = func
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()

            elif cmd_id == TF_PY_IMAGE_RESIZE_IMAGES:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param3 = unpickle_arg(cmd, 3)
                param4 = unpickle_arg(cmd, 4)
                # default parameter
                if param2 is None:
                    param2 = ResizeMethod.BILINEAR
                if param3 is None:
                    param3 = False
                if param4 is None:
                    param4 = False

                # expand embedded NwObject
                param0 = object_dict[param0.object_id()]
                print(param0)
                img = tf.image.resize_images(param0, param1, param2, param3, param4)

                # TODO: it may return a float
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = img
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()

            elif cmd_id == TF_PY_SLICE:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param3 = unpickle_arg(cmd, 3)

                # expand embedded NwObject
                print(param0, param1, param2, param3)
                param0 = object_dict[param0.object_id()]
                ret = tf.slice(param0, param1, param2, param3)

                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = ret
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()

            elif cmd_id == TF_PY_SHAPE:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                if param2 is None:
                    param2 = dtypes.int32

                # expand embedded NwObject
                print(param0, param1, param2)
                param0 = object_dict[param0.object_id()]
                ret = tf.shape(param0, param1, param2)

                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = ret
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()

            elif cmd_id == TF_PY_IMAGE_SAMPLE_DISTORTED_BOUNDING_BOX:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param3 = unpickle_arg(cmd, 3)
                param4 = unpickle_arg(cmd, 4)
                param5 = unpickle_arg(cmd, 5)
                param6 = unpickle_arg(cmd, 6)
                param7 = unpickle_arg(cmd, 7)
                param8 = unpickle_arg(cmd, 8)
                param9 = unpickle_arg(cmd, 9)
                # default parameter
                if param4 is None:
                    param4 = 0.1

                print("sample_distorted_bounding_box", param0, param1)
                result = tf.image.sample_distorted_bounding_box(
                        param0, param1, param2, param3, param4, param5, param6,
                        param7, param8, param9)
                print(result)

                mapped_tuple = tuple_mapper(result, [0, 1, 2])
                print(mapped_tuple)
                dump_ret, len_ret = pickle_arg(mapped_tuple)
                total_buffer_size = chan.buffer_size(len_ret)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), total_buffer_size)
                # NOTE: the original reused TF_PY_CONTROL_FLOW_OPS_MERGE_RET
                # here, apparently a copy-paste slip; the *_RET id below
                # follows the naming pattern of the other branches and is
                # presumed correct.
                ret_cmd.__set_cmd_base(TENSORFLOW_PY_API,
                                       TF_PY_IMAGE_SAMPLE_DISTORTED_BOUNDING_BOX_RET)
                offset_ret = ret_cmd.attach_buffer(dump_ret, len_ret)
                ret_cmd.__set_tf_args([(len_ret, offset_ret)])
                ret_cmd.send()

            elif cmd_id == TF_PY_IMAGE_DRAW_BOUNDING_BOXES:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)

                # expand embedded NwObject
                print(param0, param1, param2)
                param0 = object_dict[param0.object_id()]
                param1 = object_dict[param1.object_id()]
                ret = tf.image.draw_bounding_boxes(param0, param1, param2)

                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = ret
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()

            elif cmd_id == TF_PY_IMAGE_DECODE_JPEG:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param3 = unpickle_arg(cmd, 3)
                param4 = unpickle_arg(cmd, 4)
                param5 = unpickle_arg(cmd, 5)
                param6 = unpickle_arg(cmd, 6)
                param7 = unpickle_arg(cmd, 7)

                if param1 is None:
                    param1 = 0
                if param2 is None:
                    param2 = 1
                if param3 is None:
                    param3 = True
                if param4 is None:
                    param4 = False
                if param5 is None:
                    param5 = 1
                if param6 is None:
                    param6 = ""
                param0 = object_dict[param0.object_id()]
                ret = tf.image.decode_jpeg(param0, param1, param2, param3,
                        param4, param5, param6, param7)

                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = ret
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()

            elif cmd_id == TF_PY_IMAGE_CONVERT_IMAGE_DTYPE:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param3 = unpickle_arg(cmd, 3)

                # expand embedded NwObject
                print(param0, param1, param2, param3)
                param0 = object_dict[param0.object_id()]
                if param2 is None:
                    param2 = False
                ret = tf.image.convert_image_dtype(param0, param1, param2, param3)

                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = ret
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()

            elif cmd_id == TF_PY_DATA_DATASET_LIST_FILES:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)

                print(param0, param1, param2)
                if isinstance(param0, NwObject):
                    param0 = object_dict[param0.object_id()]
                ret = tf.data.Dataset.list_files(param0, param1, param2)

                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = ret
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()

            elif cmd_id == TF_PY_NW_OBJECT:
                obj = object_dict[cmd.__get_object_id()]
                name = unpickle_arg(cmd, 0)
                args = unpickle_arg(cmd, 1)
                kwargs = unpickle_arg(cmd, 2)
                print("NwObject", obj, name, args, kwargs)

                # expand embedded NwObject
                args = list(args)
                list_walker(args)
                args = tuple(args)
                dict_walker(kwargs)
                print("after translation", obj, name, args, kwargs)

                # run
                result = getattr(obj, name)(*(args or []), **(kwargs or {}))
                print("analyze type", type(result), result)

                # TODO: go through tuple, dict or list
                if isinstance(result, tuple):
                    result = tuple_mapper(result, range(len(result)))
                if isinstance(result, dict):
                    dict_mapper(result)
                if isinstance(result, list):
                    list_mapper(result)

                ret_cmd = None

                # serialize return value
                # if isinstance(result, list):
                #     # Check whether a nested list pickles
                #     # https://github.com/uqfoundation/dill/issues/307
                #     pickleable = pickle.pickles(reduce(operator.add, result))
                # else:
                #     pickleable = pickle.pickles(result)
                pickleable = True
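                # With the pickles() probe disabled above, the by-value vs.
                # by-reference decision rests entirely on is_unpickleable_type().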
                if is_unpickleable_type(result) or not pickleable:
                    ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                    object_dict[object_id] = result
                    ret_cmd.__set_object_id(object_id)
                    object_id += 1

                else:
                    dump_ret, len_ret = pickle_arg(result)
                    total_buffer_size = chan.buffer_size(len_ret)
                    ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), total_buffer_size)
                    ret_cmd.__set_cmd_base(TENSORFLOW_PY_API, TF_PY_NW_OBJECT_RET)
                    offset_ret = ret_cmd.attach_buffer(dump_ret, len_ret)
                    ret_cmd.__set_object_id(0)
                    ret_cmd.__set_tf_args([(len_ret, offset_ret)])

                ret_cmd.send()

            elif cmd_id == TF_PY_NW_METHOD:
                # Reuse as callback

                #ins = parse_param(vm_id, mm, param, param.param1)
                #name = parse_param(vm_id, mm, param, param.param2)
                #print(ins, name)

                #method = getattr(ins, name)
                #print(method)
                #object_dict[object_id] = method

                cw = callback_constructor(object_id, callback_param,
                        param, mm, vm_id, cmd_queue, kvm_fd)
                object_dict[object_id] = cw
                param.base.object_id = object_id
                object_id += 1

            elif cmd_id == TF_PY_NW_CALLBACK_TEST:
                nw_func = parse_param(vm_id, mm, param, param.param1)
                print(nw_func, nw_func.object_id())
                func = object_dict[nw_func.object_id()]
                print("callback func", func)
                x = parse_param(vm_id, mm, param, param.param2)
                y = parse_param(vm_id, mm, param, param.param3)
                result = func(x, y)
                print(result)
                writeback_result(vm_id, mm, param, param.ret_val1, result)

            else:
                print("unsupported Tensorflow API %d" % cmd_id)

        except Exception as error:
            print("fault: ", str(error))
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print(exc_type, fname, exc_tb.tb_lineno)
            traceback.print_stack()

        cmd.free_command()
        print("finished cmd %d" % cmd_id)
Example #40
  def create_op(self, *args, **kwargs):
    """Creates an `Operation`.

    For operations of the following form

      orig_value = op(*args, **kwargs)

    this function constructs the following subgraph:

      v = Variable()
      if v is not initialized:
        orig_value = op(*args, **kwargs)
        v.assign(orig_value) # Initializes v
        return orig_value
      else:
        return v

    The above transformation is not performed, and the original op is returned
    as is, if any of the following is true:
    * The `_return_as_is` flag is set to true.
    * `op_type` is listed in `_PASS_THROUGH_OPS`.
    * The op has no outputs.
    * One of the op's return values has a ref type.

    Args:
      *args: Arguments for create_op()
      **kwargs: Keyword arguments for create_op(). Refer to
        tensorflow.python.framework.ops.Graph.create_op() for the mandatory
        and optional arguments.

    Returns:
      An Operation.

    Raises:
      UnimplementedError: if output type is a reference and the op's type
        is not one of the supported types in `_REF_OPS_WHITELIST`.
    """
    op_type = kwargs['op_type'] if 'op_type' in kwargs else args[0]
    output_dtypes = kwargs['dtypes'] if 'dtypes' in kwargs else args[2]
    output_dtypes = [dtypes.as_dtype(d) for d in output_dtypes]

    if self._return_as_is or op_type in _PASS_THROUGH_OPS:
      return self._wrap(super(ImperativeGraph, self).create_op(*args, **kwargs))

    if not output_dtypes:
      return self._wrap(
          super(ImperativeGraph, self).create_op(*args, **kwargs))

    output_has_ref = any([dtype._is_ref_dtype for dtype in output_dtypes])  # pylint: disable=protected-access

    if output_has_ref:
      if op_type not in _REF_OPS_WHITELIST:
        raise errors.UnimplementedError(None, None,
                                        op_type + ' op not supported in '
                                        'imperative graph')

      ret = super(ImperativeGraph, self).create_op(*args, **kwargs)

      if self._in_variable_creation:
        if op_type == 'Assign':
          self.add_pending_init(ret)

      return self._wrap(ret)

    with self.return_as_is():
      # Declares the variables to hold the output values of this op.
      op_output_var = [state_ops.variable_op_v2(
          tensor_shape.TensorShape(None), dtype, container=self._name)
                       for dtype in output_dtypes]
      # Ops to free the resources used by the temporary cache variables.
      # The following two ops are created for each cache variable,
      # having no control dependencies on any other ops:
      # var_handle_op ----> destroy_resource_op
      for dtype, v in zip(output_dtypes, op_output_var):
        with ops.control_dependencies(None):
          self._variable_cleanup_ops += [
              gen_resource_variable_ops.destroy_resource_op(
                  gen_resource_variable_ops.var_handle_op(
                      dtype, tensor_shape.TensorShape(None),
                      container=self._name, shared_name=v.op.name),
                  ignore_lookup_error=True)]

      # Create the conditional to run the original op only when the variable
      # corresponding to the first output is not initialized.
      inited = state_ops.is_variable_initialized(op_output_var[0])
      v_f, v_t = control_flow_ops.ref_switch(op_output_var[0], inited)
      # pylint: disable=protected-access
      v_f_op = gen_array_ops._ref_identity(v_f)
      v_t_op = gen_array_ops._ref_identity(v_t)
      # pylint: enable=protected-access

      with ops.control_dependencies([v_f_op.op]):
        # Create the original op
        orig_op = self._wrap(
            super(ImperativeGraph, self).create_op(*args, **kwargs))
      shapes = [val.get_shape() for val in orig_op.outputs]

      controls = []
      for var, val in zip(op_output_var, orig_op.outputs):
        if (not val.get_shape().is_fully_defined() or
            val.get_shape().num_elements() > 0):
          assign_op = state_ops.assign(var, val, validate_shape=False)
          assign_op.set_shape(val.get_shape())
          controls.append(assign_op)

      values = []
      if len(controls) > 1:
        if control_flow_ops.IsSwitch(orig_op):
          # pylint: disable=protected-access
          controls = gen_control_flow_ops._ref_merge(controls)
          # pylint: enable=protected-access
        else:
          controls = control_flow_ops.tuple(controls)

      for var, val in zip(op_output_var, orig_op.outputs):
        with ops.control_dependencies(controls):
          with self.colocate_with(v_f_op):
            real_val = array_ops.identity(val)
        with ops.control_dependencies([v_t_op.op]):
          with self.colocate_with(v_t_op):
            stored_val = array_ops.identity(var)
          stored_val.set_shape(val.get_shape())
          real_val, _ = control_flow_ops.merge([real_val, stored_val])
        real_val.op.node_def.attr['_gradient_op_type'].CopyFrom(
            attr_value_pb2.AttrValue(s=compat.as_bytes(self._merge_op_type)))
        values.append(real_val)

      for i, _ in enumerate(shapes):
        values[i].set_shape(shapes[i])
      self._outputs_map[orig_op.name] = values
      try:
        self._gradient_function_map[orig_op.name] = ops.get_gradient_function(
            orig_op)
      except (KeyError, LookupError):
        pass
      else:
        orig_op.node_def.attr['_gradient_op_type'].CopyFrom(
            attr_value_pb2.AttrValue(
                s=compat.as_bytes(self._imperative_op_type)))

      return MultiOutputOperation(values, orig_op)
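
# The docstring above describes a run-once-then-cache transformation. A pure-
# Python analogue of that behaviour (illustrative only; the graph version
# builds cache variables, ref switches, and merges instead of Python state):
class _RunOnceCache(object):
    """Calls fn on first use, then serves the cached value."""

    def __init__(self, fn):
        self._fn = fn
        self._initialized = False
        self._value = None

    def __call__(self, *args, **kwargs):
        if not self._initialized:
            self._value = self._fn(*args, **kwargs)
            self._initialized = True
        return self._value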

Example #41
    def create_op(self, *args, **kwargs):
        """Creates an `Operation`.

    For operations of the following form

      orig_value = op(*args, **kwargs)

    this function constructs the following subgraph:

      v = Variable()
      if v is not initialized:
        orig_value = op(*args, **kwargs)
        v.assign(orig_value) # Initializes v
        return orig_value
      else:
        return v

    The above transformation is not performed, and the original op is returned
    as is, if any of the following is true:
    * The `_return_as_is` flag is set to true.
    * `op_type` is listed in `_PASS_THROUGH_OPS`.
    * The op has no outputs.
    * One of the op's return values has a ref type.

    Args:
      *args: Arguments for create_op()
      **kwargs: Keyword arguments for create_op(). Refer to
        tensorflow.python.framework.ops.Graph.create_op() for the mandatory
        and optional arguments.

    Returns:
      An Operation.

    Raises:
      UnimplementedError: if output type is a reference and the op's type
        is not one of the supported types in `_REF_OPS_WHITELIST`.
    """
        op_type = kwargs['op_type'] if 'op_type' in kwargs else args[0]
        output_dtypes = kwargs['dtypes'] if 'dtypes' in kwargs else args[2]
        output_dtypes = [dtypes.as_dtype(d) for d in output_dtypes]

        if self._return_as_is or op_type in _PASS_THROUGH_OPS:
            return self._wrap(
                super(ImperativeGraph, self).create_op(*args, **kwargs))

        if not output_dtypes:
            return self._wrap(
                super(ImperativeGraph, self).create_op(*args, **kwargs))

        output_has_ref = any([dtype._is_ref_dtype for dtype in output_dtypes])  # pylint: disable=protected-access

        if output_has_ref:
            if op_type not in _REF_OPS_WHITELIST:
                raise errors.UnimplementedError(
                    None, None, op_type + ' op not supported in '
                    'imperative graph')

            ret = super(ImperativeGraph, self).create_op(*args, **kwargs)

            if self._in_variable_creation:
                if op_type == 'Assign':
                    self.add_pending_init(ret)

            return self._wrap(ret)

        with self.return_as_is():
            # Declares the variables to hold the output values of this op.
            op_output_var = [
                state_ops.variable_op_v2(tensor_shape.TensorShape(None),
                                         dtype,
                                         container=self._name)
                for dtype in output_dtypes
            ]
            # Ops to free the resources used by the temporary cache variables.
            # The following two ops are created for each cache variable,
            # having no control dependencies on any other ops:
            # var_handle_op ----> destroy_resource_op
            for dtype, v in zip(output_dtypes, op_output_var):
                with ops.control_dependencies(None):
                    self._variable_cleanup_ops += [
                        gen_resource_variable_ops.destroy_resource_op(
                            gen_resource_variable_ops.var_handle_op(
                                dtype,
                                tensor_shape.TensorShape(None),
                                container=self._name,
                                shared_name=v.op.name),
                            ignore_lookup_error=True)
                    ]

            # Create the conditional to run the original op only when the variable
            # corresponding to the first output is not initialized.
            inited = state_ops.is_variable_initialized(op_output_var[0])
            v_f, v_t = control_flow_ops.ref_switch(op_output_var[0], inited)
            # pylint: disable=protected-access
            v_f_op = gen_array_ops._ref_identity(v_f)
            v_t_op = gen_array_ops._ref_identity(v_t)
            # pylint: enable=protected-access

            with ops.control_dependencies([v_f_op.op]):
                # Create the original op
                orig_op = self._wrap(
                    super(ImperativeGraph, self).create_op(*args, **kwargs))
            shapes = [val.get_shape() for val in orig_op.outputs]

            controls = []
            for var, val in zip(op_output_var, orig_op.outputs):
                if (not val.get_shape().is_fully_defined()
                        or val.get_shape().num_elements() > 0):
                    assign_op = state_ops.assign(var,
                                                 val,
                                                 validate_shape=False)
                    assign_op.set_shape(val.get_shape())
                    controls.append(assign_op)

            values = []
            if len(controls) > 1:
                if control_flow_ops.IsSwitch(orig_op):
                    # pylint: disable=protected-access
                    controls = gen_control_flow_ops._ref_merge(controls)
                    # pylint: enable=protected-access
                else:
                    controls = control_flow_ops.tuple(controls)

            for var, val in zip(op_output_var, orig_op.outputs):
                with ops.control_dependencies(controls):
                    with self.colocate_with(v_f_op):
                        real_val = array_ops.identity(val)
                with ops.control_dependencies([v_t_op.op]):
                    with self.colocate_with(v_t_op):
                        stored_val = array_ops.identity(var)
                    stored_val.set_shape(val.get_shape())
                    real_val, _ = control_flow_ops.merge(
                        [real_val, stored_val])
                real_val.op.node_def.attr['_gradient_op_type'].CopyFrom(
                    attr_value_pb2.AttrValue(
                        s=compat.as_bytes(self._merge_op_type)))
                values.append(real_val)

            for i, _ in enumerate(shapes):
                values[i].set_shape(shapes[i])
            self._outputs_map[orig_op.name] = values
            try:
                self._gradient_function_map[
                    orig_op.name] = ops.get_gradient_function(orig_op)
            except (KeyError, LookupError):
                pass
            else:
                orig_op.node_def.attr['_gradient_op_type'].CopyFrom(
                    attr_value_pb2.AttrValue(
                        s=compat.as_bytes(self._imperative_op_type)))

            return MultiOutputOperation(values)
Example #42
def handler(queue, kvm_fd, mm):
    global object_dict
    global object_id
    global callback_stack

    global initialized
    if not initialized:
        callback_stack = []
        object_dict = dict()
        object_id = 1
        # TODO: forward logging or disable it in test
        tf.logging.set_verbosity(tf.logging.INFO)
        initialized = True
        print("handler is initialized")

    while True:
        task = None
        task = queue.get(block=True)

        while task is None:
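            # Poll with a timeout so the callback-deadline check below still
            # runs while this handler waits for its next task.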
            try:
                task = queue.get(block=True, timeout=5)
            except Queue.Empty:
                task = None
            if callback_stack:
                if time.time() > callback_stack[-1]["deadline"]:
                    print("callback failed deadline")
                    return STATUS_CALLBACK_TIMEOUT

        vm_id = task.vm_id
        if vm_id == STOP_HANDLER:
            break
        param = TF_PY_PARAM.from_buffer(mm, task.data_ptr)
        callback_param = TF_PY_PARAM.from_buffer(
            mm, task.data_ptr + param.base.callback_param_offset)
        print(
            "retrieve [vm#%d] tensorflow task=%d cmd=%d, obj=%d, dstore=%lx, done=%d"
            % (task.vm_id, task.node_id, param.base.cmd_id,
               param.base.object_id, param.base.dstore_size, param.base.done))
        print(
            "retrieve [vm#%d] callback node cmd=%d, obj=%d, dstore=%lx, done=%d"
            % (task.vm_id, callback_param.base.cmd_id,
               callback_param.base.object_id, callback_param.base.dstore_size,
               callback_param.base.done))

        cmd_id = param.base.cmd_id

        try:
            if cmd_id == TF_PY_NW_CALLBACK_DONE:
                param.base.done = STATUS_TASK_DONE
                ret = fcntl.ioctl(kvm_fd, IOCTL_KVM_NOTIFY_TASK_FINISHED,
                                  task.node_id)
                if ret < 0:
                    print("notify task completion failed: %d\n" % ret)
                if callback_stack and \
                   callback_stack[-1]["callback_id"] == param.base.object_id:
                    print("callback is finished")
                    return STATUS_CALLBACK_DONE
                else:
                    print("callback is error")
                    return STATUS_CALLBACK_ERROR

            if cmd_id == TF_PY_SESSION_INIT:
                print("SessionInit!!!")
                param1 = parse_param(vm_id, mm, param, param.param1)
                print(param1)
                sess = tf.Session(param1)

                # assign object_id
                object_dict[object_id] = sess
                param.base.object_id = object_id
                object_id += 1
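            # In this variant results travel back through the shared param
            # block: writeback_result() for by-value data, or
            # param.base.object_id for host-side handles, rather than a
            # separate return command as in the channel-based handler above.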

            elif cmd_id == TF_PY_SESSION_ENTER:
                sess = object_dict[param.base.object_id]
                ctx_sess = sess.__enter__()
                if sess is ctx_sess:
                    pass
                else:  # unlikely
                    print("context manager returned a different session; "
                          "searching object_dict for it")
                    param.base.object_id = next(
                        obj_id for obj_id, obj in object_dict.items()
                        if obj is ctx_sess)

            elif cmd_id == TF_PY_SESSION_EXIT:
                param1 = parse_param(vm_id, mm, param, param.param1)
                param2 = parse_param(vm_id, mm, param, param.param2)
                param3 = parse_param(vm_id, mm, param, param.param3)

                sess = object_dict[param.base.object_id]
                sess.__exit__(param1, param2, param3)

            elif cmd_id == TF_PY_SESSION_DEL:
                sess = object_dict[param.base.object_id]
                sess.__del__()

            # deprecated
            elif cmd_id == TF_PY_SESSION_RUN:
                sess = object_dict[param.base.object_id]
                param1 = parse_param(vm_id, mm, param, param.param1)

                if type(param1) == NwObject:
                    print("get NwObject=%d" % param1.object_id())
                    param1 = object_dict[param1.object_id()]
                    print(param1)

                ret_val = sess.run(param1)
                print(ret_val)

                writeback_result(vm_id, mm, param, param.ret_val1, ret_val)

            elif cmd_id == TF_PY_TPU_CLUSTER_RESOLVER_INIT:
                print("resloverInit!!!")
                param1 = parse_param(vm_id, mm, param, param.param1)
                param2 = parse_param(vm_id, mm, param, param.param2)
                param3 = parse_param(vm_id, mm, param, param.param3)
                if param1 is None:
                    param1 = None
                if param2 is None:
                    param2 = None
                if param3 is None:
                    param3 = None
                print("TPUClusterResolver", param1, param2, param3)
                tpu_grpc = tf.contrib.cluster_resolver.TPUClusterResolver(
                    tpu=param1, zone=param2, project=param3)

                # assign object_id
                object_dict[object_id] = tpu_grpc
                param.base.object_id = object_id
                print("assign obj_id=%d" % object_id)
                object_id += 1

            # deprecated
            elif cmd_id == TF_PY_TPU_CLUSTER_RESOLVER_MASTER:
                # FIXED: use __getattr__
                print("master!!")
                tpu_grpc = object_dict[param.base.object_id]
                # FIXED: may have parameters
                tpu_grpc_url = tpu_grpc.master()

                # serialize return value
                writeback_result(vm_id, mm, param, param.ret_val1,
                                 tpu_grpc_url)

            elif cmd_id == TF_PY_TPU_INITIALIZE_SYSTEM:
                # TODO: may have parameters
                ts = tpu.initialize_system()

                object_dict[object_id] = ts
                param.base.object_id = object_id
                object_id += 1

            elif cmd_id == TF_PY_TPU_SHUTDOWN_SYSTEM:
                # TODO: may have parameters
                ts = tpu.shutdown_system()

                object_dict[object_id] = ts
                param.base.object_id = object_id
                object_id += 1

            elif cmd_id == TF_PY_GLOBAL_VARIABLES_INITIALIZER:
                # TODO: may have parameters
                ts = tf.global_variables_initializer()

                object_dict[object_id] = ts
                param.base.object_id = object_id
                object_id += 1

            elif cmd_id == TF_PY_ONES:
                print("param1 size=%ld,offset=%ld" %
                      (param.param1.size, param.param1.offset))
                param1 = parse_param(vm_id, mm, param, param.param1)
                param2 = parse_param(vm_id, mm, param, param.param2)
                if param2 is None:
                    param2 = dtypes.float32
                print(param2)
                var = tf.ones(param1, param2)

                object_dict[object_id] = var
                param.base.object_id = object_id
                object_id += 1

            elif cmd_id == TF_PY_RANDOM_UNIFORM:
                param1 = parse_param(vm_id, mm, param, param.param1)
                param2 = parse_param(vm_id, mm, param, param.param2)
                param3 = parse_param(vm_id, mm, param, param.param3)
                param4 = parse_param(vm_id, mm, param, param.param4)
                param5 = parse_param(vm_id, mm, param, param.param5)
                param6 = parse_param(vm_id, mm, param, param.param6)
                if param2 is None:
                    param2 = 0
                if param4 is None:
                    param4 = dtypes.float32
                print(param1, param2, param3, param4)
                var = tf.random_uniform(param1, param2, param3, param4, param5,
                                        param6)

                object_dict[object_id] = var
                param.base.object_id = object_id
                object_id += 1

            elif cmd_id == TF_PY_TRANSPOSE:
                param1 = parse_param(vm_id, mm, param, param.param1)
                param2 = parse_param(vm_id, mm, param, param.param2)
                param3 = parse_param(vm_id, mm, param, param.param3)
                param4 = parse_param(vm_id, mm, param, param.param4)
                param1 = object_dict[param1.object_id()]
                if param3 is None:
                    param3 = "transpose"
                if param4 is None:
                    param4 = False
                print("transpose", param1, param2, param3, param4)
                var = tf.transpose(param1, param2, param3, param4)

                object_dict[object_id] = var
                param.base.object_id = object_id
                object_id += 1

            elif cmd_id == TF_PY_CAST:
                param1 = parse_param(vm_id, mm, param, param.param1)
                param2 = parse_param(vm_id, mm, param, param.param2)
                param3 = parse_param(vm_id, mm, param, param.param3)
                param1 = object_dict[param1.object_id()]
                print("cast", param1, param2, param3)
                var = tf.cast(param1, param2, param3)

                object_dict[object_id] = var
                param.base.object_id = object_id
                object_id += 1

            elif cmd_id == TF_PY_EXPAND_DIMS:
                param1 = parse_param(vm_id, mm, param, param.param1)
                param2 = parse_param(vm_id, mm, param, param.param2)
                param3 = parse_param(vm_id, mm, param, param.param3)
                param4 = parse_param(vm_id, mm, param, param.param4)
                param1 = object_dict[param1.object_id()]
                print("expand_dims", param1, param2, param3, param4)
                var = tf.expand_dims(param1, param2, param3, param4)

                object_dict[object_id] = var
                param.base.object_id = object_id
                object_id += 1

            elif cmd_id == TF_PY_CONCAT:
                param1 = parse_param(vm_id, mm, param, param.param1)
                param2 = parse_param(vm_id, mm, param, param.param2)
                param3 = parse_param(vm_id, mm, param, param.param3)
                param1 = object_dict[param1.object_id()]
                if param3 is None:
                    param3 = "concat"
                print("concat", param1, param2, param3)
                var = tf.concat(param1, param2, param3)

                object_dict[object_id] = var
                param.base.object_id = object_id
                object_id += 1

            elif cmd_id == TF_PY_EQUAL:
                param1 = parse_param(vm_id, mm, param, param.param1)
                param2 = parse_param(vm_id, mm, param, param.param2)
                param3 = parse_param(vm_id, mm, param, param.param3)
                param1 = object_dict[param1.object_id()]
                print("equal", param1, param2, param3)
                if isinstance(param2, NwObject):
                    param2 = object_dict[param2.object_id()]
                result = tf.equal(param1, param2, param3)
                print(result)

                object_dict[object_id] = result
                param.base.object_id = object_id
                object_id += 1

            elif cmd_id == TF_PY_FIXED_LEN_FEATURE:
                param1 = parse_param(vm_id, mm, param, param.param1)
                param2 = parse_param(vm_id, mm, param, param.param2)
                param3 = parse_param(vm_id, mm, param, param.param3)

                feature = tf.FixedLenFeature(param1, param2, param3)
                print(feature)

                object_dict[object_id] = feature
                param.base.object_id = object_id
                object_id += 1

            elif cmd_id == TF_PY_VAR_LEN_FEATURE:
                param1 = parse_param(vm_id, mm, param, param.param1)

                feature = tf.VarLenFeature(param1)
                print(feature)

                object_dict[object_id] = feature
                param.base.object_id = object_id
                object_id += 1

            elif cmd_id == TF_PY_PARSE_SINGLE_EXAMPLE:
                param1 = parse_param(vm_id, mm, param, param.param1)
                param2 = parse_param(vm_id, mm, param, param.param2)
                param3 = parse_param(vm_id, mm, param, param.param3)
                param4 = parse_param(vm_id, mm, param, param.param4)
                print(param1, param2)

                # expand embedded NwObject
                if isinstance(param1, NwObject):
                    param1 = object_dict[param1.object_id()]
                dict_walker(param2)
                print("after translation", param1, param2)

                result = tf.parse_single_example(param1, param2, param3,
                                                 param4)
                print(result)
                dict_mapper(result)
                print(result)
                writeback_result(vm_id, mm, param, param.ret_val1, result)

            elif cmd_id == TF_PY_CONTROL_FLOW_OPS_SWITCH:
                param1 = parse_param(vm_id, mm, param, param.param1)
                param2 = parse_param(vm_id, mm, param, param.param2)
                param3 = parse_param(vm_id, mm, param, param.param3)
                param4 = parse_param(vm_id, mm, param, param.param4)
                param1 = object_dict[param1.object_id()]
                param2 = object_dict[param2.object_id()]
                print("switch", param1, param2, param3, param4)
                result = control_flow_ops.switch(param1, param2, param3,
                                                 param4)
                print(result)

                mapped_tuple = tuple_mapper(result, [0, 1])
                print(mapped_tuple)
                writeback_result(vm_id, mm, param, param.ret_val1,
                                 mapped_tuple)

            elif cmd_id == TF_PY_CONTROL_FLOW_OPS_MERGE:
                param1 = parse_param(vm_id, mm, param, param.param1)
                param2 = parse_param(vm_id, mm, param, param.param2)
                param1 = object_dict[param1.object_id()]
                print("merge", param1, param2)
                list_walker(param1)
                print("merge-new", param1, param2)
                result = control_flow_ops.merge(param1, param2)
                print(result)

                mapped_tuple = tuple_mapper(result, [0])
                print(mapped_tuple)
                writeback_result(vm_id, mm, param, param.ret_val1,
                                 mapped_tuple)

            elif cmd_id == TF_PY_TPU_REWRITE:
                # TODO: may have parameters
                param1 = parse_param(vm_id, mm, param, param.param1)
                param2 = parse_param(vm_id, mm, param, param.param2)
                # default parameter
                if param2 is None:
                    param2 = None
                # expand embedded NwObject
                list_walker(param2)
                func = tpu.rewrite(param1, param2)

                object_dict[object_id] = func
                param.base.object_id = object_id
                print("rewrite object_id=%d" % object_id)
                object_id += 1

            elif cmd_id == TF_PY_TPU_RUN_CONFIG:
                param1 = parse_param(vm_id, mm, param, param.param1)
                param2 = parse_param(vm_id, mm, param, param.param2)
                param3 = parse_param(vm_id, mm, param, param.param3)
                param4 = parse_param(vm_id, mm, param, param.param4)
                param5 = parse_param(vm_id, mm, param, param.param5)
                # default parameter
                if param1 is None:
                    param1 = None
                if param2 is None:
                    param2 = None
                if param3 is None:
                    param3 = None
                if param4 is None:
                    param4 = None

                # expand embedded NwObject
                param4 = object_dict[param4.object_id()]
                print(param4, param5)
                func = tpu.RunConfig(param1, param2, param3, param4, **param5)

                object_dict[object_id] = func
                param.base.object_id = object_id
                object_id += 1

            elif cmd_id == TF_PY_TPU_TPU_ESTIMATOR:
                param1 = parse_param(vm_id, mm, param, param.param1)
                param2 = parse_param(vm_id, mm, param, param.param2)
                param3 = parse_param(vm_id, mm, param, param.param3)
                param4 = parse_param(vm_id, mm, param, param.param4)
                param5 = parse_param(vm_id, mm, param, param.param5)
                param6 = parse_param(vm_id, mm, param, param.param6)
                param7 = parse_param(vm_id, mm, param, param.param7)
                param8 = parse_param(vm_id, mm, param, param.param8)
                param9 = parse_param(vm_id, mm, param, param.param9)
                param10 = parse_param(vm_id, mm, param, param.param10)
                param11 = parse_param(vm_id, mm, param, param.param11)
                param12 = parse_param(vm_id, mm, param, param.param12)
                # default parameter
                if param1 is None:
                    param1 = None
                if param2 is None:
                    param2 = None
                if param3 is None:
                    param3 = None
                if param4 is None:
                    param4 = None
                if param5 is None:
                    param5 = True
                if param6 is None:
                    param6 = None
                if param7 is None:
                    param7 = None
                if param8 is None:
                    param8 = None
                if param9 is None:
                    param9 = None
                if param10 is None:
                    param10 = True
                if param11 is None:
                    param11 = True
                if param12 is None:
                    param12 = None

                # expand embedded NwObject
                param3 = object_dict[param3.object_id()]
                print(param3)
                func = tpu.TPUEstimator(param1, param2, param3, param4, param5,
                                        param6, param7, param8, param9,
                                        param10, param11, param12)

                object_dict[object_id] = func
                param.base.object_id = object_id
                object_id += 1

            elif cmd_id == TF_PY_IMAGE_RESIZE_IMAGES:
                param1 = parse_param(vm_id, mm, param, param.param1)
                param2 = parse_param(vm_id, mm, param, param.param2)
                param3 = parse_param(vm_id, mm, param, param.param3)
                param4 = parse_param(vm_id, mm, param, param.param4)
                param5 = parse_param(vm_id, mm, param, param.param5)
                # default parameter
                if param3 is None:
                    param3 = ResizeMethod.BILINEAR
                if param4 is None:
                    param4 = False
                if param5 is None:
                    param5 = False

                # expand embedded NwObject
                param1 = object_dict[param1.object_id()]
                print(param1)
                img = tf.image.resize_images(param1, param2, param3, param4,
                                             param5)

                # TODO: it may return a float
                object_dict[object_id] = img
                param.base.object_id = object_id
                object_id += 1

            elif cmd_id == TF_PY_SLICE:
                param1 = parse_param(vm_id, mm, param, param.param1)
                param2 = parse_param(vm_id, mm, param, param.param2)
                param3 = parse_param(vm_id, mm, param, param.param3)
                param4 = parse_param(vm_id, mm, param, param.param4)

                # expand embedded NwObject
                print(param1, param2, param3)
                param1 = object_dict[param1.object_id()]
                ret = tf.slice(param1, param2, param3, param4)

                object_dict[object_id] = ret
                param.base.object_id = object_id
                object_id += 1

            elif cmd_id == TF_PY_SHAPE:
                param1 = parse_param(vm_id, mm, param, param.param1)
                param2 = parse_param(vm_id, mm, param, param.param2)
                param3 = parse_param(vm_id, mm, param, param.param3)
                if param3 is None:
                    param3 = dtypes.int32

                # expand embedded NwObject
                print(param1, param2, param3)
                param1 = object_dict[param1.object_id()]
                ret = tf.shape(param1, param2, param3)

                object_dict[object_id] = ret
                param.base.object_id = object_id
                object_id += 1

            elif cmd_id == TF_PY_IMAGE_SAMPLE_DISTORTED_BOUNDING_BOX:
                param1 = parse_param(vm_id, mm, param, param.param1)
                param2 = parse_param(vm_id, mm, param, param.param2)
                param3 = parse_param(vm_id, mm, param, param.param3)
                param4 = parse_param(vm_id, mm, param, param.param4)
                param5 = parse_param(vm_id, mm, param, param.param5)
                param6 = parse_param(vm_id, mm, param, param.param6)
                param7 = parse_param(vm_id, mm, param, param.param7)
                param8 = parse_param(vm_id, mm, param, param.param8)
                param9 = parse_param(vm_id, mm, param, param.param9)
                param10 = parse_param(vm_id, mm, param, param.param10)
                # default parameter
                if param5 is None:
                    param5 = 0.1

                print("sample_distorted_bounding_box", param1, param2)
                result = tf.image.sample_distorted_bounding_box(
                    param1, param2, param3, param4, param5, param6, param7,
                    param8, param9, param10)
                print(result)

                mapped_tuple = tuple_mapper(result, [0, 1, 2])
                print(mapped_tuple)
                writeback_result(vm_id, mm, param, param.ret_val1,
                                 mapped_tuple)

            elif cmd_id == TF_PY_IMAGE_DRAW_BOUNDING_BOXES:
                param1 = parse_param(vm_id, mm, param, param.param1)
                param2 = parse_param(vm_id, mm, param, param.param2)
                param3 = parse_param(vm_id, mm, param, param.param3)

                # expand embedded NwObject
                print(param1, param2, param3)
                param1 = object_dict[param1.object_id()]
                param2 = object_dict[param2.object_id()]
                ret = tf.image.draw_bounding_boxes(param1, param2, param3)

                object_dict[object_id] = ret
                param.base.object_id = object_id
                object_id += 1

            elif cmd_id == TF_PY_IMAGE_DECODE_JPEG:
                param1 = parse_param(vm_id, mm, param, param.param1)
                param2 = parse_param(vm_id, mm, param, param.param2)
                param3 = parse_param(vm_id, mm, param, param.param3)
                param4 = parse_param(vm_id, mm, param, param.param4)
                param5 = parse_param(vm_id, mm, param, param.param5)
                param6 = parse_param(vm_id, mm, param, param.param6)
                param7 = parse_param(vm_id, mm, param, param.param7)
                param8 = parse_param(vm_id, mm, param, param.param8)

                if param2 is None:
                    param2 = 0
                if param3 is None:
                    param3 = 1
                if param4 is None:
                    param4 = True
                if param5 is None:
                    param5 = False
                if param6 is None:
                    param6 = 1
                if param7 is None:
                    param7 = ""
                param1 = object_dict[param1.object_id()]
                img = tf.image.decode_jpeg(param1, param2, param3, param4,
                                           param5, param6, param7, param8)

                object_dict[object_id] = img
                param.base.object_id = object_id
                object_id += 1

            elif cmd_id == TF_PY_IMAGE_CONVERT_IMAGE_DTYPE:
                param1 = parse_param(vm_id, mm, param, param.param1)
                param2 = parse_param(vm_id, mm, param, param.param2)
                param3 = parse_param(vm_id, mm, param, param.param3)
                param4 = parse_param(vm_id, mm, param, param.param4)

                # expand embedded NwObject
                print(param1, param2, param3)
                param1 = object_dict[param1.object_id()]
                if param3 is None:
                    param3 = False
                ret = tf.image.convert_image_dtype(param1, param2, param3,
                                                   param4)

                object_dict[object_id] = ret
                param.base.object_id = object_id
                object_id += 1

            elif cmd_id == TF_PY_DATA_DATASET_LIST_FILES:
                param1 = parse_param(vm_id, mm, param, param.param1)
                param2 = parse_param(vm_id, mm, param, param.param2)
                param3 = parse_param(vm_id, mm, param, param.param3)

                print(param1, param2, param3)
                if isinstance(param1, NwObject):
                    param1 = object_dict[param1.object_id()]
                ret = tf.data.Dataset.list_files(param1, param2, param3)

                object_dict[object_id] = ret
                param.base.object_id = object_id
                object_id += 1

            elif cmd_id == TF_PY_NW_OBJECT:
                print("nw_object!! id = %d" % param.base.object_id)
                obj = object_dict[param.base.object_id]
                name = parse_param(vm_id, mm, param, param.param1)
                args = parse_param(vm_id, mm, param, param.param2)
                kwargs = parse_param(vm_id, mm, param, param.param3)
                print("NwObject", obj, name, args, kwargs)

                # expand embedded NwObject
                args = list(args)
                list_walker(args)
                args = tuple(args)
                dict_walker(kwargs)
                print("after translation", obj, name, args, kwargs)

                # run
                result = getattr(obj, name)(*(args or []), **(kwargs or {}))
                param.base.object_id = -1
                param.ret_val1.size = 0
                print("analyze type", type(result), result)

                # TODO: go through tuple, dict or list
                if isinstance(result, tuple):
                    result = tuple_mapper(result, range(len(result)))
                if isinstance(result, dict):
                    dict_mapper(result)
                if isinstance(result, list):
                    list_mapper(result)

                # serialize return value
                if is_unpickleable_type(result) or \
                   pickle.pickles(result) is False:
                    object_dict[object_id] = result
                    param.base.object_id = object_id
                    object_id += 1

                elif result is not None:
                    writeback_result(vm_id, mm, param, param.ret_val1, result)

            elif cmd_id == TF_PY_NW_METHOD:
                # Reuse as callback

                #ins = parse_param(vm_id, mm, param, param.param1)
                #name = parse_param(vm_id, mm, param, param.param2)
                #print(ins, name)

                #method = getattr(ins, name)
                #print(method)
                #object_dict[object_id] = method

                cw = callback_constructor(object_id, callback_param, param, mm,
                                          vm_id, queue, kvm_fd)
                object_dict[object_id] = cw
                param.base.object_id = object_id
                object_id += 1

            elif cmd_id == TF_PY_NW_CALLBACK_TEST:
                nw_func = parse_param(vm_id, mm, param, param.param1)
                print(nw_func, nw_func.object_id())
                func = object_dict[nw_func.object_id()]
                print("callback func", func)
                x = parse_param(vm_id, mm, param, param.param2)
                y = parse_param(vm_id, mm, param, param.param3)
                result = func(x, y)
                print(result)
                writeback_result(vm_id, mm, param, param.ret_val1, result)

            else:
                print("unsupported Tensorflow API")

        except Exception as error:
            param.base.done = STATUS_TASK_ERROR
            #mm.flush(task.data_ptr, sizeof(PARAM_BASE))

            print("fault: ", str(error))
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print(exc_type, fname, exc_tb.tb_lineno)
            traceback.print_stack()

        print("finished [vm#%d] TF task %d cmd %d" %
              (task.vm_id, task.node_id, param.base.cmd_id))

        param.base.done = STATUS_TASK_DONE
        #mm.flush(task.data_ptr, sizeof(PARAM_BASE))
        #mm.flush(INVOKER_FIFO_SIZE + VGPU_DSTORE_SIZE * (vm_id - 1) +
        #         param.base.dstore_offset + param.ret_val1.offset,
        #         param.ret_val1.size)

        # notify hypervisor
        ret = fcntl.ioctl(kvm_fd, IOCTL_KVM_NOTIFY_TASK_FINISHED, task.node_id)
        if ret < 0:
            print("notify task completion failed: %d\n" % ret)
Example #43
def main(argv):
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')

  shutil.rmtree(FLAGS.saved_model_path)

  # The following test creates two signatures, each of which contains a
  # switch-merge construct that will be functionalized to the tf.If op. However,
  # `then` and `else` branches' arguments are deliberately made different
  # between these two model signatures, in order to trigger error in cases these
  # branches are functionalized to functions with the same function name.

  data_0 = array_ops.constant([1, 2, 3, 4, 5, 6])
  data_1 = array_ops.constant([2, 3, 4, 5, 6, 7])
  # Create placeholders to prevent constant folding.
  x_op = array_ops.placeholder(dtype=dtypes.int32)
  y_op = array_ops.placeholder(dtype=dtypes.int32)
  less_op = math_ops.less(x_op, y_op)
  switch_0_op = control_flow_ops.switch(data_0, less_op)
  switch_1_op = control_flow_ops.switch(data_1, less_op)

  # merge_0_op will be functionalized to a tf.If op with only one argument
  # `data_0` in addition to the condition `less_op`.
  merge_0_op = control_flow_ops.merge(switch_0_op)[0]

  # merge_1_op will be functionalized to a tf.If op with two arguments, `data_0`
  # and `data_1` in addition to the condition `less_op`.
  merge_1_op = control_flow_ops.merge([switch_0_op[0], switch_1_op[1]])[0]

  result = merge_0_op
  result_1 = merge_1_op

  sess = session.Session()

  sm_builder = builder.SavedModelBuilder(FLAGS.saved_model_path)
  tensor_info_x = utils.build_tensor_info(x_op)
  tensor_info_y = utils.build_tensor_info(y_op)
  tensor_info_result = utils.build_tensor_info(result)
  tensor_info_result_1 = utils.build_tensor_info(result_1)

  signature = (
      signature_def_utils.build_signature_def(
          inputs={
              'x': tensor_info_x,
              'y': tensor_info_y
          },
          outputs={'result': tensor_info_result},
          method_name=signature_constants.PREDICT_METHOD_NAME))

  signature_1 = (
      signature_def_utils.build_signature_def(
          inputs={
              'x': tensor_info_x,
              'y': tensor_info_y
          },
          outputs={'result_1': tensor_info_result_1},
          method_name=signature_constants.PREDICT_METHOD_NAME))

  sm_builder.add_meta_graph_and_variables(
      sess, [tag_constants.SERVING],
      signature_def_map={
          'sig': signature,
          'sig_1': signature_1,
      },
      strip_default_attrs=True)
  sm_builder.save()
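
# A minimal sketch (not part of the original test) of how the exported
# signatures could be loaded back and run. The `loader` and `ops` imports are
# assumptions, mirroring the TF1-style imports this script already uses.
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import loader

def run_sig(export_dir, x=1, y=2):
  with session.Session(graph=ops.Graph()) as load_sess:
    meta_graph_def = loader.load(load_sess, [tag_constants.SERVING],
                                 export_dir)
    sig = meta_graph_def.signature_def['sig']
    feeds = {
        load_sess.graph.get_tensor_by_name(sig.inputs['x'].name): x,
        load_sess.graph.get_tensor_by_name(sig.inputs['y'].name): y,
    }
    fetch = load_sess.graph.get_tensor_by_name(sig.outputs['result'].name)
    # 'sig' computes merge_0_op, which merges both outputs of switch_0_op and
    # therefore forwards data_0 for either predicate value.
    return load_sess.run(fetch, feed_dict=feeds)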
Example #44
    def _build_net(
        self,
        state_inputs,
        batch_actions,
        scope,
        trainable,
        online_action_outputs=None,  # None for the target net; used when updating the actor (differentiating w.r.t. a, the actor's output).
        cond_training_q=None  # bool tensor to control the switch; can be None for the target net.
    ):
        '''
        :param state_inputs: batch of state images.
        :param batch_actions: serves two purposes: (1) when training Q, the placeholder for the sampled batch of actions; (2) in the target net, the reference to a'.
        :param scope: variable scope name.
        :param trainable: whether the created variables are trainable.
        :param online_action_outputs: None for the target net; used when updating the actor (differentiating w.r.t. a, the actor net's output).
        :param cond_training_q: boolean tensor selecting batch_actions (when training Q) or online_action_outputs (when training the actor); see the standalone switch/merge sketch after this method.
        :return: list of per-agent Q-value tensors.
        '''
        with tf.variable_scope(scope):
            # input: 84 * 84 * 3
            with tf.variable_scope("conv1"):
                # conv 1
                filter1 = self._weight_variable([8, 8, self.s_images_dim, 16],
                                                trainable)
                b1 = self._bias_variable([16], trainable)
                conv1 = tf.nn.relu(
                    self._conv2d(state_inputs, filter1, stride=[1, 4, 4, 1]) +
                    b1)
            # conv1: 20 * 20 * 16
            with tf.variable_scope("conv2"):
                # conv 2
                filter2 = self._weight_variable([4, 4, 16, 32], trainable)
                b2 = self._bias_variable([32], trainable)
                conv2 = tf.nn.relu(
                    self._conv2d(conv1, filter2, stride=[1, 2, 2, 1]) + b2)
                # max pooling
                max_pool2 = self._max_pooling(conv2)
            # conv2: 9 * 9 * 32
            # max_pool2: 5 * 5 * 32 = 800
            with tf.variable_scope("full_con"):
                flat = tf.reshape(max_pool2, [-1, 5 * 5 * 32])
                full_cons_1 = []
                for agent in range(self.agent_num):
                    with tf.variable_scope("agent_{}".format(agent)):
                        if cond_training_q is None:  # target net
                            actions = batch_actions
                        else:
                            # TODO: (could be refined later) concatenate the
                            # flattened conv features with each agent's
                            # corresponding action input.
                            # switch returns (output_false, output_true)
                            (_, sw_action_training_q) = switch(
                                data=batch_actions,
                                pred=cond_training_q,
                                name='switch_actions_training_q')
                            (sw_action_training_policy,
                             _) = switch(data=online_action_outputs,
                                         pred=cond_training_q,
                                         name='switch_actions_training_policy')
                            (actions, _) = merge([
                                sw_action_training_q, sw_action_training_policy
                            ])

                        agent_dense = tf.concat([flat, actions[agent]], axis=1)

                        w_full = self._weight_variable(
                            [5 * 5 * 32 + self.action_dim, 1024], trainable)
                        b_full = self._bias_variable([1024], trainable)
                        agent_full1 = tf.nn.relu(
                            tf.matmul(agent_dense, w_full) + b_full)
                        full_cons_1.append(agent_full1)

            # full_con: 1024
            with tf.variable_scope("full_con2"):
                full_cons_2 = []
                for agent in range(self.agent_num):
                    with tf.variable_scope("agent_{}".format(agent)):
                        # fully connected: 1024 -> 128
                        w_full2 = self._weight_variable([1024, 128], trainable)
                        b_full2 = self._bias_variable([128], trainable)
                        agent_full2 = tf.nn.sigmoid(
                            tf.matmul(full_cons_1[agent], w_full2) + b_full2)
                        full_cons_2.append(agent_full2)

            # full_con2: 128
            with tf.variable_scope("output"):
                outputs = []
                for agent in range(self.agent_num):
                    with tf.variable_scope("agent_{}".format(agent)):
                        # output: 1 (per-agent Q value)
                        w_output = self._weight_variable([128, 1], trainable)
                        b_output = self._bias_variable([1], trainable)
                        out = tf.matmul(full_cons_2[agent],
                                        w_output) + b_output
                        outputs.append(out)
            # output: 1
        return outputs
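
# A minimal standalone sketch (hypothetical names, not part of the class
# above) of the switch/merge multiplexer that _build_net uses: a boolean
# tensor routes one of two action sources downstream without evaluating both.
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.control_flow_ops import merge, switch

use_batch_actions = tf.placeholder(tf.bool, name="cond_training_q")
batch_actions = tf.placeholder(tf.float32, shape=[None, 4])
online_actions = tf.placeholder(tf.float32, shape=[None, 4])

# switch returns (output_false, output_true); route each source through the
# branch on which it should be live, then merge forwards whichever candidate
# actually received data.
(_, batch_when_true) = switch(batch_actions, use_batch_actions)
(online_when_false, _) = switch(online_actions, use_batch_actions)
(selected_actions, _) = merge([batch_when_true, online_when_false])

with tf.Session() as sess:
    acts = np.ones((2, 4), np.float32)
    picked = sess.run(selected_actions,
                      feed_dict={use_batch_actions: True,
                                 batch_actions: acts,
                                 online_actions: 2 * acts})
    # picked equals acts: with the predicate True, batch_actions is selected.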