def testLoop_1(self):
    with self.test_session():
        zero = tf.convert_to_tensor(0)
        one = tf.convert_to_tensor(1)
        n = tf.constant(10)

        enter_zero = control_flow_ops.enter(zero, "foo_1", False)
        enter_one = control_flow_ops.enter(one, "foo_1", False)
        enter_n = control_flow_ops.enter(n, "foo_1", False)
        merge_zero = control_flow_ops.merge([enter_zero, enter_zero],
                                            name="merge_zero")[0]
        merge_one = control_flow_ops.merge([enter_one, enter_one],
                                           name="merge_one")[0]
        merge_n = control_flow_ops.merge([enter_n, enter_n],
                                         name="merge_n")[0]
        # The predicate compares merge_n with itself, so it is always False
        # and the loop exits immediately with the unmodified value of n.
        less_op = tf.less(merge_n, merge_n)
        cond_op = control_flow_ops.loop_cond(less_op)
        switch_zero = control_flow_ops.switch(merge_zero, cond_op)
        switch_one = control_flow_ops.switch(merge_one, cond_op)
        switch_n = control_flow_ops.switch(merge_n, cond_op)
        next_zero = control_flow_ops.next_iteration(switch_zero[1])
        next_one = control_flow_ops.next_iteration(switch_one[1])
        next_n = control_flow_ops.next_iteration(switch_n[1])
        # Close the back edges: the second merge input is the next iteration.
        merge_zero.op._update_input(1, next_zero)
        merge_one.op._update_input(1, next_one)
        merge_n.op._update_input(1, next_n)
        exit_n = control_flow_ops.exit(switch_n[0])

        result = exit_n.eval()
        self.assertAllEqual(10, result)

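# The Enter/Merge/Switch/NextIteration/Exit wiring above is the raw dataflow
# that tf.while_loop assembles automatically. A minimal sketch of the same
# counting loop through the high-level API (assuming TF 1.x graph mode):
def while_loop_equivalent_sketch():
    i = tf.constant(0)
    n = tf.constant(10)
    # loop_cond corresponds to `cond`; the NextIteration back edge to `body`.
    return tf.while_loop(cond=lambda i: tf.less(i, n),
                         body=lambda i: tf.add(i, 1),
                         loop_vars=[i])
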
def generate_switch_pb():
    with tf.compat.v1.Session(graph=tf.Graph()) as sess:
        x = tf.compat.v1.placeholder(dtype="int32", shape=())
        y = tf.compat.v1.placeholder(dtype="int32", shape=())
        output1 = control_flow_ops.switch(x, False)
        output2 = control_flow_ops.switch(y, True)
        tf.io.write_graph(sess.graph, logdir="./", name="test_switch.pb",
                          as_text=False)

def ZerosLikeOutsideLoop(op, index):
    """Create zeros_like for the specified output of an op."""
    val = op.outputs[index]
    if not util.IsSwitch(op):
        if val.dtype == dtypes.resource:
            return array_ops.zeros(
                gen_resource_variable_ops.variable_shape(val),
                dtype=default_gradient.get_zeros_dtype(val))
        return array_ops.zeros_like(val, optimize=False)
    else:
        op_ctxt = op._get_control_flow_context()
        if op_ctxt:
            # We are in a cond context. Use a switch to create zeros only
            # when needed.
            pred = op_ctxt.pred
            branch = op_ctxt.branch
            switch_val = control_flow_ops.switch(op.inputs[0], pred)[1 - branch]
            # An op is created along the taken branch because control
            # dependencies are on the whole op, not on the tensor output.
            pivot = array_ops.identity(switch_val)
            if val.dtype == dtypes.resource:
                with ops.control_dependencies([pivot]):
                    return array_ops.zeros(
                        gen_resource_variable_ops.variable_shape(switch_val),
                        dtype=default_gradient.get_zeros_dtype(val))
            zeros_shape = array_ops.shape_internal(switch_val, optimize=False)
            # Ensure ops created within array_ops.zeros are dominated by the
            # switch in the cond context.
            with ops.control_dependencies([pivot]):
                return array_ops.zeros(zeros_shape, dtype=val.dtype)
        else:
            return array_ops.zeros_like(val, optimize=False)

def apply_with_random_selector(image, func, num_cases):
    """Computes func(image, case), with case sampled uniformly from
    [0, num_cases - 1]."""
    # Randomly select one of num_cases modes; only the selected switch branch
    # is live, so only that func(image, case) is actually computed.
    sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
    return control_flow_ops.merge([
        func(control_flow_ops.switch(image, tf.equal(case, sel))[1], case)
        for case in range(num_cases)
    ])[0]

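# Hypothetical usage sketch (the `distort` helper below is illustrative, not
# part of the original code): apply one of two brightness/saturation
# orderings, chosen at random per image.
def distort(image, ordering):
    if ordering == 0:
        image = tf.image.random_brightness(image, max_delta=32. / 255.)
        image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
    else:
        image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
        image = tf.image.random_brightness(image, max_delta=32. / 255.)
    return image

def preprocess(image):
    return apply_with_random_selector(
        image, lambda x, case: distort(x, case), num_cases=2)
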
def testLoop_2(self):
    with self.test_session():
        zero = tf.constant(0)
        one = tf.constant(1)
        n = tf.constant(10)

        enter_i = control_flow_ops.enter(zero, "foo", False)
        enter_one = control_flow_ops.enter(one, "foo", True)
        enter_n = control_flow_ops.enter(n, "foo", True)

        merge_i = control_flow_ops.merge([enter_i, enter_i])[0]

        less_op = tf.less(merge_i, enter_n)
        cond_op = control_flow_ops.loop_cond(less_op)
        switch_i = control_flow_ops.switch(merge_i, cond_op)

        add_i = tf.add(switch_i[1], enter_one)

        with tf.device("/gpu:0"):
            next_i = control_flow_ops.next_iteration(add_i)
        merge_i.op._update_input(1, next_i)

        exit_i = control_flow_ops.exit(switch_i[0])
        result = exit_i.eval()
        self.assertAllEqual(10, result)

def injectFaultSwitch(a, cond):
    """Function to call injectFault on Switch."""
    logging.debug("Calling Operator Switch " + getArgs(a, cond))
    # FIXME: Actually implement the Switch operation.
    # Only there's no TensorFlow documentation for it!
    # res = np.select(a, b)
    sess = tf.Session()
    op = control_flow_ops.switch(data=a, pred=cond)
    # This op will not be injected due to the exception case.
    try:
        res = sess.run(op)
    except Exception:
        return a, a
    # if cond:
    #     return a, a
    # else:
    #     return a, a
    # res = a, a
    # res = condPerturb(Ops.SWITCH, res)
    if logReturn:
        logging.debug("\tReturning from Switch " + str(res))
    return res

def create_q_net(self,
                 state_inputs,  # NHWC format.
                 action_inputs_training_q,
                 scope,
                 trainable,
                 action_inputs_training_policy=None,  # None for target net.
                 cond_training_q=None):  # bool to control switch; can be None for target net.
    with tf.variable_scope(scope):
        # Input normalization layer.
        prev_layer = self.input_normalizer(state_inputs,
                                           **self.input_norm_params)
        ## fc layers
        l = 1  # start from fc-1 as 1
        for n_unit, activation, initializer, normalizer, norm_param, regularizer in zip(
                self.n_fc_units, self.fc_activations, self.fc_initializers,
                self.fc_normalizers, self.fc_norm_params,
                self.fc_regularizers):
            # Feed in the action inputs at the configured layer.
            if l == DDPG_CFG.include_action_fc_layer:
                if action_inputs_training_policy is None:  # target net
                    actions = action_inputs_training_q
                else:
                    # add logic for selecting online net action inputs
                    # switch returns (output_false, output_true)
                    (_, sw_action_training_q) = switch(
                        data=action_inputs_training_q,
                        pred=cond_training_q,
                        name='switch_actions_training_q')
                    (sw_action_training_policy, _) = switch(
                        data=action_inputs_training_policy,
                        pred=cond_training_q,
                        name='switch_actions_training_policy')
                    (actions, _) = merge(
                        [sw_action_training_q, sw_action_training_policy])
                prev_layer = tf.concat([prev_layer, actions], axis=1)
            l += 1
            prev_layer = fully_connected(
                prev_layer,
                num_outputs=n_unit,
                activation_fn=activation,
                weights_initializer=initializer,
                weights_regularizer=regularizer,
                normalizer_fn=normalizer,  # when a normalizer is specified, bias is ignored.
                normalizer_params=norm_param,
                trainable=trainable)
        # Output layer. fully_connected would create a bias, which is not
        # wanted in the output layer.
        output_layer = fully_connected(
            inputs=prev_layer,
            num_outputs=1,
            activation_fn=None,
            weights_initializer=self.output_layer_initializer,
            weights_regularizer=self.output_layer_regularizer,
            biases_initializer=None,  # to skip bias in output layer
            trainable=trainable)
    return output_layer

def random_resize(im, size):
    choice = tf.random_uniform([], maxval=4, dtype=tf.int32)
    im = control_flow_ops.merge([
        tf.image.resize_images(
            control_flow_ops.switch(im, tf.equal(choice, method))[1],
            size, method)
        for method in range(4)
    ])[0]
    return im

def testGradientThroughSingleBranchOutsideOfContext(self):
    x = constant_op.constant(2.)
    s = constant_op.constant(True)
    x_false, x_true = control_flow_ops.switch(x, s)
    grad_x_true = gradients_impl.gradients(x_true, x)[0]
    grad_x_false = gradients_impl.gradients(x_false, x)[0]
    self.assertEquals(self.evaluate(grad_x_true), 1.)
    self.assertEquals(self.evaluate(grad_x_false), 0.)

def testSwitchDeadBranch(self):
    with self.test_session():
        data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
        ports = tf.convert_to_tensor(True, name="ports")
        switch_op = control_flow_ops.switch(data, ports)
        dead_branch = tf.identity(switch_op[0])

        with self.assertRaisesWithPredicateMatch(
                StatusNotOK, lambda e: "The tensor returned for" in str(e)):
            dead_branch.eval()

def _testSwitchMerge_1(self, use_gpu):
    with self.test_session(use_gpu=use_gpu):
        data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
        ports = tf.convert_to_tensor(True, name="ports")
        switch_op = control_flow_ops.switch(data, ports)
        merge_op = control_flow_ops.merge(switch_op)[0]

        result = merge_op.eval()
        self.assertAllEqual(np.arange(1, 7), result)

def testSwitchMergeIdentity_1(self):
    with self.test_session():
        data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
        ports = tf.convert_to_tensor(True, name="ports")
        switch_op = control_flow_ops.switch(data, ports)
        merge_op = control_flow_ops.merge(switch_op)[0]
        id_op = tf.identity(merge_op)

        result = id_op.eval()
        self.assertAllEqual(np.arange(1, 7), result)

def testSwitchMergeLess_1(self):
    with self.test_session():
        data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
        zero = tf.convert_to_tensor(0)
        one = tf.convert_to_tensor(1)
        less_op = tf.less(zero, one)
        switch_op = control_flow_ops.switch(data, less_op)
        merge_op = control_flow_ops.merge(switch_op)[0]

        result = merge_op.eval()
        self.assertAllEqual(np.arange(1, 7), result)

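# Switch/Merge pairs like the one above are the primitives that tf.cond emits
# under the hood. A minimal sketch of the equivalent high-level form
# (assuming TF 1.x graph mode):
def cond_equivalent_sketch():
    data = tf.constant([1, 2, 3, 4, 5, 6])
    pred = tf.less(tf.constant(0), tf.constant(1))  # True
    # tf.cond lowers to a Switch on `pred` feeding each branch and a Merge
    # that forwards whichever branch is live.
    return tf.cond(pred, lambda: tf.identity(data), lambda: tf.identity(data))
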
def testIsSwitch(self):
    switch_false, _ = control_flow_ops.switch(1, True)
    switch = switch_false.op
    self.assertTrue(control_flow_util.IsSwitch(switch))

    ref_switch_false, _ = control_flow_ops.ref_switch(test_ops.ref_output(),
                                                      True)
    ref_switch = ref_switch_false.op
    self.assertTrue(control_flow_util.IsSwitch(ref_switch))

    self.assertFalse(control_flow_util.IsSwitch(test_ops.int_output().op))

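# For reference, IsSwitch is a plain op-type check. A sketch of its likely
# shape (assumption: it matches on the "Switch"/"RefSwitch" op type names,
# the two cases the test above exercises):
def IsSwitchSketch(op):
    return op.type == "Switch" or op.type == "RefSwitch"
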
def testIndexedSlicesWithDenseShape(self):
    with self.test_session():
        data = ops.IndexedSlices(tf.constant([1, 2, 3]),
                                 tf.constant([0, 1]),
                                 dense_shape=tf.constant([3]))
        zero = tf.constant(0)
        one = tf.constant(1)
        less_op = tf.less(zero, one)
        switch_false, switch_true = control_flow_ops.switch(data, less_op)
        self.assertAllEqual([1, 2, 3], switch_true.values.eval())
        self.assertAllEqual([0, 1], switch_true.indices.eval())

def testSwitchMergeAddIdentity_1(self):
    with self.test_session():
        data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
        ports = tf.convert_to_tensor(True, name="ports")
        switch_op = control_flow_ops.switch(data, ports)
        one = tf.constant(1)
        add_op = tf.add(switch_op[0], one)
        id_op = tf.identity(switch_op[1])
        merge_op = control_flow_ops.merge([add_op, id_op])[0]

        result = merge_op.eval()
        self.assertAllEqual(np.arange(1, 7), result)

def testSwitchMergeAddMul_1(self):
    with self.test_session():
        data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
        ports = tf.convert_to_tensor(True, name="ports")
        switch_op = control_flow_ops.switch(data, ports)
        one = tf.constant(1)
        add_op = tf.add(switch_op[0], one)
        five = tf.constant(5)
        # tf.mul is the pre-1.0 name of tf.multiply.
        mul_op = tf.mul(switch_op[1], five)
        merge_op = control_flow_ops.merge([add_op, mul_op])[0]

        result = merge_op.eval()
        self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]),
                            result)

def testSwitchMergeIndexedSlices(self):
    with self.test_session():
        values = tf.constant([1, 2, 3, 4, 5, 6])
        indices = tf.constant([0, 2, 4, 6, 8, 10])
        data = tf.IndexedSlices(values, indices)
        pred = tf.convert_to_tensor(True)
        switch_op = control_flow_ops.switch(data, pred)
        merge_op = control_flow_ops.merge(switch_op)[0]

        val = merge_op.values.eval()
        ind = merge_op.indices.eval()
        self.assertAllEqual(np.arange(1, 7), val)
        self.assertAllEqual(np.arange(0, 12, 2), ind)

def random_grayscale(im):
    def convert(im, flag):
        if flag == 0:
            im = tf.image.rgb_to_grayscale(im)
            # rgb_to_grayscale keeps a trailing channel dim of 1, so tile it
            # back to 3 channels along the channel axis.
            im = tf.concat([im, im, im], axis=-1)
        return im

    # maxval must equal the number of switch branches (2 here); otherwise
    # the merge can end up with no live input.
    choice = tf.random_uniform([], maxval=2, dtype=tf.int32)
    im = control_flow_ops.merge([
        convert(control_flow_ops.switch(im, tf.equal(choice, flag))[1], flag)
        for flag in range(2)
    ])[0]
    return im

def testLoop_false(self):
    with self.test_session():
        false = tf.convert_to_tensor(False)
        n = tf.constant(10)

        enter_false = control_flow_ops.enter(false, "foo_1", False)
        enter_n = control_flow_ops.enter(n, "foo_1", False)

        merge_n = control_flow_ops.merge([enter_n], name="merge_n")[0]
        switch_n = control_flow_ops.switch(merge_n, enter_false)
        exit_n = control_flow_ops.exit(switch_n[0])

        result = exit_n.eval()
        self.assertAllEqual(10, result)

def test_switch():
    graph = tf.Graph()
    with graph.as_default():
        data_np = np.random.uniform(0, 5, size=(2, 4, 5, 1)).astype("float32")
        dname = "data"
        flag_name = "flag"
        data = tf.placeholder(shape=data_np.shape, dtype=data_np.dtype,
                              name=dname)
        split = tf.split(data, 2, axis=0)
        flag = tf.placeholder(shape=(), dtype=tf.bool, name=flag_name)
        output_false, output_true = control_flow_ops.switch(split[1], flag)
        with tf.Session() as sess:
            tf_out = sess.run(output_false,
                              feed_dict={data.name: data_np,
                                         flag.name: False})
            check_equal(graph, tf_out, {dname: data_np, flag_name: False})

def template(x_shape=[2, 3, 4, 5], pred=True, description: str = ""):
    from tensorflow.python.ops import control_flow_ops

    x = tf.placeholder(np.float32, x_shape)
    y_f, y_t = control_flow_ops.switch(x, pred)
    y = y_t if pred else y_f

    vx = np.random.rand(*x_shape).astype(np.float32)
    with tf.Session() as sess:
        vy, = sess.run([y], {x: vx})
        graph = TensorFlowConverter(sess, batch_size=2).convert([x], [y])

    generate_kernel_test_case(
        description=f"[TensorFlow] Switch {description}",
        graph=graph,
        inputs={graph.inputs[0]: vx},
        expected={graph.outputs[0]: vy},
    )

def Test():
    data = tf.constant([1, 2, 3, 4, 5, 6])
    zero = tf.convert_to_tensor(0)
    one = tf.convert_to_tensor(1)
    less_op = tf.less(zero, one)
    switch_op = control_flow_ops.switch(data, less_op)
    merge_op = control_flow_ops.merge(switch_op)[0]
    result = tf.transpose(merge_op)

    tensor_info_result = tf.compat.v1.saved_model.utils.build_tensor_info(
        result)

    signature_def = tf.saved_model.signature_def_utils.build_signature_def(
        inputs=None,
        outputs={'result': tensor_info_result},
        method_name='some_function')

    return {'key': signature_def}

def Test():
    data = tf.constant([1, 2, 3, 4, 5, 6])
    # Create placeholders to prevent constant folding.
    x_op = tf.placeholder(dtype=tf.int32)
    y_op = tf.placeholder(dtype=tf.int32)
    less_op = tf.less(x_op, y_op)
    switch_op = control_flow_ops.switch(data, less_op)
    merge_op = control_flow_ops.merge(switch_op)[0]
    result = tf.transpose(merge_op)

    tensor_info_result = tf.compat.v1.saved_model.utils.build_tensor_info(
        result)

    signature_def = tf.saved_model.signature_def_utils.build_signature_def(
        inputs=None,
        outputs={'result': tensor_info_result},
        method_name='some_function')

    return {'key': signature_def}, None, None

def testSwitchMerge(self):
    with self.cached_session() as sess:
        predicate = array_ops.placeholder(dtypes.bool)
        with self.test_scope():
            false_output, true_output = control_flow_ops.switch(
                data=constant_op.constant(42.0), pred=predicate)
            with ops.control_dependencies([array_ops.identity(false_output)]):
                five = constant_op.constant(5.0)
            with ops.control_dependencies([array_ops.identity(true_output)]):
                ten = constant_op.constant(10.0)
            result = control_flow_ops.merge([five, ten])
        with_true = sess.run(result, {predicate: True})
        self.assertEquals(with_true.output, 10.0)
        self.assertEquals(with_true.value_index, 1)
        with_false = sess.run(result, {predicate: False})
        self.assertEquals(with_false.output, 5.0)
        self.assertEquals(with_false.value_index, 0)

def random_distort_color(im):
    def distort(im, order):
        if order == 0:
            im = random_contrast(
                random_hue(random_saturation(random_brightness(im))))
        elif order == 1:
            im = random_hue(
                random_contrast(random_brightness(random_saturation(im))))
        elif order == 2:
            im = random_saturation(
                random_brightness(random_hue(random_contrast(im))))
        else:
            im = random_brightness(
                random_contrast(random_saturation(random_hue(im))))
        return im

    choice = tf.random_uniform([], maxval=4, dtype=tf.int32)
    im = control_flow_ops.merge([
        distort(control_flow_ops.switch(im, tf.equal(choice, order))[1],
                order)
        for order in range(4)
    ])[0]
    return im

def create_q_net(self,
                 state_inputs,  # NHWC format.
                 action_inputs_training_q,
                 scope,
                 trainable,
                 action_inputs_training_policy=None,  # None for target net.
                 cond_training_q=None):  # bool to control switch; can be None for target net.
    prev_layer = state_inputs
    conv_layers = []
    fc_layers = []
    with tf.variable_scope(scope):
        ## conv layers
        # TODO add batch_norm to input process.
        for n_maps, kernel_size, stride, padding, activation, initializer, \
                normalizer, norm_param, regularizer in zip(
                    self.conv_n_feature_maps, self.conv_kernel_sizes,
                    self.conv_strides, self.conv_padding,
                    self.conv_activations, self.conv_initializers,
                    self.conv_normalizers, self.conv_norm_params,
                    self.conv_regularizers):
            prev_layer = conv2d(prev_layer,
                                num_outputs=n_maps,
                                kernel_size=kernel_size,
                                stride=stride,
                                padding=padding,
                                activation_fn=activation,
                                data_format='NHWC',
                                normalizer_fn=normalizer,
                                normalizer_params=norm_param,
                                weights_initializer=initializer,
                                weights_regularizer=regularizer,
                                trainable=trainable)
            conv_layers.append(prev_layer)
        # end conv layers

        ## fc layers
        # Flatten the output of the last conv layer to (batch_size, n_fc_in).
        # TODO calc n_fc_in from the prev_layer tensor shape.
        prev_layer = tf.reshape(conv_layers[-1], shape=[-1, self.n_fc_in])
        l = 1  # start from fc-1 as 1
        for n_unit, activation, initializer, normalizer, norm_param, regularizer in zip(
                self.n_fc_units, self.fc_activations, self.fc_initializers,
                self.fc_normalizers, self.fc_norm_params,
                self.fc_regularizers):
            # include action_inputs
            if l == DDPG_CFG.include_action_fc_layer:
                # prev_layer is fc-1-out, shape (batch_size, n_fc1_units);
                # action_inputs shape (batch_size, a_dim).
                # NOTE: online q and target q are different now, but the
                # differing part has no params, so soft-update and copy-init
                # from online to target will still work.
                if action_inputs_training_policy is None:  # target net
                    actions = action_inputs_training_q
                else:
                    # add logic for selecting online net action inputs
                    # switch returns (output_false, output_true)
                    (_, sw_action_training_q) = switch(
                        data=action_inputs_training_q,
                        pred=cond_training_q,
                        name='switch_actions_training_q')
                    (sw_action_training_policy, _) = switch(
                        data=action_inputs_training_policy,
                        pred=cond_training_q,
                        name='switch_actions_training_policy')
                    (actions, _) = merge(
                        [sw_action_training_q, sw_action_training_policy])
                prev_layer = tf.concat([prev_layer, actions], axis=1)
            l += 1
            prev_layer = fully_connected(prev_layer,
                                         num_outputs=n_unit,
                                         activation_fn=activation,
                                         weights_initializer=initializer,
                                         normalizer_fn=normalizer,
                                         normalizer_params=norm_param,
                                         weights_regularizer=regularizer,
                                         trainable=trainable)
            fc_layers.append(prev_layer)
        # end fc layers

        ## output layer
        # num_outputs = 1 means Q(s, a) is a scalar.
        output_layer = fully_connected(
            fc_layers[-1],
            num_outputs=1,
            activation_fn=None,
            weights_initializer=self.output_layer_initializer,
            weights_regularizer=self.output_layer_regularizer,
            trainable=trainable)
        # # linear layer connected to 1 output representing Q(s,a)
        # # Weights are init to Uniform[-3e-3, 3e-3]
        # w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        # out = tflearn.fully_connected(net, 1, weights_init=w_init)
        # return inputs, action, out
    # ==== end with variable_scope() ====
    return output_layer

def handler(cmd_queue, chan):
    global object_dict
    global object_id
    global callback_stack
    global initialized

    if not initialized:
        callback_stack = []
        object_dict = dict()
        object_id = 1
        # TODO: forward logging or disable it in test
        tf.logging.set_verbosity(tf.logging.INFO)
        initialized = True
        print("handler is initialized")

    while True:
        cmd = cmd_queue.get(block=True)
        cmd_id = cmd.__get_cmd_id()
        print("new command id %d" % cmd_id)

        try:
            if cmd_id == TF_PY_NW_CALLBACK_DONE:
                param.base.done = STATUS_TASK_DONE
                ret = fcntl.ioctl(kvm_fd, IOCTL_KVM_NOTIFY_TASK_FINISHED,
                                  task.node_id)
                if ret < 0:
                    print("notify task completion failed: %d\n" % ret)
                if callback_stack and \
                        callback_stack[-1]["callback_id"] == param.base.object_id:
                    print("callback is finished")
                    return STATUS_CALLBACK_DONE
                else:
                    print("callback is error")
                    return STATUS_CALLBACK_ERROR

            if cmd_id == TF_PY_SESSION_INIT:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                sess = tf.Session(param0, param1, param2)
                # assign object_id
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = sess
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()
            #elif cmd_id == TF_PY_SESSION_ENTER:
            #    sess = object_dict[param.base.object_id]
            #    ctx_sess = sess.__enter__()
            #    if sess is ctx_sess:
            #        pass
            #    else:  # unlikely
            #        print("unlikely to search for sess")
            #        param.base.object_id = next(obj_id for obj_id, obj in
            #            object_dict.items() if obj is ctx_sess)
            #elif cmd_id == TF_PY_SESSION_EXIT:
            #    param1 = parse_param(vm_id, mm, param, param.param1)
            #    param2 = parse_param(vm_id, mm, param, param.param2)
            #    param3 = parse_param(vm_id, mm, param, param.param3)
            #    sess = object_dict[param.base.object_id]
            #    sess.__exit__(param1, param2, param3)
            #elif cmd_id == TF_PY_SESSION_DEL:
            #    sess = object_dict[param.base.object_id]
            #    sess.__del__()
            # deprecated
            #elif cmd_id == TF_PY_SESSION_RUN:
            #    sess = object_dict[param.base.object_id]
            #    param1 = parse_param(vm_id, mm, param, param.param1)
            #    if type(param1) == NwObject:
            #        print("get NwObject=%d" % param1.object_id())
            #        param1 = object_dict[param1.object_id()]
            #    print(param1)
            #    ret_val = sess.run(param1)
            #    print(ret_val)
            #    writeback_result(vm_id, mm, param, param.ret_val1, ret_val)
            elif cmd_id == TF_PY_TPU_CLUSTER_RESOLVER_INIT:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                print("TPUClusterResolver", param0, param1, param2)
                tpu_grpc = tf.contrib.cluster_resolver.TPUClusterResolver(
                    tpu=param0, zone=param1, project=param2)
                # assign object_id
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = tpu_grpc
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()
            # deprecated
            elif cmd_id == TF_PY_TPU_CLUSTER_RESOLVER_MASTER:
                tpu_grpc = object_dict[cmd.__get_object_id()]
                # FIXED: may have parameters
                tpu_grpc_url = tpu_grpc.master()
                # serialize return value
                dump_ret, len_ret = pickle_arg(tpu_grpc_url)
                total_buffer_size = chan.buffer_size(len_ret)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(),
                                           total_buffer_size)
                ret_cmd.__set_cmd_base(TENSORFLOW_PY_API,
                                       TF_PY_TPU_CLUSTER_RESOLVER_MASTER_RET)
                offset_ret = ret_cmd.attach_buffer(dump_ret, len_ret)
                ret_cmd.__set_tf_args([(len_ret, offset_ret)])
                ret_cmd.send()
            elif cmd_id == TF_PY_TPU_INITIALIZE_SYSTEM:
                # TODO: may have parameters
                ts = tpu.initialize_system()
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = ts
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()
            elif cmd_id == TF_PY_TPU_SHUTDOWN_SYSTEM:
                # TODO: may have parameters
                ts = tpu.shutdown_system()
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = ts
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()
            elif cmd_id == TF_PY_GLOBAL_VARIABLES_INITIALIZER:
                # TODO: may have parameters
                ts = tf.global_variables_initializer()
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = ts
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()
            elif cmd_id == TF_PY_ONES:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                print(param0)
                if param1 is None:
                    param1 = dtypes.float32
                print(param1)
                var = tf.ones(param0, param1)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = var
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()
            elif cmd_id == TF_PY_RANDOM_UNIFORM:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param3 = unpickle_arg(cmd, 3)
                param4 = unpickle_arg(cmd, 4)
                param5 = unpickle_arg(cmd, 5)
                if param1 is None:
                    param1 = 0
                if param3 is None:
                    param3 = dtypes.float32
                print(param0, param1, param2, param3)
                var = tf.random_uniform(param0, param1, param2, param3,
                                        param4, param5)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = var
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()
            elif cmd_id == TF_PY_TRANSPOSE:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param3 = unpickle_arg(cmd, 3)
                param0 = object_dict[param0.object_id()]
                if param2 is None:
                    param2 = "transpose"
                if param3 is None:
                    param3 = False
                print("transpose", param0, param1, param2, param3)
                var = tf.transpose(param0, param1, param2, param3)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = var
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()
            elif cmd_id == TF_PY_CAST:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param0 = object_dict[param0.object_id()]
                print("cast", param0, param1, param2)
                var = tf.cast(param0, param1, param2)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = var
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()
            elif cmd_id == TF_PY_EXPAND_DIMS:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param3 = unpickle_arg(cmd, 3)
                param0 = object_dict[param0.object_id()]
                print("expand_dims", param0, param1, param2, param3)
                var = tf.expand_dims(param0, param1, param2, param3)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = var
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()
            elif cmd_id == TF_PY_CONCAT:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param0 = object_dict[param0.object_id()]
                if param2 is None:
                    param2 = "concat"
                print("concat", param0, param1, param2)
                var = tf.concat(param0, param1, param2)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = var
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()
            elif cmd_id == TF_PY_EQUAL:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param0 = object_dict[param0.object_id()]
                print("equal", param0, param1, param2)
                if isinstance(param1, NwObject):
                    param1 = object_dict[param1.object_id()]
                result = tf.equal(param0, param1, param2)
                print(result)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = result
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()
            elif cmd_id == TF_PY_FIXED_LEN_FEATURE:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                feature = tf.FixedLenFeature(param0, param1, param2)
                print(feature)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = feature
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()
            elif cmd_id == TF_PY_VAR_LEN_FEATURE:
                param0 = unpickle_arg(cmd, 0)
                feature = tf.VarLenFeature(param0)
                print(feature)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = feature
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()
            elif cmd_id == TF_PY_PARSE_SINGLE_EXAMPLE:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param3 = unpickle_arg(cmd, 3)
                print(param1, param2)
                # expand embedded NwObject
                if isinstance(param0, NwObject):
                    param0 = object_dict[param0.object_id()]
                dict_walker(param1)
                print("after translation", param0, param1)
                result = tf.parse_single_example(param0, param1, param2,
                                                 param3)
                print(result)
                dict_mapper(result)
                print(result)
                dump_ret, len_ret = pickle_arg(result)
                total_buffer_size = chan.buffer_size(len_ret)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(),
                                           total_buffer_size)
                ret_cmd.__set_cmd_base(TENSORFLOW_PY_API,
                                       TF_PY_PARSE_SINGLE_EXAMPLE_RET)
                offset_ret = ret_cmd.attach_buffer(dump_ret, len_ret)
                ret_cmd.__set_tf_args([(len_ret, offset_ret)])
                ret_cmd.send()
            elif cmd_id == TF_PY_CONTROL_FLOW_OPS_SWITCH:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param3 = unpickle_arg(cmd, 3)
                param0 = object_dict[param0.object_id()]
                param1 = object_dict[param1.object_id()]
                print("switch", param0, param1, param2, param3)
                result = control_flow_ops.switch(param0, param1, param2,
                                                 param3)
                print(result)
                mapped_tuple = tuple_mapper(result, [0, 1])
                print(mapped_tuple)
                dump_ret, len_ret = pickle_arg(mapped_tuple)
                total_buffer_size = chan.buffer_size(len_ret)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(),
                                           total_buffer_size)
                ret_cmd.__set_cmd_base(TENSORFLOW_PY_API,
                                       TF_PY_CONTROL_FLOW_OPS_SWITCH_RET)
                offset_ret = ret_cmd.attach_buffer(dump_ret, len_ret)
                ret_cmd.__set_tf_args([(len_ret, offset_ret)])
                ret_cmd.send()
            elif cmd_id == TF_PY_CONTROL_FLOW_OPS_MERGE:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param0 = object_dict[param0.object_id()]
                print("merge", param0, param1)
                list_walker(param0)
                print("merge-new", param0, param1)
                result = control_flow_ops.merge(param0, param1)
                print(result)
                mapped_tuple = tuple_mapper(result, [0])
                print(mapped_tuple)
                dump_ret, len_ret = pickle_arg(mapped_tuple)
                total_buffer_size = chan.buffer_size(len_ret)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(),
                                           total_buffer_size)
                ret_cmd.__set_cmd_base(TENSORFLOW_PY_API,
                                       TF_PY_CONTROL_FLOW_OPS_MERGE_RET)
                offset_ret = ret_cmd.attach_buffer(dump_ret, len_ret)
                ret_cmd.__set_tf_args([(len_ret, offset_ret)])
                ret_cmd.send()
            elif cmd_id == TF_PY_TPU_REWRITE:
                # TODO: may have more parameters
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                # default parameter
                if param1 is None:
                    param1 = None
                # expand embedded NwObject
                list_walker(param1)
                func = tpu.rewrite(param0, param1)
                print("Rewrite result:", func, " object id =", object_id)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = func
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()
            elif cmd_id == TF_PY_TPU_RUN_CONFIG:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param3 = unpickle_arg(cmd, 3)
                param4 = unpickle_arg(cmd, 4)
                # default parameter
                if param0 is None:
                    param0 = None
                if param1 is None:
                    param1 = None
                if param2 is None:
                    param2 = None
                if param3 is None:
                    param3 = None
                # expand embedded NwObject
                param3 = object_dict[param3.object_id()]
                print(param3, param4)
                func = tpu.RunConfig(param0, param1, param2, param3, **param4)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = func
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()
            elif cmd_id == TF_PY_TPU_TPU_ESTIMATOR:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param3 = unpickle_arg(cmd, 3)
                param4 = unpickle_arg(cmd, 4)
                param5 = unpickle_arg(cmd, 5)
                param6 = unpickle_arg(cmd, 6)
                param7 = unpickle_arg(cmd, 7)
                param8 = unpickle_arg(cmd, 8)
                param9 = unpickle_arg(cmd, 9)
                param10 = unpickle_arg(cmd, 10)
                param11 = unpickle_arg(cmd, 11)
                # default parameter
                if param0 is None:
                    param0 = None
                if param1 is None:
                    param1 = None
                if param2 is None:
                    param2 = None
                if param3 is None:
                    param3 = None
                if param4 is None:
                    param4 = True
                if param5 is None:
                    param5 = None
                if param6 is None:
                    param6 = None
                if param7 is None:
                    param7 = None
                if param8 is None:
                    param8 = None
                if param9 is None:
                    param9 = True
                if param10 is None:
                    param10 = True
                if param11 is None:
                    param11 = None
                # expand embedded NwObject
                param2 = object_dict[param2.object_id()]
                print(param2)
                func = tpu.TPUEstimator(param0, param1, param2, param3,
                                        param4, param5, param6, param7,
                                        param8, param9, param10, param11)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = func
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()
            elif cmd_id == TF_PY_IMAGE_RESIZE_IMAGES:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param3 = unpickle_arg(cmd, 3)
                param4 = unpickle_arg(cmd, 4)
                # default parameter
                if param2 is None:
                    param2 = ResizeMethod.BILINEAR
                if param3 is None:
                    param3 = False
                if param4 is None:
                    param4 = False
                # expand embedded NwObject
                param0 = object_dict[param0.object_id()]
                print(param0)
                img = tf.image.resize_images(param0, param1, param2, param3,
                                             param4)
                # TODO: it may return a float
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = img
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()
            elif cmd_id == TF_PY_SLICE:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param3 = unpickle_arg(cmd, 3)
                # expand embedded NwObject
                print(param0, param1, param2, param3)
                param0 = object_dict[param0.object_id()]
                ret = tf.slice(param0, param1, param2, param3)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = ret
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()
            elif cmd_id == TF_PY_SHAPE:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                if param2 is None:
                    param2 = dtypes.int32
                # expand embedded NwObject
                print(param0, param1, param2)
                param0 = object_dict[param0.object_id()]
                ret = tf.shape(param0, param1, param2)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = ret
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()
            elif cmd_id == TF_PY_IMAGE_SAMPLE_DISTORTED_BOUNDING_BOX:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param3 = unpickle_arg(cmd, 3)
                param4 = unpickle_arg(cmd, 4)
                param5 = unpickle_arg(cmd, 5)
                param6 = unpickle_arg(cmd, 6)
                param7 = unpickle_arg(cmd, 7)
                param8 = unpickle_arg(cmd, 8)
                param9 = unpickle_arg(cmd, 9)
                # default parameter
                if param4 is None:
                    param4 = 0.1
                print("sample_distorted_bounding_box", param0, param1)
                result = tf.image.sample_distorted_bounding_box(
                    param0, param1, param2, param3, param4, param5, param6,
                    param7, param8, param9)
                print(result)
                mapped_tuple = tuple_mapper(result, [0, 1, 2])
                print(mapped_tuple)
                dump_ret, len_ret = pickle_arg(mapped_tuple)
                total_buffer_size = chan.buffer_size(len_ret)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(),
                                           total_buffer_size)
                ret_cmd.__set_cmd_base(TENSORFLOW_PY_API,
                                       TF_PY_CONTROL_FLOW_OPS_MERGE_RET)
                offset_ret = ret_cmd.attach_buffer(dump_ret, len_ret)
                ret_cmd.__set_tf_args([(len_ret, offset_ret)])
                ret_cmd.send()
            elif cmd_id == TF_PY_IMAGE_DRAW_BOUNDING_BOXES:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                # expand embedded NwObject
                print(param0, param1, param2)
                param0 = object_dict[param0.object_id()]
                param1 = object_dict[param1.object_id()]
                ret = tf.image.draw_bounding_boxes(param0, param1, param2)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = ret
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()
            elif cmd_id == TF_PY_IMAGE_DECODE_JPEG:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param3 = unpickle_arg(cmd, 3)
                param4 = unpickle_arg(cmd, 4)
                param5 = unpickle_arg(cmd, 5)
                param6 = unpickle_arg(cmd, 6)
                param7 = unpickle_arg(cmd, 7)
                if param1 is None:
                    param1 = 0
                if param2 is None:
                    param2 = 1
                if param3 is None:
                    param3 = True
                if param4 is None:
                    param4 = False
                if param5 is None:
                    param5 = 1
                if param6 is None:
                    param6 = ""
                param0 = object_dict[param0.object_id()]
                ret = tf.image.decode_jpeg(param0, param1, param2, param3,
                                           param4, param5, param6, param7)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = ret
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()
            elif cmd_id == TF_PY_IMAGE_CONVERT_IMAGE_DTYPE:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                param3 = unpickle_arg(cmd, 3)
                # expand embedded NwObject
                print(param0, param1, param2, param3)
                param0 = object_dict[param0.object_id()]
                if param2 is None:
                    param2 = False
                ret = tf.image.convert_image_dtype(param0, param1, param2,
                                                   param3)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = ret
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()
            elif cmd_id == TF_PY_DATA_DATASET_LIST_FILES:
                param0 = unpickle_arg(cmd, 0)
                param1 = unpickle_arg(cmd, 1)
                param2 = unpickle_arg(cmd, 2)
                print(param0, param1, param2)
                if isinstance(param0, NwObject):
                    param0 = object_dict[param0.object_id()]
                ret = tf.data.Dataset.list_files(param0, param1, param2)
                ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                object_dict[object_id] = ret
                ret_cmd.__set_object_id(object_id)
                object_id += 1
                ret_cmd.send()
            elif cmd_id == TF_PY_NW_OBJECT:
                obj = object_dict[cmd.__get_object_id()]
                name = unpickle_arg(cmd, 0)
                args = unpickle_arg(cmd, 1)
                kwargs = unpickle_arg(cmd, 2)
                print("NwObject", obj, name, args, kwargs)
                # expand embedded NwObject
                args = list(args)
                list_walker(args)
                args = tuple(args)
                dict_walker(kwargs)
                print("after translation", obj, name, args, kwargs)
                # run
                result = getattr(obj, name)(*(args or []), **(kwargs or {}))
                print("analyze type", type(result), result)
                # TODO: go through tuple, dict or list
                if isinstance(result, tuple):
                    result = tuple_mapper(result, range(len(result)))
                if isinstance(result, dict):
                    dict_mapper(result)
                if isinstance(result, list):
                    list_mapper(result)
                ret_cmd = None
                # serialize return value
                # if isinstance(result, list):
                #     # Check whether a nested list pickles
                #     # https://github.com/uqfoundation/dill/issues/307
                #     pickleable = pickle.pickles(reduce(operator.add, result))
                # else:
                #     pickleable = pickle.pickles(result)
                pickleable = True
                if is_unpickleable_type(result) or not pickleable:
                    ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(), 0)
                    object_dict[object_id] = result
                    ret_cmd.__set_object_id(object_id)
                    object_id += 1
                else:
                    dump_ret, len_ret = pickle_arg(result)
                    total_buffer_size = chan.buffer_size(len_ret)
                    ret_cmd = chan.new_command(chan.__get_sizeof_tf_cmd(),
                                               total_buffer_size)
                    ret_cmd.__set_cmd_base(TENSORFLOW_PY_API,
                                           TF_PY_NW_OBJECT_RET)
                    offset_ret = ret_cmd.attach_buffer(dump_ret, len_ret)
                    ret_cmd.__set_object_id(0)
                    ret_cmd.__set_tf_args([(len_ret, offset_ret)])
                ret_cmd.send()
            elif cmd_id == TF_PY_NW_METHOD:
                # Reuse as callback
                #ins = parse_param(vm_id, mm, param, param.param1)
                #name = parse_param(vm_id, mm, param, param.param2)
                #print(ins, name)
                #method = getattr(ins, name)
                #print(method)
                #object_dict[object_id] = method
                cw = callback_constructor(object_id, callback_param, param,
                                          mm, vm_id, cmd_queue, kvm_fd)
                object_dict[object_id] = cw
                param.base.object_id = object_id
                object_id += 1
            elif cmd_id == TF_PY_NW_CALLBACK_TEST:
                nw_func = parse_param(vm_id, mm, param, param.param1)
                print(nw_func, nw_func.object_id())
                func = object_dict[nw_func.object_id()]
                print("callback func", func)
                x = parse_param(vm_id, mm, param, param.param2)
                y = parse_param(vm_id, mm, param, param.param3)
                result = func(x, y)
                print(result)
                writeback_result(vm_id, mm, param, param.ret_val1, result)
            else:
                print("unsupported Tensorflow API %d" % cmd_id)
        except Exception as error:
            print("fault: ", str(error))
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print(exc_type, fname, exc_tb.tb_lineno)
            traceback.print_stack()

        cmd.free_command()
        print("finished cmd %d" % cmd_id)

def _build_net(self,
               state_inputs,
               batch_actions,
               scope,
               trainable,
               online_action_outputs=None,  # None for target net; used when updating the actor to take gradients w.r.t. a.
               cond_training_q=None):  # bool to control switch; can be None for target net.
    '''
    :param state_inputs:
    :param batch_actions: two roles: (1) the placeholder for batch-sampled
        actions when training Q; (2) for the target network, a reference
        to a'.
    :param scope:
    :param trainable:
    :param online_action_outputs: None for target net; used when updating
        the actor to take gradients w.r.t. a (the output of the action
        network).
    :param cond_training_q:
    :return:
    '''
    with tf.variable_scope(scope):
        # input: 84 * 84 * 3
        with tf.variable_scope("conv1"):
            # conv 1
            filter1 = self._weight_variable([8, 8, self.s_images_dim, 16],
                                            trainable)
            b1 = self._bias_variable([16], trainable)
            conv1 = tf.nn.relu(
                self._conv2d(state_inputs, filter1, stride=[1, 4, 4, 1]) + b1)
        # conv1: 20 * 20 * 16
        with tf.variable_scope("conv2"):
            # conv 2
            filter2 = self._weight_variable([4, 4, 16, 32], trainable)
            b2 = self._bias_variable([32], trainable)
            conv2 = tf.nn.relu(
                self._conv2d(conv1, filter2, stride=[1, 2, 2, 1]) + b2)
            # max pooling
            max_pool2 = self._max_pooling(conv2)
        # conv2: 9 * 9 * 32
        # max_pool2: 5 * 5 * 32 = 800
        with tf.variable_scope("full_con"):
            flat = tf.reshape(max_pool2, [-1, 5 * 5 * 32])
            full_cons_1 = []
            for agent in range(self.agent_num):
                with tf.variable_scope("agent_{}".format(agent)):
                    if cond_training_q is None:  # target net
                        actions = batch_actions
                    else:
                        # TODO: (can be tuned later) after flattening the
                        # conv output, concatenate it with each agent's
                        # corresponding action input.
                        # switch returns (output_false, output_true)
                        (_, sw_action_training_q) = switch(
                            data=batch_actions,
                            pred=cond_training_q,
                            name='switch_actions_training_q')
                        (sw_action_training_policy, _) = switch(
                            data=online_action_outputs,
                            pred=cond_training_q,
                            name='switch_actions_training_policy')
                        (actions, _) = merge([
                            sw_action_training_q, sw_action_training_policy
                        ])
                    agent_dense = tf.concat([flat, actions[agent]], axis=1)
                    w_full = self._weight_variable(
                        [5 * 5 * 32 + self.action_dim, 1024], trainable)
                    b_full = self._bias_variable([1024], trainable)
                    agent_full1 = tf.nn.relu(
                        tf.matmul(agent_dense, w_full) + b_full)
                    full_cons_1.append(agent_full1)
        # full_con: 1024
        with tf.variable_scope("full_con2"):
            full_cons_2 = []
            for agent in range(self.agent_num):
                with tf.variable_scope("agent_{}".format(agent)):
                    w_full2 = self._weight_variable([1024, 128], trainable)
                    b_full2 = self._bias_variable(128, trainable)
                    agent_full2 = tf.nn.sigmoid(
                        tf.matmul(full_cons_1[agent], w_full2) + b_full2)
                    full_cons_2.append(agent_full2)
        # full_con2: 128
        with tf.variable_scope("output"):
            outputs = []
            for agent in range(self.agent_num):
                with tf.variable_scope("agent_{}".format(agent)):
                    w_output = self._weight_variable([128, 1], trainable)
                    b_output = self._bias_variable(1, trainable)
                    out = tf.matmul(full_cons_2[agent], w_output) + b_output
                    outputs.append(out)
        # output: 1
        return outputs

def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    shutil.rmtree(FLAGS.saved_model_path)

    # The following test creates two signatures, each of which contains a
    # switch-merge construct that will be functionalized to the tf.If op.
    # However, the `then` and `else` branches' arguments are deliberately made
    # different between these two model signatures, in order to trigger an
    # error in case these branches are functionalized to functions with the
    # same function name.
    data_0 = array_ops.constant([1, 2, 3, 4, 5, 6])
    data_1 = array_ops.constant([2, 3, 4, 5, 6, 7])
    # Create placeholders to prevent constant folding.
    x_op = array_ops.placeholder(dtype=dtypes.int32)
    y_op = array_ops.placeholder(dtype=dtypes.int32)
    less_op = math_ops.less(x_op, y_op)
    switch_0_op = control_flow_ops.switch(data_0, less_op)
    switch_1_op = control_flow_ops.switch(data_1, less_op)
    # merge_0_op will be functionalized to a tf.If op with only one argument,
    # `data_0`, in addition to the condition `less_op`.
    merge_0_op = control_flow_ops.merge(switch_0_op)[0]
    # merge_1_op will be functionalized to a tf.If op with two arguments,
    # `data_0` and `data_1`, in addition to the condition `less_op`.
    merge_1_op = control_flow_ops.merge([switch_0_op[0], switch_1_op[1]])[0]

    result = merge_0_op
    result_1 = merge_1_op

    sess = session.Session()
    sm_builder = builder.SavedModelBuilder(FLAGS.saved_model_path)
    tensor_info_x = utils.build_tensor_info(x_op)
    tensor_info_y = utils.build_tensor_info(y_op)
    tensor_info_result = utils.build_tensor_info(result)
    tensor_info_result_1 = utils.build_tensor_info(result_1)
    signature = (
        signature_def_utils.build_signature_def(
            inputs={
                'x': tensor_info_x,
                'y': tensor_info_y
            },
            outputs={'result': tensor_info_result},
            method_name=signature_constants.PREDICT_METHOD_NAME))
    signature_1 = (
        signature_def_utils.build_signature_def(
            inputs={
                'x': tensor_info_x,
                'y': tensor_info_y
            },
            outputs={'result_1': tensor_info_result_1},
            method_name=signature_constants.PREDICT_METHOD_NAME))
    sm_builder.add_meta_graph_and_variables(
        sess, [tag_constants.SERVING],
        signature_def_map={
            'sig': signature,
            'sig_1': signature_1,
        },
        strip_default_attrs=True)
    sm_builder.save()

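# A sketch of the functionalized form of the first signature (assumption:
# control_flow_ops.cond stands in here for the tf.If op that
# functionalization produces; both branches of merge_0_op forward data_0
# unchanged):
def functionalized_merge_0_sketch(x_op, y_op, data_0):
    return control_flow_ops.cond(math_ops.less(x_op, y_op),
                                 lambda: array_ops.identity(data_0),
                                 lambda: array_ops.identity(data_0))
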
def handler(queue, kvm_fd, mm): global object_dict global object_id global callback_stack global initialized if not initialized: callback_stack = [] object_dict = dict() object_id = 1 # TODO: forward logging or disable it in test tf.logging.set_verbosity(tf.logging.INFO) initialized = True print("handler is initialized") while True: task = None task = queue.get(block=True) while task is None: try: task = queue.get(block=True, timeout=5) except Queue.Empty: task = None if callback_stack: if time.time() > callback_stack[-1]["deadline"]: print("callback failed deadline") return STATUS_CALLBACK_TIMEOUT vm_id = task.vm_id if vm_id == STOP_HANDLER: break param = TF_PY_PARAM.from_buffer(mm, task.data_ptr) callback_param = TF_PY_PARAM.from_buffer( mm, task.data_ptr + param.base.callback_param_offset) print( "retrieve [vm#%d] tensorflow task=%d cmd=%d, obj=%d, dstore=%lx, done=%d" % (task.vm_id, task.node_id, param.base.cmd_id, param.base.object_id, param.base.dstore_size, param.base.done)) print( "retrieve [vm#%d] callback node cmd=%d, obj=%d, dstore=%lx, done=%d" % (task.vm_id, callback_param.base.cmd_id, callback_param.base.object_id, callback_param.base.dstore_size, callback_param.base.done)) cmd_id = param.base.cmd_id try: if cmd_id == TF_PY_NW_CALLBACK_DONE: param.base.done = STATUS_TASK_DONE ret = fcntl.ioctl(kvm_fd, IOCTL_KVM_NOTIFY_TASK_FINISHED, task.node_id) if ret < 0: print("notify task completion failed: %d\n" % ret) if callback_stack and \ callback_stack[-1]["callback_id"] == param.base.object_id: print("callback is finished") return STATUS_CALLBACK_DONE else: print("callback is error") return STATUS_CALLBACK_ERROR if cmd_id == TF_PY_SESSION_INIT: print("SessionInit!!!") param1 = parse_param(vm_id, mm, param, param.param1) print(param1) sess = tf.Session(param1) # assign object_id object_dict[object_id] = sess param.base.object_id = object_id object_id += 1 elif cmd_id == TF_PY_SESSION_ENTER: sess = object_dict[param.base.object_id] ctx_sess = sess.__enter__() if sess is ctx_sess: pass else: # unlikely print("unlikely to search for sess") param.base.object_id = next( obj_id for obj_id, obj in object_dict.items() if obj is ctx_sess) elif cmd_id == TF_PY_SESSION_EXIT: param1 = parse_param(vm_id, mm, param, param.param1) param2 = parse_param(vm_id, mm, param, param.param2) param3 = parse_param(vm_id, mm, param, param.param3) sess = object_dict[param.base.object_id] sess.__exit__(param1, param2, param3) elif cmd_id == TF_PY_SESSION_DEL: sess = object_dict[param.base.object_id] sess.__del__() # deprecated elif cmd_id == TF_PY_SESSION_RUN: sess = object_dict[param.base.object_id] param1 = parse_param(vm_id, mm, param, param.param1) if type(param1) == NwObject: print("get NwObject=%d" % param1.object_id()) param1 = object_dict[param1.object_id()] print(param1) ret_val = sess.run(param1) print(ret_val) writeback_result(vm_id, mm, param, param.ret_val1, ret_val) elif cmd_id == TF_PY_TPU_CLUSTER_RESOLVER_INIT: print("resloverInit!!!") param1 = parse_param(vm_id, mm, param, param.param1) param2 = parse_param(vm_id, mm, param, param.param2) param3 = parse_param(vm_id, mm, param, param.param3) if param1 is None: param1 = None if param2 is None: param2 = None if param3 is None: param3 = None print("TPUClusterResolver", param1, param2, param3) tpu_grpc = tf.contrib.cluster_resolver.TPUClusterResolver( tpu=param1, zone=param2, project=param3) # assign object_id object_dict[object_id] = tpu_grpc param.base.object_id = object_id print("assign obj_id=%d" % object_id) object_id += 1 # deprecated elif 
cmd_id == TF_PY_TPU_CLUSTER_RESOLVER_MASTER: # FIXED: use __getattr__ print("master!!") tpu_grpc = object_dict[param.base.object_id] # FIXED: may have parameters tpu_grpc_url = tpu_grpc.master() # serialize return value writeback_result(vm_id, mm, param, param.ret_val1, tpu_grpc_url) elif cmd_id == TF_PY_TPU_INITIALIZE_SYSTEM: # TODO: may have parameters ts = tpu.initialize_system() object_dict[object_id] = ts param.base.object_id = object_id object_id += 1 elif cmd_id == TF_PY_TPU_SHUTDOWN_SYSTEM: # TODO: may have parameters ts = tpu.shutdown_system() object_dict[object_id] = ts param.base.object_id = object_id object_id += 1 elif cmd_id == TF_PY_GLOBAL_VARIABLES_INITIALIZER: # TODO: may have parameters ts = tf.global_variables_initializer() object_dict[object_id] = ts param.base.object_id = object_id object_id += 1 elif cmd_id == TF_PY_ONES: print("param1 size=%ld,offset=%ld" % (param.param1.size, param.param1.offset)) param1 = parse_param(vm_id, mm, param, param.param1) param2 = parse_param(vm_id, mm, param, param.param2) if param2 is None: param2 = dtypes.float32 print(param2) var = tf.ones(param1, param2) object_dict[object_id] = var param.base.object_id = object_id object_id += 1 elif cmd_id == TF_PY_RANDOM_UNIFORM: param1 = parse_param(vm_id, mm, param, param.param1) param2 = parse_param(vm_id, mm, param, param.param2) param3 = parse_param(vm_id, mm, param, param.param3) param4 = parse_param(vm_id, mm, param, param.param4) param5 = parse_param(vm_id, mm, param, param.param5) param6 = parse_param(vm_id, mm, param, param.param6) if param2 is None: param2 = 0 if param4 is None: param4 = dtypes.float32 print(param1, param2, param3, param4) var = tf.random_uniform(param1, param2, param3, param4, param5, param6) object_dict[object_id] = var param.base.object_id = object_id object_id += 1 elif cmd_id == TF_PY_TRANSPOSE: param1 = parse_param(vm_id, mm, param, param.param1) param2 = parse_param(vm_id, mm, param, param.param2) param3 = parse_param(vm_id, mm, param, param.param3) param4 = parse_param(vm_id, mm, param, param.param4) param1 = object_dict[param1.object_id()] if param3 is None: param3 = "transpose" if param4 is None: param4 = False print("transpose", param1, param2, param3, param4) var = tf.transpose(param1, param2, param3, param4) object_dict[object_id] = var param.base.object_id = object_id object_id += 1 elif cmd_id == TF_PY_CAST: param1 = parse_param(vm_id, mm, param, param.param1) param2 = parse_param(vm_id, mm, param, param.param2) param3 = parse_param(vm_id, mm, param, param.param3) param1 = object_dict[param1.object_id()] print("cast", param1, param2, param3) var = tf.cast(param1, param2, param3) object_dict[object_id] = var param.base.object_id = object_id object_id += 1 elif cmd_id == TF_PY_EXPAND_DIMS: param1 = parse_param(vm_id, mm, param, param.param1) param2 = parse_param(vm_id, mm, param, param.param2) param3 = parse_param(vm_id, mm, param, param.param3) param4 = parse_param(vm_id, mm, param, param.param4) param1 = object_dict[param1.object_id()] print("expand_dims", param1, param2, param3, param4) var = tf.expand_dims(param1, param2, param3, param4) object_dict[object_id] = var param.base.object_id = object_id object_id += 1 elif cmd_id == TF_PY_CONCAT: param1 = parse_param(vm_id, mm, param, param.param1) param2 = parse_param(vm_id, mm, param, param.param2) param3 = parse_param(vm_id, mm, param, param.param3) param1 = object_dict[param1.object_id()] if param3 is None: param3 = "concat" print("concat", param1, param2, param3) var = tf.concat(param1, param2, param3) 
object_dict[object_id] = var param.base.object_id = object_id object_id += 1 elif cmd_id == TF_PY_EQUAL: param1 = parse_param(vm_id, mm, param, param.param1) param2 = parse_param(vm_id, mm, param, param.param2) param3 = parse_param(vm_id, mm, param, param.param3) param1 = object_dict[param1.object_id()] print("equal", param1, param2, param3) if isinstance(param2, NwObject): param2 = object_dict[param2.object_id()] result = tf.equal(param1, param2, param3) print(result) object_dict[object_id] = result param.base.object_id = object_id object_id += 1 elif cmd_id == TF_PY_FIXED_LEN_FEATURE: param1 = parse_param(vm_id, mm, param, param.param1) param2 = parse_param(vm_id, mm, param, param.param2) param3 = parse_param(vm_id, mm, param, param.param3) feature = tf.FixedLenFeature(param1, param2, param3) print(feature) object_dict[object_id] = feature param.base.object_id = object_id object_id += 1 elif cmd_id == TF_PY_VAR_LEN_FEATURE: param1 = parse_param(vm_id, mm, param, param.param1) feature = tf.VarLenFeature(param1) print(feature) object_dict[object_id] = feature param.base.object_id = object_id object_id += 1 elif cmd_id == TF_PY_PARSE_SINGLE_EXAMPLE: param1 = parse_param(vm_id, mm, param, param.param1) param2 = parse_param(vm_id, mm, param, param.param2) param3 = parse_param(vm_id, mm, param, param.param3) param4 = parse_param(vm_id, mm, param, param.param4) print(param1, param2) # expand embedded NwObject if isinstance(param1, NwObject): param1 = object_dict[param1.object_id()] dict_walker(param2) print("after translation", param1, param2) result = tf.parse_single_example(param1, param2, param3, param4) print(result) dict_mapper(result) print(result) writeback_result(vm_id, mm, param, param.ret_val1, result) elif cmd_id == TF_PY_CONTROL_FLOW_OPS_SWITCH: param1 = parse_param(vm_id, mm, param, param.param1) param2 = parse_param(vm_id, mm, param, param.param2) param3 = parse_param(vm_id, mm, param, param.param3) param4 = parse_param(vm_id, mm, param, param.param4) param1 = object_dict[param1.object_id()] param2 = object_dict[param2.object_id()] print("switch", param1, param2, param3, param4) result = control_flow_ops.switch(param1, param2, param3, param4) print(result) mapped_tuple = tuple_mapper(result, [0, 1]) print(mapped_tuple) writeback_result(vm_id, mm, param, param.ret_val1, mapped_tuple) elif cmd_id == TF_PY_CONTROL_FLOW_OPS_MERGE: param1 = parse_param(vm_id, mm, param, param.param1) param2 = parse_param(vm_id, mm, param, param.param2) param1 = object_dict[param1.object_id()] print("merge", param1, param2) list_walker(param1) print("merge-new", param1, param2) result = control_flow_ops.merge(param1, param2) print(result) mapped_tuple = tuple_mapper(result, [0]) print(mapped_tuple) writeback_result(vm_id, mm, param, param.ret_val1, mapped_tuple) elif cmd_id == TF_PY_TPU_REWRITE: # TODO: may have parameters param1 = parse_param(vm_id, mm, param, param.param1) param2 = parse_param(vm_id, mm, param, param.param2) # default parameter if param2 is None: param2 = None # expand embedded NwObject list_walker(param2) func = tpu.rewrite(param1, param2) object_dict[object_id] = func param.base.object_id = object_id print("rewrite object_id=%d" % object_id) object_id += 1 elif cmd_id == TF_PY_TPU_RUN_CONFIG: param1 = parse_param(vm_id, mm, param, param.param1) param2 = parse_param(vm_id, mm, param, param.param2) param3 = parse_param(vm_id, mm, param, param.param3) param4 = parse_param(vm_id, mm, param, param.param4) param5 = parse_param(vm_id, mm, param, param.param5) # default parameter if param1 
is None: param1 = None if param2 is None: param2 = None if param3 is None: param3 = None if param4 is None: param4 = None # expand embedded NwObject param4 = object_dict[param4.object_id()] print(param4, param5) func = tpu.RunConfig(param1, param2, param3, param4, **param5) object_dict[object_id] = func param.base.object_id = object_id object_id += 1 elif cmd_id == TF_PY_TPU_TPU_ESTIMATOR: param1 = parse_param(vm_id, mm, param, param.param1) param2 = parse_param(vm_id, mm, param, param.param2) param3 = parse_param(vm_id, mm, param, param.param3) param4 = parse_param(vm_id, mm, param, param.param4) param5 = parse_param(vm_id, mm, param, param.param5) param6 = parse_param(vm_id, mm, param, param.param6) param7 = parse_param(vm_id, mm, param, param.param7) param8 = parse_param(vm_id, mm, param, param.param8) param9 = parse_param(vm_id, mm, param, param.param9) param10 = parse_param(vm_id, mm, param, param.param10) param11 = parse_param(vm_id, mm, param, param.param11) param12 = parse_param(vm_id, mm, param, param.param12) # default parameter if param1 is None: param1 = None if param2 is None: param2 = None if param3 is None: param3 = None if param4 is None: param4 = None if param5 is None: param5 = True if param6 is None: param6 = None if param7 is None: param7 = None if param8 is None: param8 = None if param9 is None: param9 = None if param10 is None: param10 = True if param11 is None: param11 = True if param12 is None: param12 = None # expand embedded NwObject param3 = object_dict[param3.object_id()] print(param3) func = tpu.TPUEstimator(param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12) object_dict[object_id] = func param.base.object_id = object_id object_id += 1 elif cmd_id == TF_PY_IMAGE_RESIZE_IMAGES: param1 = parse_param(vm_id, mm, param, param.param1) param2 = parse_param(vm_id, mm, param, param.param2) param3 = parse_param(vm_id, mm, param, param.param3) param4 = parse_param(vm_id, mm, param, param.param4) param5 = parse_param(vm_id, mm, param, param.param5) # default parameter if param3 is None: param3 = ResizeMethod.BILINEAR if param4 is None: param4 = False if param5 is None: param5 = False # expand embedded NwObject param1 = object_dict[param1.object_id()] print(param1) img = tf.image.resize_images(param1, param2, param3, param4, param5) # TODO: it may return a float object_dict[object_id] = img param.base.object_id = object_id object_id += 1 elif cmd_id == TF_PY_SLICE: param1 = parse_param(vm_id, mm, param, param.param1) param2 = parse_param(vm_id, mm, param, param.param2) param3 = parse_param(vm_id, mm, param, param.param3) param4 = parse_param(vm_id, mm, param, param.param4) # expand embedded NwObject print(param1, param2, param3) param1 = object_dict[param1.object_id()] ret = tf.slice(param1, param2, param3, param4) object_dict[object_id] = ret param.base.object_id = object_id object_id += 1 elif cmd_id == TF_PY_SHAPE: param1 = parse_param(vm_id, mm, param, param.param1) param2 = parse_param(vm_id, mm, param, param.param2) param3 = parse_param(vm_id, mm, param, param.param3) if param3 is None: param3 = dtypes.int32 # expand embedded NwObject print(param1, param2, param3) param1 = object_dict[param1.object_id()] ret = tf.shape(param1, param2, param3) object_dict[object_id] = ret param.base.object_id = object_id object_id += 1 elif cmd_id == TF_PY_IMAGE_SAMPLE_DISTORTED_BOUNDING_BOX: param1 = parse_param(vm_id, mm, param, param.param1) param2 = parse_param(vm_id, mm, param, param.param2) param3 = parse_param(vm_id, mm, param, 
    elif cmd_id == TF_PY_IMAGE_SAMPLE_DISTORTED_BOUNDING_BOX:
        param1 = parse_param(vm_id, mm, param, param.param1)
        param2 = parse_param(vm_id, mm, param, param.param2)
        param3 = parse_param(vm_id, mm, param, param.param3)
        param4 = parse_param(vm_id, mm, param, param.param4)
        param5 = parse_param(vm_id, mm, param, param.param5)
        param6 = parse_param(vm_id, mm, param, param.param6)
        param7 = parse_param(vm_id, mm, param, param.param7)
        param8 = parse_param(vm_id, mm, param, param.param8)
        param9 = parse_param(vm_id, mm, param, param.param9)
        param10 = parse_param(vm_id, mm, param, param.param10)
        # default parameter
        if param5 is None:
            param5 = 0.1
        print("sample_distorted_bounding_box", param1, param2)
        result = tf.image.sample_distorted_bounding_box(
            param1, param2, param3, param4, param5,
            param6, param7, param8, param9, param10)
        print(result)
        mapped_tuple = tuple_mapper(result, [0, 1, 2])
        print(mapped_tuple)
        writeback_result(vm_id, mm, param, param.ret_val1, mapped_tuple)
    elif cmd_id == TF_PY_IMAGE_DRAW_BOUNDING_BOXES:
        param1 = parse_param(vm_id, mm, param, param.param1)
        param2 = parse_param(vm_id, mm, param, param.param2)
        param3 = parse_param(vm_id, mm, param, param.param3)
        # expand embedded NwObject
        print(param1, param2, param3)
        param1 = object_dict[param1.object_id()]
        param2 = object_dict[param2.object_id()]
        ret = tf.image.draw_bounding_boxes(param1, param2, param3)
        object_dict[object_id] = ret
        param.base.object_id = object_id
        object_id += 1
    elif cmd_id == TF_PY_IMAGE_DECODE_JPEG:
        param1 = parse_param(vm_id, mm, param, param.param1)
        param2 = parse_param(vm_id, mm, param, param.param2)
        param3 = parse_param(vm_id, mm, param, param.param3)
        param4 = parse_param(vm_id, mm, param, param.param4)
        param5 = parse_param(vm_id, mm, param, param.param5)
        param6 = parse_param(vm_id, mm, param, param.param6)
        param7 = parse_param(vm_id, mm, param, param.param7)
        param8 = parse_param(vm_id, mm, param, param.param8)
        # default parameter
        if param2 is None:
            param2 = 0
        if param3 is None:
            param3 = 1
        if param4 is None:
            param4 = True
        if param5 is None:
            param5 = False
        if param6 is None:
            param6 = 1
        if param7 is None:
            param7 = ""
        param1 = object_dict[param1.object_id()]
        img = tf.image.decode_jpeg(param1, param2, param3, param4,
                                   param5, param6, param7, param8)
        object_dict[object_id] = img
        param.base.object_id = object_id
        object_id += 1
    elif cmd_id == TF_PY_IMAGE_CONVERT_IMAGE_DTYPE:
        param1 = parse_param(vm_id, mm, param, param.param1)
        param2 = parse_param(vm_id, mm, param, param.param2)
        param3 = parse_param(vm_id, mm, param, param.param3)
        param4 = parse_param(vm_id, mm, param, param.param4)
        # expand embedded NwObject
        print(param1, param2, param3)
        param1 = object_dict[param1.object_id()]
        # default parameter
        if param3 is None:
            param3 = False
        ret = tf.image.convert_image_dtype(param1, param2, param3, param4)
        object_dict[object_id] = ret
        param.base.object_id = object_id
        object_id += 1
    elif cmd_id == TF_PY_DATA_DATASET_LIST_FILES:
        param1 = parse_param(vm_id, mm, param, param.param1)
        param2 = parse_param(vm_id, mm, param, param.param2)
        param3 = parse_param(vm_id, mm, param, param.param3)
        print(param1, param2, param3)
        if isinstance(param1, NwObject):
            param1 = object_dict[param1.object_id()]
        ret = tf.data.Dataset.list_files(param1, param2, param3)
        object_dict[object_id] = ret
        param.base.object_id = object_id
        object_id += 1
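    # Annotation (not in the original source): multi-output ops (switch -> 2
    # tensors, merge -> (tensor, index), sample_distorted_bounding_box -> 3
    # tensors) pass through tuple_mapper(result, indices) before
    # writeback_result, which replaces the tensors at the listed positions
    # with NwObject handles so the tuple itself can be marshalled. A sketch of
    # that contract inferred from the call sites; the NwObject constructor
    # shown here is hypothetical:
    #
    #     def tuple_mapper(result, indices):
    #         global object_id
    #         out = list(result)
    #         for i in indices:
    #             object_dict[object_id] = out[i]   # keep the real tensor
    #             out[i] = NwObject(object_id)      # send back a handle
    #             object_id += 1
    #         return tuple(out)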
id = %d" % param.base.object_id) obj = object_dict[param.base.object_id] name = parse_param(vm_id, mm, param, param.param1) args = parse_param(vm_id, mm, param, param.param2) kwargs = parse_param(vm_id, mm, param, param.param3) print("NwObject", obj, name, args, kwargs) # expand embedded NwObject args = list(args) list_walker(args) args = tuple(args) dict_walker(kwargs) print("after translation", obj, name, args, kwargs) # run result = getattr(obj, name)(*(args or []), **(kwargs or {})) param.base.object_id = -1 param.ret_val1.size = 0 print("analyze type", type(result), result) # TODO: go through tuple, dict or list if isinstance(result, tuple): result = tuple_mapper(result, range(len(result))) if isinstance(result, dict): dict_mapper(result) if isinstance(result, list): list_mapper(result) # serialize return value if is_unpickleable_type(result) or \ pickle.pickles(result) is False: object_dict[object_id] = result param.base.object_id = object_id object_id += 1 elif result is not None: writeback_result(vm_id, mm, param, param.ret_val1, result) elif cmd_id == TF_PY_NW_METHOD: # Reuse as callback #ins = parse_param(vm_id, mm, param, param.param1) #name = parse_param(vm_id, mm, param, param.param2) #print(ins, name) #method = getattr(ins, name) #print(method) #object_dict[object_id] = method cw = callback_constructor(object_id, callback_param, param, mm, vm_id, queue, kvm_fd) object_dict[object_id] = cw param.base.object_id = object_id object_id += 1 elif cmd_id == TF_PY_NW_CALLBACK_TEST: nw_func = parse_param(vm_id, mm, param, param.param1) print(nw_func, nw_func.object_id()) func = object_dict[nw_func.object_id()] print("callback func", func) x = parse_param(vm_id, mm, param, param.param2) y = parse_param(vm_id, mm, param, param.param3) result = func(x, y) print(result) writeback_result(vm_id, mm, param, param.ret_val1, result) else: print("unsupported Tensorflow API") except Exception, error: param.base.done = STATUS_TASK_ERROR #mm.flush(task.data_ptr, sizeof(PARAM_BASE)) print "fault: ", str(error) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] print(exc_type, fname, exc_tb.tb_lineno) traceback.print_stack() print("finished [vm#%d] TF task %d cmd %d" % (task.vm_id, task.node_id, param.base.cmd_id)) param.base.done = STATUS_TASK_DONE #mm.flush(task.data_ptr, sizeof(PARAM_BASE)) #mm.flush(INVOKER_FIFO_SIZE + VGPU_DSTORE_SIZE * (vm_id - 1) + # param.base.dstore_offset + param.ret_val1.offset, # param.ret_val1.size) # notify hypervisor ret = fcntl.ioctl(kvm_fd, IOCTL_KVM_NOTIFY_TASK_FINISHED, task.node_id) if ret < 0: print("notify task completion failed: %d\n" % ret)