def version_9(cls, ctx, node, **kwargs):
    depth = node.get_attr_value('num_classes', None)
    assert depth is not None
    input_shape = ctx.get_shape(node._inputs[0])
    # ONNX OneHot takes depth and [off_value, on_value] as extra tensor inputs.
    depth_node = ctx.make_const(util.make_name(node.name),
                                np.array([depth], dtype=np.int64))
    values_node = ctx.make_const(util.make_name(node.name),
                                 np.array([0, 1], dtype=np.int64))
    node.input_tensor_names = node.input_tensor_names + \
                              depth_node.output_tensor_names + \
                              values_node.output_tensor_names
def version_12(cls, ctx, node, **kwargs):
    radio = node.get_attr_value('keep_prob', None)
    # node.set_attr('radio', radio)
    assert radio is not None
    # ONNX Dropout expects a drop ratio, so pass 1 - keep_prob.
    radio_node = ctx.make_const(util.make_name(node.name + '_radio_'),
                                np.array([1 - radio], dtype=np.float32),
                                is_0D_tensor=True)
    node.input_tensor_names += radio_node.output_tensor_names
    training_mode = np.bool_(True)
    training_mode_node = ctx.make_const(util.make_name(node.name + '_training_mode_'),
                                        np.array([training_mode], dtype=np.bool_),
                                        is_0D_tensor=True)
    node.input_tensor_names += training_mode_node.output_tensor_names
def make_node(self, op_type, inputs, attr=None, output_count=1, outputs=None,
              name=None, shapes=None, dtypes=None):
    if attr is None:
        attr = {}
    if shapes is None:
        shapes = []
    if dtypes is None:
        dtypes = []
    if name is None:
        name = util.make_name(op_type)
    if outputs is None:
        outputs = [name + ':' + str(i) for i in range(output_count)]
    output_count = len(outputs)
    onnx_node = helper.make_node(op_type, inputs, outputs, name=name, **attr)
    node = Node(onnx_node, self)
    if shapes:
        assert len(shapes) == output_count, \
            "Failed: output shapes count not equal to output count when make_node"
        for i in range(output_count):
            self.set_shape(node._outputs[i], shapes[i])
    if dtypes:
        assert len(dtypes) == output_count, \
            "Failed: output dtypes count not equal to output count when make_node"
        for i in range(output_count):
            self.set_dtype(node._outputs[i], dtypes[i])
    if not shapes or not dtypes:
        self.update_node_shape_dtype(node)
    self._nodes.append(node)
    return node
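# Usage sketch for make_node (illustrative only; assumes `graph` is an instance of the
# graph class above, "X:0"/"Y:0" are existing tensor names in it, and `onnx` is imported
# for the dtype enum):
#
#   add_node = graph.make_node(
#       "Add", ["X:0", "Y:0"],
#       shapes=[[1, 3]],                    # one shape per output
#       dtypes=[onnx.TensorProto.FLOAT],    # one dtype per output
#   )
#   # add_node.output_tensor_names holds the generated output name, e.g. "<name>:0".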
def convert_const_to_node(ctx, node):
    const = node.get_attr_value('const_attr', None)
    assert const is not None, "Failed: const_attr is none when AddConst op mapping"
    const_node = ctx.make_const(util.make_name('const'),
                                np.array([const], dtype=np.float32),
                                raw=False)
    node.input_tensor_names = node.input_tensor_names + const_node.output_tensor_names
def version_1(cls, ctx, node, **kwargs):
    op_name = util.make_name(node.name)
    reciprocal = ctx.insert_new_node_on_output("Reciprocal", node.output_tensor_names[0],
                                               name=op_name)
    ctx.copy_shape(node.output_tensor_names[0], reciprocal.output_tensor_names[0])
def version_5(cls, ctx, node, **kwargs):
    shape = node.get_attr_value('output_shape', None)
    assert shape is not None, "Failed: ReshapeOp does not have a const shape"
    # ONNX Reshape requires the target shape as an int64 tensor input.
    shape_node = ctx.make_const(
        util.make_name("shape"),
        np.array(shape, dtype=np.int64)
    )
    node.input_tensor_names = node.input_tensor_names + shape_node.output_tensor_names
def insert_new_node_on_input(self, node, op_type, input_name, name=None, **kwargs):
    if name is None:
        name = util.make_name(node.name)
    new_output = util.make_name(name)
    if not isinstance(input_name, list):
        input_name = [input_name]
    new_node = self.make_node(
        op_type,
        input_name,
        attr=kwargs,
        outputs=[new_output],
        name=name,
    )
    for i, n in enumerate(node.input_tensor_names):
        if n == input_name[0]:
            node.input_tensor_names[i] = new_output
            break
    return new_node
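# Usage sketch for insert_new_node_on_input (illustrative only; assumes `graph` is the
# graph instance and `conv_node` is an existing node in it):
#
#   cast_node = graph.insert_new_node_on_input(
#       conv_node, "Cast", conv_node.input_tensor_names[0],
#       to=onnx.TensorProto.FLOAT,          # extra kwargs become ONNX node attributes
#   )
#   # conv_node's matching input is rewired to read cast_node's output.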
def version_11(cls, ctx, node, **kwargs):
    pads = node.get_attr_value('paddings', None)
    assert pads is not None
    paddings = np.array(pads).astype(np.int64)
    paddings_node = ctx.make_const(util.make_name(node.name), paddings)
    node.input_tensor_names = node.input_tensor_names + paddings_node.output_tensor_names
    support_modes = ['constant', 'reflect', 'edge']
    mode = node.get_attr_value('mode', 'constant').lower()
    assert mode in support_modes
    node.set_attr('mode', mode)
    # Default to 0 so the float32 cast below does not fail when no constant value is set.
    constant_value = node.get_attr_value('constant_values', 0)
    constant_value = np.array([constant_value]).astype(np.float32)
    constant_value_node = ctx.make_const(
        util.make_name(node.name),
        constant_value,
    )
    node.input_tensor_names = node.input_tensor_names + constant_value_node.output_tensor_names
def version_6(cls, ctx, node, **kwargs):
    epsilon = node.get_attr_value('eps', 0.01)
    node.set_attr("epsilon", epsilon)
    momentum = node.get_attr_value('momentum', 0.99)
    mean = node.get_attr_value('save_mean', None)
    var = node.get_attr_value('save_var', None)
    assert mean is not None and var is not None
    mean = numpy_helper.to_array(mean)
    var = numpy_helper.to_array(var)
    mean = np.reshape(mean, [-1])
    var = np.reshape(var, [-1])
    new_mean_node_name = util.make_name(node.name + '_mean_')
    new_mean_node = ctx.make_const(new_mean_node_name, mean)
    node.input_tensor_names += new_mean_node.output_tensor_names
    new_val_node_name = util.make_name(node.name + '_var_')
    new_val_node = ctx.make_const(new_val_node_name, var)
    node.input_tensor_names += new_val_node.output_tensor_names
def insert_new_node_on_output(self, op_type, output_name, name, **kwargs):
    new_output = util.make_name(name)
    new_node = self.make_node(
        op_type,
        [output_name],
        attr=kwargs,
        outputs=[new_output],
        name=name,
    )
    for node in self._nodes:
        if node == new_node:
            continue
        for i, input_name in enumerate(node.input_tensor_names):
            if input_name == output_name:
                node.input_tensor_names[i] = new_output
    return new_node
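# Usage sketch for insert_new_node_on_output (illustrative only; mirrors the Reciprocal
# mapping above; assumes `graph` is the graph instance and `node` an existing node):
#
#   new_name = util.make_name(node.name)
#   recip = graph.insert_new_node_on_output("Reciprocal", node.output_tensor_names[0],
#                                           name=new_name)
#   # every other consumer of node.output_tensor_names[0] now reads recip's output instead.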