def get_output_size(self, in_dims):
    """Output dims of the detection post-process: four tensors sized by the
    total number of emitted boxes.

    N = max_detections * max_classes_per_detection. The first output is
    N x 4 (presumably box coordinates) and the remaining three are length-N
    vectors (presumably classes / scores / valid-count -- confirm against
    the kernel that consumes these outputs).
    """
    params = self._parameters
    num_detected_boxes = params['max_detections'] * params['max_classes_per_detection']
    outputs = [Dim(shape=[num_detected_boxes, 4], is_ordered=True)]
    outputs.extend(Dim(shape=[num_detected_boxes], is_ordered=True)
                   for _ in range(3))
    return outputs
def get_output_size(self, in_dims):
    """Compute the dimension of the reduced output.

    Axes listed in ``self._axis`` either collapse to size 1 (keep_dims)
    or are removed entirely.  Optional input/output transposes are applied
    around the reduction.
    """
    out_dim = in_dims[0].clone()
    if self.transpose_in:
        out_dim.transpose(self.transpose_in[0])
    if self.keep_dims:
        # reduced axes stay in the shape with size 1; names are preserved
        axis_names = out_dim.keys if out_dim.is_named else None
        collapsed = [1 if axis in self._axis else size
                     for axis, size in enumerate(out_dim.shape)]
        out_dim = Dim(shape=collapsed, names=axis_names, is_ordered=True)
        if self.transpose_out:
            out_dim.transpose(self.transpose_out[0])
    else:
        # reduced axes are dropped from the shape altogether
        kept = [size for axis, size in enumerate(out_dim.shape)
                if axis not in self._axis]
        out_dim = Dim(shape=kept, is_ordered=True)
    return [out_dim]
def set_states_as_inputs(self, G):
    """Replace the constant state inputs of this node by graph inputs.

    Every constant input whose slot name contains "state" is removed from
    the graph and replaced by a freshly created input node with the same
    shape, wired to the same input index of this node.  Graph dimensions
    are recomputed afterwards.
    """
    # map input-slot name -> constant node currently feeding that slot
    const_by_slot = {
        self.INPUT_NAMES[edge.to_idx]: edge.from_node
        for edge in G.in_edges(self.name)
        if isinstance(edge.from_node, ConstantInputParameters)
    }
    for slot_name in (name for name in self.INPUT_NAMES if "state" in name):
        slot_idx = self.INPUT_NAMES.index(slot_name)
        old_node = const_by_slot[slot_name]
        saved_step_idx = old_node.step_idx
        G.remove(old_node)
        # the new input inherits the shape of the constant it replaces
        new_input = G.add_input(name=slot_name + "_" + self.name,
                                dim=Dim(list(old_node.value.shape)))
        new_input.step_idx = saved_step_idx
        G.add_edge(NNEdge(new_input, self, to_idx=slot_idx))
    G.add_dimensions()
def test_creation1():
    """A Dim built with no arguments knows nothing about itself yet."""
    empty_dim = Dim()
    assert empty_dim.is_unknown
    assert not empty_dim.is_named
    assert not empty_dim.is_ordered
def _common(cls, node, **kwargs):
    """Import a TFLite UnidirectionalSequenceLSTM node as an LSTMParameters node.

    Builds the LSTMParameters from the node options, attaches all constant
    inputs (weights, biases, states), trims the batch dimension from the
    state tensors, marks the recurrent weights for concatenation with the
    input weights, and wires the main input edge.  Returns the created
    params node.
    """
    node_opts = node.get_options(UnidirectionalSequenceLSTMOptions)
    G = kwargs['G']
    opts = kwargs['opts']
    all_nodes = kwargs['all_nodes']
    inputs = [all_nodes.get(t) for t in node.input]
    x = inputs[0]
    x_shape = x[2].shape
    time_major = node_opts.TimeMajor()
    # sequence length axis depends on the TimeMajor flag
    max_time = int(x_shape[0 if time_major else 1])
    # NOTE(review): n_cells is taken from input tensor 2's first dimension --
    # presumably the recurrent weight tensor; confirm against the TFLite schema
    n_cells = int(node.input[2].shape[0])
    n_inputs = int(x_shape[2])
    pout_dims = ProvisionalDim([x_shape[0], x_shape[1], n_cells])
    params = LSTMParameters(node.name,
                            in_dims_hint=SparseList([['sz', 'c']]),
                            out_dims_hint=SparseList([['sz', 'c']]),
                            constant_store=G.constant_store,
                            cell_clip=node_opts.CellClip(),
                            proj_clip=node_opts.ProjClip(),
                            n_input_cells=max_time,
                            n_cells=max_time,  # TF says max_time - we say cells
                            n_inputs=n_inputs,  # Input will be n_input_cells, n_inputs
                            n_output_cells=max_time,  # Output will be n_output_cells, n_states
                            n_states=n_cells,  # TF says cells - we say states
                            activation=cls.TF_ACTIVATIONS[node_opts.FusedActivationFunction()])
    # attach every constant input (weights/biases/states); index 0 is the
    # sequence input and is excluded
    constant_nodes = cls.get_all_const_inputs(
        G, all_nodes, opts, node, params,
        exclude=[0],
        names=["%s_%s" % (in_name, node.name)
               for in_name in LSTMParameters.INPUT_NAMES],
        short_names=LSTMParameters.INPUT_NAMES,
        adjust_transposes=[False] * len(node.input),
        load_quantization_if_present=True,
        skip_empty_tensors=False)
    # trim batch dimension from state values
    for state_node_name in ['i_state', 'c_state']:
        state_node = constant_nodes[LSTMParameters.INPUT_NAMES.index(state_node_name)]
        if opts.get('load_tensors'):
            state_node.value = state_node.value[0]
        state_node.dims = Dim(list(state_node.value.shape), is_ordered=True)
        # set by default as allocated
        state_node.at_options.allocate = True
        state_node.is_constant = False
        # reset state after each invocation
        state_node.always_copy = True
        # add a single reset
        state_node.reset_name = "Reset"
    # Link the state weights to the input weights.
    # The autotiler expects the state and input weights to be
    # concatenated.  This tells the constant code generator to do this.
    for gate in ['i', 'o', 'c', 'f']:
        i_w_node = constant_nodes[LSTMParameters.INPUT_NAMES.index('i_2_%s_w' % gate)]
        r_w_node = constant_nodes[LSTMParameters.INPUT_NAMES.index('r_2_%s_w' % gate)]
        r_w_node.concated_nodes.append(i_w_node)
        # the recurrent weight node emits the concatenated value instead
        i_w_node.generate_value = False
    G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
    all_nodes[node.output[0]] = (params, 0, pout_dims)
    return params
def _common(cls, node, **kwargs):
    """Import a TFLite SequenceRNN node as an RNNParameters node.

    Builds the RNNParameters from the node options, attaches all constant
    inputs (weights, biases, state), trims the batch dimension from the
    state tensor, optionally records quantization, and wires the main
    input edge.  Returns the created params node.
    """
    node_opts = node.get_options(SequenceRNNOptions)
    G = kwargs['G']
    opts = kwargs['opts']
    all_nodes = kwargs['all_nodes']
    inputs = [all_nodes.get(t) for t in node.input]
    time_major = node_opts.TimeMajor()
    x = cls.remove_known_batch_dimension(
        G, inputs[0], node, batch_axis=1 if time_major else 0)
    x_shape = x[2].shape
    # sequence length axis depends on the TimeMajor flag
    max_time = int(x_shape[0 if time_major else 1])
    # NOTE(review): n_cells is taken from input tensor 2's first dimension --
    # presumably the recurrent weight tensor; confirm against the TFLite schema
    n_cells = int(node.input[2].shape[0])
    n_inputs = int(x_shape[2])
    pout_dims = ProvisionalDim([x_shape[0], x_shape[1], n_cells])
    params = RNNParameters(
        node.name,
        in_dims_hint=[['sz', 'c']],
        out_dims_hint=[['sz', 'c']],
        n_input_cells=max_time,
        n_cells=max_time,  # TF says max_time - we say cells
        n_inputs=n_inputs,  # Input will be n_input_cells, n_inputs
        n_output_cells=max_time,  # Output will be n_output_cells, n_states
        n_states=n_cells,  # TF says cells - we say states
        activation=cls.TF_ACTIVATIONS[node_opts.FusedActivationFunction()])
    # attach every constant input (weights/biases/state); index 0 is the
    # sequence input and is excluded
    constant_nodes = cls.get_all_const_inputs(
        G, all_nodes, opts, node, params,
        exclude=[0],
        names=[
            "%s_%s" % (in_name, node.name)
            for in_name in RNNParameters.INPUT_NAMES
        ],
        short_names=RNNParameters.INPUT_NAMES,
        adjust_transposes=[False] * len(node.input),
        load_quantization_if_present=True,
        skip_empty_tensors=False)
    # trim batch dimension from state values
    for state_node_name in ['i_state']:
        state_node = constant_nodes[RNNParameters.INPUT_NAMES.index(
            state_node_name)]
        # FIX: only trim when the real tensor values were loaded -- matches
        # the LSTM importer; previously the slice ran unconditionally and
        # sliced an untrimmed/placeholder value when tensors are not loaded
        if opts.get('load_tensors'):
            state_node.value = state_node.value[0]
        state_node.dims = Dim(list(state_node.value.shape), is_ordered=True)
        # set by default as allocated
        state_node.at_options.allocate = True
        state_node.is_constant = False
        # reset state after each invocation
        state_node.always_copy = True
        # add a single reset
        state_node.reset_name = "Reset"
    if opts.get('load_quantization'):
        G.quantization[NodeId(params)] = cls.load_tf_quantization(
            node.input, node.output)
    G.add_edge(
        NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
    all_nodes[node.output[0]] = (params, 0, pout_dims)
    return params