Example #1
    def _link_previous_op_to_branch_op(self, original_op: Op, branch_op: Op,
                                       output_shape: tf.TensorShape):
        """ Link the original op to the new branch op by creating a product """

        product = Product(original_op.name + '_to_' + branch_op.name, output_shape)
        product.producer = original_op
        product.add_consumer(branch_op)
        original_op.output = product
        branch_op.add_input(product)
        self._products[product.name] = product
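The wiring above is easier to see in isolation. Below is a minimal sketch with simplified stand-ins for the real Op and Product classes (the stubs and names are hypothetical, not the actual connected-graph implementation):

class Op:
    def __init__(self, name):
        self.name, self.inputs, self.output = name, [], None

    def add_input(self, product):
        self.inputs.append(product)

class Product:
    def __init__(self, name, shape):
        self.name, self.shape = name, shape
        self.producer, self.consumers = None, []

    def add_consumer(self, op):
        self.consumers.append(op)

conv = Op('conv1')
branch = Op('branch_0')
product = Product(conv.name + '_to_' + branch.name, (1, 28, 28, 32))
product.producer = conv        # the product remembers which op produced it
product.add_consumer(branch)   # and which ops consume it
conv.output = product          # op -> product edge
branch.add_input(product)      # product -> op edge
assert branch.inputs[0].producer is conv   # the graph is now traversable in both directions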
Example #2
        def create_and_connect_product(param_name: str, product_shape: tf.TensorShape, my_op: Op,
                                       param_tensor: tf.Tensor):
            """ Create product with given name, shape, and corresponding tensor.  Connect product to my_op. """

            product = Product(my_op.name + '/' + param_name, product_shape)
            product.is_parm = True
            product.add_consumer(my_op)
            product.tensor_dict[my_op] = param_tensor
            my_op.add_input(product)
            products_dict[product.name] = product
            my_op.add_param(param_name, product)
            self._num_products_made += 1
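Note that create_and_connect_product is a nested helper closing over products_dict and self; Examples #6 and #10 below show its call sites, creating the 'kernel'/'bias' products for conv- and dense-type ops and the beta/gamma/moving-statistics products for fused batchnorm.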
Example #3
def fill_op_info(op: Op, tf_op_info: ModuleIdentifierOpInfo):
    """ Fill in op information """
    if op.type == 'Conv2D':
        op.groups = 1
    elif op.type == 'DepthwiseConv2dNative':
        # Set number of groups to be the number of input channels
        # Format is expected to be NHWC, so channels is the last index in shape array
        # This is not actually guaranteed to be correct; we need to figure out a way to check for sure
        op.groups = tf_op_info.tf_op.inputs[0].shape.as_list()[-1]

    op.model_module = TfModelModule(tf_op_info.tf_op)
    for attribute_name, attribute in tf_op_info.get_attributes().items():
        op.add_attribute(attribute_name, attribute)
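The depthwise branch derives groups from the channel dimension of the input. A quick worked sketch of that lookup, assuming an NHWC input (a plain list stands in for the result of shape.as_list()):

# NHWC layout: (batch, height, width, channels); channels is the last entry
input_shape = [None, 28, 28, 32]   # e.g. tf_op.inputs[0].shape.as_list()
groups = input_shape[-1]           # DepthwiseConv2dNative: one group per input channel
assert groups == 32
# For an NCHW graph this index would be wrong, which is exactly the caveat
# raised in the comment inside fill_op_info.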
Example #4
    def _link_branch_op_to_multiple_ops(self, branch_op: Op, product_list: list,
                                        is_model_input: bool = False):
        """ Create new product with multiple consumers, linking branch op with children ops"""

        branch_op_product = Product(branch_op.name + '_to_' + 'multiple_ops', branch_op.output_shape)
        branch_op_product.is_model_input = is_model_input
        branch_op_product.producer = branch_op
        branch_op.output = branch_op_product

        # For each product from the original op to one of its children, we must:
        # 1: Add the child op as a consumer of the new branch_op_product
        # 2: Copy the old product's corresponding tf.Tensor into branch_op_product's tensor_dict
        # 3: Add branch_op_product as an input to the child op
        # 4: Remove the old product from the child op's inputs
        # 5: Remove the old product from self._products
        for product in product_list:
            assert len(product.consumers) == 1
            # item 1
            branch_op_product.add_consumer(product.consumers[0])
            # item 2
            assert len(product.tensor_dict.keys()) == 1
            branch_op_product.tensor_dict[product.consumers[0]] = product.tensor_dict[product.consumers[0]]
            # items 3 and 4
            # replace the old product with the new branch product, in the same index as the old product
            index = product.consumers[0].inputs.index(product)
            product.consumers[0].inputs[index] = branch_op_product
            # item 5
            del self._products[product.name]

        self._products[branch_op_product.name] = branch_op_product
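Steps 3 and 4 happen in a single move: the old product is replaced in place by index assignment, which preserves the child's input ordering. A minimal sketch of that replacement, reusing the hypothetical stub Op and Product classes from the sketch under Example #1:

child = Op('relu1')
old_product = Product('conv1_to_relu1', (1, 28, 28, 32))
old_product.add_consumer(child)
child.add_input(old_product)

branch_product = Product('branch_0_to_multiple_ops', (1, 28, 28, 32))
branch_product.add_consumer(child)

# replace in place so the child's input order is unchanged
index = child.inputs.index(old_product)
child.inputs[index] = branch_product
assert child.inputs == [branch_product]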
Example #5
def _reorder_multi_input_op_products(op: Op):
    """
    Ops with multiple input products need to have the input products arranged in the same order as in the tf graph,
    so mask propagation will work correctly.
    """

    if op.type in ['Add', 'AddN', 'ConcatV2', 'Merge', 'Mul']:
        # When looking at the op's inputs in the tf graph, there may be inputs that were not seen during DFS.
        # Handle this by first creating a product list as long as the op's tf input list, filling in the
        # entries for products we have seen, and then dropping the entries left unfilled.
        old_products = op.get_input_products()
        tf_op_input_list = list(op.get_module().inputs)

        new_product_list = [None] * len(tf_op_input_list)
        for product in old_products:
            index = tf_op_input_list.index(product.tensor_dict[op])
            new_product_list[index] = product
        op.inputs = [i for i in new_product_list if i]
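The reordering itself is an index lookup from each product's tensor into the tf op's input list. A distilled sketch with plain strings standing in for tensors and products (all names hypothetical):

tf_op_input_list = ['t_a', 't_b', 't_c']          # input order in the tf graph
seen_products = [('p_c', 't_c'), ('p_a', 't_a')]  # (product, its tensor) in DFS discovery order

new_product_list = [None] * len(tf_op_input_list)
for product, tensor in seen_products:
    new_product_list[tf_op_input_list.index(tensor)] = product

# inputs never seen during DFS ('t_b' here) leave a None hole, which is filtered out
ordered = [p for p in new_product_list if p]
assert ordered == ['p_a', 'p_c']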
Example #6
        def create_conv2d_dense_type_params(my_op: Op):
            """ Create products for conv2d, dense, depthwise conv2d, and similar """
            tf_op = my_op.get_module()

            weight_op = WeightTensorUtils.get_read_op(tf_op)
            create_and_connect_product('kernel', weight_op.outputs[0].shape, my_op, weight_op.outputs[0])

            if not BiasUtils.is_bias_none(tf_op):
                bias_op = BiasUtils.get_bias_read_op(tf_op)
                create_and_connect_product('bias', bias_op.outputs[0].shape, my_op, bias_op.outputs[0])
Example #7
    def _create_branch_op(self, output_shape: tf.TensorShape):
        """ Create a new branch op in self._ops """

        op = Op(name='branch_' + str(self._branch_count),
                dotted_name='branch_' + str(self._branch_count),
                output_shape=output_shape,
                is_anonymous=True,
                op_type='branch',
                pattern_type=None,
                internal_ops=None)
        self._ops[op.name] = op
        self._branch_count += 1
        return op
Example #8
    def _create_op_if_not_exists(self, current_op_info: ModuleIdentifierOpInfo):
        """ Create new op if it does not yet exist """

        if current_op_info.module_name not in self._ops:
            op = Op(name=current_op_info.module_name,
                    dotted_name=current_op_info.module_name,
                    output_shape=None,
                    is_anonymous=False,
                    op_type=current_op_info.op_type,
                    pattern_type=current_op_info.pattern_type,
                    internal_ops=current_op_info.internal_ops)
            fill_op_info(op, current_op_info)
            self._ops[current_op_info.module_name] = op
            logger.debug("Created new op: %s ", current_op_info.module_name)
        else:
            logger.debug("Op %s already exists", current_op_info.module_name)
Example #9
    def _detach_op_from_inputs(self, op: Op):
        """
        Detach op from its parent operations.
        :param op: Op to detach
        """
        tf_ops_to_detach = []
        input_products = op.get_input_products()
        for product in input_products:
            tensor = product.tensor_dict.get(op, None)
            if tensor is not None:
                for consumer in tensor.consumers():
                    corresponding_op = self._conn_graph.get_op_from_module_name(consumer.name)
                    if corresponding_op == op:
                        tf_ops_to_detach.append(consumer)
        graph_editor.detach_inputs(tf_ops_to_detach)
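graph_editor here is TF 1.x's tf.contrib.graph_editor, whose detach_inputs severs the incoming edges of the given ops and reroutes them to freshly created placeholders. A minimal sketch of the effect (TF 1.x only; the toy graph is illustrative):

import tensorflow as tf
from tensorflow.contrib import graph_editor

graph = tf.Graph()
with graph.as_default():
    a = tf.placeholder(tf.float32, shape=(None, 4), name='a')
    b = tf.nn.relu(a, name='b')

# b's incoming edge from 'a' is rerouted to a new placeholder, leaving 'a' dangling
graph_editor.detach_inputs([b.op])
assert b.op.inputs[0].op is not a.op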
Example #10
        def create_batchnorm_params(my_op: Op):
            """ Create products for fusedbatchnorm """
            tf_op = my_op.get_module()

            beta_tensor = BNUtils.get_beta_read_var_op_tensor(tf_op)
            create_and_connect_product('beta', beta_tensor.shape, my_op, beta_tensor)

            gamma_tensor = BNUtils.get_gamma_read_var_op_tensor(tf_op)
            create_and_connect_product('gamma', gamma_tensor.shape, my_op, gamma_tensor)

            moving_mean_tensor = BNUtils.get_moving_mean_read_var_op_tensor(tf_op)
            create_and_connect_product('moving_mean', moving_mean_tensor.shape, my_op,
                                       moving_mean_tensor)

            moving_variance_tensor = BNUtils.get_moving_variance_read_var_op_tensor(tf_op)
            create_and_connect_product('moving_variance', moving_variance_tensor.shape, my_op,
                                       moving_variance_tensor)
Example #11
    def _process_starting_ops(self, starting_op_names):
        """ Given name of the starting op, create the op in self._ops and add its children to the queue """

        for start_op_name in starting_op_names:
            starting_op = self._graph.get_operation_by_name(start_op_name)
            starting_op_info = self._module_identifier.get_op_info(starting_op)
            op = Op(name=starting_op_info.module_name,
                    dotted_name=starting_op_info.module_name,
                    output_shape=None,
                    is_anonymous=False,
                    op_type=starting_op_info.op_type,
                    pattern_type=starting_op_info.pattern_type,
                    internal_ops=starting_op_info.internal_ops)
            fill_op_info(op, starting_op_info)
            self._ops[starting_op_info.module_name] = op
            self._add_children_ops_to_op_queue(starting_op)
            self.starting_ops.append(op)
Example #12
def _fill_flatten_shape_if_needed(op: Op):
    """
    Tensorflow flatten doesn't know its output size.  This poses a problem for Mask Propagator, so we try
    to deduce the size ourselves by looking at the input and multiplying all dimensions together.
    To ensure this is only done on flatten and not another reshape, check if the last dimension is unknown.
    """

    # If flatten op is last, it will have no output and thus no output shape
    if op.type == "Flatten" and op.output_shape:
        dims = op.output_shape.as_list()
        if dims and dims[-1] is None:
            output_size = 1
            input_shape = op.inputs[0].shape.as_list()
            for dim in input_shape:
                if dim is not None:
                    output_size *= dim
            new_output_shape = tf.TensorShape([tf.Dimension(None), tf.Dimension(output_size)])
            op.output_shape = new_output_shape
            op.output.shape = new_output_shape
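The deduction just multiplies the known input dimensions together. A worked sketch, assuming a flatten whose input shape is (None, 7, 7, 64):

input_shape = [None, 7, 7, 64]   # op.inputs[0].shape.as_list()
output_size = 1
for dim in input_shape:
    if dim is not None:          # skip the unknown batch dimension
        output_size *= dim
assert output_size == 7 * 7 * 64 == 3136
# the deduced output shape becomes (None, 3136)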
Example #13
    def _get_input_tensors_for_winnowed_op(self, op: Op) -> List[tf.Tensor]:
        """
        Get all of the input tensors to be used when winnowing the op.  If the parent of the op to be reduced is
        also an op that has been reduced, the input tensor will be from the reduced parent op.
        If needed, a downsample or upsample op will be attached to the previous op's output before feeding into the
        winnowed op; if this is the case, input tensors will be the outputs of the downsample or upsample op.
        :param op: Unwinnowed op whose parent tensors will be taken as input tensors for the winnowed version.
        :return: List of input tensors to op
        """
        input_tensors = []
        input_products = op.get_input_products()
        if op.type in ['Add', 'AddN', 'AddV2', 'ConcatV2']:
            assert len(input_products) > 1
        else:
            # if op is not of type add or concat or similar, we only expect to have one incoming tensor
            assert len(input_products) == 1

        for input_product in input_products:
            parent_op = input_product.producer

            if parent_op.type == 'branch':
                # if parent op is a branch op, there could be multiple output tensors coming from the parent op
                # need to find the correct index in parent op's outputs list
                branch_op = parent_op
                parent_op = branch_op.inputs[0].producer
                input_tensor = input_product.tensor_dict[op]

                # loop through each output tensor in parent op's output node to see if it matches input_tensor
                for idx, tensor in enumerate(parent_op.output_op_node.outputs):
                    if input_tensor == tensor:
                        if parent_op in self._reduced_op_info:
                            # parent op has a reduced version of itself created already
                            # need to select the output tensor from the reduced op, not the original
                            input_tensor = self._reduced_op_info[parent_op].output_op_node.outputs[idx]
            else:
                # make sure incoming tensor only goes to one op (the current op)
                assert len(input_product.tensor_dict.keys()) == 1
                input_tensor = input_product.tensor_dict[op]
                if parent_op in self._reduced_op_info:
                    # parent op has a reduced version of itself created already
                    # need to select the output tensor from the reduced op, not the original
                    # add output tensor from the reduced op corresponding to the parent op
                    input_tensor = self._reduced_op_info[parent_op].output_op_node.outputs[0]

            # now check the masks of the parent op and the current op to see if a downsample or upsample layer
            # needs to be added
            # First, need to find the correct indices for which masks to use
            # If parent op is of type skip, keep going upwards until we find a non-skip op and use its output mask
            while OpConnectivity.get_op_connectivity(ModelApi.tensorflow,
                                                     parent_op.type) == ConnectivityType.skip:
                parent_op = parent_op.inputs[0].producer
            parent_mask = self._op_to_mask_dict[parent_op]
            parent_output_mask = parent_mask.output_channel_masks[0]
            child_mask = self._op_to_mask_dict[op]
            child_input_mask_index = input_products.index(input_product)
            child_input_mask = child_mask.input_channel_masks[child_input_mask_index]
            input_tensor = _insert_downsample_or_upsample_ops_if_needed(input_tensor, parent_output_mask,
                                                                        child_input_mask)

            input_tensors.append(input_tensor)

        assert len(input_tensors) == len(input_products)
        return input_tensors
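_insert_downsample_or_upsample_ops_if_needed is not shown in these examples. The following is only a hypothetical sketch of the mask comparison it performs, under the assumption that masks are per-channel 0/1 lists and that downsampling selects the surviving channels with a gather:

import tensorflow as tf

def insert_downsample_if_needed(input_tensor, parent_output_mask, child_input_mask):
    """Hypothetical sketch; masks are 0/1 lists over the original channel axis."""
    if parent_output_mask == child_input_mask:
        return input_tensor  # channel sets already agree; no adapter op needed
    kept_parent = [i for i, m in enumerate(parent_output_mask) if m]
    kept_child = [i for i, m in enumerate(child_input_mask) if m]
    if set(kept_child) <= set(kept_parent):
        # the child consumes a subset of the parent's surviving channels:
        # gather those positions out of the parent's (already reduced) output
        indices = [kept_parent.index(c) for c in kept_child]
        return tf.gather(input_tensor, indices, axis=-1)
    # the child expects channels the parent no longer produces; an upsample
    # (zero-filling the missing positions) would be needed, omitted in this sketch
    raise NotImplementedError('upsample path not sketched')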