    def test_multi_output_with_unuse_model(self):
        """ Test multi-output model with a tuple of tensors as intermediate output, where one tensor in the tuple is unused """
        class MultiOutputWithUnuseModel(torch.nn.Module):
            """
            Model with a tuple of tensors as output, where one output tensor is unused
            """
            def __init__(self):
                super(MultiOutputWithUnuseModel, self).__init__()
                self.layer = test_models.TupleOutputModel()
                self.conv1 = torch.nn.Conv2d(2, 4, kernel_size=3, padding=1)
                self.conv2 = torch.nn.Conv2d(6, 4, kernel_size=3, padding=1)

            def forward(self, *inputs):
                x, _, z = self.layer(inputs[0])
                x1 = self.conv1(x)
                z1 = self.conv2(z)
                return torch.cat([x1, z1], 1)

        inp_data = torch.rand(1, 3, 8, 8)
        model = MultiOutputWithUnuseModel()
        conn_graph = ConnectedGraph(model, (inp_data, ))
        self.assertEqual(6, len(conn_graph.ordered_ops))
        self.assertEqual(
            5,
            len([
                op for op in conn_graph.get_all_ops().keys()
                if 'convolution' in op
            ]))
        self.assertEqual(
            0,
            len([
                op for op in conn_graph.get_all_ops().keys() if 'Tuple' in op
            ]))
        self.assertEqual('cat', conn_graph.ordered_ops[-1].type)

        product_names = conn_graph.get_all_products().keys()
        self.assertEqual(
            0,
            len([product for product in product_names if 'Tuple' in product]))

        expected_products = [
            # layer #1 to conv1,conv2
            'convolution_0_to_convolution_3',
            'convolution_2_to_convolution_4',

            # conv1,conv2 to cat
            'convolution_3_to_cat_5',
            'convolution_4_to_cat_5'
        ]

        products = conn_graph.get_all_products()
        for product_name in product_names:
            if product_name in expected_products:
                product = products[product_name]
                self.assertEqual(product.shape, product.producer.output_shape)
                expected_products.remove(product_name)
        self.assertEqual(0, len(expected_products))
    def test_module_reuse_model(self):
        class ReuseReluLeafModel(torch.nn.Module):
            """ A model with Relu instance used multiple times
            Expected one input of size (1, 64, 8, 8) """
            def __init__(self):
                super(ReuseReluLeafModel, self).__init__()
                self.conv1 = torch.nn.Conv2d(64, 64, kernel_size=3, padding=1)
                self.conv2 = torch.nn.Conv2d(64, 64, kernel_size=3, padding=1)
                self.relu = torch.nn.ReLU(inplace=True)

            def forward(self, *inputs):
                x = self.conv1(inputs[0])
                x = self.relu(x)
                x = self.conv2(x)
                return self.relu(x)

        inp_data = torch.rand(1, 64, 8, 8)
        model = ReuseReluLeafModel()
        conn_graph = ConnectedGraph(model, (inp_data, ))
        self.assertEqual(4, len(conn_graph.ordered_ops))
        self.assertEqual(
            2,
            len([
                op for name, op in conn_graph.get_all_ops().items()
                if 'relu' in name and op.get_module() == model.relu
            ]))

        class ReluModel(torch.nn.Module):
            def __init__(self):
                super(ReluModel, self).__init__()
                self.relu = torch.nn.ReLU(inplace=True)

            def forward(self, *inputs):
                return self.relu(inputs[0])

        class ReuseReluLayerModel(torch.nn.Module):
            """ A model with Relu Layer instance used multiple times
            Expected one input of size (1, 64, 8, 8) """
            def __init__(self):
                super(ReuseReluLayerModel, self).__init__()
                self.conv = torch.nn.Conv2d(64, 64, kernel_size=3, padding=1)
                self.layer = ReluModel()

            def forward(self, *inputs):
                x = self.layer(inputs[0])
                x = self.conv(x)
                return self.layer(x)

        layer_model = ReuseReluLayerModel()
        conn_graph = ConnectedGraph(layer_model, (inp_data, ))
        self.assertEqual(3, len(conn_graph.ordered_ops))
        self.assertEqual(
            2,
            len([
                op for name, op in conn_graph.get_all_ops().items()
                if 'relu' in name and op.get_module() == layer_model.layer.relu
            ]))
    def test_get_all_ops_in_neighborhood(self):
        """ Test that default quantization parameters are set correctly when using json config file """
        model = SingleResidual()
        model.eval()
        input_shapes = (1, 3, 32, 32)

        random_inputs = utils.create_rand_tensors_given_shapes(input_shapes)
        conn_graph = ConnectedGraph(model, random_inputs)
        starting_op = conn_graph.get_all_ops()['convolution_7']
        add_10_op = conn_graph.get_all_ops()['add_10']
        adaptive_avg_pool2d_9_op = conn_graph.get_all_ops()['adaptive_avg_pool2d_9']
        neighborhood = _get_all_ops_in_neighborhood(starting_op, 'output')
        assert len(neighborhood) == 3
        assert starting_op in neighborhood
        assert add_10_op in neighborhood
        assert adaptive_avg_pool2d_9_op in neighborhood
    def test_dropouts(self):
        """ Test building ConnectedGraph on a model with dropouts """
        # pylint: disable=protected-access
        model = test_models.ModelWithDropouts()
        model.eval()
        inp_shape = (1, 3, 32, 32)
        inp_tensor_list = create_rand_tensors_given_shapes(inp_shape)
        conn_graph = ConnectedGraph(model, inp_tensor_list)
        self.assertEqual(9, len(conn_graph.ordered_ops))
        # Split count of 1 due to the reshape having a split
        self.assertEqual(1, conn_graph._split_count)
        # All ops will include 1 inserted split op
        self.assertEqual(10, len(conn_graph.get_all_ops().keys()))
        dropout_1_op = conn_graph.get_all_ops()['dropout_3']
        dropout_2_op = conn_graph.get_all_ops()['feature_dropout_4']
        self.assertEqual(model.dropout1, dropout_1_op.get_module())
        self.assertEqual(model.dropout2, dropout_2_op.get_module())
    def test_concat(self):
        """ Test building ConnectedGraph on a model with concat """
        model = test_models.ConcatModel()
        model.eval()
        inp_shape_1 = (1, 3, 8, 8)
        inp_shape_2 = (1, 3, 8, 8)
        inp_shape_3 = (1, 3, 8, 8)
        inp_tensor_list = create_rand_tensors_given_shapes(
            [inp_shape_1, inp_shape_2, inp_shape_3])
        conn_graph = ConnectedGraph(model, inp_tensor_list)
        concat_op = conn_graph.get_all_ops()['cat_3']
        self.assertEqual(3, len(concat_op.inputs))
        self.assertEqual(14, concat_op.output_shape[1])

    def test_multi_output_model(self):
        """ Test multi-output model with a tuple of tensors as intermediate output """
        model = test_models.MultiOutputModel()
        inp_data = torch.rand(1, 3, 8, 8)
        conn_graph = ConnectedGraph(model, (inp_data, ))
        self.assertEqual(7, len(conn_graph.ordered_ops))
        self.assertEqual(
            6,
            len([
                op for op in conn_graph.get_all_ops().keys()
                if 'convolution' in op
            ]))
        self.assertEqual(
            0,
            len([
                op for op in conn_graph.get_all_ops().keys() if 'Tuple' in op
            ]))
        self.assertEqual(
            0,
            len([
                product for product in conn_graph.get_all_products().keys()
                if 'Tuple' in product
            ]))
        self.assertEqual('cat', conn_graph.ordered_ops[-1].type)
def get_module_act_func_pair(model: torch.nn.Module, model_input: Union[Tuple[torch.Tensor], List[torch.Tensor]]) -> \
        Dict[torch.nn.Module, Union[torch.nn.Module, None]]:
    """
    For given model, returns dictionary of module to immediate following activation function else maps
    module to None.

    Activation functions should be defined as nn.Modules in model and not as functional in the forward pass.

    :param model: Pytorch model
    :param model_input:  Model input, Can be a list/tuple of input tensor(s)
    :return: Dictionary of module to activation function
    """
    # Keep model in evaluation mode
    model.eval()

    # Create ConnectedGraph
    graph = ConnectedGraph(model, model_input)

    # Maps each module to the activation function immediately following it, or None
    module_act_func_pair = {}

    # Get all the ops
    all_ops = graph.get_all_ops()

    for op in all_ops.values():

        # Get module associated with op
        cur_module = op.get_module()

        if cur_module:
            module_act_func_pair[cur_module] = None

            if op.output:
                assert op.output.consumers, 'op output should have at least one consumer op.'
                # Get the next op
                next_op = op.output.consumers[0]
                # Get module associated with next op
                next_module = next_op.get_module()

                # Get the appropriate activation function
                if isinstance(next_module, ActivationTypes):
                    module_act_func_pair[cur_module] = next_module
                    logger.debug(
                        "Module: %s is followed by activation function: %s",
                        op.dotted_name, next_op.dotted_name)

    return module_act_func_pair
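
# A minimal usage sketch for get_module_act_func_pair (the Sequential model below is a
# hypothetical example, not part of the original source), assuming torch and the function
# above are available in the same scope:
import torch

_example_model = torch.nn.Sequential(
    torch.nn.Conv2d(3, 8, kernel_size=3, padding=1),
    torch.nn.ReLU(),
    torch.nn.Conv2d(8, 8, kernel_size=3, padding=1),
)
_pairs = get_module_act_func_pair(_example_model, (torch.rand(1, 3, 8, 8), ))
# A Conv2d followed by an nn.ReLU maps to that ReLU instance; modules with no trailing
# activation map to None.
for _module, _act in _pairs.items():
    print(type(_module).__name__, '->', type(_act).__name__ if _act is not None else None)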
    def test_single_residual(self):
        """ Test building ConnectedGraph on single residual model """
        # pylint: disable=protected-access
        model = test_models.SingleResidual()
        model.eval()
        inp_shape = (1, 3, 32, 32)
        inp_tensor_list = create_rand_tensors_given_shapes(inp_shape)
        conn_graph = ConnectedGraph(model, inp_tensor_list)
        self.assertEqual(17, len(conn_graph.ordered_ops))
        # Split count of 2 due to residual as well as reshape having a split
        self.assertEqual(2, conn_graph._split_count)
        # All ops will include 2 inserted split ops
        self.assertEqual(19, len(conn_graph.get_all_ops().keys()))
        input_ops = get_all_input_ops(conn_graph)
        self.assertEqual(1, len(input_ops))
        self.assertEqual(model.conv1, input_ops[0].get_module())
        output_ops = get_all_output_ops(conn_graph)
        self.assertEqual(1, len(output_ops))
        self.assertEqual(model.fc, output_ops[0].get_module())
    def test_submodules_with_sequence_and_module_list(self):
        """ Test building ConnectedGraph on a model with sequence and module list """
        class ModuleListAndSequentialModel(torch.nn.Module):
            def __init__(self):
                super(ModuleListAndSequentialModel, self).__init__()
                self.mod_list = torch.nn.ModuleList([
                    torch.nn.Sequential(
                        test_models.BasicConv2d(kernel_size=3),
                        test_models.BasicConv2d(kernel_size=3)),
                    torch.nn.Sequential(
                        torch.nn.Sequential(
                            test_models.BasicConv2d(kernel_size=3),
                            test_models.BasicConv2d(kernel_size=3)), ),
                    torch.nn.ModuleList([
                        torch.nn.ModuleList(
                            [test_models.BasicConv2d(kernel_size=3)])
                    ]),
                    test_models.ModuleListModel()
                ])

            def forward(self, *inputs):
                s1 = self.mod_list[0](inputs[0])
                s2 = self.mod_list[1](inputs[0])
                m1 = self.mod_list[2][0][0](inputs[0])
                m2 = self.mod_list[3](inputs[1])
                return s1, s2, m1, m2

        inp_data_1 = torch.rand(1, 64, 8, 8)
        inp_data_2 = torch.rand(1, 3, 8, 8)
        conn_graph = ConnectedGraph(ModuleListAndSequentialModel(),
                                    (inp_data_1, inp_data_2))
        self.assertEqual(30, len(conn_graph.ordered_ops))
        self.assertEqual(
            0,
            len([
                op for op in conn_graph.get_all_ops().keys() if 'Tuple' in op
            ]))
def get_ops_with_missing_modules(
        model: torch.nn.Module, model_input: Union[torch.Tensor,
                                                   Tuple]) -> List[str]:
    """
    Utility function to ensure that all connected graph ops of a certain type have associated modules
    :param model: Pytorch model to create connected graph from
    :param model_input: Example input to model.  Can be a single tensor or a list/tuple of input tensors
    :return: List of op names with missing modules
    """
    try:
        conn_graph = ConnectedGraph(model, model_input)
    except Exception:
        logger.error(
            'A connected graph failed to be built. This may prevent AIMET features from '
            'running on the model. Please address the errors shown.')
        raise AssertionError

    missing_modules = []
    for op_name, op in conn_graph.get_all_ops().items():
        if not op.get_module() and op.type not in ConnectedGraph.functional_ops:
            missing_modules.append(op_name)

    return missing_modules
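
# A brief usage sketch (hypothetical model; assumes torch is imported): ops that should map
# to torch.nn modules but do not, and whose type is not in ConnectedGraph.functional_ops,
# are reported by name.
import torch

_example_model = torch.nn.Sequential(
    torch.nn.Conv2d(3, 8, kernel_size=3, padding=1),
    torch.nn.ReLU(),
)
_missing = get_ops_with_missing_modules(_example_model, (torch.rand(1, 3, 16, 16), ))
print(_missing)  # expected to be empty for a model built purely from nn.Modules
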
class GraphSearchUtils:
    """
    Code to search a model graph to find nodes to use for cross-layer-scaling and high-bias-fold
    """
    def __init__(self, model: torch.nn.Module,
                 input_shapes: Union[Tuple, List[Tuple]]):
        inp_tensor_list = tuple(
            utils.create_rand_tensors_given_shapes(input_shapes))
        self._connected_graph = ConnectedGraph(model, inp_tensor_list)
        self._ordered_module_list = utils.get_ordered_list_of_conv_modules(
            model, inp_tensor_list)

    @staticmethod
    def find_downstream_layer_groups_to_scale(op,
                                              layer_groups,
                                              current_group=None,
                                              visited_nodes=None):
        """
        Recursive function to find cls layer groups downstream from a given op
        :param op: Starting op to search from
        :param layer_groups: Running list of layer groups
        :param current_group: Running current layer group
        :param visited_nodes: Running list of visited nodes (to short-circuit recursion)
        :return: None
        """

        if not visited_nodes:
            visited_nodes = []
        if not current_group:
            current_group = []

        if op in visited_nodes:
            return
        visited_nodes.append(op)
        # print("Visiting node: {}".format(op.dotted_name))

        # If current node is Conv2D, add to the current group
        if op.model_module and isinstance(
                op.model_module.get_module(),
            (torch.nn.Conv2d, torch.nn.ConvTranspose2d)):
            current_group.append(op.model_module.get_module())

        # Terminating condition for current group
        if not op.model_module or not isinstance(
                op.model_module.get_module(),
            (torch.nn.Conv2d, torch.nn.ReLU, torch.nn.PReLU,
             torch.nn.ConvTranspose2d)):
            if (len(current_group) > 1) and (current_group
                                             not in layer_groups):
                layer_groups.append(current_group)
            current_group = []

        if op.output:
            for consumer in op.output.consumers:
                GraphSearchUtils.find_downstream_layer_groups_to_scale(
                    consumer, layer_groups, current_group, visited_nodes)

        # Reached a leaf.. See if the current group has something to grab
        if (len(current_group) > 1) and (current_group not in layer_groups):
            layer_groups.append(current_group)

    @staticmethod
    def convert_layer_group_to_cls_sets(layer_group):
        """
        Helper function to convert a layer group to a list of cls sets
        :param layer_group: Given layer group to convert
        :return: List of cls sets
        """
        cls_sets = []

        prev_layer_to_scale = layer_group.pop(0)
        while layer_group:
            next_layer_to_scale = layer_group.pop(0)

            if next_layer_to_scale.groups > 1:

                if layer_group:
                    next_non_depthwise_conv_layer = layer_group.pop(0)
                    cls_sets.append((prev_layer_to_scale, next_layer_to_scale,
                                     next_non_depthwise_conv_layer))
                    prev_layer_to_scale = next_non_depthwise_conv_layer

            else:
                cls_sets.append((prev_layer_to_scale, next_layer_to_scale))
                prev_layer_to_scale = next_layer_to_scale

        return cls_sets
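
    # Worked illustration (hypothetical layers, not from the original source): for a layer
    # group [conv_a, depthwise_b, conv_c] where depthwise_b has groups > 1, the result is a
    # single CLS set [(conv_a, depthwise_b, conv_c)]; for [conv_a, conv_b, conv_c] made of
    # regular convolutions, the result is [(conv_a, conv_b), (conv_b, conv_c)].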

    def find_layer_groups_to_scale(self) -> List[List[torch.nn.Conv2d]]:
        """
        :return: List of groups of layers. Each group can be independently equalized
        """

        # Find the input node(s) in the graph
        input_nodes = []
        for op in self._connected_graph.get_all_ops().values():
            if op.inputs and op.inputs[0].is_model_input:
                input_nodes.append(op)

        layer_groups = []
        for op in input_nodes:
            self.find_downstream_layer_groups_to_scale(op, layer_groups)

        # Sort the layer groups in order of occurrence in the model
        ordered_layer_groups = []
        for _, module in self._ordered_module_list:
            for layer_group in layer_groups:
                if layer_group[0] is module:
                    ordered_layer_groups.append(layer_group)

        return ordered_layer_groups

    @staticmethod
    def does_module_have_relu_activation(connected_graph: ConnectedGraph,
                                         module: torch.nn.Module) -> bool:
        """
        Finds if a given module has a ReLU activation
        :param connected_graph: Reference to ConnectedGraph instance
        :param module: PyTorch module to find activation for
        :return: True if module has a relu activation
        """

        for op in connected_graph.get_all_ops().values():

            if op.model_module and op.model_module.get_module() is module:
                assert len(op.output.consumers) == 1
                is_relu_activation = isinstance(
                    op.output.consumers[0].model_module.get_module(),
                    (torch.nn.ReLU, torch.nn.PReLU))
                return is_relu_activation

        return False

    def is_relu_activation_present_in_cls_sets(self, cls_sets: List[ClsSet]):
        """
        :param cls_sets: CLS sets to find relu activations in
        :return: List of groups of layers. Each group can be independently equalized
        """

        is_relu_activation_in_cls_sets = []
        for cls_set in cls_sets:

            # We need to check activation functions for all layers but the last one in the set
            # Because we are only interested in checking activation functions between the layers we will scale
            cls_set = cls_set[:-1]

            is_relu_activation_in_cls_set = ()
            for module in cls_set:
                is_relu_activation_in_cls_set += (
                    self.does_module_have_relu_activation(
                        self._connected_graph, module), )

            if len(is_relu_activation_in_cls_set) == 1:
                is_relu_activation_in_cls_set = is_relu_activation_in_cls_set[
                    0]

            is_relu_activation_in_cls_sets.append(
                is_relu_activation_in_cls_set)

        return is_relu_activation_in_cls_sets
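
# A minimal usage sketch for GraphSearchUtils (the model and input shape are hypothetical;
# the layer groups found depend on the actual model topology), assuming torch is imported:
import torch

_example_model = torch.nn.Sequential(
    torch.nn.Conv2d(3, 16, kernel_size=3, padding=1),
    torch.nn.ReLU(),
    torch.nn.Conv2d(16, 16, kernel_size=3, padding=1),
)
_search_utils = GraphSearchUtils(_example_model, (1, 3, 32, 32))
_layer_groups = _search_utils.find_layer_groups_to_scale()
for _group in _layer_groups:
    # Copy the group, since convert_layer_group_to_cls_sets consumes the list it is given
    _cls_sets = GraphSearchUtils.convert_layer_group_to_cls_sets(list(_group))
    _relu_flags = _search_utils.is_relu_activation_present_in_cls_sets(_cls_sets)
    print(_cls_sets, _relu_flags)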
    def test_multi_output_with_shuffled_layers(self):
        """ Test a multiple layer multi-output model with intermediate Tuple Tensors shuffled """
        class MultiOutputShuffledModel(torch.nn.Module):
            """
            Model with Tuple of Tensors as output shuffled between layers
            """
            def __init__(self):
                super(MultiOutputShuffledModel, self).__init__()
                self.layer1 = test_models.ConfigurableTupleOutputModel(
                    channels=(1, 2, 3))
                self.layer2 = test_models.ConfigurableTupleOutputModel(
                    channels=(2, 3, 1))
                self.layer3 = test_models.ConfigurableTupleOutputModel(
                    channels=(3, 1, 2))

            def forward(self, *inputs):
                x1, x2, x3 = self.layer1(inputs[0], inputs[1], inputs[2])
                y2, y3, y1 = self.layer2(x2, x3, x1)
                z3, z1, z2 = self.layer3(y3, y1, y2)
                return torch.cat([z1, z2, z3, x1], 1)

        model = MultiOutputShuffledModel()
        inp_tensor_list = create_rand_tensors_given_shapes([(1, 1, 8, 8),
                                                            (1, 2, 8, 8),
                                                            (1, 3, 8, 8)])
        conn_graph = ConnectedGraph(model, inp_tensor_list)
        self.assertEqual(10, len(conn_graph.ordered_ops))
        self.assertEqual(
            9,
            len([
                op for op in conn_graph.get_all_ops().keys()
                if 'convolution' in op
            ]))
        self.assertEqual(
            0,
            len([
                op for op in conn_graph.get_all_ops().keys() if 'Tuple' in op
            ]))
        self.assertEqual('cat', conn_graph.ordered_ops[-1].type)

        product_names = conn_graph.get_all_products().keys()
        self.assertEqual(
            0,
            len([product for product in product_names if 'Tuple' in product]))

        expected_products = [
            # TODO fix order of products

            # layer #1 to layer #2
            'convolution_0__to__Split_0',
            'convolution_1_to_convolution_3',
            'convolution_2_to_convolution_4',

            # layer #2 to layer #3
            'convolution_3_to_convolution_8',
            'convolution_4_to_convolution_6',
            'convolution_5_to_convolution_7',

            # layer #3, layer#1.conv1 to cat
            'convolution_6_to_cat_9',
            'convolution_7_to_cat_9',
            'convolution_8_to_cat_9'
        ]

        products = conn_graph.get_all_products()
        for product_name in product_names:
            if product_name in expected_products:
                product = products[product_name]
                self.assertEqual(product.shape, product.producer.output_shape)
                expected_products.remove(product_name)
        self.assertEqual(0, len(expected_products))
        split_product = products['Split_0__to__multiple_ops']
        self.assertTrue(conn_graph.get_all_ops()['convolution_5'] in
                        split_product.consumers)
        self.assertTrue(
            conn_graph.get_all_ops()['cat_9'] in split_product.consumers)