Example 1
 def test_infer_max_size(self):
     # The largest intermediate tensor comes immediately after the second conv
     # and has size 327680 = 5 * 32 * 32 * 32 * 2
     self.assertRaisesRegex(RuntimeError,
                            ".*max_size.*",
                            shape.ShapeProperty().infer,
                            self.subgraph_model,
                            max_size=327680 - 1)
     shape.ShapeProperty().infer(self.subgraph_model, max_size=327680)
Example 2
    def test_rewire(self):
        # Splice a 1x1 conv followed by GELU after conv_layer0/avg_pool, binding
        # the GELU's output to the original conv_layer1/relu name, then check
        # the inferred inputs and outputs of the rewired subgraph.
        subgraph_spec = [
            SubgraphNode(op=new_op(op_name="conv_layer1/conv/1",
                                   op_type=OpType.CONV,
                                   op_kwargs={
                                       "features": 64,
                                       "kernel_size": [1, 1]
                                   },
                                   input_names=["conv_layer0/avg_pool"]), ),
            SubgraphNode(op=new_op(op_name="conv_layer1/gelu/1",
                                   op_type=OpType.GELU,
                                   input_names=["conv_layer1/conv/1"]),
                         output_names=["conv_layer1/relu"])
        ]

        graph = replace_subgraph(self.graph, subgraph_spec)
        state = Model(graph,
                      self.constants).init(random.PRNGKey(0),
                                           {"input": jnp.ones((5, 32, 32, 3))})
        subgraph_model = SubgraphModel(graph, self.constants, state,
                                       {"input": jnp.ones(
                                           (5, 32, 32, 3))}, subgraph_spec)
        sp = shape.ShapeProperty().infer(subgraph_model)

        self.assertLen(sp.input_shapes, 1)
        self.assertIn("conv_layer0/avg_pool:0", sp.input_shapes)
        self.assertLen(sp.output_shapes, 2)
        self.assertIn("conv_layer1/gelu/1:0", sp.output_shapes)
        self.assertIn("conv_layer1/relu:0", sp.output_shapes)
Example 3
 def test_abstract_sequential_synthesizer_output_features(self):
   graph, constants, _ = cnn.CifarNet()
    # The symbolic feature spec "S:-1*2" requests twice the input's feature
    # count, so the synthesizer should report output_features_mul == 2 and
    # output_features_div == 1.
    subgraph_spec = [
       SubgraphNode(
           op=new_op(
               op_name="conv_layer1/conv",
               op_type=OpType.CONV,
               op_kwargs={
                   "features": "S:-1*2",
                   "kernel_size": [1, 1]
               },
               input_names=["conv_layer0/avg_pool"]),),
       SubgraphNode(
           op=new_op(
               op_name="conv_layer1/relu",
               op_type=OpType.RELU,
               input_names=["conv_layer1/conv"]),
           output_names=["conv_layer1/relu"])
   ]
   subgraph = replace_subgraph(graph, subgraph_spec)
   subgraph_model = SubgraphModel(subgraph, constants, None,
                                  {"input": jnp.zeros((5, 32, 32, 10))},
                                  subgraph_spec)
   sp = shape.ShapeProperty().infer(subgraph_model)
   syn = TestSequentialSynthesizer([(subgraph_model, [sp])], 0)
   self.assertEqual(syn.output_features_mul, 2)
   self.assertEqual(syn.output_features_div, 1)
Example 4
 def test_synthesizer_easy_one(self):
     """Replacing [conv3x3(features = 64)]."""
     subg = [subgraph.SubgraphNode(op=o) for o in self.graph.ops[4:5]]
     subg[-1].output_names = self.graph.ops[5].input_names
     subgraph_model = SubgraphModel(self.graph, self.constants, self.state,
                                    self.input, subg)
     sp = shape.ShapeProperty().infer(subgraph_model,
                                      max_size=self.max_size)
     dp = depth.DepthProperty().infer(subgraph_model)
     self._synthesize(subgraph_model, [sp, dp])
Example 5
 def test_synthesizer_two(self):
     """Replacing [conv3x3(features = 64), ReLU, avgpool2x2(strides=2x2)]."""
     subg = [subgraph.SubgraphNode(op=o) for o in self.graph.ops[4:7]]
     subg[-1].output_names = self.graph.ops[7].input_names
     subgraph_model = SubgraphModel(self.graph, self.constants, self.state,
                                    self.input, subg)
     sp = shape.ShapeProperty().infer(subgraph_model,
                                      max_size=self.max_size)
     lp = linear.LinopProperty().infer(subgraph_model)
     self._synthesize(subgraph_model, [sp, lp])
Example 6
    def test_infer_intermediates(self):
        # With intermediates=True, an output shape is recorded for every op in
        # the graph, not just for the subgraph outputs.
        sp = shape.ShapeProperty().infer(self.subgraph_model,
                                         intermediates=True)
        self.assertLen(sp.input_shapes, 1)
        self.assertIn("input", sp.input_shapes)
        self.assertEqual(sp.input_shapes["input"], (5, 32, 32, 3))

        self.assertLen(sp.output_shapes,
                       len(self.subgraph_model.subg_model.graph.ops))
        self.assertIn("fc/logits", sp.output_shapes)
        self.assertEqual(sp.output_shapes["fc/logits"], (5, 10))
Example 7
    def setUp(self):
        super().setUp()

        self.graph, self.constants, _ = cnn.CifarNet()
        state = Model(self.graph,
                      self.constants).init(random.PRNGKey(0),
                                           {"input": jnp.ones((5, 32, 32, 3))})
        self.subgraph_model = SubgraphModel(
            self.graph, self.constants, state,
            {"input": jnp.ones((5, 32, 32, 3))})
        self.sp = shape.ShapeProperty().infer(self.subgraph_model)
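Example 8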
 def test_synthesizer_hard(self):
     if not self.hard:
         return
     subg = [subgraph.SubgraphNode(op=o) for o in self.graph.ops[4:7]]
     subg[-1].output_names = self.graph.ops[7].input_names
     subgraph_model = SubgraphModel(self.graph, self.constants, self.state,
                                    self.input, subg)
     sp = shape.ShapeProperty().infer(subgraph_model,
                                      max_size=self.max_size)
     dp = depth.DepthProperty().infer(subgraph_model)
     lp = linear.LinopProperty().infer(subgraph_model)
     self._synthesize(subgraph_model, [sp, dp, lp])
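Example 9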
    def test_synthesizer_easy_one(self):
        """Replacing [conv3x3(features = 64)].

    Because we do not test linear, this is replaced by dense3x3(features = 64)
    due to the enumeration order.
    """
        subg = [subgraph.SubgraphNode(op=o) for o in self.graph.ops[4:5]]
        subg[-1].output_names = self.graph.ops[5].input_names
        subgraph_model = SubgraphModel(self.graph, self.constants, self.state,
                                       self.input, subg)
        sp = shape.ShapeProperty().infer(subgraph_model,
                                         max_size=self.max_size)
        dp = depth.DepthProperty().infer(subgraph_model)
        self._synthesize(subgraph_model, [sp, dp])
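Example 10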
    def test_synthesizer_two(self):
        """Replacing [conv3x3(features = 64), ReLU, avgpool2x2(strides=2x2)].

    Because we do not check for the depth property, [dense(features = 64),
    avgpool2x2(strides=2x2)] works as well (which is what is synthesized due to
    the enumeration order).
    """
        subg = [subgraph.SubgraphNode(op=o) for o in self.graph.ops[4:7]]
        subg[-1].output_names = self.graph.ops[7].input_names
        subgraph_model = SubgraphModel(self.graph, self.constants, self.state,
                                       self.input, subg)
        sp = shape.ShapeProperty().infer(subgraph_model,
                                         max_size=self.max_size)
        lp = linear.LinopProperty().infer(subgraph_model)
        self._synthesize(subgraph_model, [sp, lp])
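Example 11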
    def test_synthesizer_easy_two(self):
        """Replacing [conv3x3(features = 64)].

    Because we test all three props, this is replaced by conv3x3(features = 64)
    (i.e., an identical op) due to the enumeration order.
    """
        subg = [subgraph.SubgraphNode(op=o) for o in self.graph.ops[4:5]]
        subg[-1].output_names = self.graph.ops[5].input_names
        subgraph_model = SubgraphModel(self.graph, self.constants, self.state,
                                       self.input, subg)
        sp = shape.ShapeProperty().infer(subgraph_model,
                                         max_size=self.max_size)
        dp = depth.DepthProperty().infer(subgraph_model)
        lp = linear.LinopProperty().infer(subgraph_model)
        self._synthesize(subgraph_model, [sp, dp, lp])
Example 12
    def test_synthesizer_resnet_big(self):
        self.graph, self.constants, _ = resnetv1.ResNet18(
            num_classes=10, input_resolution="small")
        self.m = Model(self.graph, self.constants)
        self.input = {"input": jnp.ones((5, 32, 32, 3))}
        self.state = self.m.init(random.PRNGKey(0), self.input)
        self.out = self.m.apply(self.state,
                                self.input)[self.graph.output_names[0]]
        self.max_size = int(10e8)

        subg_ops = self.graph.ops[3:5] + self.graph.ops[8:12]
        subg = [subgraph.SubgraphNode(op=o) for o in subg_ops]
        subg[-1].output_names = [f"{subg[-1].op.name}:0"]
        subgraph_model = SubgraphModel(self.graph, self.constants, self.state,
                                       self.input, subg)
        sp = shape.ShapeProperty().infer(subgraph_model,
                                         max_size=self.max_size)
        lp = linear.LinopProperty().infer(subgraph_model)
        self._synthesize(subgraph_model, [sp, lp])