Example #1
0
    def test_make_parallel(self, output_spec):
        """Check that ``make_parallel()`` returns a ``ParallelEncodingNetwork``
        whose outputs and specs match expectations, and compare it against
        ``NaiveParallelNetwork`` via a small benchmark.

        Args:
            output_spec: optional ``TensorSpec`` for the network output; when
                ``None`` the output shape falls back to the last layer size.
        """
        batch_size = 128
        input_spec = TensorSpec((1, 10, 10), torch.float32)

        conv_layer_params = ((2, 3, 2), (5, 3, 1))
        fc_layer_params = (256, 256)
        network = EncodingNetwork(input_tensor_spec=input_spec,
                                  output_tensor_spec=output_spec,
                                  conv_layer_params=conv_layer_params,
                                  fc_layer_params=fc_layer_params,
                                  activation=torch.relu_,
                                  last_layer_size=1,
                                  last_activation=math_ops.identity,
                                  name='base_encoding_network')
        replicas = 2
        # conv layers + fc layers + the extra last layer
        num_layers = len(conv_layer_params) + len(fc_layer_params) + 1

        def _benchmark(pnet, name):
            """Time 1000 forward passes of ``pnet`` and check output shapes."""
            t0 = time.time()
            outputs = []
            for _ in range(1000):
                embedding = input_spec.randn(outer_dims=(batch_size, ))
                output, _ = pnet(embedding)
                outputs.append(output)
            # Reduce all outputs so the computation cannot be skipped.
            o = math_ops.add_n(outputs).sum()
            # Lazy %-args: the message is only formatted if actually logged.
            logging.info("%s time=%s %s", name, time.time() - t0, float(o))

            # NOTE: ``output`` deliberately refers to the last loop iteration.
            if output_spec is None:
                self.assertEqual(output.shape, (batch_size, replicas, 1))
                self.assertEqual(pnet.output_spec.shape, (replicas, 1))
            else:
                self.assertEqual(output.shape,
                                 (batch_size, replicas, *output_spec.shape))
                self.assertEqual(pnet.output_spec.shape,
                                 (replicas, *output_spec.shape))

        pnet = network.make_parallel(replicas)
        # assertIsInstance reports both the object and the expected type on
        # failure, unlike assertTrue(isinstance(...)).
        self.assertIsInstance(pnet, ParallelEncodingNetwork)
        # Every layer contributes a weight and a bias parameter.
        self.assertEqual(len(list(pnet.parameters())), num_layers * 2)
        _benchmark(pnet, "ParallelEncodingNetwork")
        self.assertEqual(pnet.name, "parallel_" + network.name)

        pnet = alf.networks.network.NaiveParallelNetwork(network, replicas)
        _benchmark(pnet, "NaiveParallelNetwork")

        # test on default network name
        self.assertEqual(pnet.name, "naive_parallel_" + network.name)

        # test on user-defined network name
        pnet = alf.networks.network.NaiveParallelNetwork(network,
                                                         replicas,
                                                         name="pnet")
        self.assertEqual(pnet.name, "pnet")
Example #2
0
    def test_encoding_network_img(self):
        """Feed a zero image through a conv-only encoder and check that the
        flattened output size matches the conv sub-network's output spec."""
        image_spec = TensorSpec((3, 80, 80), torch.float32)
        zero_img = image_spec.zeros(outer_dims=(1, ))
        conv_params = ((16, (5, 3), 2, (1, 1)), (15, 3, (2, 2), 0))
        net = EncodingNetwork(input_tensor_spec=image_spec,
                              conv_layer_params=conv_params)

        # Two conv layers -> two (weight, bias) parameter pairs.
        self.assertLen(list(net.parameters()), 4)

        out, _ = net(zero_img)
        conv_out_spec = net._img_encoding_net.output_spec
        self.assertEqual(out.shape[-1], np.prod(conv_out_spec.shape))
Example #3
0
 def test_encoding_network_input_preprocessor(self):
     """A plain callable (``torch.tanh``) works as the input preprocessor."""
     spec = TensorSpec((1, ))
     batch = common.zero_tensor_from_nested_spec(spec, batch_size=1)
     net = EncodingNetwork(input_tensor_spec=spec,
                           input_preprocessors=torch.tanh)
     out, _ = net(batch)
     self.assertEqual(out.size()[1], 1)
Example #4
0
    def test_encoding_network_nonimg(self, last_layer_size, last_activation,
                                     output_tensor_spec):
        """``last_layer_size`` and ``last_activation`` must be provided (or
        omitted) together; verify construction, parameter count, last-layer
        activation and output shape for the valid combinations."""
        input_spec = TensorSpec((100, ), torch.float32)
        embedding = input_spec.zeros(outer_dims=(1, ))

        # True when exactly one of the two "last layer" options was given.
        mismatched = (last_layer_size is None) != (last_activation is None)

        if mismatched:
            with self.assertRaises(AssertionError):
                EncodingNetwork(
                    input_tensor_spec=input_spec,
                    output_tensor_spec=output_tensor_spec,
                    fc_layer_params=(30, 40, 50),
                    activation=torch.tanh,
                    last_layer_size=last_layer_size,
                    last_activation=last_activation)
            return

        network = EncodingNetwork(input_tensor_spec=input_spec,
                                  output_tensor_spec=output_tensor_spec,
                                  fc_layer_params=(30, 40, 50),
                                  activation=torch.tanh,
                                  last_layer_size=last_layer_size,
                                  last_activation=last_activation)

        # Three fc layers, plus one extra when a last layer is requested;
        # each layer owns a weight and a bias.
        expected_layers = 3 if last_layer_size is None else 4
        self.assertLen(list(network.parameters()), expected_layers * 2)

        # Without a dedicated last layer, the final fc layer keeps the
        # common activation; otherwise it uses ``last_activation``.
        expected_act = (torch.tanh
                        if last_activation is None else last_activation)
        self.assertEqual(network._fc_layers[-1]._activation, expected_act)

        output, _ = network(embedding)

        if output_tensor_spec is None:
            expected_width = (50
                              if last_layer_size is None else last_layer_size)
            self.assertEqual(output.size()[1], expected_width)
            self.assertEqual(network.output_spec.shape,
                             tuple(output.size()[1:]))
        else:
            self.assertEqual(tuple(output.size()[1:]),
                             output_tensor_spec.shape)
            self.assertEqual(network.output_spec.shape,
                             output_tensor_spec.shape)
Example #5
0
    def test_encoding_network_side_effects(self):
        """Constructing an ``EncodingNetwork`` must not mutate the caller's
        ``fc_layer_params``, and a mutable list argument is rejected."""
        input_spec = TensorSpec((100, ), torch.float32)

        # A list is mutable and therefore not accepted as layer params.
        fc_layer_params_list = [20, 10]
        self.assertRaises(
            AssertionError,
            EncodingNetwork,
            input_tensor_spec=input_spec,
            fc_layer_params=fc_layer_params_list,
        )

        fc_layer_params = (20, 10)
        enc_net = EncodingNetwork(input_tensor_spec=input_spec,
                                  fc_layer_params=fc_layer_params,
                                  last_layer_size=3,
                                  last_activation=torch.relu)

        # assertEqual reports both values on failure, unlike
        # assertTrue(a == b) which only reports "False is not true".
        self.assertEqual(fc_layer_params, (20, 10))

        # A copy must have the same number of parameters as the original.
        target_net = enc_net.copy()
        self.assertEqual(len(list(target_net.parameters())),
                         len(list(enc_net.parameters())))
Example #6
0
    def test_parallel_network_output_size(self, replicas):
        """``make_parallel()`` and ``NaiveParallelNetwork`` must produce the
        same output shape, for both shared and per-replica inputs.

        Args:
            replicas: number of network replicas to create.
        """
        batch_size = 128
        input_spec = TensorSpec((100, ), torch.float32)

        # a dummy encoding network which outputs the input unchanged
        network = EncodingNetwork(input_tensor_spec=input_spec)

        pnet = network.make_parallel(replicas)
        nnet = alf.networks.network.NaiveParallelNetwork(network, replicas)

        def _check_output_size(embedding):
            """Both parallel variants agree with each other and the spec."""
            p_output, _ = pnet(embedding)
            n_output, _ = nnet(embedding)
            # assertEqual reports both shapes on failure, unlike
            # assertTrue(a == b).
            self.assertEqual(p_output.shape, n_output.shape)
            # Use the public ``output_spec`` property instead of the
            # private ``_output_spec`` attribute.
            self.assertEqual(p_output.shape[1:], pnet.output_spec.shape)

        # the case with shared inputs
        embedding = input_spec.randn(outer_dims=(batch_size, ))
        _check_output_size(embedding)

        # the case with non-shared inputs
        embedding = input_spec.randn(outer_dims=(batch_size, replicas))
        _check_output_size(embedding)
Example #7
0
    def test_make_parallel_warning_on_using_naive_parallel(self):
        """When ``input_preprocessors`` is present, ``make_parallel()`` falls
        back to ``NaiveParallelNetwork`` and logs a warning saying so."""
        input_spec = TensorSpec((256, ))
        fc_layer_params = (32, 32)

        pre_encoding_net = EncodingNetwork(input_tensor_spec=input_spec,
                                           fc_layer_params=fc_layer_params)

        network = EncodingNetwork(input_tensor_spec=input_spec,
                                  fc_layer_params=fc_layer_params,
                                  input_preprocessors=pre_encoding_net)

        replicas = 2

        # Create a parallel network via the ``make_parallel()`` interface.
        # As now ``input_preprocessors`` is not supported in
        # ``ParallelEncodingNetwork``, ``make_parallel()`` will return an
        # instance of the ``NaiveParallelNetwork``
        expected_warning_message = ("``NaiveParallelNetwork`` is used by "
                                    "``make_parallel()`` !")
        with self.assertLogs() as ctx:
            network.make_parallel(replicas)
            # Use a unittest assertion instead of a bare ``assert``, which
            # is silently stripped when Python runs with -O.
            self.assertIn(expected_warning_message, str(ctx.records[0]))
Example #8
0
    def test_encoding_network_preprocessing_combiner(self):
        """A nested input is averaged by ``NestSum`` into a single tensor
        before being fed to the conv encoder."""
        nested_spec = dict(a=TensorSpec((3, 80, 80)),
                           b=[TensorSpec((80, 80)),
                              TensorSpec(())])
        zero_inputs = common.zero_tensor_from_nested_spec(nested_spec,
                                                          batch_size=1)
        net = EncodingNetwork(input_tensor_spec=nested_spec,
                              preprocessing_combiner=NestSum(average=True),
                              conv_layer_params=((1, 2, 2, 0), ))

        # After combining, the network sees one (3, 80, 80) tensor.
        self.assertEqual(net._processed_input_tensor_spec,
                         TensorSpec((3, 80, 80)))

        out, _ = net(zero_inputs)
        self.assertTensorEqual(out, torch.zeros((40 * 40, )))
Example #9
0
 def test_empty_layers(self):
     """A network built with no layer params owns no parameters, no fc
     layers, and no image encoding sub-network."""
     input_spec = TensorSpec((3, ), torch.float32)
     network = EncodingNetwork(input_spec)
     self.assertEmpty(list(network.parameters()))
     self.assertEmpty(network._fc_layers)
     # assertIsNone gives a clearer failure message than
     # assertTrue(... is None).
     self.assertIsNone(network._img_encoding_net)