Example #1
    def test_fprop(self):
        # Build a small net around the Conv2D layer and check the forward
        # pass against hand-computed activations.
        conv_layer = layers.Conv2D(kernel=self.conv_W, bias=self.conv_b,
                                   strides=(1, 1),
                                   padding=PaddingMode.valid,
                                   data_format="channels_last",
                                   conv_mxts_mode=ConvMxtsMode.Linear)
        self.create_small_net_with_conv_layer(conv_layer,
                                              outputs_per_channel=9)

        func = compile_func([self.input_layer.get_activation_vars()],
                            self.conv_layer.get_activation_vars())
        np.testing.assert_almost_equal(
            func(self.inp),
            np.array([[[[439, 467, 495],
                        [551, 579, 607],
                        [663, 691, 719]],
                       [[-439, -467, -495],
                        [-551, -579, -607],
                        [-663, -691, -719]]],
                      [[[1335, 1363, 1391],
                        [1447, 1475, 1503],
                        [1559, 1587, 1615]],
                       [[-1335, -1363, -1391],
                        [-1447, -1475, -1503],
                        [-1559, -1587, -1615]]]]).transpose(0, 2, 3, 1))
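Example #1 runs inside a test fixture, so it depends on self.conv_W, self.conv_b, and the fixture's helper methods. For a standalone reproduction, a minimal sketch is below; the import paths (deeplift.layers, deeplift.layers.convolutional, deeplift.util) are assumptions based on the deeplift test suite and may need adjusting for your installed version.

import numpy as np
from deeplift import layers
from deeplift.layers import ConvMxtsMode
from deeplift.layers.convolutional import PaddingMode
from deeplift.util import compile_func

# Hypothetical standalone setup: a 2-channel NHWC input through one
# valid-padding convolution (kernel shape: rows x cols x in_ch x out_ch).
input_layer = layers.Input(batch_shape=(None, 4, 4, 2))
conv_layer = layers.Conv2D(
    kernel=np.random.random((2, 2, 2, 2)).astype("float32"),
    bias=np.zeros((2,), dtype="float32"),
    strides=(1, 1),
    padding=PaddingMode.valid,
    data_format="channels_last",
    conv_mxts_mode=ConvMxtsMode.Linear)
conv_layer.set_inputs(input_layer)
conv_layer.build_fwd_pass_vars()

# Compile a forward function and run it on a random batch; valid padding
# with a 2x2 kernel shrinks each spatial dimension by one.
fprop = compile_func([input_layer.get_activation_vars()],
                     conv_layer.get_activation_vars())
out = fprop(np.random.random((3, 4, 4, 2)).astype("float32"))
print(out.shape)  # expected: (3, 3, 3, 2)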
Example #2
    def test_relu_after_conv2d_batchnorm(self):
        # GenomicsDefault should resolve to the Rescale rule when the ReLU
        # follows a conv layer, even with a BatchNormalization in between.
        input_layer = layers.Input(batch_shape=(None, 2, 2, 2))
        conv_layer = layers.Conv2D(
            kernel=np.random.random((2, 2, 2, 2)).astype("float32"),
            bias=np.random.random((2,)).astype("float32"),
            conv_mxts_mode=ConvMxtsMode.Linear,
            strides=(1, 1),
            padding=PaddingMode.valid,
            data_format="channels_last")
        conv_layer.set_inputs(input_layer)
        batch_norm = layers.BatchNormalization(
            gamma=np.array([1.0, 1.0]).astype("float32"),
            beta=np.array([-0.5, 0.5]).astype("float32"),
            axis=-1,
            mean=np.array([-0.5, 0.5]).astype("float32"),
            var=np.array([1.0, 1.0]).astype("float32"),
            epsilon=0.001)
        batch_norm.set_inputs(conv_layer)
        relu_after_bn = layers.ReLU(
            nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
        relu_after_bn.set_inputs(batch_norm)
        relu_after_bn.build_fwd_pass_vars()
        self.assertEqual(relu_after_bn.nonlinear_mxts_mode,
                         NonlinearMxtsMode.Rescale)
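The resolution in Example #2 happens inside build_fwd_pass_vars(): the placeholder GenomicsDefault mode is swapped for the concrete Rescale rule once the ReLU sees a convolutional layer upstream, and the intervening BatchNormalization does not change that. As a side check on the batch-norm arithmetic itself, (x - mean) / sqrt(var + epsilon) * gamma + beta, a small sketch is below, again with assumed import paths.

import numpy as np
from deeplift import layers
from deeplift.util import compile_func

gamma = np.array([1.0, 1.0], dtype="float32")
beta = np.array([-0.5, 0.5], dtype="float32")
mean = np.array([-0.5, 0.5], dtype="float32")
var = np.array([1.0, 1.0], dtype="float32")
epsilon = 0.001

inp = layers.Input(batch_shape=(None, 2, 2, 2))
bn = layers.BatchNormalization(gamma=gamma, beta=beta, axis=-1,
                               mean=mean, var=var, epsilon=epsilon)
bn.set_inputs(inp)
bn.build_fwd_pass_vars()

func = compile_func([inp.get_activation_vars()], bn.get_activation_vars())
x = np.random.random((1, 2, 2, 2)).astype("float32")
# With axis=-1 the per-channel statistics broadcast over the last axis.
expected = (x - mean) / np.sqrt(var + epsilon) * gamma + beta
np.testing.assert_almost_equal(func(x), expected, decimal=5)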
Example #3
    def test_dense_backprop(self):
        # Backprop multipliers through the dense layer down to the input.
        conv_layer = layers.Conv2D(kernel=self.conv_W,
                                   bias=self.conv_b,
                                   strides=(1, 1),
                                   padding=PaddingMode.valid,
                                   data_format="channels_last",
                                   conv_mxts_mode=ConvMxtsMode.Linear)
        self.create_small_net_with_conv_layer(conv_layer,
                                              outputs_per_channel=9)

        # Compute multipliers for task 0 against an all-zeros reference.
        self.dense_layer.update_task_index(task_index=0)
        func = compile_func([
            self.input_layer.get_activation_vars(),
            self.input_layer.get_reference_vars()
        ], self.input_layer.get_mxts())
        np.testing.assert_almost_equal(
            func([self.inp, np.zeros_like(self.inp)]),
            np.array([[[[0, 2, 2, 2], [4, 12, 12, 8], [4, 12, 12, 8],
                        [4, 10, 10, 6]],
                       [[8, 18, 18, 10], [20, 44, 44, 24], [20, 44, 44, 24],
                        [12, 26, 26, 14]]],
                      [[[0, 2, 2, 2], [4, 12, 12, 8], [4, 12, 12, 8],
                        [4, 10, 10, 6]],
                       [[8, 18, 18, 10], [20, 44, 44, 24], [20, 44, 44, 24],
                        [12, 26, 26, 14]]]]).transpose(0, 2, 3, 1))
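The values asserted in Example #3 are raw multipliers (mxts), not contribution scores. DeepLIFT defines a contribution as the multiplier times (activation minus reference), so a hypothetical continuation of the same fixture would convert them like this:

# Sketch only: continues the fixture above (self.inp and func as defined there).
reference = np.zeros_like(self.inp)
multipliers = func([self.inp, reference])
# DeepLIFT contribution scores: multiplier * (input - reference).
contrib_scores = multipliers * (self.inp - reference)
# By the summation-to-delta property, the per-example sums of contributions
# account for the difference-from-reference of the task-0 output.
print(contrib_scores.sum(axis=(1, 2, 3)))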
Example #4
    def test_relu_after_conv2d(self):
        # GenomicsDefault should resolve to the Rescale rule for a ReLU
        # placed directly after a conv layer.
        input_layer = layers.Input(batch_shape=(None, 2, 2, 2))
        conv_layer = layers.Conv2D(
            kernel=np.random.random((2, 2, 2, 2)).astype("float32"),
            bias=np.random.random((2,)).astype("float32"),
            conv_mxts_mode=ConvMxtsMode.Linear,
            strides=(1, 1),
            padding=PaddingMode.valid,
            data_format="channels_last")
        conv_layer.set_inputs(input_layer)
        relu_after_conv = layers.ReLU(
            nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
        relu_after_conv.set_inputs(conv_layer)
        relu_after_conv.build_fwd_pass_vars()
        self.assertEqual(relu_after_conv.nonlinear_mxts_mode,
                         NonlinearMxtsMode.Rescale)
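Examples #2 and #4 only inspect the mode after build_fwd_pass_vars() has run. To see that the substitution happens at build time rather than at construction, a minimal sketch is below; it assumes the constructor stores the mode unchanged until the build step, which the examples imply but do not assert, and it reuses the same assumed import paths as above.

import numpy as np
from deeplift import layers
from deeplift.layers import NonlinearMxtsMode, ConvMxtsMode
from deeplift.layers.convolutional import PaddingMode

input_layer = layers.Input(batch_shape=(None, 2, 2, 2))
conv_layer = layers.Conv2D(
    kernel=np.random.random((2, 2, 2, 2)).astype("float32"),
    bias=np.zeros((2,), dtype="float32"),
    conv_mxts_mode=ConvMxtsMode.Linear,
    strides=(1, 1),
    padding=PaddingMode.valid,
    data_format="channels_last")
conv_layer.set_inputs(input_layer)
relu = layers.ReLU(
    nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
relu.set_inputs(conv_layer)
# Assumption: the placeholder mode survives until the build step...
assert relu.nonlinear_mxts_mode == NonlinearMxtsMode.DeepLIFT_GenomicsDefault
relu.build_fwd_pass_vars()
# ...and is resolved to the concrete Rescale rule during it.
assert relu.nonlinear_mxts_mode == NonlinearMxtsMode.Rescale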