Example #1
def conv1d_conversion(config, name, verbose, nonlinear_mxts_mode,
                      conv_mxts_mode, **kwargs):
    validate_keys(config, [
        KerasKeys.weights, KerasKeys.activation, KerasKeys.filters,
        KerasKeys.kernel_size, KerasKeys.padding, KerasKeys.strides
    ])
    # nonlinear_mxts_mode is only used for the activation conversion
    converted_activation = activation_conversion(
        config=config,
        name=name,
        verbose=verbose,
        nonlinear_mxts_mode=nonlinear_mxts_mode)
    to_return = [
        layers.Conv1D(
            name=("preact_" if len(converted_activation) > 0 else "") + name,
            kernel=config[KerasKeys.weights][0],
            bias=(config[KerasKeys.weights][1]
                  if len(config[KerasKeys.weights]) > 1 else np.zeros(
                      config[KerasKeys.weights][0].shape[-1])),
            stride=config[KerasKeys.strides],
            padding=config[KerasKeys.padding].upper(),
            conv_mxts_mode=conv_mxts_mode)
    ]
    to_return.extend(converted_activation)
    return deeplift.util.connect_list_of_layers(to_return)
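For context, a minimal sketch of how this converter might be invoked. The config contents, the Keras kernel shape (kernel_size, input_channels, filters), and the chosen mode arguments are assumptions based on the keys validated above, not a verbatim API:

import numpy as np

# Hypothetical invocation of the converter above; the config mirrors a saved
# Keras Conv1D layer with kernel_size=4, 2 input channels, and 8 filters.
config = {
    KerasKeys.weights: [np.random.randn(4, 2, 8).astype("float32"),  # kernel
                        np.zeros(8, dtype="float32")],               # bias
    KerasKeys.activation: "relu",
    KerasKeys.filters: 8,
    KerasKeys.kernel_size: 4,
    KerasKeys.padding: "valid",
    KerasKeys.strides: 1,
}
converted_layers = conv1d_conversion(
    config=config,
    name="conv1",
    verbose=True,
    nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault,
    conv_mxts_mode=ConvMxtsMode.Linear)
# With a "relu" activation present, the conv layer is named "preact_conv1"
# and gets connected to the converted ReLU that follows it.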
Example #2
def test_fprop(self):
    conv_layer = layers.Conv1D(kernel=self.conv_W, bias=self.conv_b,
                               stride=1,
                               padding=PaddingMode.valid,
                               conv_mxts_mode="Linear")
    self.create_small_net_with_conv_layer(conv_layer,
                                          outputs_per_channel=3)
    func = compile_func([self.input_layer.get_activation_vars()],
                        self.conv_layer.get_activation_vars())
    # input:
    #      [[[-8,-7,-6,-5],
    #        [-4,-3,-2,-1]],
    #       [[ 0, 1, 2, 3],
    #        [ 4, 5, 6, 7]]]
    # W:
    # [[-2,-1],
    #  [ 0, 1]]
    # first window, first example:  16 + 7 + 0 - 3 = 20; 20 - bias (1.0) = 19
    # first window, second example:  0 - 1 + 0 + 5 =  4;  4 - bias (1.0) =  3
    np.testing.assert_almost_equal(
        func(self.inp),
        np.array([[[ 19, 17, 15],
                   [-19,-17,-15]],
                  [[  3,  1, -1],
                   [ -3, -1,  1]]]).transpose(0,2,1))
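To make the arithmetic in the comments concrete, here is a standalone numpy sketch (independent of the deeplift layers) that re-derives the first row of the expected output as a valid cross-correlation followed by the bias subtraction:

import numpy as np

# First example as (channels, length); kernel taps per input channel.
x = np.array([[-8, -7, -6, -5],
              [-4, -3, -2, -1]], dtype=float)
w = np.array([[-2, -1],
              [ 0,  1]], dtype=float)
bias = 1.0

# Slide the width-2 window across the 3 valid positions and sum the products.
out = np.array([(x[:, i:i+2] * w).sum() for i in range(3)]) - bias
print(out)  # [19. 17. 15.] -- the first row asserted above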
def test_relu_after_conv1d(self):
    input_layer = layers.Input(batch_shape=(None,2,2))
    conv_layer = layers.Conv1D(
                    kernel=np.random.random((2,2,2)).astype("float32"),
                    bias=np.random.random((2,)).astype("float32"),
                    conv_mxts_mode=ConvMxtsMode.Linear,
                    stride=1,
                    padding=PaddingMode.valid)
    conv_layer.set_inputs(input_layer)
    relu_after_conv = layers.ReLU(
        nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
    relu_after_conv.set_inputs(conv_layer)
    relu_after_conv.build_fwd_pass_vars()
    self.assertEqual(relu_after_conv.nonlinear_mxts_mode,
                     NonlinearMxtsMode.Rescale)
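The assertion pins down half of the DeepLIFT_GenomicsDefault dispatch rule: a nonlinearity that sits after a conv layer resolves to Rescale (deeplift's documentation pairs this with RevealCancel after dense layers). A hypothetical helper, for illustration only, spelling the rule out:

# Hypothetical helper -- not part of deeplift -- restating the dispatch rule
# that the ReLU tests in this file exercise.
def resolve_genomics_default(preceding_layer_kind):
    # Rescale after conv layers (asserted above); RevealCancel after dense
    # layers, per the library's documentation.
    return "Rescale" if preceding_layer_kind == "conv" else "RevealCancel"

assert resolve_genomics_default("conv") == "Rescale"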
Example #4
def test_dense_backprop_stride(self):
    conv_layer = layers.Conv1D(kernel=self.conv_W, bias=self.conv_b,
                               stride=2,
                               padding=PaddingMode.valid,
                               conv_mxts_mode="Linear")
    self.create_small_net_with_conv_layer(conv_layer,
                                          outputs_per_channel=2)
    self.dense_layer.update_task_index(task_index=0)
    func = compile_func([self.input_layer.get_activation_vars(),
                         self.input_layer.get_reference_vars()],
                        self.input_layer.get_mxts())
    np.testing.assert_almost_equal(
        func([self.inp, np.zeros_like(self.inp)]),
        np.array([[[-4, -2, -4, -2],
                   [ 0,  2,  0,  2]],
                  [[-4, -2, -4, -2],
                   [ 0,  2,  0,  2]]]).transpose(0,2,1))
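The repeating [-4, -2] pattern in the expected multipliers reflects the geometry: with kernel width 2 and stride 2, the valid windows tile the length-4 input without overlap, so each input position is touched by exactly one kernel tap. A quick standalone check of that coverage:

import numpy as np

# Count how many valid windows cover each input position.
length, width, stride = 4, 2, 2
coverage = np.zeros(length, dtype=int)
for start in range(0, length - width + 1, stride):
    coverage[start:start + width] += 1
print(coverage)  # [1 1 1 1] -- no overlap, hence the per-position pattern repeats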
Example #5
def test_fprop_pos_and_neg_contribs(self):
    conv_layer = layers.Conv1D(kernel=self.conv_W, bias=self.conv_b,
                               stride=1,
                               padding=PaddingMode.valid,
                               conv_mxts_mode="Linear")
    self.create_small_net_with_conv_layer(conv_layer,
                                          outputs_per_channel=3)
    pos_contribs, neg_contribs = self.conv_layer.get_pos_and_neg_contribs()
    func_pos = compile_func([self.input_layer.get_activation_vars(),
                             self.input_layer.get_reference_vars()],
                            pos_contribs)
    func_neg = compile_func([self.input_layer.get_activation_vars(),
                             self.input_layer.get_reference_vars()],
                            neg_contribs)
    # diff from ref:
    #      [[[-9,-8,-7,-6],
    #        [-5,-4,-3,-2]],
    #       [[-1, 0, 1, 2],
    #        [ 3, 4, 5, 6]]]
    # W:
    # [[-2,-1],
    #  [ 0, 1]]
    # first window, first example: pos terms 18 + 8 = 26; neg term -4
    # second filter is the negated kernel: pos term 4; neg terms -18 - 8 = -26
    # (the bias does not enter the pos/neg contributions)
    np.testing.assert_almost_equal(
        func_pos([self.inp, np.ones_like(self.inp)]),
        np.array([[[ 26, 23, 20],
                   [  4,  3,  2]],
                  [[  6,  5,  6],
                   [  0,  1,  4]]]).transpose(0,2,1))
    np.testing.assert_almost_equal(
        func_neg([self.inp, np.ones_like(self.inp)]),
        np.array([[[ -4, -3, -2],
                   [-26,-23,-20]],
                  [[  0, -1, -4],
                   [ -6, -5, -6]]]).transpose(0,2,1))
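Under the Linear conv mode, the positive and negative contributions asserted above are just the elementwise kernel-times-diff-from-reference products, clamped before summing over each window. A standalone numpy sketch for the first example and the first filter:

import numpy as np

# diff-from-reference for the first example, as (channels, length).
diff = np.array([[-9, -8, -7, -6],
                 [-5, -4, -3, -2]], dtype=float)
w = np.array([[-2, -1],
              [ 0,  1]], dtype=float)

# Per-window elementwise products, shape (3 positions, 2 channels, 2 taps).
terms = np.stack([diff[:, i:i+2] * w for i in range(3)])
pos = terms.clip(min=0).sum(axis=(1, 2))
neg = terms.clip(max=0).sum(axis=(1, 2))
print(pos, neg)  # [26. 23. 20.] [-4. -3. -2.] -- the rows asserted above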
def test_relu_after_conv1d_batchnorm(self):
    input_layer = layers.Input(batch_shape=(None,2,2))
    conv_layer = layers.Conv1D(
                    kernel=np.random.random((2,2,2)).astype("float32"),
                    bias=np.random.random((2,)).astype("float32"),
                    conv_mxts_mode=ConvMxtsMode.Linear,
                    stride=1,
                    padding=PaddingMode.valid)
    conv_layer.set_inputs(input_layer)
    batch_norm = layers.BatchNormalization(
                    gamma=np.array([1.0, 1.0]).astype("float32"),
                    beta=np.array([-0.5, 0.5]).astype("float32"),
                    axis=-1,
                    mean=np.array([-0.5, 0.5]).astype("float32"),
                    var=np.array([1.0, 1.0]).astype("float32"),
                    epsilon=0.001)
    batch_norm.set_inputs(conv_layer)
    relu_after_bn = layers.ReLU(
        nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
    relu_after_bn.set_inputs(batch_norm)
    relu_after_bn.build_fwd_pass_vars()
    self.assertEqual(relu_after_bn.nonlinear_mxts_mode,
                     NonlinearMxtsMode.Rescale)
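The BatchNormalization parameters above compose the standard per-channel transform y = gamma * (x - mean) / sqrt(var + epsilon) + beta. A standalone sketch of that arithmetic with the test's values:

import numpy as np

# Batch-norm arithmetic with the gamma/beta/mean/var/epsilon from the test.
gamma, beta = np.array([1.0, 1.0]), np.array([-0.5, 0.5])
mean, var, eps = np.array([-0.5, 0.5]), np.array([1.0, 1.0]), 0.001
x = np.array([[0.0, 1.0]])  # one time step, two channels
y = gamma * (x - mean) / np.sqrt(var + eps) + beta
print(y)  # ~[[0., 1.]]: beta undoes the mean shift, up to the 1/sqrt(1.001) factor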