Example #1
    def prepare_batch_norm_deeplift_model(self, axis):
        # assumes: import numpy as np; from deeplift import blobs;
        # from deeplift.blobs import DenseMxtsMode
        self.input_layer = blobs.Input(num_dims=None, shape=(None,2,2,2))
        # Keras <= 0.3 stored the true std; later versions store the
        # variance, so fold epsilon into the std and zero it out
        if (self.keras_version <= 0.3):
            std = self.std
            epsilon = self.epsilon
        else:
            std = np.sqrt(self.std+self.epsilon)
            epsilon = 0
        self.batch_norm_layer = blobs.BatchNormalization(
                                 gamma=self.gamma,
                                 beta=self.beta,
                                 axis=axis,
                                 mean=self.mean,
                                 std=std,
                                 epsilon=epsilon)
        self.batch_norm_layer.set_inputs(self.input_layer)
        self.flatten_layer = blobs.Flatten()
        self.flatten_layer.set_inputs(self.batch_norm_layer)
        self.dense_layer = blobs.Dense(
                            W=np.ones((1,8)).T,
                            b=np.zeros(1),
                            dense_mxts_mode=DenseMxtsMode.Linear)
        self.dense_layer.set_inputs(self.flatten_layer)
        self.dense_layer.build_fwd_pass_vars()
        self.dense_layer.set_scoring_mode(blobs.ScoringMode.OneAndZeros)
        self.dense_layer.set_active()
        self.dense_layer.update_task_index(0)
        self.input_layer.update_mxts()
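The version branch above exploits a simple identity: a layer that divides by (std + epsilon) reproduces the newer Keras normalization exactly when std is set to sqrt(var + epsilon) and epsilon to 0, so one code path serves both parameterizations (the batchnorm_conversion in Example #6 performs the same folding). A minimal numpy sketch with made-up values; the blob formula here is an assumption inferred from the Keras <= 0.3 branch:

import numpy as np

x, gamma, beta, mean = 3.0, 2.0, 1.0, -1.0
var, eps = 3.99, 0.01

def blob_bn(x, gamma, beta, mean, std, epsilon):
    # assumed blob formula: divide by (std + epsilon)
    return gamma * (x - mean) / (std + epsilon) + beta

keras_new = gamma * (x - mean) / np.sqrt(var + eps) + beta  # Keras > 0.3 math
folded = blob_bn(x, gamma, beta, mean, std=np.sqrt(var + eps), epsilon=0)
assert np.isclose(keras_new, folded)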
Example #2
def batchnorm_conversion(layer, name, mxts_mode):
    # Keras <= 0.3 style: running_std holds an actual standard deviation,
    # so it is passed straight through along with epsilon
    return [
        blobs.BatchNormalization(name=name,
                                 gamma=np.array(layer.gamma.get_value()),
                                 beta=np.array(layer.beta.get_value()),
                                 axis=layer.axis,
                                 mean=np.array(layer.running_mean.get_value()),
                                 std=np.array(layer.running_std.get_value()),
                                 epsilon=layer.epsilon,
                                 input_shape=layer.input_shape[1:])
    ]
Example #3
def batchnorm_conversion(layer, name, verbose, **kwargs):
    # note: in the Keras weight ordering, the variable historically called
    # "running_std" actually stores the variance, hence "running_var" here
    gamma, beta, running_mean, running_var = layer.get_weights()
    return [
        blobs.BatchNormalization(name=name,
                                 verbose=verbose,
                                 gamma=np.array(gamma),
                                 beta=np.array(beta),
                                 axis=layer.axis,
                                 mean=np.array(running_mean),
                                 var=np.array(running_var),
                                 epsilon=layer.epsilon)
    ]
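The comment above flags a real pitfall: the slot Keras historically labeled running_std holds the variance, so dividing by it directly (as if it were a std) gives the wrong normalization. A minimal numpy check with made-up values:

import numpy as np

x, mean = np.array([2.0, -3.0]), 0.5
running_var, eps = np.array([3.99, 3.99]), 0.01  # what get_weights() returns
wrong = (x - mean) / (running_var + eps)         # mistaking variance for std
right = (x - mean) / np.sqrt(running_var + eps)  # correct: std = sqrt(var + eps)
assert not np.allclose(wrong, right)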
Example #4
    def setUp(self):
        self.input_layer = blobs.Input(num_dims=None, shape=(None, 4, 2))
        self.mean = np.array([1, -1])
        self.gamma = np.array([2, -2])
        self.beta = np.array([1, -1])
        self.std = np.array([1.99, 1.99])
        self.epsilon = 0.01
        self.batch_norm_layer = blobs.BatchNormalization(axis=-1,
                                                         gamma=self.gamma,
                                                         beta=self.beta,
                                                         mean=self.mean,
                                                         std=self.std,
                                                         epsilon=self.epsilon)

        self.batch_norm_layer.set_inputs(self.input_layer)
        self.batch_norm_layer.build_fwd_pass_vars()
        self.inp = (np.arange(16).reshape((2, 4, 2)).astype("float32")) - 8.0
        self.ref = np.zeros_like(self.inp) + 1.0
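The constants are chosen so that std + epsilon = 1.99 + 0.01 = 2.0 exactly, which makes the expected forward-pass output easy to verify by hand. A sketch, assuming the blob computes gamma * (x - mean) / (std + epsilon) + beta along the normalized axis:

import numpy as np

inp = np.arange(16).reshape((2, 4, 2)).astype("float32") - 8.0
gamma, beta = np.array([2, -2]), np.array([1, -1])
mean, std, eps = np.array([1, -1]), np.array([1.99, 1.99]), 0.01
expected = gamma * (inp - mean) / (std + eps) + beta  # denominator is exactly 2.0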
Example #5
    def test_relu_after_dense_batchnorm(self):
        input_layer = blobs.Input(num_dims=None, shape=(None, 4))
        dense_layer = blobs.Dense(W=np.random.random((4, 2)),
                                  b=np.random.random((2, )),
                                  dense_mxts_mode=DenseMxtsMode.Linear)
        dense_layer.set_inputs(input_layer)
        batch_norm = blobs.BatchNormalization(gamma=np.array([1.0, 1.0]),
                                              beta=np.array([-0.5, 0.5]),
                                              axis=-1,
                                              mean=np.array([-0.5, 0.5]),
                                              std=np.array([1.0, 1.0]),
                                              epsilon=0.001)
        batch_norm.set_inputs(dense_layer)
        relu_after_bn = blobs.ReLU(
            nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
        relu_after_bn.set_inputs(batch_norm)
        relu_after_bn.build_fwd_pass_vars()
        self.assertEqual(relu_after_bn.nonlinear_mxts_mode,
                         NonlinearMxtsMode.RevealCancel)
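Note that this test asserts mode resolution rather than attribution values: under DeepLIFT_GenomicsDefault, a ReLU whose upstream weighted layer (looking through the batch norm) is Dense resolves to RevealCancel, while Example #7 below shows the same construction with a Conv1D upstream resolving to Rescale.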
Example #6
def batchnorm_conversion(layer, name, verbose, **kwargs):
    import keras
    if (hasattr(keras,'__version__')):
        keras_version = float(keras.__version__[0:3])
    else:
        keras_version = 0.2
    # Keras <= 0.3 stored the true std; newer versions store the variance
    # under the name running_std, so fold epsilon into std and zero it out
    if (keras_version <= 0.3):
        std = np.array(layer.running_std.get_value())
        epsilon = layer.epsilon
    else:
        std = np.sqrt(np.array(layer.running_std.get_value()) + layer.epsilon)
        epsilon = 0
    return [blobs.BatchNormalization(
            name=name,
            verbose=verbose,
            gamma=np.array(layer.gamma.get_value()),
            beta=np.array(layer.beta.get_value()),
            axis=layer.axis,
            mean=np.array(layer.running_mean.get_value()),
            std=std,
            epsilon=epsilon)] 
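A hedged usage sketch of the converter above; the stub classes stand in for a Theano-backed Keras layer and its shared variables, and every name and value below is illustrative rather than part of either library (keras itself must still be importable, since the function reads its version):

import numpy as np

class SharedStub:                 # hypothetical stand-in for a Theano shared variable
    def __init__(self, value):
        self.value = np.asarray(value)
    def get_value(self):
        return self.value

class BatchNormLayerStub:         # hypothetical stand-in for a Keras BatchNormalization layer
    gamma = SharedStub([1.0, 1.0])
    beta = SharedStub([0.0, 0.0])
    running_mean = SharedStub([0.5, -0.5])
    running_std = SharedStub([3.99, 3.99])  # holds the variance in Keras > 0.3
    axis = -1
    epsilon = 0.01

converted_layers = batchnorm_conversion(BatchNormLayerStub(), name="bn1", verbose=False)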
Example #7
    def test_relu_after_conv1d_batchnorm(self):
        input_layer = blobs.Input(num_dims=None, shape=(None, 2, 2))
        conv_layer = blobs.Conv1D(W=np.random.random((2, 2, 2)),
                                  b=np.random.random((2, )),
                                  conv_mxts_mode=ConvMxtsMode.Linear,
                                  stride=1,
                                  border_mode=PaddingMode.valid,
                                  channels_come_last=True)
        conv_layer.set_inputs(input_layer)
        batch_norm = blobs.BatchNormalization(gamma=np.array([1.0, 1.0]),
                                              beta=np.array([-0.5, 0.5]),
                                              axis=-1,
                                              mean=np.array([-0.5, 0.5]),
                                              std=np.array([1.0, 1.0]),
                                              epsilon=0.001)
        batch_norm.set_inputs(conv_layer)
        relu_after_bn = blobs.ReLU(
            nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
        relu_after_bn.set_inputs(batch_norm)
        relu_after_bn.build_fwd_pass_vars()
        self.assertEqual(relu_after_bn.nonlinear_mxts_mode,
                         NonlinearMxtsMode.Rescale)
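For reference, the Rescale rule asserted here defines the ReLU multiplier as the ratio of the output difference-from-reference to the input difference-from-reference (per the DeepLIFT paper). A minimal numpy sketch with made-up values:

import numpy as np

def relu(z):
    return np.maximum(z, 0.0)

x = np.array([2.0, -1.0])    # actual pre-activations
ref = np.array([0.5, 0.5])   # reference pre-activations
multiplier = (relu(x) - relu(ref)) / (x - ref)  # Rescale: delta-out / delta-in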