Example #1
 def test_fprop_pos_and_neg_contribs(self):
     pos_contribs, neg_contribs = self.batch_norm_layer.get_pos_and_neg_contribs()
     func_pos = compile_func([
         self.input_layer.get_activation_vars(),
         self.input_layer.get_reference_vars()
     ], pos_contribs)
     func_neg = compile_func([
         self.input_layer.get_activation_vars(),
         self.input_layer.get_reference_vars()
     ], neg_contribs)
     diff_from_ref = self.inp - self.ref
     pos_answer = (((diff_from_ref*(diff_from_ref>0.0))\
                   *(self.gamma[None,None,:]*(self.gamma>0.0))/
                     (self.std+self.epsilon))
                  +((diff_from_ref*(diff_from_ref<0.0))\
                   *(self.gamma[None,None,:]*(self.gamma<0.0))/
                     (self.std+self.epsilon)))
     neg_answer = (((diff_from_ref*(diff_from_ref<0.0))\
                   *(self.gamma[None,None,:]*(self.gamma>0.0))/
                     (self.std+self.epsilon))
                  +((diff_from_ref*(diff_from_ref>0.0))\
                   *(self.gamma[None,None,:]*(self.gamma<0.0))/
                     (self.std+self.epsilon)))
     np.testing.assert_almost_equal(func_pos(self.inp, self.ref),
                                    pos_answer)
     np.testing.assert_almost_equal(func_neg(self.inp, self.ref),
                                    neg_answer)
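
The decomposition asserted above routes each term by sign: where diff_from_ref and gamma have the same sign their product lands in the positive contribution, otherwise in the negative one, both scaled by 1/(std + epsilon). A minimal scalar sketch of that routing, with made-up numbers (not from the test fixture):

    diff, gamma, std, epsilon = 2.0, -3.0, 1.0, 1e-3
    # positive diff through negative gamma: the product is negative,
    # so it belongs entirely to the negative contribution
    pos = ((diff*(diff > 0))*(gamma*(gamma > 0))
           + (diff*(diff < 0))*(gamma*(gamma < 0)))/(std + epsilon)
    neg = ((diff*(diff < 0))*(gamma*(gamma > 0))
           + (diff*(diff > 0))*(gamma*(gamma < 0)))/(std + epsilon)
    assert pos == 0.0 and neg == diff*gamma/(std + epsilon)
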
Example #2
 def test_fprop_pos_and_neg_contribs(self):
     conv_layer = blobs.Conv1D(W=self.conv_W,
                               b=self.conv_b,
                               stride=1,
                               border_mode=PaddingMode.valid,
                               channels_come_last=False,
                               conv_mxts_mode=ConvMxtsMode.Linear)
     self.create_small_net_with_conv_layer(conv_layer,
                                           outputs_per_channel=3)
     pos_contribs, neg_contribs = self.conv_layer.get_pos_and_neg_contribs()
     func_pos = compile_func([
         self.input_layer.get_activation_vars(),
         self.input_layer.get_reference_vars()
     ], pos_contribs)
     func_neg = compile_func([
         self.input_layer.get_activation_vars(),
         self.input_layer.get_reference_vars()
     ], neg_contribs)
     #diff from ref:
     #      [[[-9,-8,-7,-6],
     #        [-5,-4,-3,-2]],
     #       [[-1, 0, 1, 2],
     #        [ 3, 4, 5, 6]]]
     # W:
     # [-2,-1
     #   0, 1]
     # first window, sample 1: positive products 18+8 = 26;
     # negative products: -4 (the bias does not enter the
     # pos/neg decomposition)
     np.testing.assert_almost_equal(
         func_pos(self.inp, np.ones_like(self.inp)),
         np.array([[[26, 23, 20], [4, 3, 2]], [[6, 5, 6], [0, 1, 4]]]))
     np.testing.assert_almost_equal(
         func_neg(self.inp, np.ones_like(self.inp)),
         np.array([[[-4, -3, -2], [-26, -23, -20]],
                   [[0, -1, -4], [-6, -5, -6]]]))
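
The window arithmetic from the comments above can be checked standalone: for the first valid window of sample 1, positive and negative products are summed separately. A sketch using the diff-from-ref and W values shown in the comments:

    import numpy as np
    window = np.array([-9., -8., -5., -4.])   # [[-9,-8],[-5,-4]] flattened
    weights = np.array([-2., -1., 0., 1.])    # [[-2,-1],[0,1]] flattened
    products = window*weights                 # [18, 8, 0, -4]
    assert products[products > 0].sum() == 26   # pos contrib
    assert products[products < 0].sum() == -4   # neg contrib
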
Example #3
    def set_up_prediction_func_and_deeplift_func(self, out_layer):

        out_layer.set_inputs(self.dense_layer)
        out_layer.build_fwd_pass_vars()
        self.input_layer.reset_mxts_updated()
        out_layer.set_scoring_mode(blobs.ScoringMode.OneAndZeros)
        out_layer.set_active()
        self.input_layer.update_mxts()

        fprop_func = compile_func([self.input_layer.get_activation_vars()],
                                  out_layer.get_activation_vars())
        fprop_results = [list(x) for x in fprop_func(self.inp)]

        bprop_func = compile_func([
            self.input_layer.get_activation_vars(),
            self.input_layer.get_reference_vars()
        ], self.input_layer.get_mxts())
        bprop_results_each_task = []
        for task_idx in range(len(fprop_results[0])):
            out_layer.update_task_index(task_index=task_idx)
            bprop_results_task = [
                list(x) for x in bprop_func(self.inp, np.zeros_like(self.inp))
            ]
            bprop_results_each_task.append(bprop_results_task)

        out_layer.set_inactive()
        return fprop_results, bprop_results_each_task
Example #4
 def setUp(self):
     if not hasattr(keras, '__version__'):
         self.keras_version = 0.2 #0.2 didn't have the __version__ attribute
     else:
         self.keras_version = float(keras.__version__[0:3])
     self.inp = (np.random.randn(10*10*51)
                 .reshape(10,10,51).transpose(0,2,1))
     self.keras_model = keras.models.Sequential()
     conv_layer = keras.layers.convolutional.Convolution1D(
                     nb_filter=2, filter_length=4, subsample_length=2,
                      #input_shape=(51,10) puts the channel
                      #axis last; this works around a bug that
                      #appears to date back to v0.2.0:
                      #https://github.com/fchollet/keras/blob/0.2.0/keras/layers/convolutional.py#L88
                     activation="relu", input_shape=(51,10))
     self.keras_model.add(conv_layer)
     self.keras_model.add(keras.layers.convolutional.MaxPooling1D(
                          pool_length=4, stride=2)) 
     if (self.keras_version > 0.2):
         self.keras_model.add(keras.layers.convolutional.AveragePooling1D(
                          pool_length=4, stride=2))
     else:
         pass #there was no average pooling in 0.2.0 it seems
     self.keras_model.add(keras.layers.core.Flatten())
     self.keras_model.add(keras.layers.core.Dense(output_dim=1))
     self.keras_model.add(keras.layers.core.Activation("sigmoid"))
     self.keras_model.compile(loss="mse", optimizer="sgd")
     
     if (self.keras_version <= 0.3): 
         self.keras_output_fprop_func = compile_func(
                         [self.keras_model.layers[0].input],
                         self.keras_model.layers[-1].get_output(False))
         grad = theano.grad(theano.tensor.sum(
                    self.keras_model.layers[-2].get_output(False)[:,0]),
                    self.keras_model.layers[0].input)
         self.grad_func = theano.function(
                      [self.keras_model.layers[0].input],
                      grad, allow_input_downcast=True)
     else:
         keras_output_fprop_func = compile_func(
             [self.keras_model.layers[0].input,
              keras.backend.learning_phase()],
             self.keras_model.layers[-1].output)
         self.keras_output_fprop_func =\
             lambda x: keras_output_fprop_func(x,False)
         grad = theano.grad(theano.tensor.sum(
                    self.keras_model.layers[-2].output[:,0]),
                    self.keras_model.layers[0].input)
         grad_func = theano.function(
                      [self.keras_model.layers[0].input,
                       keras.backend.learning_phase()],
                      grad, allow_input_downcast=True,
                      on_unused_input='ignore')
         self.grad_func = lambda x: grad_func(x, False)
Example #5
    def setUp(self):
        if not hasattr(keras, '__version__'):
            self.keras_version = 0.2 #0.2 didn't have the __version__ attribute
        else:
            self.keras_version = float(keras.__version__[0:3])
        if (self.keras_version <= 0.2):
            pass #Convolution2D tests require keras > 0.2; leave setUp empty
        else:
            self.inp = (np.random.randn(10*10*51*51)
                        .reshape(10,10,51,51).transpose(0,2,3,1))
            self.keras_model = keras.models.Sequential()
            conv_layer = keras.layers.convolutional.Convolution2D(
                            nb_filter=2, nb_row=4, nb_col=4, subsample=(2,2),
                            activation="relu", input_shape=(51,51,10),
                            dim_ordering='tf')
            self.keras_model.add(conv_layer)
            self.keras_model.add(keras.layers.convolutional.MaxPooling2D(
                                 pool_size=(4,4), strides=(2,2),
                                 dim_ordering='tf')) 
            self.keras_model.add(keras.layers.convolutional.AveragePooling2D(
                                 pool_size=(4,4), strides=(2,2),
                                 dim_ordering='tf')) 
            self.keras_model.add(keras.layers.core.Flatten())
            self.keras_model.add(keras.layers.core.Dense(output_dim=1))
            self.keras_model.add(keras.layers.core.Activation("sigmoid"))
            self.keras_model.compile(loss="mse", optimizer="sgd")

            if (self.keras_version <= 0.3): 
                self.keras_output_fprop_func = compile_func(
                                [self.keras_model.layers[0].input],
                                self.keras_model.layers[-1].get_output(False))
                grad = theano.grad(theano.tensor.sum(
                           self.keras_model.layers[-2].get_output(False)[:,0]),
                           self.keras_model.layers[0].input)
                self.grad_func = theano.function(
                             [self.keras_model.layers[0].input],
                             grad, allow_input_downcast=True,
                             on_unused_input='ignore')
            else:
                keras_output_fprop_func = compile_func(
                    [self.keras_model.layers[0].input,
                     keras.backend.learning_phase()],
                    self.keras_model.layers[-1].output)
                self.keras_output_fprop_func =\
                    lambda x: keras_output_fprop_func(x,False)
                grad = theano.grad(theano.tensor.sum(
                           self.keras_model.layers[-2].output[:,0]),
                           self.keras_model.layers[0].input)
                grad_func = theano.function(
                             [self.keras_model.layers[0].input,
                              keras.backend.learning_phase()],
                             grad, allow_input_downcast=True,
                             on_unused_input='ignore')
                self.grad_func = lambda x: grad_func(x, False)
Example #6
    def test_relu_intgrad(self):
        out_layer = blobs.ReLU(
            nonlinear_mxts_mode=blobs.NonlinearMxtsMode.Gradient)
        out_layer.set_inputs(self.dense_layer)
        out_layer.build_fwd_pass_vars()
        self.input_layer.reset_mxts_updated()
        out_layer.set_scoring_mode(blobs.ScoringMode.OneAndZeros)
        out_layer.set_active()
        self.input_layer.update_mxts()

        fprop_func = compile_func([self.input_layer.get_activation_vars()],
                                  out_layer.get_activation_vars())
        fprop_results = [list(x) for x in fprop_func(self.inp)]

        grad_func_temp = compile_func([
            self.input_layer.get_activation_vars(),
            self.input_layer.get_reference_vars()
        ], self.input_layer.get_mxts())
        grad_func = (
            lambda input_data_list, input_references_list, **kwargs:
            #index [0] retrieves the data for the first (and only)
            #input mode, to be passed into grad_func_temp
            grad_func_temp(input_data_list[0], input_references_list[0]))
        integrated_grads_func = (
            deeplift.util.get_integrated_gradients_function(
                gradient_computation_function=grad_func, num_intervals=10))
        bprop_results_each_task = []
        for task_idx in range(len(fprop_results[0])):
            out_layer.update_task_index(task_index=task_idx)  #set task
            bprop_results_task = [
                list(x) for x in integrated_grads_func(
                    task_idx=None,  #task setting handled manually
                    #in line above
                    input_data_list=np.array([self.inp]),
                    input_references_list=np.array(
                        [0.1 * np.ones_like(self.inp)]),
                    #batch_size and progress_update
                    #are ignored by grad_func
                    batch_size=20,
                    progress_update=10)
            ]
            bprop_results_each_task.append(bprop_results_task)

        out_layer.set_inactive()

        np.testing.assert_almost_equal(
            np.array(bprop_results_each_task[0]),
            np.array([-2.0 / 5.0 * np.array([1.0, 4.0])]))
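
For reference, integrated gradients can be written as a Riemann sum: scale the displacement from the reference by the gradient averaged over points interpolated between reference and input. A self-contained sketch in that spirit (grad, inp, ref are placeholders; the exact discretization used by deeplift.util.get_integrated_gradients_function may differ):

    import numpy as np

    def integrated_gradients(grad, inp, ref, num_intervals=10):
        # average the gradient at interval midpoints between ref and inp
        alphas = (np.arange(num_intervals) + 0.5)/num_intervals
        avg_grad = np.mean([grad(ref + a*(inp - ref)) for a in alphas],
                           axis=0)
        # scale by the total displacement from the reference
        return (inp - ref)*avg_grad
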
Example #7
 def test_fprop(self):
     func = compile_func([self.input_layer.get_activation_vars()],
                         self.batch_norm_layer.get_activation_vars())
     answer = (((self.inp - self.mean[None,None,:])\
               *(self.gamma[None,None,:]/(self.std+self.epsilon)))
               + self.beta)
     np.testing.assert_almost_equal(func(self.inp), answer)
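
The `answer` expression is the usual batch-norm forward formula, with std + epsilon in the denominator. A standalone sketch with made-up per-channel parameters (shapes chosen to mirror the [None,None,:] broadcasting above):

    import numpy as np
    x = np.random.randn(2, 3, 4)          # (batch, length, channels)
    mean, std = np.zeros(4), np.ones(4)
    gamma, beta, epsilon = np.full(4, 2.0), np.full(4, 0.5), 1e-3
    out = ((x - mean[None, None, :])
           *(gamma[None, None, :]/(std + epsilon)) + beta)
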
Example #8
 def test_fprop(self):
     conv_layer = blobs.Conv1D(W=self.conv_W,
                               b=self.conv_b,
                               stride=1,
                               border_mode=PaddingMode.valid,
                               channels_come_last=False,
                               conv_mxts_mode=ConvMxtsMode.Linear)
     self.create_small_net_with_conv_layer(conv_layer,
                                           outputs_per_channel=3)
     func = compile_func([self.input_layer.get_activation_vars()],
                         self.conv_layer.get_activation_vars())
     #input:
     #      [[[-8,-7,-6,-5],
     #        [-4,-3,-2,-1]],
     #       [[ 0, 1, 2, 3],
     #        [ 4, 5, 6, 7]]]
     # W:
     # [-2,-1
     #   0, 1]
     # 16+7+0+-3 = 20 - bias (1.0) = 19
     # 0+-1+0+5 = 4 - bias (1.0) = 3
     np.testing.assert_almost_equal(
         func(self.inp),
         np.array([[[19, 17, 15], [-19, -17, -15]], [[3, 1, -1],
                                                     [-3, -1, 1]]]))
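
The first expected value can be reproduced from the comments: convolve the first valid window of sample 1 with W, then subtract the bias term of 1.0. A standalone check, not part of the test:

    import numpy as np
    window = np.array([-8., -7., -4., -3.])   # [[-8,-7],[-4,-3]] flattened
    weights = np.array([-2., -1., 0., 1.])    # [[-2,-1],[0,1]] flattened
    assert (window*weights).sum() == 20.0         # 16 + 7 + 0 - 3
    assert (window*weights).sum() - 1.0 == 19.0   # matches output[0][0][0]
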
Example #9
 def test_convert_conv1d_model_forward_prop(self):
     deeplift_model = kc.convert_sequential_model(model=self.keras_model)
     deeplift_fprop_func = compile_func(
         [deeplift_model.get_layers()[0].get_activation_vars()],
         deeplift_model.get_layers()[-1].get_activation_vars())
     np.testing.assert_almost_equal(deeplift_fprop_func(self.inp),
                                    self.keras_output_fprop_func(self.inp),
                                    decimal=6)
Example #10
 def test_fprop_diff_from_ref(self):
     func = compile_func([
         self.input_layer.get_activation_vars(),
         self.input_layer.get_reference_vars()
     ], self.batch_norm_layer._get_diff_from_reference_vars())
     answer = ((self.inp - self.ref)\
               *(self.gamma[None,None,:]/(self.std+self.epsilon)))
     np.testing.assert_almost_equal(func(self.inp, self.ref), answer)
Example #11
    def setUp(self):
        if not hasattr(keras, '__version__'):
            self.keras_version = 0.2 #0.2 didn't have the __version__ attribute
        else:
            self.keras_version = float(keras.__version__[0:3])
        self.inp1 = (np.random.randn(10*10*51)
                    .reshape(10,10,51).transpose(0,2,1))
        self.inp2 = (np.random.randn(10*10*51)
                    .reshape(10,10,51).transpose(0,2,1))
        self.run_functional_tests = True
        if (self.keras_version < 1.0):
            self.run_functional_tests = False
            return #skip setup
        inp1 = keras.layers.Input(shape=(51,10), name="inp1")
        inp2 = keras.layers.Input(shape=(51,10), name="inp2")
        conv = keras.layers.convolutional.Convolution1D(
                 nb_filter=2, filter_length=4,
                 subsample_length=2, activation="relu")
        maxpool = keras.layers.convolutional.MaxPooling1D(pool_length=4, stride=2)
        conv1_out = conv(inp1)
        conv2_out = conv(inp2)
        maxpool1_out = maxpool(conv1_out)
        maxpool2_out = maxpool(conv2_out)
        merge_out = keras.layers.merge([maxpool1_out, maxpool2_out],
                                       mode='concat', concat_axis=2)
        flatten_out = keras.layers.core.Flatten()(merge_out)
        dense1_out = keras.layers.core.Dense(output_dim=5)(flatten_out)
        dense1relu_out = keras.layers.core.Activation("relu")(dense1_out)
        output_preact = keras.layers.core.Dense(
                         output_dim=1, name="output_preact")(dense1relu_out)
        output = keras.layers.core.Activation("sigmoid",
                        name="output_postact")(output_preact)
        self.keras_model = keras.models.Model(input=[inp1, inp2],
                                              output=output)
        self.keras_model.compile(optimizer='rmsprop',
                              loss='binary_crossentropy',
                              metrics=['accuracy'])
 
        if (self.keras_version <= 0.3): 
            pass
        else:
            keras_output_fprop_func = compile_func(
                [inp1, inp2, keras.backend.learning_phase()],
                self.keras_model.layers[-1].output)
            self.keras_output_fprop_func =\
                lambda x,y: keras_output_fprop_func(x,y,False)
            grad = theano.grad(theano.tensor.sum(output_preact[:,0]),
                               [inp1, inp2])
            grad_func = theano.function(
                    [inp1, inp2, keras.backend.learning_phase()],
                    grad, allow_input_downcast=True, on_unused_input='ignore')
            self.grad_func = lambda x,y: grad_func(x,y,False)
Example #12
 def test_dense_backprop(self):
     func = compile_func([
         self.input_layer.get_activation_vars(),
         self.input_layer.get_reference_vars()
     ], self.input_layer.get_mxts())
     self.dense_layer.update_task_index(task_index=0)
     self.assertListEqual(
         [list(x) for x in func(self.inp, np.zeros_like(self.inp))],
         [self.w1, self.w1])
     self.dense_layer.update_task_index(task_index=1)
     self.assertListEqual(
         [list(x) for x in func(self.inp, np.zeros_like(self.inp))],
         [self.w2, self.w2])
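
Why the multipliers are just [w1, w1] and [w2, w2]: for a linear layer y = x @ W + b, the gradient of output task t with respect to the input is the t-th column of W, identical for every sample. A finite-difference sketch with hypothetical weight vectors (the test's self.w1/self.w2 are not shown in this excerpt):

    import numpy as np
    w1 = np.array([1.0, 2.0, 3.0, 4.0])    # hypothetical
    w2 = np.array([-1.0, 0.5, 0.0, 2.0])   # hypothetical
    W, b = np.array([w1, w2]).T, np.array([-1.0, 1.0])
    x = np.random.randn(2, 4)
    dense = lambda inp: inp @ W + b
    eps = 1e-6
    # numerical gradient of task 0 w.r.t. each input element
    grad_task0 = np.array(
        [(dense(x + eps*np.eye(4)[i])[:, 0] - dense(x)[:, 0])/eps
         for i in range(4)]).T
    np.testing.assert_allclose(grad_task0, np.tile(w1, (2, 1)), rtol=1e-4)
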
Example #13
    def test_backprop(self):
        np.random.seed(1234)
        bn_pos_mxts_to_set = np.random.random((self.inp.shape)) - 0.5
        bn_neg_mxts_to_set = np.random.random((self.inp.shape)) - 0.5
        self.set_mxts(bn_pos_mxts_to_set, bn_neg_mxts_to_set)
        func_pos = compile_func([self.input_layer.get_activation_vars()],
                                self.input_layer.get_pos_mxts())
        func_neg = compile_func([self.input_layer.get_activation_vars()],
                                self.input_layer.get_neg_mxts())
        diff_from_ref = self.inp - self.ref
        inp_pos_mxts = ((bn_pos_mxts_to_set *
                         (((self.gamma > 0.0) * self.gamma)[None, None, :]) +
                         bn_neg_mxts_to_set *
                         (((self.gamma < 0.0) * self.gamma)[None, None, :])) /
                        (self.std + self.epsilon))

        inp_neg_mxts = ((bn_pos_mxts_to_set *
                         (((self.gamma < 0.0) * self.gamma)[None, None, :]) +
                         bn_neg_mxts_to_set *
                         (((self.gamma > 0.0) * self.gamma)[None, None, :])) /
                        (self.std + self.epsilon))
        np.testing.assert_almost_equal(func_pos(self.inp), inp_pos_mxts)
        np.testing.assert_almost_equal(func_neg(self.inp), inp_neg_mxts)
Example #14
    def test_convert_conv1d_model_forward_prop(self):
        if not self.run_functional_tests:
            return
        deeplift_model = kc.convert_functional_model(
                          model=self.keras_model,
                          nonlinear_mxts_mode=NonlinearMxtsMode.Rescale)
        deeplift_fprop_func = compile_func(
            [deeplift_model.get_name_to_blob()['inp1'].get_activation_vars(),
             deeplift_model.get_name_to_blob()['inp2'].get_activation_vars()],
            deeplift_model.get_name_to_blob()['output_postact']
                .get_activation_vars())
        np.testing.assert_almost_equal(
            deeplift_fprop_func(self.inp1, self.inp2),
            self.keras_output_fprop_func(self.inp1, self.inp2),
            decimal=6)
Example #15
    def test_fprop_stride(self):

        conv_layer = blobs.Conv1D(W=self.conv_W,
                                  b=self.conv_b,
                                  stride=2,
                                  border_mode=PaddingMode.valid,
                                  channels_come_last=False)
        self.create_small_net_with_conv_layer(conv_layer,
                                              outputs_per_channel=3)
        func = compile_func([self.input_layer.get_activation_vars()],
                            self.conv_layer.get_activation_vars())
        np.testing.assert_almost_equal(
            func(self.inp),
            np.array([[[23, 35], [-23, -35]], [[71, 83], [-71, -83]]]))
Example #16
 def test_running_of_different_dense_modes(self):
     for mode in DenseMxtsMode.vals:
         input_layer = blobs.Input(num_dims=None, shape=(None, 4))
         W = np.array([self.w1, self.w2]).T
         b = np.array([-1.0, 1.0])
         dense_layer = blobs.Dense(W=W, b=b, dense_mxts_mode=mode)
         dense_layer.set_inputs(input_layer)
         dense_layer.build_fwd_pass_vars()
         dense_layer.set_scoring_mode(blobs.ScoringMode.OneAndZeros)
         dense_layer.set_active()
         input_layer.update_mxts()
         func = compile_func([
             input_layer.get_activation_vars(),
             input_layer.get_reference_vars()
         ], input_layer.get_mxts())
         dense_layer.update_task_index(task_index=0)
         func(self.inp, np.zeros_like(self.inp))
Example #17
 def test_dense_backprop_stride(self):
     conv_layer = blobs.Conv1D(W=self.conv_W,
                               b=self.conv_b,
                               stride=2,
                               border_mode=PaddingMode.valid,
                               channels_come_last=False)
     self.create_small_net_with_conv_layer(conv_layer,
                                           outputs_per_channel=2)
     self.dense_layer.update_task_index(task_index=0)
     func = compile_func([
         self.input_layer.get_activation_vars(),
         self.input_layer.get_reference_vars()
     ], self.input_layer.get_mxts())
     np.testing.assert_almost_equal(
         func(self.inp, np.zeros_like(self.inp)),
         np.array([[[0, 2, 0, 2], [4, 6, 4, 6]], [[0, 2, 0, 2],
                                                  [4, 6, 4, 6]]]))
Example #18
    def test_fprop_avgpool(self): 

        pool_layer = blobs.AvgPool1D(pool_length=2,
                                  stride=1,
                                  border_mode=PaddingMode.valid,
                                  ignore_border=True,
                                  channels_come_last=False)
        self.create_small_net_with_pool_layer(pool_layer,
                                              outputs_per_channel=3)

        func = compile_func([self.input_layer.get_activation_vars()],
                            self.pool_layer.get_activation_vars())
        np.testing.assert_almost_equal(
            func(self.backprop_test_inps),
            np.array([[[ 0.5,  2.5,  3.5],
                       [ 2.5,  1.5,  0.5]],
                      [[-0.5, -1.5, -2.5],
                       [-2.5, -1.5, -0.5]]]))
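
The expected values are consistent with backprop_test_inps of [[[0,1,4,3],[3,2,1,0]], [[0,-1,-2,-3],[-3,-2,-1,0]]] (inferred from this test and the maxpool test below; the fixture itself is not shown). With pool_length=2 and stride=1, each output is the mean of a sliding pair:

    import numpy as np
    row = np.array([0., 1., 4., 3.])          # inferred first input row
    means = (row[:-1] + row[1:])/2.0
    np.testing.assert_almost_equal(means, [0.5, 2.5, 3.5])
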
Example #19
    def test_backprop_avgpool(self):
        pool_layer = blobs.AvgPool1D(pool_length=2, stride=1,
                                     border_mode=PaddingMode.valid,
                                     ignore_border=True,
                                     channels_come_last=False)
        self.create_small_net_with_pool_layer(pool_layer,
                                              outputs_per_channel=3)

        self.dense_layer.update_task_index(task_index=0)
        func = compile_func([self.input_layer.get_activation_vars(),
                             self.input_layer.get_reference_vars()],
                            self.input_layer.get_mxts())
        avg_pool_grads = np.array([1, 2, 2, 1]).astype("float32")*0.5
        np.testing.assert_almost_equal(
            func(self.backprop_test_inps,
                 np.ones_like(self.backprop_test_inps)*self.reference_inps),
            np.array([[avg_pool_grads*2, avg_pool_grads*3],
                      [avg_pool_grads*2, avg_pool_grads*3]]))
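
Where [1, 2, 2, 1]*0.5 comes from: with pool_length=2 and stride=1 over a length-4 row, position i falls in that many windows, and each window passes back 1/pool_length of the downstream multiplier. A counting sketch:

    import numpy as np
    coverage = np.zeros(4)
    for start in range(3):            # windows (0,1), (1,2), (2,3)
        coverage[[start, start + 1]] += 1
    np.testing.assert_almost_equal(coverage*0.5,
                                   np.array([1, 2, 2, 1])*0.5)
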
Example #20
    def test_fprop_maxpool1d(self): 

        pool_layer = blobs.MaxPool1D(pool_length=2,
                          stride=1,
                          border_mode=PaddingMode.valid,
                          ignore_border=True,
                          maxpool_deeplift_mode=MaxPoolDeepLiftMode.gradient,
                          channels_come_last=False)
        self.create_small_net_with_pool_layer(pool_layer,
                                              outputs_per_channel=3)

        func = compile_func([self.input_layer.get_activation_vars()],
                            self.pool_layer.get_activation_vars())
        np.testing.assert_almost_equal(
            func(self.backprop_test_inps),
            np.array([[[ 1,  4,  4],
                       [ 3,  2,  1]],
                      [[ 0, -1, -2],
                       [-2, -1,  0]]]))
Example #21
 def test_backprop_maxpool_gradients(self):
     pool_layer = blobs.MaxPool1D(pool_length=2,
                   stride=1,
                   border_mode=PaddingMode.valid,
                   ignore_border=True,
                   maxpool_deeplift_mode=MaxPoolDeepLiftMode.gradient,
                   channels_come_last=False)
     self.create_small_net_with_pool_layer(pool_layer,
                                           outputs_per_channel=3)
     self.dense_layer.update_task_index(task_index=0)
     func = compile_func([
                 self.input_layer.get_activation_vars(),
                 self.input_layer.get_reference_vars()],
             self.input_layer.get_mxts())
      np.testing.assert_almost_equal(
          func(self.backprop_test_inps,
               np.ones_like(self.backprop_test_inps)*self.reference_inps),
          np.array([[np.array([0, 1, 2, 0])*2,
                     np.array([1, 1, 1, 0])*3],
                    [np.array([1, 1, 1, 0])*2,
                     np.array([0, 1, 1, 1])*3]]))
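
In gradient mode each window routes its multiplier entirely to its argmax. Using the inferred input row [0, 1, 4, 3] (consistent with the maxpool fprop test above), the three windows pick positions 1, 2, 2, giving the per-position counts [0, 1, 2, 0] asserted above:

    import numpy as np
    row = np.array([0., 1., 4., 3.])          # inferred first input row
    counts = np.zeros(4)
    for start in range(3):                    # pool_length=2, stride=1
        counts[start + np.argmax(row[start:start+2])] += 1
    np.testing.assert_almost_equal(counts, [0, 1, 2, 0])
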
Example #22
    def setUp(self):
        if not hasattr(keras, '__version__'):
            self.keras_version = 0.2  #0.2 didn't have the __version__ attribute
        else:
            self.keras_version = float(keras.__version__[0:3])
        self.inp1 = (np.random.randn(10*10*51)
                     .reshape(10,10,51).transpose(0,2,1))
        self.inp2 = (np.random.randn(10*10*51)
                     .reshape(10,10,51).transpose(0,2,1))
        self.run_graph_tests = True
        if (self.keras_version < 1.0):
            self.keras_model = keras.models.Graph()
        elif (hasattr(keras, 'legacy')):
            self.keras_model = keras.legacy.models.Graph()
        else:
            self.run_graph_tests = False
            return  #skip setup
        self.keras_model.add_input(name="inp1", input_shape=(51, 10))
        self.keras_model.add_node(keras.layers.convolutional.Convolution1D(
            nb_filter=2,
            filter_length=4,
            subsample_length=2,
            activation="relu"),
                                  name="conv1",
                                  input="inp1")
        self.keras_model.add_node(keras.layers.convolutional.MaxPooling1D(
            pool_length=4, stride=2),
                                  name="mp1",
                                  input="conv1")

        self.keras_model.add_input(name="inp2", input_shape=(51, 10))
        self.keras_model.add_node(keras.layers.convolutional.Convolution1D(
            nb_filter=2,
            filter_length=4,
            subsample_length=2,
            activation="relu"),
                                  name="conv2",
                                  input="inp2")
        self.keras_model.add_node(keras.layers.convolutional.MaxPooling1D(
            pool_length=4, stride=2),
                                  name="mp2",
                                  input="conv2")

        self.keras_model.add_node(keras.layers.core.Flatten(),
                                  name='flatten',
                                  inputs=['mp1', 'mp2'],
                                  merge_mode='concat',
                                  concat_axis=2)
        self.keras_model.add_node(keras.layers.core.Dense(output_dim=5),
                                  name="dense1",
                                  input="flatten")
        self.keras_model.add_node(keras.layers.core.Activation("relu"),
                                  name="denserelu1",
                                  input="dense1")
        self.keras_model.add_node(keras.layers.core.Dense(output_dim=1),
                                  name='output_preact',
                                  input='denserelu1')
        self.keras_model.add_node(keras.layers.core.Activation("sigmoid"),
                                  name='output_postact',
                                  input='output_preact')
        self.keras_model.add_output(name='output', input='output_postact')
        self.keras_model.compile(loss={"output": "binary_crossentropy"},
                                 optimizer="sgd")

        if (self.keras_version <= 0.3):
            self.keras_output_fprop_func = compile_func([
                self.keras_model.inputs['inp1'].input,
                self.keras_model.inputs['inp2'].input
            ], self.keras_model.outputs['output'].get_output(False))
            grad = theano.grad(
                theano.tensor.sum(
                    (self.keras_model.nodes['output_preact'].get_output(False)
                     [:, 0])), [
                         self.keras_model.inputs['inp1'].input,
                         self.keras_model.inputs['inp2'].input
                     ])
            self.grad_func = theano.function(
                [self.keras_model.inputs['inp1'].input,
                 self.keras_model.inputs['inp2'].input],
                grad, allow_input_downcast=True)
        else:
            keras_output_fprop_func = compile_func([
                self.keras_model.inputs['inp1'].input,
                self.keras_model.inputs['inp2'].input,
                keras.backend.learning_phase()
            ], self.keras_model.layers[-1].output)
            self.keras_output_fprop_func =\
                lambda x,y: keras_output_fprop_func(x,y,False)
            grad = theano.grad(
                theano.tensor.sum(
                    self.keras_model.nodes['output_preact'].output[:, 0]), [
                        self.keras_model.inputs['inp1'].input,
                        self.keras_model.inputs['inp2'].input
                    ])
            grad_func = theano.function(
                [self.keras_model.inputs['inp1'].input,
                 self.keras_model.inputs['inp2'].input,
                 keras.backend.learning_phase()],
                grad, allow_input_downcast=True,
                on_unused_input='ignore')
            self.grad_func = lambda x, y: grad_func(x, y, False)
Example #23
 def test_dense_fprop(self):
     func = compile_func([self.input_layer.get_activation_vars()],
                         self.dense_layer.get_activation_vars())
     self.assertListEqual([list(x) for x in func(self.inp)],
                          [[9.0, -9.0], [19.0, -19.0]])