def convert_sequential_model(
        model, num_dims=None,
        nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT,
        verbose=True,
        dense_mxts_mode=DenseMxtsMode.Linear,
        maxpool_deeplift_mode=default_maxpool_deeplift_mode):
    """Convert a keras Sequential model into a deeplift SequentialModel.

    model: the keras Sequential model to convert.
    num_dims: expected number of input dims (including the batch axis);
        if the model's first layer declares an input_shape, the two must
        agree. May be left None when the model declares its own shape.
    nonlinear_mxts_mode / dense_mxts_mode / maxpool_deeplift_mode:
        multiplier-computation modes forwarded to the per-layer converters.
    verbose: forwarded to sequential_container_conversion.

    Returns a models.SequentialModel with forward-pass vars already built.
    """
    converted_layers = []
    if (model.layers[0].input_shape is not None):
        # The keras model declares its input shape; validate it and use it.
        input_shape = model.layers[0].input_shape
        assert input_shape[0] is None #batch axis
        num_dims_input = len(input_shape)
        assert num_dims is None or num_dims_input==num_dims,\
            "num_dims argument of "+str(num_dims)+" is incompatible with"\
            +" the number of dims in layers[0].input_shape which is: "\
            +str(model.layers[0].input_shape)
        num_dims = num_dims_input
    else:
        # No declared shape; fall back on the caller-supplied num_dims.
        input_shape = None
    converted_layers.append(
        blobs.Input(num_dims=num_dims, shape=input_shape, name="input"))
    #converted_layers is actually mutated to be extended with the
    #additional layers so the assignment is not strictly necessary,
    #but whatever
    converted_layers = sequential_container_conversion(
        layer=model, name="", verbose=verbose,
        nonlinear_mxts_mode=nonlinear_mxts_mode,
        dense_mxts_mode=dense_mxts_mode,
        maxpool_deeplift_mode=maxpool_deeplift_mode,
        converted_layers=converted_layers)
    # Chain each deeplift layer to its predecessor, then build the graph
    # starting from the final layer.
    deeplift.util.connect_list_of_layers(converted_layers)
    converted_layers[-1].build_fwd_pass_vars()
    return models.SequentialModel(converted_layers)
def prepare_batch_norm_deeplift_model(self, axis):
    """Wire Input -> BatchNormalization -> Flatten -> Dense and prime the
    graph for multiplier (mxts) computation on task 0.

    axis: the axis argument forwarded to blobs.BatchNormalization.

    NOTE(review): relies on self.keras_version, self.std, self.epsilon,
    self.gamma, self.beta and self.mean being set by the surrounding test
    fixture — confirm against the class setUp.
    """
    self.input_layer = blobs.Input(num_dims=None, shape=(None,2,2,2))
    # For keras > 0.3 the epsilon is folded into the std term up front
    # (std becomes sqrt(var + eps)) and epsilon is zeroed — presumably to
    # match a change in keras' batch-norm parameterization; verify.
    if (self.keras_version <= 0.3):
        std = self.std
        epsilon = self.epsilon
    else:
        std = np.sqrt(self.std+self.epsilon)
        epsilon = 0
    self.batch_norm_layer = blobs.BatchNormalization(
        gamma=self.gamma, beta=self.beta, axis=axis,
        mean=self.mean, std=std, epsilon=epsilon)
    self.batch_norm_layer.set_inputs(self.input_layer)
    self.flatten_layer = blobs.Flatten()
    self.flatten_layer.set_inputs(self.batch_norm_layer)
    # Dense layer of all-ones weights: its single output sums the 8
    # flattened batch-norm outputs.
    self.dense_layer = blobs.Dense(
        W=np.ones((1,8)).T, b=np.zeros(1),
        dense_mxts_mode=DenseMxtsMode.Linear)
    self.dense_layer.set_inputs(self.flatten_layer)
    self.dense_layer.build_fwd_pass_vars()
    # Activate OneAndZeros scoring on task 0 and propagate multipliers
    # back to the input layer.
    self.dense_layer.set_scoring_mode(blobs.ScoringMode.OneAndZeros)
    self.dense_layer.set_active()
    self.dense_layer.update_task_index(0)
    self.input_layer.update_mxts()
def setUp(self):
    """Fixture data for a 2-channel 4x4 input (theano ordering).

    reference_inps: a single (1, 2, 4, 4) reference example.
    backprop_test_inps: two (2, 4, 4) examples used to test backprop.
    """
    #theano dimensional ordering assumed here...would need to swap
    #axes for tensorflow
    self.reference_inps=np.array([[[
                            [0,0,2,3],
                            [0,1,0,0],
                            [0,5,4,0],
                            [6,0,7,8]],
                            [[1,1,3,4],
                             [1,2,1,1],
                             [1,6,5,1],
                             [7,1,8,9]]]])
    self.backprop_test_inps = np.array([[[
                                  [2,0,2,3],
                                  [0,1,4,0],
                                  [7,6,5,0],
                                  [6,0,8,9]],
                                  [[0,0,2,3],
                                   [0,1,0,0],
                                   [0,5,4,0],
                                   [6,0,7,8]]],
                                  [[[1,1,3,4],
                                    [1,2,1,1],
                                    [1,6,5,1],
                                    [7,1,8,9]],
                                   [[3,1,3,4],
                                    [1,2,5,1],
                                    [8,7,6,1],
                                    [7,1,9,10]]]])
    self.input_layer = blobs.Input(
                        num_dims=None,
                        shape=(None,2,4,4))
def setUp(self):
    """Fixture: (None, 4, 2) input plus a tiny 1-D conv kernel/bias pair."""
    # would need to swap axes for tensorflow ordering
    self.input_layer = blobs.Input(num_dims=None, shape=(None, 4, 2))
    # tensorflow does not flip conv weights, so pre-reverse the columns;
    # the second filter is just the negation of the first.
    base = np.arange(4, dtype="float32").reshape(2, 2)
    self.w1 = base[:, ::-1]
    self.w2 = -self.w1
    self.conv_W = np.stack([self.w1, self.w2]).astype("float32")
    self.conv_b = np.array([-1.0, 1.0], dtype="float32")
def convert_graph_model(model,
                        nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT,
                        verbose=True,
                        dense_mxts_mode=DenseMxtsMode.Linear,
                        maxpool_deeplift_mode=default_maxpool_deeplift_mode,
                        auto_build_outputs=True):
    """Convert a keras Graph model into a deeplift GraphModel.

    Three passes: (1) convert every keras input to a blobs.Input,
    (2) convert every node via its registered conversion function,
    (3) connect each converted node to its (already converted)
    predecessor. Optionally builds the forward-pass vars of every
    output layer.

    NOTE(review): assumes the old keras Graph API where model.inputs
    and model.nodes are name-keyed mappings — confirm the keras version
    this targets.
    """
    name_to_blob = OrderedDict()
    # keyed by id() of the keras layer -> list of deeplift layers it maps to
    keras_layer_to_deeplift_blobs = OrderedDict()
    keras_non_input_layers = []
    #convert the inputs
    for keras_input_layer_name in model.inputs:
        keras_input_layer = model.inputs[keras_input_layer_name]
        input_shape = keras_input_layer.get_config()['input_shape']
        # ensure the leading (batch) axis is None, prepending it if absent
        if (input_shape[0] is not None):
            input_shape = [None] + [x for x in input_shape]
        assert input_shape[0] is None #for the batch axis
        deeplift_input_layer =\
            blobs.Input(shape=input_shape, num_dims=None,
                        name=keras_input_layer_name)
        name_to_blob[keras_input_layer_name] = deeplift_input_layer
        keras_layer_to_deeplift_blobs[id(keras_input_layer)] =\
            [deeplift_input_layer]
    #convert the nodes/outputs
    for layer_name, layer in list(model.nodes.items()):
        # NOTE(review): the config 'name' entry is used to look up the
        # conversion function — presumably it holds the layer type here;
        # confirm against layer_name_to_conversion_function.
        conversion_function = layer_name_to_conversion_function(
            layer.get_config()[KerasKeys.name])
        keras_non_input_layers.append(layer)
        deeplift_layers = conversion_function(
            layer=layer, name=layer_name, verbose=verbose,
            nonlinear_mxts_mode=nonlinear_mxts_mode,
            dense_mxts_mode=dense_mxts_mode,
            maxpool_deeplift_mode=maxpool_deeplift_mode)
        # one keras layer may expand to several deeplift layers;
        # chain them internally first
        deeplift.util.connect_list_of_layers(deeplift_layers)
        keras_layer_to_deeplift_blobs[id(layer)] = deeplift_layers
        for deeplift_layer in deeplift_layers:
            name_to_blob[deeplift_layer.get_name()] = deeplift_layer
    #connect any remaining things not connected to their inputs
    for keras_non_input_layer in keras_non_input_layers:
        deeplift_layers =\
            keras_layer_to_deeplift_blobs[id(keras_non_input_layer)]
        previous_keras_layer = get_previous_layer(keras_non_input_layer)
        # connect to the LAST deeplift layer of the predecessor's chain
        previous_deeplift_layer =\
            keras_layer_to_deeplift_blobs[id(previous_keras_layer)][-1]
        deeplift.util.apply_softmax_normalization_if_needed(
            deeplift_layers[0], previous_deeplift_layer)
        deeplift_layers[0].set_inputs(previous_deeplift_layer)
    if (auto_build_outputs):
        for layer in model.outputs.values():
            layer_to_build = keras_layer_to_deeplift_blobs[id(layer)][-1]
            layer_to_build.build_fwd_pass_vars()
    return models.GraphModel(name_to_blob=name_to_blob,
                             input_layer_names=model.inputs.keys())
def input_layer_conversion(keras_input_layer, layer_name):
    """Build a deeplift Input blob from a keras input layer's config."""
    shape = keras_input_layer.get_config()['batch_input_shape']
    # The leading axis must be the batch axis (None); prepend it if the
    # config's shape does not already start with None.
    if shape[0] is not None:
        shape = [None] + list(shape)
    assert shape[0] is None  # batch axis
    return blobs.Input(shape=shape, num_dims=None, name=layer_name)
def setUp(self):
    """Fixture: (None, 2, 4, 4) input plus a tiny 2-D conv kernel/bias pair."""
    # theano convolutional (channels-first) ordering; tensorflow would
    # need the axes swapped
    self.input_layer = blobs.Input(num_dims=None, shape=(None, 2, 4, 4))
    # pre-flip both spatial axes since the conv op does not flip kernels;
    # second filter is the negation of the first
    kernel = np.arange(8, dtype="float32").reshape(2, 2, 2)[:, ::-1, ::-1]
    self.w1 = kernel
    self.w2 = -kernel
    self.conv_W = np.array([self.w1, self.w2]).astype("float32")
    self.conv_b = np.array([-1.0, 1.0], dtype="float32")
def setUp(self):
    """Fixture: single-output Dense layer over a 4-feature input."""
    self.input_layer = blobs.Input(num_dims=None, shape=(None, 4))
    self.w1 = [1.0, -2.0, -3.0, 4.0]
    weights = np.array([self.w1]).T
    bias = np.array([1.0])
    self.dense_layer = blobs.Dense(
        W=weights, b=bias, dense_mxts_mode=DenseMxtsMode.Linear)
    self.dense_layer.set_inputs(self.input_layer)
    self.inp = [[-1.0, -1.0, 1.0, 1.0]]
def setUp(self):
    """Fixture: two 1-element inputs, concatenated, flattened, then
    combined by a Dense layer; multipliers primed for both inputs."""
    self.input_layer1 = blobs.Input(num_dims=None, shape=(None, 1, 1, 1))
    self.input_layer2 = blobs.Input(num_dims=None, shape=(None, 1, 1, 1))
    self.concat_layer = blobs.Concat(axis=1)
    self.concat_layer.set_inputs([self.input_layer1, self.input_layer2])
    self.flatten_layer = blobs.Flatten()
    self.flatten_layer.set_inputs(self.concat_layer)
    self.dense_layer = blobs.Dense(
        W=np.array([[1, 2]]).T, b=[1],
        dense_mxts_mode=DenseMxtsMode.Linear)
    self.dense_layer.set_inputs(self.flatten_layer)
    self.dense_layer.build_fwd_pass_vars()
    for inp_layer in (self.input_layer1, self.input_layer2):
        inp_layer.reset_mxts_updated()
    self.dense_layer.set_scoring_mode(blobs.ScoringMode.OneAndZeros)
    self.dense_layer.set_active()
    for inp_layer in (self.input_layer1, self.input_layer2):
        inp_layer.update_mxts()
    self.inp1 = np.arange(2).reshape((2, 1, 1, 1)) + 1
    self.inp2 = np.arange(2).reshape((2, 1, 1, 1)) + 1
def test_relu_after_dense(self):
    """GenomicsDefault resolves to RevealCancel when ReLU follows Dense."""
    inp = blobs.Input(num_dims=None, shape=(None, 4))
    dense = blobs.Dense(W=np.random.random((2, 4)),
                        b=np.random.random((2,)),
                        dense_mxts_mode=DenseMxtsMode.Linear)
    dense.set_inputs(inp)
    relu = blobs.ReLU(
        nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
    relu.set_inputs(dense)
    relu.build_fwd_pass_vars()
    self.assertEqual(relu.nonlinear_mxts_mode,
                     NonlinearMxtsMode.RevealCancel)
def setUp(self):
    """Fixture: two-unit Dense layer (mirrored weight columns), primed
    for OneAndZeros scoring with multipliers propagated to the input."""
    self.input_layer = blobs.Input(num_dims=None, shape=(None, 4))
    self.w1 = [1.0, 2.0, 3.0, 4.0]
    self.w2 = [-1.0, -2.0, -3.0, -4.0]
    weight_mat = np.array([self.w1, self.w2]).T
    bias_vec = np.array([-1.0, 1.0])
    self.dense_layer = blobs.Dense(W=weight_mat, b=bias_vec,
                                   dense_mxts_mode=DenseMxtsMode.Linear)
    self.dense_layer.set_inputs(self.input_layer)
    self.dense_layer.build_fwd_pass_vars()
    self.dense_layer.set_scoring_mode(blobs.ScoringMode.OneAndZeros)
    self.dense_layer.set_active()
    self.input_layer.update_mxts()
    self.inp = [[1.0] * 4, [2.0] * 4]
def test_relu_after_conv2d(self):
    """GenomicsDefault resolves to Rescale when ReLU follows a Conv2D."""
    inp = blobs.Input(num_dims=None, shape=(None, 2, 2, 2))
    conv = blobs.Conv2D(W=np.random.random((2, 2, 2, 2)),
                        b=np.random.random((2,)),
                        conv_mxts_mode=ConvMxtsMode.Linear,
                        strides=(1, 1),
                        border_mode=PaddingMode.valid,
                        channels_come_last=True)
    conv.set_inputs(inp)
    relu = blobs.ReLU(
        nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
    relu.set_inputs(conv)
    relu.build_fwd_pass_vars()
    self.assertEqual(relu.nonlinear_mxts_mode,
                     NonlinearMxtsMode.Rescale)
def setUp(self):
    """Fixture data for a 2-channel length-4 input (theano ordering).

    reference_inps: one all-zero (1, 2, 4) reference example.
    backprop_test_inps: two (2, 4) examples used to test backprop.
    """
    #theano dimensional ordering assumed here...would need to swap
    #axes for tensorflow
    self.reference_inps=np.array([[[0,0,0,0],
                                   [0,0,0,0]]])
    self.backprop_test_inps = np.array(
                               [[
                                 [0,1,4,3],
                                 [3,2,1,0]],
                                [
                                 [0,-1,-2,-3],
                                 [-3,-2,-1,0]
                                ]])
    self.input_layer = blobs.Input(
                        num_dims=None,
                        shape=(None,2,4))
def test_running_of_different_dense_modes(self):
    """Smoke test: every DenseMxtsMode can be wired, compiled and run
    without raising."""
    for mode in DenseMxtsMode.vals:
        inp_layer = blobs.Input(num_dims=None, shape=(None, 4))
        dense = blobs.Dense(W=np.array([self.w1, self.w2]).T,
                            b=np.array([-1.0, 1.0]),
                            dense_mxts_mode=mode)
        dense.set_inputs(inp_layer)
        dense.build_fwd_pass_vars()
        dense.set_scoring_mode(blobs.ScoringMode.OneAndZeros)
        dense.set_active()
        inp_layer.update_mxts()
        func = compile_func(
            [inp_layer.get_activation_vars(),
             inp_layer.get_reference_vars()],
            inp_layer.get_mxts())
        dense.update_task_index(task_index=0)
        func(self.inp, np.zeros_like(self.inp))
def setUp(self):
    """Fixture: BatchNormalization over the last axis of a (None, 4, 2)
    input, with forward-pass vars built and test data prepared."""
    self.input_layer = blobs.Input(num_dims=None, shape=(None, 4, 2))
    self.mean = np.array([1, -1])
    self.gamma = np.array([2, -2])
    self.beta = np.array([1, -1])
    self.std = np.array([1.99, 1.99])
    self.epsilon = 0.01
    self.batch_norm_layer = blobs.BatchNormalization(
        axis=-1, gamma=self.gamma, beta=self.beta,
        mean=self.mean, std=self.std, epsilon=self.epsilon)
    self.batch_norm_layer.set_inputs(self.input_layer)
    self.batch_norm_layer.build_fwd_pass_vars()
    # inputs span [-8, 8); the reference is all ones
    self.inp = np.arange(16, dtype="float32").reshape((2, 4, 2)) - 8.0
    self.ref = np.ones_like(self.inp)
def test_relu_after_dense_batchnorm(self):
    """A batch-norm between Dense and ReLU must not hide the dense
    ancestor: GenomicsDefault still resolves to RevealCancel."""
    inp = blobs.Input(num_dims=None, shape=(None, 4))
    dense = blobs.Dense(W=np.random.random((4, 2)),
                        b=np.random.random((2,)),
                        dense_mxts_mode=DenseMxtsMode.Linear)
    dense.set_inputs(inp)
    bn = blobs.BatchNormalization(gamma=np.array([1.0, 1.0]),
                                  beta=np.array([-0.5, 0.5]),
                                  axis=-1,
                                  mean=np.array([-0.5, 0.5]),
                                  std=np.array([1.0, 1.0]),
                                  epsilon=0.001)
    bn.set_inputs(dense)
    relu = blobs.ReLU(
        nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
    relu.set_inputs(bn)
    relu.build_fwd_pass_vars()
    self.assertEqual(relu.nonlinear_mxts_mode,
                     NonlinearMxtsMode.RevealCancel)
def test_relu_after_conv1d_batchnorm(self):
    """A batch-norm between Conv1D and ReLU must not hide the conv
    ancestor: GenomicsDefault still resolves to Rescale."""
    inp = blobs.Input(num_dims=None, shape=(None, 2, 2))
    conv = blobs.Conv1D(W=np.random.random((2, 2, 2)),
                        b=np.random.random((2,)),
                        conv_mxts_mode=ConvMxtsMode.Linear,
                        stride=1,
                        border_mode=PaddingMode.valid,
                        channels_come_last=True)
    conv.set_inputs(inp)
    bn = blobs.BatchNormalization(gamma=np.array([1.0, 1.0]),
                                  beta=np.array([-0.5, 0.5]),
                                  axis=-1,
                                  mean=np.array([-0.5, 0.5]),
                                  std=np.array([1.0, 1.0]),
                                  epsilon=0.001)
    bn.set_inputs(conv)
    relu = blobs.ReLU(
        nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
    relu.set_inputs(bn)
    relu.build_fwd_pass_vars()
    self.assertEqual(relu.nonlinear_mxts_mode,
                     NonlinearMxtsMode.Rescale)
def convert_graph_model(model,
                        nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT,
                        verbose=True,
                        dense_mxts_mode=DenseMxtsMode.Linear,
                        conv_mxts_mode=ConvMxtsMode.Linear,
                        maxpool_deeplift_mode=default_maxpool_deeplift_mode,
                        auto_build_outputs=True):
    """Convert a keras Graph model into a deeplift GraphModel.

    Like the plain variant but also handles Merge predecessors (which the
    old keras Graph API does not give their own node) and supports a
    conv_mxts_mode. Passes: (1) convert inputs, (2) convert nodes —
    synthesizing a merge layer when a node's predecessor is a Merge,
    (3) connect each converted node to its predecessor(s).

    NOTE(review): assumes the old keras Graph API where model.inputs and
    model.nodes are name-keyed mappings — confirm the keras version.
    """
    name_to_blob = OrderedDict()
    # keyed by id() of the keras layer -> list of deeplift layers it maps to
    keras_layer_to_deeplift_blobs = OrderedDict()
    keras_non_input_layers = []
    #convert the inputs
    for keras_input_layer_name in model.inputs:
        keras_input_layer = model.inputs[keras_input_layer_name]
        input_shape = keras_input_layer.get_config()['input_shape']
        # ensure the leading (batch) axis is None, prepending it if absent
        if (input_shape[0] is not None):
            input_shape = [None] + [x for x in input_shape]
        assert input_shape[0] is None #for the batch axis
        deeplift_input_layer =\
            blobs.Input(shape=input_shape, num_dims=None,
                        name=keras_input_layer_name)
        name_to_blob[keras_input_layer_name] = deeplift_input_layer
        keras_layer_to_deeplift_blobs[id(keras_input_layer)] =\
            [deeplift_input_layer]
    #convert the nodes/outputs
    for layer_name, layer in list(model.nodes.items()):
        #need some special handling when previous layer
        #is Merge as merge is not given its own node
        if (type(get_previous_layer(layer)).__name__ == 'Merge'):
            merge_layer = get_previous_layer(layer)
            keras_non_input_layers.append(merge_layer)
            deeplift_merge_layer = merge_conversion(layer=merge_layer,
                                    name='merge_before_' + layer_name,
                                    verbose=verbose)
            keras_layer_to_deeplift_blobs[id(merge_layer)] =\
                deeplift_merge_layer
            # a Merge must convert to exactly one deeplift layer
            assert len(deeplift_merge_layer) == 1
            name_to_blob[deeplift_merge_layer[0].get_name()] =\
                deeplift_merge_layer[0]
        #now for converting the actual layer
        conversion_function = layer_name_to_conversion_function(
            type(layer).__name__)
        keras_non_input_layers.append(layer)
        deeplift_layers = conversion_function(
            layer=layer, name=layer_name, verbose=verbose,
            nonlinear_mxts_mode=nonlinear_mxts_mode,
            dense_mxts_mode=dense_mxts_mode,
            conv_mxts_mode=conv_mxts_mode,
            maxpool_deeplift_mode=maxpool_deeplift_mode)
        # one keras layer may expand to several deeplift layers;
        # chain them internally first
        deeplift.util.connect_list_of_layers(deeplift_layers)
        keras_layer_to_deeplift_blobs[id(layer)] = deeplift_layers
        for deeplift_layer in deeplift_layers:
            name_to_blob[deeplift_layer.get_name()] = deeplift_layer
    #connect any remaining things not connected to their inputs
    for keras_non_input_layer in keras_non_input_layers:
        deeplift_layers =\
            keras_layer_to_deeplift_blobs[id(keras_non_input_layer)]
        previous_keras_layers = get_previous_layer(keras_non_input_layer)
        # a Merge has a LIST of predecessors; other layers have one
        if (isinstance(previous_keras_layers, list)):
            previous_deeplift_layers =\
                [keras_layer_to_deeplift_blobs[id(x)][-1]
                 for x in previous_keras_layers]
            deeplift_layers[0].set_inputs(previous_deeplift_layers)
        else:
            previous_deeplift_layer =\
                keras_layer_to_deeplift_blobs[id(previous_keras_layers)][-1]
            deeplift_layers[0].set_inputs(previous_deeplift_layer)
    if (auto_build_outputs):
        for layer in model.outputs.values():
            layer_to_build = keras_layer_to_deeplift_blobs[id(layer)][-1]
            layer_to_build.build_fwd_pass_vars()
    return models.GraphModel(name_to_blob=name_to_blob,
                             input_layer_names=model.inputs.keys())
def test_relu_after_other_layer(self):
    """A ReLU directly on an Input: building the graph must not raise."""
    inp = blobs.Input(num_dims=None, shape=(None, 4))
    relu = blobs.ReLU(
        nonlinear_mxts_mode=NonlinearMxtsMode.DeepLIFT_GenomicsDefault)
    relu.set_inputs(inp)
    relu.build_fwd_pass_vars()