def model():
    up_0 = Input(shape=input_shape, name='up_stream_0')
    up_1 = Input(shape=input_shape, name='up_stream_1')
    down_0 = Input(shape=input_shape, name='down_stream_0')
    down_1 = Input(shape=input_shape, name='down_stream_1')

    # Shared feature extractors: the same stream is applied to both inputs
    # of a branch, so weights are tied within each stream.
    up_stream = share_stream(x_shape=input_shape)
    down_stream = share_stream(x_shape=input_shape)

    up_feature_0 = up_stream(up_0)
    up_feature_1 = up_stream(up_1)
    down_feature_0 = down_stream(down_0)
    down_feature_1 = down_stream(down_1)

    up_feature_0 = Flatten()(up_feature_0)
    up_feature_1 = Flatten()(up_feature_1)
    down_feature_0 = Flatten()(down_feature_0)
    down_feature_1 = Flatten()(down_feature_1)

    # Element-wise maximum fuses the two views of each stream.
    up_feature = Maximum()([up_feature_0, up_feature_1])
    down_feature = Maximum()([down_feature_0, down_feature_1])
    feature = concatenate([up_feature, down_feature])

    fc_1 = Dense(units=256, activation='relu', use_bias=True,
                 kernel_regularizer=l2(0.001))(feature)
    fc_1 = Dropout(0.5)(fc_1)
    fc_2 = Dense(units=128, activation='relu', use_bias=True)(fc_1)
    fc_3 = Dense(units=96, activation='relu', use_bias=True)(fc_2)
    fc_4 = Dense(units=60, activation='softmax', use_bias=True)(fc_3)

    network = Model(inputs=[up_0, up_1, down_0, down_1], outputs=fc_4)
    return network
def _encoder(self):
    # Variable-length input; a fixed-length variant using
    # self.h_params.max_seq_length was kept commented out in the original.
    encoder_input = Input(shape=(None,), dtype='int32', name=self.encoder_input)

    glove_embedding_encoder = Embedding(self.h_params.num_embeddings,
                                        self.h_params.embed_dim,
                                        weights=[self.embedding_matrix],
                                        input_length=None,
                                        trainable=False,
                                        mask_zero=True,
                                        name='GloVe_embedding_encoder')
    x = glove_embedding_encoder(encoder_input)
    x = Dense(self.h_params.embed_dim)(x)

    # merge_mode=None returns the forward and backward outputs separately.
    f_out, b_out, f_state_h, f_state_c, b_state_h, b_state_c = Bidirectional(
        LSTM(self.h_params.hidden_dim,
             dropout=self.h_params.dropout,
             recurrent_dropout=self.h_params.dropout,
             return_state=True),
        merge_mode=None, name='encoder')(x)

    # Merge the two directions' states by element-wise maximum.
    encoder_out = Maximum(name=self.encoder_output)([f_state_h, b_state_h])
    encoder_out_cell = Maximum()([f_state_c, b_state_c])
    return encoder_input, encoder_out, encoder_out_cell
def get_model(nb_words, nb_chars, embedding_matrix):
    input1 = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    input2 = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    input3 = Input(shape=(5,))

    embed1 = Embedding(nb_words, EMBEDDING_DIM, weights=[embedding_matrix],
                       input_length=MAX_SEQUENCE_LENGTH, trainable=True,
                       mask_zero=False)
    lstm0 = CuDNNLSTM(num_lstm, return_sequences=True)
    lstm1 = Bidirectional(CuDNNLSTM(num_lstm))
    lstm2 = CuDNNLSTM(num_lstm)
    att1 = Attention(10)
    den = Dense(64, activation='tanh')
    # att1 = Lambda(lambda x: K.max(x, axis=1))

    v3 = embed1(input3)  # note: v3 is computed but never used below
    v1 = embed1(input1)
    v2 = embed1(input2)
    v11 = lstm1(v1)
    v22 = lstm1(v2)
    v1ls = lstm2(lstm0(v1))
    v2ls = lstm2(lstm0(v2))
    v1 = Concatenate(axis=1)([att1(v1), v11])
    v2 = Concatenate(axis=1)([att1(v2), v22])

    # Character-level branch; the char inputs go through the char embedding.
    input1c = Input(shape=(MAX_CHAR_LENGTH,))
    input2c = Input(shape=(MAX_CHAR_LENGTH,))
    embed1c = Embedding(nb_chars, EMBEDDING_DIM)
    lstm1c = Bidirectional(CuDNNLSTM(6))
    att1c = Attention(10)
    v1c = embed1c(input1c)
    v2c = embed1c(input2c)
    v11c = lstm1c(v1c)
    v22c = lstm1c(v2c)
    v1c = Concatenate(axis=1)([att1c(v1c), v11c])
    v2c = Concatenate(axis=1)([att1c(v2c), v22c])

    # Symmetric matching features: product, |difference|, max of squares.
    mul = Multiply()([v1, v2])
    sub = Lambda(lambda x: K.abs(x))(Subtract()([v1, v2]))
    maximum = Maximum()([Multiply()([v1, v1]), Multiply()([v2, v2])])
    mulc = Multiply()([v1c, v2c])
    subc = Lambda(lambda x: K.abs(x))(Subtract()([v1c, v2c]))
    maximumc = Maximum()([Multiply()([v1c, v1c]), Multiply()([v2c, v2c])])
    sub2 = Lambda(lambda x: K.abs(x))(Subtract()([v1ls, v2ls]))
    matchlist = Concatenate(axis=1)([mul, sub, mulc, subc,
                                     maximum, maximumc, sub2])
    matchlist = Dropout(0.05)(matchlist)

    matchlist = Concatenate(axis=1)([Dense(32, activation='relu')(matchlist),
                                     Dense(48, activation='sigmoid')(matchlist)])
    res = Dense(1, activation='sigmoid')(matchlist)

    model = Model(inputs=[input1, input2, input3, input1c, input2c],
                  outputs=res)
    model.compile(optimizer=Adam(lr=0.001), loss="binary_crossentropy")
    return model
def generate_embeddings_attention(config, incoming_layer):
    if config['bidirectional_attention']:
        gru = Bidirectional(
            GRU(config['gru_units'],
                return_sequences=True,
                dropout=config['gru_dropout'],
                recurrent_dropout=config['recurrent_dropout']))(incoming_layer)
    else:
        gru = GRU(config['gru_units'],
                  return_sequences=True,
                  dropout=config['gru_dropout'],
                  recurrent_dropout=config['recurrent_dropout'])(incoming_layer)

    dense_att_1 = TimeDistributed(
        Dense(config['gru_units'],
              activation=config['attention_activation']))(gru)
    dense_att_2 = TimeDistributed(Dense(1))(dense_att_1)

    # Undo the time distribution so there is one scalar score per timestep.
    reshape_distributed = Reshape((config['max_phrase_length'],))(dense_att_2)
    attention = Activation('softmax')(reshape_distributed)

    # Reshape so the weights can be multiplied with the embeddings.
    reshape_att = Reshape((config['max_phrase_length'], 1),
                          name='reshape_att')(attention)

    if config['merge_mode'] == 'multiply':
        apply_att = Multiply()([incoming_layer, reshape_att])
    elif config['merge_mode'] == 'average':
        apply_att = Average()([incoming_layer, reshape_att])
    elif config['merge_mode'] == 'maximum':
        apply_att = Maximum()([incoming_layer, reshape_att])
    else:
        # Fail fast instead of returning an unbound name.
        raise ValueError("unknown merge_mode: %s" % config['merge_mode'])
    return apply_att
def multi_lstm_model(maxlen, input_dim):
    inputs = Input(shape=(maxlen,), dtype='float64')
    embedding = Embedding(input_dim, 256, input_length=maxlen)(inputs)

    # Three stacked stages of parallel convolutions (kernel sizes 3, 4, 5),
    # each followed by average pooling; 'same' padding keeps the branch
    # shapes identical so Maximum() can merge them element-wise.
    convs1, convs2, convs3 = [], [], []
    for kernel_size in [3, 4, 5]:
        conv1 = Conv1D(filters=64, kernel_size=kernel_size,
                       activation='relu', padding='same')(embedding)
        convs1.append(AveragePooling1D()(conv1))
    for (kernel_size, conv1) in zip([3, 4, 5], convs1):
        conv2 = Conv1D(filters=64, kernel_size=kernel_size,
                       activation='relu', padding='same')(conv1)
        convs2.append(AveragePooling1D()(conv2))
    for (kernel_size, conv2) in zip([3, 4, 5], convs2):
        conv3 = Conv1D(filters=64, kernel_size=kernel_size,
                       activation='relu', padding='same')(conv2)
        convs3.append(AveragePooling1D()(conv3))
    convs_max = Maximum()(convs3)

    lstm = LSTM(512)(convs_max)
    output = Dense(8, activation='softmax')(lstm)
    model = Model(inputs=inputs, outputs=output)
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    plot_model(model, to_file='tmp/multi_lstm_model.png', show_shapes=True)
    return model
def shrink_model(model):
    input1 = model.get_layer("input_1").input
    input2 = model.get_layer("input_2").input
    a = model.get_layer("embedding_1")(input1)
    a = model.get_layer("batch_normalization_1")(a)
    a_concat = []
    for i in range(1, 13):
        if i in [7, 9, 10]:  # determined from experiments
            print("Skipping conv-%d" % i)
            continue
        ac = model.get_layer("conv1d_%d" % i)(a)
        ac = model.get_layer("time_distributed_%d" % i)(ac)
        ac = model.get_layer("zero_padding1d_%d" % i)(ac)
        a_concat.append(ac)
    a_sum = Maximum()(a_concat)
    b = model.get_layer("embedding_2")(input2)
    x = Concatenate(axis=-1)([a, a_sum, b])
    x = model.get_layer("batch_normalization_2")(x)
    x = model.get_layer("flatten_1")(x)
    x = model.get_layer("dense_13")(x)
    out = model.get_layer("dense_14")(x)
    shrinked_model = Model(inputs=[input1, input2], outputs=out)
    return shrinked_model
def get_convo_nn2(no_word=200, n_gram=21, no_char=178):
    input1 = Input(shape=(n_gram,))
    input2 = Input(shape=(n_gram,))

    a = Embedding(no_char, 32, input_length=n_gram)(input1)
    a = SpatialDropout1D(0.15)(a)
    a = BatchNormalization()(a)

    # Parallel conv units with window sizes 1..12; wider windows get fewer filters.
    a_concat = []
    for i in range(1, 9):
        a_concat.append(conv_unit(a, n_gram, no_word, window=i))
    for i in range(9, 12):
        a_concat.append(conv_unit(a, n_gram, no_word - 50, window=i))
    a_concat.append(conv_unit(a, n_gram, no_word - 100, window=12))
    a_sum = Maximum()(a_concat)

    b = Embedding(12, 12, input_length=n_gram)(input2)
    b = SpatialDropout1D(0.15)(b)

    x = Concatenate(axis=-1)([a, a_sum, b])
    # x = Concatenate(axis=-1)([a_sum, b])
    x = BatchNormalization()(x)
    x = Flatten()(x)
    x = Dense(100, activation='relu')(x)
    out = Dense(1, activation='sigmoid')(x)

    model = Model(inputs=[input1, input2], outputs=out)
    model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=['acc'])
    return model
def _mfm(self, X, name, out_channels, kernel_size=3, strides=1, dense=False):
    """Private helper that builds an MFM (max-feature-map) block.

    Todo:
        * Might be more natural to implement this as a custom layer, like
          the commented-out code at the bottom of this file.
    """
    if dense:
        X = Dense(out_channels * 2, name=name + '_dense1',
                  kernel_regularizer=regularizers.l2(0.0005))(X)
    else:
        X = Conv2D(out_channels * 2, name=name + '_conv2d1',
                   kernel_size=kernel_size,
                   kernel_regularizer=regularizers.l2(0.0005),
                   strides=strides, padding='same')(X)
    # Produce 2*out_channels features, split them in half along the channel
    # axis, and keep the element-wise maximum of the two halves.
    X = Maximum()([
        Lambda(lambda x, c: x[..., :c], arguments={'c': out_channels})(X),
        Lambda(lambda x, c: x[..., c:], arguments={'c': out_channels})(X)
    ])
    return X
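# As the Todo above suggests, MFM can also be written as a small custom
# layer. This is a minimal sketch, not the author's implementation; the
# class name MFM and the tensorflow import are assumptions.
import tensorflow as tf
from keras.layers import Layer

class MFM(Layer):
    """Max-feature-map: split the channel axis in half, take the element-wise max."""

    def call(self, inputs):
        # Split the last axis into two equal halves and keep the larger value.
        a, b = tf.split(inputs, num_or_size_splits=2, axis=-1)
        return tf.maximum(a, b)

    def compute_output_shape(self, input_shape):
        # The channel axis is halved; all other axes are unchanged.
        return input_shape[:-1] + (input_shape[-1] // 2,)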
def double_stranded_model(inputs, inp, oup, params):
    """Keras model for scanning both DNA strands.

    Sequence patterns may be present on either strand. Scanning both DNA
    strands with the same motifs (kernels) generally improves the model's
    performance. Below, this is achieved by reverse-complementing the input
    tensor while keeping the convolution filters fixed.
    """
    with inputs.use('dna') as layer:
        # The name in inputs.use() should match the dataset name.
        forward = layer
    convlayer = Conv2D(params[0], (params[1], 1), activation=params[2])
    revcomp = Reverse()(forward)
    revcomp = Complement()(revcomp)
    forward = convlayer(forward)
    revcomp = convlayer(revcomp)
    revcomp = Reverse()(revcomp)
    layer = Maximum()([forward, revcomp])
    output = LocalAveragePooling2D(window_size=layer.shape.as_list()[1],
                                   name='motif')(layer)
    return inputs, output
def conv_lstm():
    embed_size = 301
    num_filters = 64
    maxlen = 7000
    kernel_size = [3, 5, 7]

    main_input = Input(shape=(maxlen,))
    emb = Embedding(embed_size, 256, input_length=maxlen)(main_input)
    # emb = SpatialDropout1D(0.15)(emb)

    # Three stacked stages of parallel convolutions (kernel sizes 3, 5, 7),
    # halving the sequence length with average pooling after each stage.
    wrappers, wrappers2, wrappers3 = [], [], []
    for _kernel_size in kernel_size:
        conv1d = Conv1D(filters=num_filters, kernel_size=_kernel_size,
                        activation='relu', padding='same')(emb)
        wrappers.append(AveragePooling1D(2)(conv1d))
    for (_kernel_size, cnn) in zip(kernel_size, wrappers):
        conv1d_2 = Conv1D(filters=num_filters, kernel_size=_kernel_size,
                          activation='relu', padding='same')(cnn)
        wrappers2.append(AveragePooling1D(2)(conv1d_2))
    for (_kernel_size, cnn) in zip(kernel_size, wrappers2):
        conv1d_3 = Conv1D(filters=num_filters, kernel_size=_kernel_size,
                          activation='relu', padding='same')(cnn)
        wrappers3.append(AveragePooling1D(2)(conv1d_3))

    fc = Maximum()(wrappers3)
    rl = CuDNNLSTM(128)(fc)
    main_output = Dense(8, activation='softmax')(rl)
    model = Model(inputs=main_input, outputs=main_output)
    return model
def build_generator(self):
    """U-Net Generator"""

    def conv2d(layer_input, filters, f_size=4, bn=True):
        """Layers used during downsampling"""
        d = Conv2D(filters, kernel_size=f_size, strides=2,
                   padding='same')(layer_input)
        d = LeakyReLU(alpha=0.2)(d)
        if bn:
            d = BatchNormalization(momentum=0.8)(d)
        return d

    def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
        """Layers used during upsampling"""
        u = UpSampling2D(size=2)(layer_input)
        u = Conv2D(filters, kernel_size=f_size, strides=1,
                   padding='same', activation='relu')(u)
        if dropout_rate:
            u = Dropout(dropout_rate)(u)
        u = BatchNormalization(momentum=0.8)(u)
        u = Concatenate()([u, skip_input])
        return u

    # Image inputs: the two images are fused by an element-wise maximum
    # before entering the encoder.
    d01 = Input(shape=self.img_shape)
    d02 = Input(shape=self.img_shape)
    d0 = Maximum()([d01, d02])

    # Downsampling
    d1 = conv2d(d0, self.gf, bn=False)
    d2 = conv2d(d1, self.gf * 2)
    d3 = conv2d(d2, self.gf * 4)
    d4 = conv2d(d3, self.gf * 8)
    d5 = conv2d(d4, self.gf * 8)
    d6 = conv2d(d5, self.gf * 8)
    d7 = conv2d(d6, self.gf * 8)

    # Upsampling
    u1 = deconv2d(d7, d6, self.gf * 8)
    u2 = deconv2d(u1, d5, self.gf * 8)
    u3 = deconv2d(u2, d4, self.gf * 8)
    u4 = deconv2d(u3, d3, self.gf * 4)
    u5 = deconv2d(u4, d2, self.gf * 2)
    u6 = deconv2d(u5, d1, self.gf)

    u7 = UpSampling2D(size=2)(u6)
    output_img = Conv2D(self.channels, kernel_size=4, strides=1,
                        padding='same', activation='tanh')(u7)

    return Model([d01, d02], output_img)
def _cnn_model2(inputs, inp, oup, params):
    with inputs.use('dna') as inlayer:
        layer = inlayer
    conv = Conv2D(5, (3, 1), name='singlestrand')
    # Scan the forward strand and the reverse complement with shared
    # weights, then keep the element-wise maximum of the two scans.
    fl = conv(layer)
    rl = Reverse()(conv(Complement()(Reverse()(inlayer))))
    layer = Maximum()([fl, rl])
    return inputs, layer
def make_activation(activation, alpha=None, beta=None, share_params=False,
                    blu_reg=0, aplu_segments=3, **kwargs):
    shared_axes = (1, 2) if share_params else None
    if activation == 'aplu':
        return APLU(segments=aplu_segments, shared_axes=shared_axes)
    elif activation == 'blu' or activation == 'blu-beta':
        return BLU(alpha=alpha or 0.5, shared_axes=shared_axes,
                   beta_regularizer=l2(blu_reg))
    elif activation == 'blu-alpha':
        return BLU(beta=beta or 0.5, parametric_alpha=True,
                   parametric_beta=False, shared_axes=shared_axes,
                   alpha_regularizer=l2(blu_reg))
    elif activation == 'blu-alpha-beta':
        return BLU(parametric_alpha=True, shared_axes=shared_axes,
                   alpha_regularizer=l2(blu_reg),
                   beta_regularizer=l2(blu_reg))
    elif activation == 'blu-const':
        return BLU(alpha=alpha or 0.5, beta=beta or 0.5,
                   parametric_beta=False, shared_axes=shared_axes)
    elif activation == 'lrelu':
        return LeakyReLU(alpha=alpha)
    elif activation == 'maxout':
        # Maximum() is a merge layer: unlike the other activations here,
        # it must be called on a list of tensors.
        return Maximum()
    elif activation == 'pelu':
        return PELU(shared_axes=shared_axes)
    elif activation == 'prelu':
        return PReLU(shared_axes=shared_axes)
    elif activation == 'srelu':
        return ScaledReLU(alpha=alpha or 0.5, beta=beta or 1.5,
                          shared_axes=shared_axes)
    elif activation == 'srelu-alpha':
        return ScaledReLU(parametric_alpha=True, beta=beta or 1.5,
                          shared_axes=shared_axes)
    elif activation == 'srelu-beta':
        return ScaledReLU(alpha=alpha or 0.5, parametric_beta=True,
                          shared_axes=shared_axes)
    elif activation == 'srelu-alpha-beta':
        return ScaledReLU(parametric_alpha=True, parametric_beta=True,
                          shared_axes=shared_axes)
    elif activation == 'softexp':
        return SoftExp(shared_axes=shared_axes)
    else:
        return Activation(activation)
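# Hypothetical usage sketch (the input tensor `x` and the surrounding model
# are assumed, not part of the original): make_activation returns an
# uncalled layer, so it is applied functionally.
x = Conv2D(64, 3, padding='same')(x)
x = make_activation('prelu', share_params=True)(x)  # PReLU shared over H and W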
def maxout_dense(x, n_units, n_layers, l2_reg=None):
    # Maxout: run several parallel Dense layers over the same input and
    # keep the element-wise maximum of their outputs.
    dense_list = [
        Dense(n_units,
              kernel_regularizer=_create_if_not_none(l2, l2_reg))(x)
        for _ in range(n_layers)
    ]
    x = Maximum()(dense_list)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    return x
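# Usage sketch (the tensor name `features` is assumed). Note that Maximum()
# requires at least two inputs, so n_layers must be >= 2.
h = maxout_dense(features, n_units=128, n_layers=2, l2_reg=1e-4)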
def mfm(x):
    shape = K.int_shape(x)
    x = Permute(dims=(3, 2, 1))(x)  # swap axes 1 <-> 3 (channels to front)
    # Crop the (permuted) channel axis into its two halves.
    x1 = Cropping2D(cropping=((0, shape[3] // 2), 0))(x)
    x2 = Cropping2D(cropping=((shape[3] // 2, 0), 0))(x)
    x = Maximum()([x1, x2])
    x = Permute(dims=(3, 2, 1))(x)  # swap axes 1 <-> 3 back
    x = Reshape([shape[1], shape[2], shape[3] // 2])(x)
    return x
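# Quick shape check for mfm, assuming a channels-last 4D tensor; the input
# shape here is illustrative, not from the original code.
inp = Input(shape=(8, 8, 32))
out = mfm(inp)
print(K.int_shape(out))  # (None, 8, 8, 16) -- the channel axis is halved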
def build_resnet_18_merged(input_shape, num_outputs, merge_type='concat'):
    branches = input_shape[2]  # number of branches == number of views
    input_shape[2] = 1  # convert (PLANES, CELLS, VIEWS) into (PLANES, CELLS, 1)

    branches_inputs = []   # inputs of the network branches
    branches_outputs = []  # outputs of the network branches
    for branch in range(branches):
        # generate a branch and save its input and output
        branch_model = ResnetBuilder.build(input_shape, num_outputs,
                                           basic_block, [2, 2, 2, 2],
                                           branches=True)
        branches_inputs.append(branch_model.input)
        branches_outputs.append(branch_model.output)

    # merge the branches
    if merge_type == 'add':
        merged = Add()(branches_outputs)
    elif merge_type == 'sub':
        merged = Subtract()(branches_outputs)
    elif merge_type == 'mul':
        merged = Multiply()(branches_outputs)
    elif merge_type == 'avg':
        merged = Average()(branches_outputs)
    elif merge_type == 'max':
        merged = Maximum()(branches_outputs)
    elif merge_type == 'dot':
        # note: Dot requires an axes argument and, like Subtract,
        # supports exactly two branches
        merged = Dot(axes=-1)(branches_outputs)
    else:
        merged = Concatenate()(branches_outputs)

    # dense output layer
    dense = Dense(units=num_outputs, kernel_initializer="he_normal",
                  activation="softmax")(merged)

    # generate final model
    model = Model(branches_inputs, dense)
    # model = Model(branches_inputs, merged)
    return model
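# Usage sketch (all shape values assumed): three views merged by
# element-wise maximum. input_shape follows the (PLANES, CELLS, VIEWS)
# convention used above and must be a mutable list.
model = build_resnet_18_merged([8, 127, 3], num_outputs=5, merge_type='max')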
def build_model(self, input_shape, n_classes):
    # Four frozen Xception feature extractors, one per input.
    xception_1 = self.xception(include_top=False, pooling='max')
    for layer in xception_1.layers:
        layer.trainable = False
    input_1 = xception_1.input
    output_1 = xception_1.output

    xception_2 = self.xception(include_top=False, pooling='max')
    for layer in xception_2.layers:
        layer.trainable = False
    input_2 = xception_2.input
    output_2 = xception_2.output

    xception_3 = self.xception(include_top=False, pooling='max')
    for layer in xception_3.layers:
        layer.trainable = False
    input_3 = xception_3.input
    output_3 = xception_3.output

    xception_4 = self.xception(include_top=False, pooling='max')
    for layer in xception_4.layers:
        layer.trainable = False
    input_4 = xception_4.input
    output_4 = xception_4.output

    # Merge the four pooled feature vectors by element-wise maximum.
    concat_layer = Maximum()([output_1, output_2, output_3, output_4])
    # concat_layer = Dropout(0.25)(concat_layer)
    # dense_layer1 = Dense(units=1024, activation='relu')(concat_layer)
    dense_layer1 = Dropout(0.5)(concat_layer)
    output_layer = Dense(n_classes, activation='softmax',
                         name='predictions')(dense_layer1)

    model = Model(inputs=[input_1, input_2, input_3, input_4],
                  outputs=[output_layer])
    model.summary()
    plot_model(model, to_file=self.output_directory + '/model_graph.png',
               show_shapes=True, show_layer_names=True)
    model.compile(loss=categorical_crossentropy, optimizer=Adam(lr=0.01),
                  metrics=['acc'])

    # model save
    file_path = self.output_directory + '/best_model.hdf5'
    model_checkpoint = keras.callbacks.ModelCheckpoint(filepath=file_path,
                                                       monitor='loss',
                                                       save_best_only=True)

    # TensorBoard log
    log_dir = self.output_directory + '/tf_logs'
    chk_n_mkdir(log_dir)
    tb_cb = TrainValTensorBoard(log_dir=log_dir)
    self.callbacks = [model_checkpoint, tb_cb]
    return model
def TSNs_SpatialStream(input_shape, classes, num_segments=3,
                       base_model='Xception', dropout_prob=0.8,
                       consensus_type='avg', partial_bn=True):
    """
    Spatial stream of the Temporal Segment Networks
    (https://arxiv.org/pdf/1705.02953.pdf), defined as a multi-input
    Keras model.
    """
    # Define the shared layers and base conv net, and enable the partial
    # batch-normalization strategy (freeze every BN layer except the first).
    inputs = [Input(input_shape) for _ in range(num_segments)]
    dropout = Dropout(dropout_prob)
    dense = Dense(classes, activation=None)
    act = Activation(activation='softmax', name='prediction')
    models_dict = dict(getmembers(keras.applications, isfunction))
    base = models_dict[base_model](include_top=False, pooling='avg')
    if partial_bn:
        num_bn_layers = 0
        for layer in base.layers:
            if isinstance(layer, BatchNormalization):
                num_bn_layers += 1
                if num_bn_layers != 1:
                    layer.trainable = False

    # Pass multiple inputs (depending on num_segments) through the base
    # conv net
    outputs = []
    visual_features = []
    for seg_input in inputs:
        seg_output = base(seg_input)
        visual_features.append(seg_output)
        seg_output = dropout(seg_output)
        seg_output = dense(seg_output)
        outputs.append(seg_output)

    # Use a consensus function to combine class scores
    if consensus_type == 'avg':
        output = Average()(outputs)
    elif consensus_type == 'max':
        output = Maximum()(outputs)
    elif consensus_type == 'attention':
        weighted_outputs = []
        attn_layer = Dense(1, use_bias=False, name='attn_layer')
        attn_weights = [attn_layer(_) for _ in visual_features]
        attn_weights = Lambda(lambda x: K.concatenate(x, axis=-1),
                              name='concatenate')(attn_weights)
        attn_weights = Activation('softmax')(attn_weights)
        for i, seg_output in enumerate(outputs):
            # Bind i at definition time; a bare closure over the loop
            # variable would make every Lambda slice with the final i.
            weight = Lambda(lambda x, idx=i: x[:, idx])(attn_weights)
            weighted_outputs.append(Multiply()([weight, seg_output]))
        output = Add()(weighted_outputs)

    output = act(output)
    model = Model(inputs, output)
    return model
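# Usage sketch (input shape and class count assumed): a 3-segment spatial
# stream with max consensus over the per-segment class scores.
model = TSNs_SpatialStream(input_shape=(224, 224, 3), classes=101,
                           num_segments=3, consensus_type='max')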
def base_network():
    X_input = Input((224, 224, 3))

    X = Conv2D(64, (7, 7), strides=(2, 2), activation='relu', padding='same',
               data_format='channels_last')(X_input)
    X = BatchNormalization()(X)
    X = MaxPool2D((3, 3), strides=(2, 2), padding='same')(X)

    X = Conv2D(64, (1, 1), activation='relu')(X)
    X = Conv2D(192, (3, 3), activation='relu', padding='same')(X)
    X = BatchNormalization()(X)
    X = MaxPool2D(padding='same')(X)

    X = Conv2D(192, (1, 1), activation='relu')(X)
    X = Conv2D(384, (3, 3), activation='relu', padding='same')(X)
    X = BatchNormalization()(X)
    X = MaxPool2D()(X)

    X = Conv2D(384, (1, 1), activation='relu')(X)
    X = Conv2D(256, (3, 3), activation='relu', padding='same')(X)
    X = Conv2D(256, (1, 1), activation='relu')(X)
    X = Conv2D(256, (3, 3), activation='relu', padding='same')(X)
    X = Conv2D(256, (1, 1), activation='relu')(X)
    X = Conv2D(256, (3, 3), activation='relu', padding='same')(X)
    X = BatchNormalization()(X)
    X = MaxPool2D((3, 3), strides=(2, 2), padding='same')(X)
    X = Flatten()(X)

    # Two maxout blocks: each takes the element-wise maximum of a pair of
    # parallel 4096-unit Dense layers.
    X_d1 = Dense(4096)(X)
    X_d2 = Dense(4096)(X)
    X = Maximum()([X_d1, X_d2])
    X_d1 = Dense(4096)(X)
    X_d2 = Dense(4096)(X)
    X = Maximum()([X_d1, X_d2])

    X = Dense(128, activation='relu')(X)
    X = Lambda(lambda x: K.l2_normalize(x, axis=1))(X)
    return Model(inputs=X_input, outputs=X)
def Model_Design():
    # Two-stream CNN model design
    up_0 = Input(shape=input_shape, name='up_stream_0')
    up_1 = Input(shape=input_shape, name='up_stream_1')
    down_0 = Input(shape=input_shape, name='down_stream_0')
    down_1 = Input(shape=input_shape, name='down_stream_1')

    up_stream = Share_Stream(x_shape=input_shape)
    down_stream = Share_Stream(x_shape=input_shape)

    up_feature_0 = up_stream(up_0)
    up_feature_1 = up_stream(up_1)
    down_feature_0 = down_stream(down_0)
    down_feature_1 = down_stream(down_1)

    up_feature_0 = Flatten()(up_feature_0)
    up_feature_1 = Flatten()(up_feature_1)
    down_feature_0 = Flatten()(down_feature_0)
    down_feature_1 = Flatten()(down_feature_1)

    # Change here to try other merges: Add, Maximum, Average, Multiply
    up_feature = Maximum()([up_feature_0, up_feature_1])
    down_feature = Maximum()([down_feature_0, down_feature_1])
    feature = concatenate([up_feature, down_feature])

    fc_1 = Dense(units=1028, activation='relu',
                 kernel_regularizer=l2(0.001))(feature)
    fc_1 = Dropout(0.5)(fc_1)
    fc_2 = Dense(units=128, activation='relu', use_bias=True)(fc_1)
    fc_3 = Dense(units=96, activation='relu', use_bias=True)(fc_2)
    fc_4 = Dense(units=60, activation='softmax', use_bias=True)(fc_3)

    network = Model(inputs=[up_0, up_1, down_0, down_1], outputs=fc_4)
    return network
def lightCNN(inputs):
    net = Conv2D(32, kernel_size=(5, 5), strides=(1, 1),
                 input_shape=input_shape)(inputs)
    net = mfm(net)
    net1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(net)

    # net2 = Conv2D(32, kernel_size=(1, 1), strides=(1, 1))(net1)
    # net2 = mfm(net2)
    net2 = Conv2dMFM(net1, f1=32, k1=(1, 1), s1=(1, 1),
                     f2=48, k2=(3, 3), s2=(1, 1), k3=(2, 2), s3=(2, 2))
    net3 = Conv2dMFM(net2, f1=48, k1=(1, 1), s1=(1, 1),
                     f2=64, k2=(3, 3), s2=(1, 1), k3=(2, 2), s3=(2, 2))
    net4 = Conv2dMFM(net3, f1=64, k1=(1, 1), s1=(1, 1),
                     f2=32, k2=(3, 3), s2=(1, 1), k3=(2, 2), s3=(2, 2))
    net5 = Conv2dMFM(net4, f1=32, k1=(1, 1), s1=(1, 1),
                     f2=32, k2=(3, 3), s2=(1, 1), k3=(2, 2), s3=(2, 2))

    net5 = Flatten()(net5)
    # Dense-layer maxout before the classifier.
    net6_1 = Dense(32)(net5)
    net6_2 = Dense(32)(net5)
    net6 = Maximum()([net6_1, net6_2])
    net7 = Dense(num_classes, activation='softmax')(net6)
    return net7
def build_model(emb_matrix, maxlen):
    emb_layer = Embedding(input_dim=emb_matrix.shape[0],
                          output_dim=emb_matrix.shape[1],
                          weights=[emb_matrix],
                          input_length=maxlen,
                          trainable=False)
    lstm0 = CuDNNLSTM(300, return_sequences=True)
    lstm1 = Bidirectional(CuDNNLSTM(150, return_sequences=True))
    lstm2 = CuDNNLSTM(300)
    attn1 = Attention(maxlen)
    attn2 = Attention(maxlen)

    seq1 = Input(shape=(maxlen,))
    seq2 = Input(shape=(maxlen,))
    emb1 = emb_layer(seq1)
    emb2 = emb_layer(seq2)
    lstm1a = lstm1(emb1)
    lstm1b = lstm1(emb2)
    lstm2a = lstm2(lstm0(lstm1a))
    lstm2b = lstm2(lstm0(lstm1b))
    v1 = Concatenate()([attn1(lstm1a), lstm2a])
    v2 = Concatenate()([attn2(lstm1b), lstm2b])

    # data_feat is a module-level global holding the handcrafted features.
    feat_input = Input(shape=(data_feat.shape[1],))
    feat_dense = BatchNormalization()(feat_input)
    feat_dense = Dense(150, activation='relu')(feat_dense)

    # Symmetric matching features: product, |difference|, max of squares.
    mul = Multiply()([v1, v2])
    sub = Lambda(lambda x: K.abs(x))(Subtract()([v1, v2]))
    maximum = Maximum()([Multiply()([v1, v1]), Multiply()([v2, v2])])
    # sub2 = Lambda(lambda x: K.abs(x))(Subtract()([lstm2a, lstm2b]))
    merge = Concatenate()([mul, sub, maximum, feat_dense])
    merge = Dropout(0.2)(merge)
    merge = BatchNormalization()(merge)
    merge = Dense(300, activation='relu')(merge)
    merge = Dropout(0.2)(merge)
    merge = BatchNormalization()(merge)
    res = Dense(1, activation='sigmoid')(merge)

    model = Model(inputs=[seq1, seq2, feat_input], outputs=res)
    model.compile(optimizer=Adam(lr=0.001), loss="binary_crossentropy",
                  metrics=['acc'])
    return model
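# A minimal numeric illustration of the matching features above, computed
# on raw backend tensors (the values are made up; K is keras.backend):
v1 = K.constant([[1., -2.]])
v2 = K.constant([[3., 1.]])
mul = v1 * v2                     # [[ 3., -2.]]  element-wise product
sub = K.abs(v1 - v2)              # [[ 2.,  3.]]  absolute difference
mx = K.maximum(v1 * v1, v2 * v2)  # [[ 9.,  4.]]  max of squares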
def double_stranded_model(inputs, inp, oup, params):
    with inputs.use('dna') as layer:
        forward = layer
    convlayer = Conv2D(params[0], (params[1], 1), activation=params[2])
    revcomp = Reverse()(forward)
    revcomp = Complement()(revcomp)
    forward = convlayer(forward)
    revcomp = convlayer(revcomp)
    revcomp = Reverse()(revcomp)
    layer = Maximum()([forward, revcomp])
    output = GlobalAveragePooling2D(name='motif')(layer)
    return inputs, output
def gan_distortion_layer(self, tensors):
    """
    all_votes = [true_votes, voter_softmax, strategic_votes]
    probs = [true_probs, voter_softmax, strategic_probs]
    tensors = all_votes + probs
    """
    # Take the worst-case (maximum) distortion across agents, skipping the
    # voter_softmax entry at index 1.
    losses = []
    for i in range(self.n_agents + 2):
        if i == 1:
            continue
        votes_ = tensors[i]
        probs_ = tensors[self.n_agents + 2 + i]
        losses.append(self.distortion([votes_, probs_]))
    return Maximum()(losses)
def ensemble(models, model_input):
    """
    Actually creates and returns the ensemble models.

    :param models: which models to ensemble
    :param model_input: input layer
    :return: average ensemble, max ensemble
    """
    outputs = [model(model_input) for model in models]

    # Average the outputs
    y_avg = Average()(outputs)
    model_avg = Model(model_input, y_avg, name='avg_ensemble')

    # Take the max of the outputs
    y_max = Maximum()(outputs)
    model_max = Model(model_input, y_max, name='max_ensemble')

    return model_avg, model_max
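# Usage sketch (the trained models m1, m2, m3, the input shape, and x_test
# are assumptions): ensemble three classifiers that share an input shape,
# then predict with the max-ensemble.
model_input = Input(shape=(32, 32, 3))
avg_model, max_model = ensemble([m1, m2, m3], model_input)
preds = max_model.predict(x_test)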
def double_stranded(model, use_maximum=False):
    inputs = model.inputs
    inputs_rc = [ReverseComplement()(i) for i in inputs]
    output = model.outputs[0]
    output_rc = model(inputs_rc)
    if len(model.output_shape) == 3:
        # If the model is a u-net, the output must be reversed.
        output_rc = Reverse()(output_rc)
    merge_layer = Maximum() if use_maximum else Average()
    outputs_merge = merge_layer([output, output_rc])
    model_merge = Model(inputs=inputs, outputs=outputs_merge)
    model_merge.compile(optimizer=model.optimizer, loss=model.loss,
                        metrics=model.metrics)
    return model_merge
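# Usage sketch (the variable trained_model is assumed): wrap a compiled
# single-strand model so every prediction is averaged over both DNA strands.
ds_model = double_stranded(trained_model, use_maximum=False)
# ds_model.predict(x) now returns the mean of the forward and
# reverse-complement predictions; use_maximum=True would take their max.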
def egrmodel2(A, X, graph_conv_filters, num_filters):  # note: A is unused
    X_input = Input(shape=(X.shape[1], X.shape[2]))
    graph_conv_filters_input = Input(shape=(graph_conv_filters.shape[1],
                                            graph_conv_filters.shape[2]))

    layer_gcnn1 = MultiGraphCNN(16, num_filters, activation='elu')(
        [X_input, graph_conv_filters_input])
    layer_gcnn1 = Dropout(0.2)(layer_gcnn1)
    layer_gcnn2 = MultiGraphCNN(16, num_filters, activation='elu')(
        [layer_gcnn1, graph_conv_filters_input])
    layer_gcnn2 = Dropout(0.2)(layer_gcnn2)
    layer_gcnn3 = MultiGraphCNN(16, num_filters, activation='elu')(
        [layer_gcnn2, graph_conv_filters_input])
    layer_gcnn3 = Dropout(0.2)(layer_gcnn3)

    # Element-wise maximum over the three graph-conv layers' outputs.
    layer_gcnn4 = Maximum()([layer_gcnn1, layer_gcnn2, layer_gcnn3])
    # layer_gcnn5 = Reshape((layer_gcnn4.shape[1] * layer_gcnn4.shape[2],))(layer_gcnn4)
    layer_gcnn5 = Flatten()(layer_gcnn4)

    layer_dense1 = Dense(50, activation='sigmoid')(layer_gcnn5)

    model = Model(inputs=[X_input, graph_conv_filters_input],
                  outputs=layer_dense1)
    model.summary()
    return model
def __init__(self, input_size):
    num_inputs = 3
    input_images = [
        Input(shape=(input_size, input_size, 3)) for _ in range(num_inputs)
    ]
    nets = [self.build_net(input_images[i], i) for i in range(num_inputs)]

    # Fuse the per-image networks by element-wise maximum (a single net is
    # passed through unchanged).
    if num_inputs < 2:
        fused_net = nets[0]
    else:
        fused_net = Maximum()(nets)

    fused_net = Dense(1024, activation='sigmoid')(fused_net)
    # fused_net = Dropout(0.5)(fused_net)
    fused_net = Dense(512, activation='sigmoid')(fused_net)
    # fused_net = Dropout(0.5)(fused_net)
    fused_net = Dense(256, activation='sigmoid')(fused_net)

    self.feature_extractor = Model(input_images, fused_net)
def _make_isa_layers(self, inputt):
    """Create the entity nodes for the isa relations and connect them
    according to the previously computed order.
    """
    # avoid the overhead of accessing class attributes inside the loop
    tt = self.ents
    procord = self.porcord
    if not procord:
        return
    # chs = set()
    for childs, parent in procord:
        # chs.update(childs)
        for child in childs:
            if not (child in tt):
                tt[child] = self.entitie_capsule(child, inputt)
        if len(childs) == 1:
            rel = self.isa_capsule(childs[0] + '_isa_' + parent, tt[childs[0]])
        else:
            # Merge sibling capsules with an element-wise maximum before
            # forming the isa relation.
            strch = '_'.join(childs)
            mm = Maximum(name='max_' + strch)([tt[i] for i in childs])
            rel = self.isa_capsule(strch + '_isa_' + parent, mm)
        tt[parent] = self.entitie_capsule(parent, rel)