Example #1
    def network_initializer(self, max_timesteps=None):
        ### build encoder
        enc_input = Input(shape=(self.max_length, ),
                          dtype='int32',
                          name='input')
        enc_embedding = Embedding(self.vocab_size,
                                  self.embedding_size,
                                  name='embedding')(enc_input)

        # stacked 1-D convolutions, one branch per kernel size
        # (self.kernel_sizes[i] is the filter count for kernel size i + 1;
        #  a value of 0 skips that branch)
        convos = []
        for i, hs in enumerate(self.kernel_sizes):
            if hs > 0:
                convo_layer = Conv1D(filters=hs,
                                     kernel_size=i + 1,
                                     padding='same')(enc_embedding)
                if self.L_pooling > 1:
                    convo_layer = MaxPooling1D(
                        pool_size=self.L_pooling)(convo_layer)
                convo_layer = Flatten()(convo_layer)
                convos.append(convo_layer)
        enc_output = Concatenate()(convos) if len(convos) > 1 else convos[0]
        enc_output = Lambda(lambda x: K.l2_normalize(x, axis=1),
                            name=BioactivityLSTM.ENCODING_NAME)(enc_output)

        ### output layer

        out_layer = Dense(1, kernel_regularizer=L1L2(l2=self.l2))(enc_output)
        self.model = Model(inputs=enc_input, outputs=out_layer)
        self.model.compile(optimizer=self.optimizer, loss='mse')
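
For context, Examples #1 and #2 are methods of a model-builder class (BioactivityLSTM.ENCODING_NAME is a class constant defined elsewhere in that project), so they assume an import header roughly like the following. This is a sketch for standalone Keras; under tf.keras only the import paths change:

from keras.layers import (Input, Embedding, Conv1D, MaxPooling1D, Flatten,
                          Concatenate, Lambda, Dense, LSTM)
from keras.models import Model
from keras.regularizers import L1L2
from keras import backend as K
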
Example #2
    def network_initializer(self, max_timesteps=None):
        ### build encoder
        enc_input = Input(shape=(None, ), dtype='int32', name='input')
        enc_embedding = Embedding(self.vocab_size,
                                  self.embedding_size,
                                  mask_zero=True,
                                  name='embedding')(enc_input)

        # stacking encoding LSTMs
        hidden_states = []  # collects (h, c) pairs from every LSTM layer
        enc_layer = enc_embedding
        for i, layer_size in enumerate(self.layer_sizes):
            # every layer except the last feeds its full sequence to the next LSTM
            return_sequences = (i != len(self.layer_sizes) - 1)
            enc_layer, hidden_state, cell_state = LSTM(
                layer_size,
                return_sequences=return_sequences,
                return_state=True,
                name='lstm_%d' % (i + 1))(enc_layer)
            hidden_states += [hidden_state, cell_state]

        # concatenate all LSTM states and L2-normalize the combined encoding
        enc_output = Concatenate()(hidden_states)
        enc_output = Lambda(lambda x: K.l2_normalize(x, axis=1),
                            name=BioactivityLSTM.ENCODING_NAME)(enc_output)

        ### output layer
        out_layer = Dense(1, kernel_regularizer=L1L2(l2=self.l2))(enc_output)
        self.model = Model(inputs=enc_input, outputs=out_layer)
        self.model.compile(optimizer=self.optimizer, loss='mse')
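
Both encoders finish by L2-normalizing along axis 1, so every sample's encoding has unit norm. A quick standalone check of what K.l2_normalize does (values chosen for illustration):

from keras import backend as K

v = K.constant([[3.0, 4.0]])
print(K.eval(K.l2_normalize(v, axis=1)))  # [[0.6 0.8]] -- a unit-length vector
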
Example #3
def build_fr_model(input_shape):
    resnet_model = InceptionResNetV2(include_top=False,
                                     weights='imagenet',
                                     input_shape=input_shape,
                                     pooling='avg')
    image_input = resnet_model.input

    # take the pooled features of the last layer
    x = resnet_model.layers[-1].output

    # add a dense layer
    out = Dense(128)(x)

    # build the embedding model
    embedder_model = Model(inputs=[image_input], outputs=[out])

    # input layer
    input_layer = Input(shape=input_shape)

    # embed the input
    x = embedder_model(input_layer)
    output = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)

    model = Model(inputs=[input_layer], outputs=[output])

    return model
Example #4
def build_fr_model(input_shape):
    resnet_model = InceptionResNetV2(include_top=False, weights='imagenet', input_shape=input_shape, pooling='avg')
    image_input = resnet_model.input
    x = resnet_model.layers[-1].output
    out = Dense(128)(x)
    embedder_model = Model(inputs=[image_input], outputs=[out])

    input_layer = Input(shape=input_shape)

    x = embedder_model(input_layer)
    output = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)

    model = Model(inputs=[input_layer], outputs=[output])
    return model
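
A quick way to smoke-test the embedder (a sketch: the 160x160 input shape and random batch are made up, and swapping weights='imagenet' for weights=None inside build_fr_model avoids the weight download):

import numpy as np

model = build_fr_model((160, 160, 3))
faces = np.random.rand(2, 160, 160, 3).astype('float32')
embeddings = model.predict(faces)
print(embeddings.shape)                     # (2, 128)
print(np.linalg.norm(embeddings, axis=-1))  # ~1.0 per row, thanks to the Lambda
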
Example #5
def triplet_loss(y_true, y_pred):
    # y_pred arrives as one stacked batch of [anchors; positives; negatives]
    y_pred = K.l2_normalize(y_pred, axis=1)
    batch = BAT_SIZE
    ref1 = y_pred[0:batch, :]              # anchor embeddings
    pos1 = y_pred[batch:2 * batch, :]      # positive embeddings
    neg1 = y_pred[2 * batch:3 * batch, :]  # negative embeddings
    dis_pos = K.sqrt(K.sum(K.square(ref1 - pos1), axis=1, keepdims=True))
    dis_neg = K.sqrt(K.sum(K.square(ref1 - neg1), axis=1, keepdims=True))
    a1 = 0.6  # margin
    # triplet hinge plus an extra dis_pos term that also pulls anchor and
    # positive together directly
    d1 = dis_pos + K.maximum(0.0, dis_pos - dis_neg + a1)
    return K.mean(d1)
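
Because the loss slices y_pred by position, each training batch has to be stacked as [anchors; positives; negatives] along axis 0, with BAT_SIZE samples per group. A hypothetical wiring (BAT_SIZE, embedding_model, and the x_* arrays are placeholders, not part of the snippet):

import numpy as np

BAT_SIZE = 32

x_batch = np.concatenate([x_anchor, x_pos, x_neg], axis=0)
y_dummy = np.zeros((3 * BAT_SIZE, 1))  # targets are ignored by triplet_loss

embedding_model.compile(optimizer='adam', loss=triplet_loss)
embedding_model.train_on_batch(x_batch, y_dummy)
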
Example #6
def cos_distance(y_true, y_pred):
    # cosine distance = 1 - cosine similarity, averaged over the batch
    y_true = K.l2_normalize(y_true, axis=-1)
    y_pred = K.l2_normalize(y_pred, axis=-1)
    return K.mean(1 - K.sum((y_true * y_pred), axis=-1))
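
With the standard (y_true, y_pred) signature this drops straight into compile as a custom loss; identical directions evaluate to zero:

from keras import backend as K

a = K.constant([[1.0, 0.0], [0.0, 1.0]])
print(K.eval(cos_distance(a, a)))  # 0.0
# some_model.compile(optimizer='adam', loss=cos_distance)  # placeholder model
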
Example #7
def my_cosine_distance(tensors):
    if len(tensors) != 2:
        raise ValueError('my_cosine_distance expects exactly two tensors')
    a = K.l2_normalize(tensors[0], axis=-1)
    b = K.l2_normalize(tensors[1], axis=-1)
    # note: K.mean averages over *all* elements, not just the feature axis
    return 1 - K.mean(a * b)
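
Evaluating it directly shows the effect of that all-element mean: two identical unit vectors come out at 0.5 rather than 0, because the element-wise products [0.36, 0.64] are averaged instead of summed per sample:

from keras import backend as K

a = K.constant([[0.6, 0.8]])
print(K.eval(my_cosine_distance([a, a])))  # 0.5, not 0.0
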
Example #8
def l2_normalize_layer():
    return Lambda(lambda x: K.l2_normalize(x, axis=-1))
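
The factory makes the normalization reusable anywhere in a functional model; a minimal sketch (layer sizes are arbitrary):

from keras.layers import Input, Dense
from keras.models import Model

inp = Input(shape=(64,))
feat = Dense(32)(inp)
feat = l2_normalize_layer()(feat)  # rows of feat now have unit L2 norm
model = Model(inp, feat)
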
Example #9
def my_cosine_proximity(tensors):
    a = K.l2_normalize(tensors[0], axis=-1)
    b = K.l2_normalize(tensors[1], axis=-1)
    # as in Example #7, the mean runs over all elements rather than per sample
    return 1 - K.mean(a * b)