Code Example #1
from keras import backend as K


def contrastive_loss_old(labels, dists):

    label_first = labels[0:1, :]
    other_labels = labels[1:, :]

    labels_shifted = K.concatenate(
        [labels, other_labels, label_first],
        axis=0)  #   [ l1 ........ ln  | l2 ... ln  l1 ]
    labels_orig = K.concatenate(
        [labels, labels], axis=0)  #   [ l1 ........ ln  | l1 ... ln-1 ln ]
    zeros = K.zeros_like(labels_orig)  #   [ 0  ........  0  | 0  ...   0   0 ]
    h = K.cast(K.equal(labels_orig - labels_shifted, zeros),
               dtype='float32')  #   [ 1  1 ......  1  | 0  ...   1   0 ]
    # h:   ALL ONES       |    MOST ZEROS
    # h[i] = 1  where labels_orig[i] == labels_shifted[i]  (i-th image correlated with i+1-th image, i.e. same artwork)
    # h[i] = 0  where labels_orig[i] != labels_shifted[i]

    first_dist = dists[0:1]
    other_dists = dists[1:]
    shifted_dists = K.concatenate(
        [dists, other_dists, first_dist],
        axis=0)  # [ d1 ........ dn  | d2 ... dn  d1 ]

    # equation:  L_con = (1/2N) * SUM_i [ h(i) * d(i)^2 + (1 - h(i)) * max(1 - d(i), 0)^2 ]
    Z = K.zeros_like(shifted_dists)
    max_z_sd = K.max(K.stack([1 - shifted_dists, Z]), axis=0, keepdims=False)
    #max_z_sd = K.sqrt(K.cast(K.shape(shifted_dists)[0], dtype='float32')) - shifted_dists

    first_operand = h * K.square(shifted_dists)
    second_operand = (1 - h) * K.square(max_z_sd)
    tensor_sum = first_operand + second_operand
    sum = K.sum(tensor_sum, axis=0) / K.cast(K.shape(shifted_dists)[0],
                                             dtype='float32')

    return K.mean(sum)
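
A minimal sketch (not from the original project) that just evaluates the loss on made-up label/distance tensors; samples are paired with their batch neighbours inside the loss, so the adjacent rows below share or differ in label on purpose:

import numpy as np
from keras import backend as K

# four samples: the first two share a label, the last two share another label
toy_labels = K.constant(np.array([[1.], [1.], [2.], [2.]], dtype='float32'))
toy_dists = K.constant(np.array([[0.1], [0.2], [0.9], [0.3]], dtype='float32'))

print(K.eval(contrastive_loss_old(toy_labels, toy_dists)))  # a single scalar loss value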
Code Example #2

from keras import backend as K
from keras.layers import Concatenate, Conv2D, Lambda, Permute, multiply


def spatial_attention(input_feature):
    kernel_size = 7
    if K.image_data_format() == "channels_first":
        channel = input_feature._keras_shape[1]
        cbam_feature = Permute((2, 3, 1))(input_feature)
    else:
        channel = input_feature._keras_shape[-1]
        cbam_feature = input_feature

    avg_pool = Lambda(lambda x: K.mean(x, axis=3, keepdims=True))(cbam_feature)
    assert avg_pool._keras_shape[-1] == 1
    max_pool = Lambda(lambda x: K.max(x, axis=3, keepdims=True))(cbam_feature)
    assert max_pool._keras_shape[-1] == 1
    concat = Concatenate(axis=3)([avg_pool, max_pool])
    assert concat._keras_shape[-1] == 2
    cbam_feature = Conv2D(filters=1,
                          kernel_size=kernel_size,
                          strides=1,
                          padding='same',
                          activation='sigmoid',
                          kernel_initializer='he_normal',
                          use_bias=False)(concat)
    assert cbam_feature._keras_shape[-1] == 1

    if K.image_data_format() == "channels_first":
        cbam_feature = Permute((3, 1, 2))(cbam_feature)

    return multiply([input_feature, cbam_feature])
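
A short usage sketch (assumed, not part of the original snippet): applying the block to a small channels_last feature map, with a Keras version where tensors still expose _keras_shape, as the function above requires. The input size and filter count are arbitrary:

from keras.layers import Input, Conv2D
from keras.models import Model

inp = Input(shape=(32, 32, 3))
feat = Conv2D(16, 3, padding='same', activation='relu')(inp)
attended = spatial_attention(feat)   # same shape as feat, re-weighted by the spatial attention map
model = Model(inp, attended)
model.summary()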
Code Example #3
    def softmax_with_temp(args):
        logits, temperature = args
        repeat_num = K.shape(logits)[1]
        temperature_repeated = RepeatVector(repeat_num)(temperature)
        # shape == (batch_size, seq_len, 1), assuming temperature has shape (batch_size, 1)
        scaled_logits = logits / temperature_repeated
        # shape == (batch_size, seq_len, vocab_size)

        # for numerical stability (e.g. for low temperatures):
        scaled_logits = scaled_logits - K.max(
            scaled_logits, axis=2, keepdims=True)
        # shape == (batch_size, seq_len, vocab_size)
        transformed_probs = K.softmax(scaled_logits)
        # shape == (batch_size, seq_len, vocab_size)
        return transformed_probs
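
To make the temperature's effect concrete, here is a NumPy-only re-implementation of the same transformation (scale the logits by 1/T, subtract the per-row max for stability, softmax over the vocabulary axis); it is not the author's code, and the toy shapes and temperatures are made up:

import numpy as np

def softmax_with_temp_np(logits, temperature):
    # logits: (batch, seq_len, vocab_size), temperature: (batch, 1)
    scaled = logits / temperature[:, :, None]             # broadcast over seq_len and vocab
    scaled = scaled - scaled.max(axis=2, keepdims=True)    # numerical stability
    exp = np.exp(scaled)
    return exp / exp.sum(axis=2, keepdims=True)

logits = np.random.randn(2, 5, 10)
sharp = softmax_with_temp_np(logits, np.full((2, 1), 0.1))   # low T -> near one-hot rows
flat = softmax_with_temp_np(logits, np.full((2, 1), 2.0))    # high T -> flatter rows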
Code Example #4
def lstm_attention(X_train, y_train, X_test, y_test, vocab_size):
    X_train = sequence.pad_sequences(X_train, maxlen=MAX_LEN)
    X_test = sequence.pad_sequences(X_test, maxlen=MAX_LEN)

    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)

    print('Build model...')
    model = Sequential()

    # data
    data = Input(shape=(MAX_LEN, ), dtype='int32', name='data')

    # embedding
    embedding = Embedding(vocab_size,
                          EMBED_SIZE,
                          input_length=MAX_LEN,
                          dropout=0.2)
    data_embedding = embedding(data)

    # dropout
    dropout = Dropout(0.25)
    data_dropout = dropout(data_embedding)

    # rnn (RNN is assumed to be an alias for a recurrent layer class such as LSTM)
    rnn = RNN(HIDDEN_SIZE)

    data_rnn = rnn(data_dropout)
    #data_dropout = dropout(data_rnn)

    # maxpooling
    maxpool = Lambda(lambda x: K.max(x, axis=1, keepdims=False),
                     output_shape=lambda x: (x[0], x[2]))
    data_pool = maxpool(data_dropout)

    rnn = AttentionLSTM(HIDDEN_SIZE, data_pool)
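
The max-pool Lambda is where K.max does the work here: it collapses the time axis of a (batch, time, features) tensor into a (batch, features) vector. A tiny standalone check with toy shapes (not from the original project); the Lambda above wraps exactly this reduction:

import numpy as np
from keras import backend as K

x = K.constant(np.random.randn(4, 10, 8).astype('float32'))   # (batch, time, features)
print(K.eval(K.max(x, axis=1, keepdims=False)).shape)          # (4, 8): per-feature max over time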
Code Example #5
 def triplet_loss(y_true, y_pred):
     # element-wise hinge: max(0, margin + y_pred); `margin` comes from the enclosing scope
     return K.maximum(margin + y_pred, 0.)
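
To make the hinge concrete, a tiny toy evaluation (values made up); y_pred is assumed to be the per-sample distance difference d(anchor, positive) - d(anchor, negative) produced by the model:

import numpy as np
from keras import backend as K

margin = 0.2                                                        # assumed hyper-parameter
diffs = K.constant(np.array([-0.5, -0.1, 0.3], dtype='float32'))    # d(a, p) - d(a, n) per sample
print(K.eval(K.maximum(margin + diffs, 0.)))                        # [0.  0.1  0.5]: only margin violations contribute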
Code Example #6
from keras import backend as K


def reed_hard_loss(y_true, y_pred):
    '''Expects a binary class matrix instead of a vector of scalar classes.
    '''
    return -K.log(K.max(y_pred, axis=1, keepdims=True) + 1e-8)
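
A toy evaluation (made-up predictions) showing what this loss measures: the negative log of each sample's most confident class probability, independent of the true label (y_true is ignored):

import numpy as np
from keras import backend as K

y_pred = K.constant(np.array([[0.7, 0.2, 0.1],
                              [0.4, 0.4, 0.2]], dtype='float32'))
print(K.eval(reed_hard_loss(None, y_pred)))   # approx [[0.357], [0.916]] == [[-log 0.7], [-log 0.4]]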
Code Example #7
import pickle

from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM, TimeDistributed, Lambda, Merge
from keras import backend as K

f = pickle.load(open("words.pkl", 'rb'), encoding="bytes")
embeddings = f['embeddings']
f = pickle.load(open("data.pkl", 'rb'), encoding="bytes")
train1, train2, y = f['train1'][:, :40], f['train2'][:, :40], f['y']

q1 = Sequential()
q1.add(
    Embedding(len(embeddings),
              output_dim=100,
              weights=[embeddings],
              input_length=train1.shape[1],
              trainable=False))
q1.add(TimeDistributed(Dense(100, activation='relu')))
q1.add(Lambda(lambda x: K.max(x, axis=1), output_shape=(100, )))

q2 = Sequential()
q2.add(
    Embedding(len(embeddings),
              output_dim=100,
              weights=[embeddings],
              input_length=train1.shape[1],
              trainable=False))
q2.add(TimeDistributed(Dense(100, activation='relu')))
q2.add(Lambda(lambda x: K.sum(x, axis=1), output_shape=(100, )))

model = Sequential()
model.add(Merge([q1, q2], mode='concat'))
model.add(Dense(100, activation='relu'))
model.add(Dense(100, activation='relu'))
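
The snippet stops before an output layer. A hedged way to finish a duplicate-question model like this (an assumption, not part of the original code) is a single sigmoid unit trained with binary crossentropy, in the same Keras 1.x style implied by the Merge layer:

model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit([train1, train2], y, batch_size=128, nb_epoch=5, validation_split=0.1)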
Code Example #8
File: absa_model.py  Project: dutyhong/ABSA_Keras
 def sequence_mask(sequence):
     # 1 for time steps with any non-zero feature, 0 for all-zero (padded) steps
     return K.sign(K.max(K.abs(sequence), 2))
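
A toy evaluation (made-up, zero-padded input) of the same expression, showing the mask it produces: 1 for real time steps, 0 for fully padded ones:

import numpy as np
from keras import backend as K

# one sequence of 4 steps with 3 features; the last two steps are all-zero padding
seq = K.constant(np.array([[[0.5, -1.0, 2.0],
                            [0.0,  0.3, 0.0],
                            [0.0,  0.0, 0.0],
                            [0.0,  0.0, 0.0]]], dtype='float32'))
print(K.eval(K.sign(K.max(K.abs(seq), 2))))   # [[1. 1. 0. 0.]]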