Example #1
0
def cosine_error(x, delta=5):
    """Softmax cross-entropy loss over cosine similarities.

    x: list of tensors [anchor, cand_1, ..., cand_k] with k >= 1, each of
       shape (batch_size, input_dim).  The original hard-coded k == 6 (the
       old inline comment claiming ``x=[x1,x2,x3,x4]`` was stale -- the code
       indexed x[0]..x[6]); this version accepts any number of candidates.
    delta: softmax temperature/scale factor (previously a hard-coded 5).

    Returns ``-log p`` where p is the softmax probability assigned to the
    first candidate, shape (batch_size, 1).
    """
    # Scaled cosine similarity of the anchor against each candidate.
    # Each entry has shape (batch_size,).
    sims = [cosine(x[0], cand) * delta for cand in x[1:]]

    # Numerically stable -log(softmax(sims)[0]):
    #   -log p = logsumexp(sims) - sims[0]
    # Shift by the elementwise max so K.exp cannot overflow (the original
    # exponentiated the raw scaled similarities directly).
    m = sims[0]
    for s in sims[1:]:
        m = K.maximum(m, s)
    total = sum(K.exp(s - m) for s in sims)
    f = m + K.log(total) - sims[0]  # objective: -log p, shape (batch_size,)

    return K.reshape(f, (K.shape(f)[0], 1))  # return shape: (batch_size, 1)
Example #2
0
 def call(self,inputs,mask=None):
     """Memory-attention forward pass.

     inputs[0]: input sequence tensor; inputs[1]: a memory/query vector.
     The sequence is projected into a "context" and a "memory"
     representation; each timestep's memory projection is scored against
     the query by an inner product, the scores are softmax-normalized,
     and the context is scaled by the resulting weights.
     # assumes inputs[0] is (batch, input_num, dim) -- TODO confirm
     """
     #w_c=K.repeat(self.W_c,self.input_num)
     #w_m=K.repeat(self.W_m,self.input_num)
 
     x=inputs[0]
     mem_vector=inputs[1]
     
     c=K.dot(x,self.W_c)+self.b_c #context vector
     m=K.dot(x,self.W_m)+self.b_m #memory vector
     mem_vec=K.repeat(mem_vector,self.input_num) #tile the query for the inner product
     m=K.sum(m*mem_vec,axis=2,keepdims=False)  # per-timestep score
     s=K.softmax(m)  #softmax over timesteps
     s=K.reshape(s,(-1,self.input_num,1))  # broadcastable weights
     ctx=self.activation(c*s)
     
     return ctx#self.activation(ctx)
Example #3
0
    def call(self, inputs, mask=None):
        """Weight the context projection of the sequence by softmax scores
        of its memory projection against the query vector.

        inputs[0]: sequence tensor; inputs[1]: memory/query vector.
        # assumes inputs[0] is (batch, input_num, dim) -- TODO confirm
        """
        seq, query = inputs[0], inputs[1]

        # Two linear projections of the same sequence.
        context = K.dot(seq, self.W_c) + self.b_c
        memory = K.dot(seq, self.W_m) + self.b_m

        # Score each timestep: inner product with the (tiled) query,
        # then softmax-normalize across timesteps.
        tiled_query = K.repeat(query, self.input_num)
        scores = K.softmax(K.sum(memory * tiled_query, axis=2, keepdims=False))
        weights = K.reshape(scores, (-1, self.input_num, 1))

        return self.activation(context * weights)
def LSTNet(trainX1, trainX2, trainY, config):
    """Build a two-branch LSTNet-style Keras model.

    Each branch is Conv1D -> CuDNNLSTM -> Dropout; the branch outputs are
    concatenated and projected to ``trainY.shape[1]`` units.  A "highway"
    (autoregressive) linear component over the last
    ``config.highway_window`` timesteps of input1 is added so the output
    can respond to scale changes in the input.

    trainX1, trainX2: 3-D arrays (samples, timesteps, features); only
        their shapes are used to size the model.
    trainY: 2-D target array; its second dimension sets the output width.
    config: object exposing ``dropout`` and ``highway_window``.
    Returns an uncompiled Model with inputs [input1, input2].
    """
    input1 = Input(shape=(trainX1.shape[1], trainX1.shape[2]))
    input2 = Input(shape=(trainX2.shape[1], trainX2.shape[2]))

    # Two separate Conv1D layers, one per branch.  (True weight *sharing*
    # would reuse one layer object on both inputs; with distinct layers the
    # weights diverge during training -- here they at least start equal.)
    conv1 = Conv1D(filters=48, kernel_size=6, strides=1, activation='relu')
    conv2 = Conv1D(filters=48, kernel_size=6, strides=1, activation='relu')

    conv1out = conv1(input1)
    conv2out = conv2(input2)
    # BUG FIX: a Keras layer has no weights until it is built (first called
    # on an input), so copying before the calls above was a silent no-op
    # (get_weights() returned []).  Copy now that both layers are built.
    conv2.set_weights(conv1.get_weights())

    lstm1out = CuDNNLSTM(64)(conv1out)  # NOTE: CuDNNLSTM requires a GPU
    lstm1out = Dropout(config.dropout)(lstm1out)

    lstm2out = CuDNNLSTM(64)(conv2out)
    lstm2out = Dropout(config.dropout)(lstm2out)

    lstm_out = concatenate([lstm1out, lstm2out])
    output = Dense(trainY.shape[1])(lstm_out)

    # Highway: a Dense layer emulates an AR (autoregressive) process,
    # adding a linear component so the output tracks the input scale.
    highway_window = config.highway_window
    # Slice the most recent `highway_window` timesteps, keeping all
    # feature dimensions, then flatten to (batch, window * features).
    z = Lambda(lambda k: k[:, -highway_window:, :])(input1)
    z = Lambda(lambda k: K.permute_dimensions(k, (0, 2, 1)))(z)
    z = Lambda(lambda k: K.reshape(k,
                                   (-1, highway_window * trainX1.shape[2])))(z)
    z = Dense(trainY.shape[1])(z)

    output = add([output, z])
    output = Activation('sigmoid')(output)
    model = Model(inputs=[input1, input2], outputs=output)

    return model