def call(self, X, mask=None):
    input_shape = self.input_spec[0].shape

    # Collapse (samples, timesteps, input_dim) to (samples * timesteps, input_dim)
    x = K.reshape(X[0], (-1, input_shape[2]))
    # The target indexes are only passed to h_softmax while training
    target = X[1].flatten() if self.trainable else None

    Y = h_softmax(x, K.shape(x)[0], self.output_dim,
                  self.n_classes, self.n_outputs_per_class,
                  self.W1, self.b1, self.W2, self.b2, target)

    # While training, h_softmax returns only the probability of the target,
    # so the last dimension collapses to 1
    output_dim = 1 if self.trainable else self.output_dim
    input_length = K.shape(X[0])[1]

    y = K.reshape(Y, (-1, input_length, output_dim))
    return y
def get_output(self, train=False):
    X = self.get_input(train)

    x = K.reshape(X[0], (-1, self.input_dim))  # (samples * timesteps, input_dim)
    # Target indexes are only passed to h_softmax at training time
    target = X[1].flatten() if train else None

    Y = h_softmax(x, K.shape(x)[0], self.output_dim,
                  self.n_classes, self.n_outputs_per_class,
                  self.W1, self.b1, self.W2, self.b2, target)

    # At training time only the target probability is returned (last dim 1)
    flex_output = 1 if train else self.output_dim

    output = K.reshape(Y, (-1, self.input_length, flex_output))
    return output
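In both methods above, the layer emits the probability of the given target at each timestep while training (last dimension 1) and the full distribution over output_dim outputs otherwise. A minimal sketch, under that assumption, of a matching negative log-likelihood objective for the training-time output (the function name hsm_nll is made up here and is not part of the original layer):

from keras import backend as K

def hsm_nll(y_true, y_pred):
    # y_pred has shape (samples, timesteps, 1) and already holds the
    # probability of the true target, so y_true is unused here.
    return -K.mean(K.log(y_pred))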
    # Second level of h_softmax
    W2 = np.asarray(np.random.normal(0, 0.001,
                                     size=(h_softmax_level1_size, d, h_softmax_level2_size)),
                    dtype=floatX)
    W2 = shared(W2)
    b2 = shared(
        np.asarray(np.zeros((h_softmax_level1_size,
                             h_softmax_level2_size)), dtype=floatX))

    #############
    # Build graph
    #############

    # This only computes the output corresponding to the target
    y_hat_tg = h_softmax(hid, m, output_size, h_softmax_level1_size,
                         h_softmax_level2_size, W1, b1, W2, b2, y_indexes)

    # This computes all the outputs
    # output = h_softmax(hid, m, output_size, h_softmax_level1_size,
    #                       h_softmax_level2_size, W1, b1, W2, b2)

    loss = -tensor.mean(tensor.log(y_hat_tg))

    params.extend([W1, b1, W2, b2])
else:
    raise ValueError


######################
# TRAINING FUNCTIONS #
######################
grad = theano.grad(loss, [sub] + params)
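To turn the snippet above into a compiled training step, the gradients can be folded into plain SGD updates. A minimal sketch, assuming x and y_indexes are the symbolic inputs of the graph and leaving out whatever update the subtensor sub requires in the original code (the learning rate value is arbitrary):

lr = 0.1
# grad[0] is the gradient w.r.t. sub; grad[1:] line up with params
updates = [(p, p - lr * g) for p, g in zip(params, grad[1:])]
train_fn = theano.function([x, y_indexes], loss, updates=updates)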
Example #5
def perform(self, x, y=None):
    W1, b1, W2, b2 = self.params
    if y is not None:
        # With a target, only the probability of that target is returned
        return NN.h_softmax(x, x.shape[0], self.out_dim, self.h_level1_size,
                            self.h_level2_size, W1, b1, W2, b2, y)
    else:
        # Without a target, the full distribution over out_dim outputs is returned
        return NN.h_softmax(x, x.shape[0], self.out_dim, self.h_level1_size,
                            self.h_level2_size, W1, b1, W2, b2)
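For reference, a self-contained sketch of the parameter shapes that theano.tensor.nnet.h_softmax expects, matching the four tensors stored in self.params above (all sizes below are made-up illustrative values):

import numpy as np
import theano
import theano.tensor as T
from theano.tensor import nnet as NN

floatX = theano.config.floatX
n_features, n_classes, n_outputs_per_class = 10, 8, 8   # illustrative sizes
n_outputs = n_classes * n_outputs_per_class              # total number of outputs

x = T.matrix('x')                                        # (batch_size, n_features)
W1 = theano.shared(np.zeros((n_features, n_classes), dtype=floatX), name='W1')
b1 = theano.shared(np.zeros((n_classes,), dtype=floatX), name='b1')
W2 = theano.shared(np.zeros((n_classes, n_features, n_outputs_per_class),
                            dtype=floatX), name='W2')
b2 = theano.shared(np.zeros((n_classes, n_outputs_per_class), dtype=floatX), name='b2')

# Full distribution over all outputs: shape (batch_size, n_outputs)
all_probs = NN.h_softmax(x, x.shape[0], n_outputs, n_classes,
                         n_outputs_per_class, W1, b1, W2, b2)

# With integer targets of shape (batch_size,), only the target
# probabilities are computed: shape (batch_size,)
y = T.ivector('y')
target_probs = NN.h_softmax(x, x.shape[0], n_outputs, n_classes,
                            n_outputs_per_class, W1, b1, W2, b2, y)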