예제 #1
0
def tanh_trans(X, mode='ff'):
    """Compute feedforward and backprop for tanh nonlinearity.

    Parameters
    ----------
    X : garray or dict
        In 'ff' mode, the pre-activation input. In 'bp' mode, a dict
        with keys 'A' (the cached forward activations, i.e. tanh of the
        input) and 'dLdA' (gradient of the loss w.r.t. those
        activations).
    mode : str
        Either 'ff' (feedforward) or 'bp' (backprop).

    Returns
    -------
    The tanh activations in 'ff' mode, or the gradient w.r.t. the
    pre-activation input in 'bp' mode.

    Raises
    ------
    ValueError
        If mode is neither 'ff' nor 'bp'.  (The original fell through
        both ifs and raised an opaque UnboundLocalError on ``return F``.)
    """
    if mode == 'ff':
        F = gp.tanh(X)
    elif mode == 'bp':
        # tanh'(z) = 1 - tanh(z)**2, and X['A'] caches tanh(z).
        F = (1.0 - X['A']**2.0) * X['dLdA']
    else:
        raise ValueError("mode must be 'ff' or 'bp', got %r" % (mode,))
    return F
예제 #2
0
def tanh_trans(X, mode='ff'):
    """Feedforward / backprop transform for a tanh nonlinearity.

    In 'ff' mode ``X`` is the pre-activation input and the tanh
    activations are returned.  In 'bp' mode ``X`` is a dict holding the
    cached forward activations ('A') and the incoming gradient
    ('dLdA'); the gradient w.r.t. the pre-activation input is returned.

    Raises ValueError for any other mode (previously the function fell
    through both branches and raised UnboundLocalError on the return).
    """
    if mode == 'ff':
        return gp.tanh(X)
    if mode == 'bp':
        # d/dz tanh(z) = 1 - tanh(z)**2; 'A' caches tanh(z).
        return (1.0 - X['A']**2.0) * X['dLdA']
    raise ValueError("unknown mode: %r (expected 'ff' or 'bp')" % (mode,))
예제 #3
0
    def feedforward(self, train_set_x):
        """Forward-propagate a batch, caching every layer's activations.

        Stores the input followed by each hidden layer's tanh output in
        self.activations, and the linear output of the top layer in
        self.final_layer_output.  Returns nothing.
        """
        self.activations = [train_set_x]

        for layer in range(self.n_layers):
            pre_act = gnp.dot(self.activations[layer], self.W_params[layer]) + self.b_params[layer]
            self.activations.append(gnp.tanh(pre_act))

        # The output layer is purely linear -- no squashing function.
        top = self.n_layers
        self.final_layer_output = gnp.dot(self.activations[top], self.W_params[top]) + self.b_params[top]
예제 #4
0
    def parameter_prediction(self, test_set_x):
        """Push test_set_x through the network and return its output.

        The input is converted to a garray, passed through n_layers
        tanh hidden layers and one final linear layer, and the result
        is returned as a plain numpy array.
        """
        hidden = gnp.as_garray(test_set_x)

        for layer_idx in range(self.n_layers):
            hidden = gnp.tanh(gnp.dot(hidden, self.W_params[layer_idx]) + self.b_params[layer_idx])

        # Top layer is linear (no nonlinearity).
        output = gnp.dot(hidden, self.W_params[self.n_layers]) + self.b_params[self.n_layers]

        return output.as_numpy_array()
예제 #5
0
    def parameter_prediction(self, test_set_x):
        """Return the network's linear output for test_set_x as numpy.

        Applies each of the n_layers tanh hidden layers in turn, then
        the final affine layer with no nonlinearity.
        """
        acts = gnp.as_garray(test_set_x)

        for i in range(self.n_layers):
            pre_activation = gnp.dot(acts, self.W_params[i]) + self.b_params[i]
            acts = gnp.tanh(pre_activation)

        # Output layer: affine only, then move the result off the GPU.
        top_w = self.W_params[self.n_layers]
        top_b = self.b_params[self.n_layers]
        return (gnp.dot(acts, top_w) + top_b).as_numpy_array()
예제 #6
0
    def feedforward(self, train_set_x):
        """Compute and cache activations for every layer of the net.

        After the call, self.activations[0] is the input and
        self.activations[i + 1] the tanh output of hidden layer i;
        self.final_layer_output holds the linear top-layer output.
        """
        self.activations = []
        self.activations.append(train_set_x)

        for idx in range(self.n_layers):
            hidden = gnp.dot(self.activations[idx], self.W_params[idx])
            hidden = hidden + self.b_params[idx]
            self.activations.append(gnp.tanh(hidden))

        # Linear output layer (weights/bias at index n_layers).
        last = self.activations[self.n_layers]
        self.final_layer_output = gnp.dot(last, self.W_params[self.n_layers]) + self.b_params[self.n_layers]
예제 #7
0
파일: nonlin.py 프로젝트: barapa/HF-RNN
 def __call__(self, X):
     """Apply the tanh nonlinearity elementwise to X."""
     result = g.tanh(X)
     return result
예제 #8
0
    def parameter_prediction_trajectory(self, test_set_x, test_set_y,
                                        mean_matrix, std_matrix):
        """Predict outputs for test_set_x and score per-frame trajectory error.

        The raw network output is de-normalised with std_matrix /
        mean_matrix, split into three 60-dim index groups (the
        static/delta/acc lists built below -- presumably
        static/delta/acceleration parameter streams, confirm against the
        feature layout), smoothed one dimension at a time using the
        matrices from self.pre_wuw_wu (looks like MLPG smoothing -- see
        that helper), and compared against the de-normalised reference
        test_set_y.

        Returns a numpy array with one error value per frame: the square
        root of the squared trajectory error summed over dims 1..59
        (dim 0 is excluded from the sum).
        """
        test_set_x = gnp.garray(test_set_x)

        current_activations = test_set_x

        # Forward pass: tanh hidden layers...
        for i in range(self.n_layers):
            input_data = current_activations
            current_activations = gnp.tanh(
                gnp.dot(input_data, self.W_params[i]) + self.b_params[i])

        # ...followed by a linear output layer.
        final_layer_output = gnp.dot(
            current_activations,
            self.W_params[self.n_layers]) + self.b_params[self.n_layers]

        # De-normalise the prediction back to the original feature scale.
        final_layer_output = final_layer_output * gnp.garray(
            std_matrix) + gnp.garray(mean_matrix)
        frame_number = final_layer_output.shape[0]

        # Work dimension-major from here on: rows = feature dims,
        # columns = frames.
        final_layer_output = final_layer_output.T
        obs_mat = gnp.zeros((60, frame_number * 3))
        traj_err_mat = gnp.zeros((60, frame_number))

        var_base = np.zeros((60, 3))
        static_indice = []
        delta_indice = []
        acc_indice = []

        # Index groups: rows 0-59 static, 60-119 delta, 120-179 acc.
        for i in range(60):
            static_indice.append(i)
            delta_indice.append(i + 60)
            acc_indice.append(i + 120)

        # Lay the three streams side by side: [static | delta | acc],
        # each block frame_number columns wide.
        obs_mat[:, 0:frame_number] = final_layer_output[static_indice, :]
        obs_mat[:, frame_number:frame_number *
                2] = final_layer_output[delta_indice, :]
        obs_mat[:, frame_number * 2:frame_number *
                3] = final_layer_output[acc_indice, :]

        # Per-dimension stds for the three streams (squared below into
        # variances for the smoothing weights).
        var_base[:, 0] = std_matrix[0, static_indice].T
        var_base[:, 1] = std_matrix[0, delta_indice].T
        var_base[:, 2] = std_matrix[0, acc_indice].T

        var_base = np.reshape(var_base, (60 * 3, 1))
        var_base = var_base**2

        # Each of the 60 dimensions is processed independently
        # (every sub_dim is 1).
        sub_dim_list = []
        for i in range(60):
            sub_dim_list.append(1)

        sub_dim_start = 0
        for sub_dim in sub_dim_list:
            # Smoothing matrices for this dimension's (static, delta,
            # acc) variances -- see self.pre_wuw_wu for their meaning.
            wuw_mat, wu_mat = self.pre_wuw_wu(
                frame_number, sub_dim,
                var_base[sub_dim_start * 3:sub_dim_start * 3 + sub_dim * 3])

            obs_mu = obs_mat[sub_dim_start:sub_dim_start + sub_dim, :].reshape(
                (frame_number * 3 * sub_dim, 1))
            wuwwu = gnp.dot(wuw_mat, wu_mat)
            # Smoothed trajectory for this dimension.
            mlpg_traj = gnp.dot(wuwwu, obs_mu)

            # De-normalisation stats for the static stream of this dim.
            sub_std_mat = std_matrix[:, static_indice].T
            sub_mu_mat = mean_matrix[:, static_indice].T
            sub_std_mat = sub_std_mat[sub_dim_start:sub_dim_start +
                                      sub_dim, :].reshape(
                                          (frame_number * sub_dim, 1))
            sub_mu_mat = sub_mu_mat[sub_dim_start:sub_dim_start +
                                    sub_dim, :].reshape(
                                        (frame_number * sub_dim, 1))

            # Reference trajectory (static stream of test_set_y).
            ref_y = test_set_y[:, static_indice].T
            ref_y = ref_y[sub_dim_start:sub_dim_start + sub_dim, :].reshape(
                (frame_number * sub_dim, 1))

            # De-normalise the reference, then take the raw error.
            ref_y = ref_y * sub_std_mat + sub_mu_mat
            traj_err = (mlpg_traj - ref_y)  # mlpg_traj ref_y

            traj_err_mat[sub_dim_start:sub_dim_start +
                         sub_dim, :] = traj_err.reshape(
                             (sub_dim, frame_number))

            sub_dim_start = sub_dim_start + sub_dim

        # Per-frame loss: sqrt of squared errors summed over dims 1..59.
        validation_losses = gnp.sum(traj_err_mat[1:60, :].T**2, axis=1)
        validation_losses = validation_losses**0.5

        return validation_losses.as_numpy_array()
예제 #9
0
 def activation(self, netInput):
     """Squash the net input through the tanh nonlinearity."""
     squashed = gnp.tanh(netInput)
     return squashed
예제 #10
0
 def forward(self, A):
     """Forward pass: elementwise tanh of the pre-activations A."""
     activated = gnp.tanh(A)
     return activated
예제 #11
0
 def activation(self, netInput):
     """Return tanh applied elementwise to the net input."""
     return gnp.tanh(netInput)
예제 #12
0
 def forward_prop(self, x):
     """Forward-propagate x through the tanh nonlinearity."""
     out = gnp.tanh(x)
     return out
예제 #13
0
 def backwardOneStep(self, a):
     """One backward-direction step: affine map (W2, b2), then tanh."""
     pre_activation = gp.dot(a, self.W2) + self.b2
     return gp.tanh(pre_activation)
예제 #14
0
 def forwardOneStep(self, a):
     """One forward-direction step: affine map (W1, b1), then tanh."""
     pre_activation = gp.dot(a, self.W1) + self.b1
     return gp.tanh(pre_activation)
예제 #15
0
파일: layer.py 프로젝트: jakesnell/pynn
 def forward_prop(self, x):
     """Apply the layer's tanh nonlinearity to x and return the result."""
     result = gnp.tanh(x)
     return result
예제 #16
0
파일: st_dnn_cm.py 프로젝트: ronanki/merlin
 def parameter_prediction_trajectory(self, test_set_x, test_set_y, mean_matrix, std_matrix):
     """Predict outputs for test_set_x and score per-frame trajectory error.

     NOTE(review): this variant uses xrange, i.e. it is Python 2 code.

     The raw network output is de-normalised with std_matrix /
     mean_matrix, split into three 60-dim index groups (the
     static/delta/acc lists built below -- presumably
     static/delta/acceleration parameter streams, confirm against the
     feature layout), smoothed one dimension at a time using the
     matrices from self.pre_wuw_wu (looks like MLPG smoothing -- see
     that helper), and compared against the de-normalised reference
     test_set_y.

     Returns a numpy array with one error value per frame: the square
     root of the squared trajectory error summed over dims 1..59
     (dim 0 is excluded from the sum).
     """
     test_set_x = gnp.garray(test_set_x)

     current_activations = test_set_x

     # Forward pass: tanh hidden layers...
     for i in xrange(self.n_layers):
         input_data = current_activations
         current_activations = gnp.tanh(gnp.dot(input_data, self.W_params[i]) + self.b_params[i])

     # ...followed by a linear output layer.
     final_layer_output = gnp.dot(current_activations, self.W_params[self.n_layers]) + self.b_params[self.n_layers]

     # De-normalise the prediction back to the original feature scale.
     final_layer_output = final_layer_output * gnp.garray(std_matrix) + gnp.garray(mean_matrix)
     frame_number = final_layer_output.shape[0]

     # Work dimension-major from here on: rows = feature dims, columns = frames.
     final_layer_output = final_layer_output.T
     obs_mat = gnp.zeros((60, frame_number*3))
     traj_err_mat = gnp.zeros((60, frame_number))

     var_base = np.zeros((60, 3))
     static_indice = []
     delta_indice = []
     acc_indice = []

     # Index groups: rows 0-59 static, 60-119 delta, 120-179 acc.
     for i in xrange(60):
         static_indice.append(i)
         delta_indice.append(i+60)
         acc_indice.append(i+120)

     # Lay the three streams side by side: [static | delta | acc],
     # each block frame_number columns wide.
     obs_mat[:, 0:frame_number] = final_layer_output[static_indice, :]
     obs_mat[:, frame_number:frame_number*2] = final_layer_output[delta_indice, :]
     obs_mat[:, frame_number*2:frame_number*3] = final_layer_output[acc_indice, :]

     # Per-dimension stds for the three streams (squared below into variances).
     var_base[:, 0] = std_matrix[0, static_indice].T
     var_base[:, 1] = std_matrix[0, delta_indice].T
     var_base[:, 2] = std_matrix[0, acc_indice].T

     var_base = np.reshape(var_base, (60*3, 1))
     var_base = var_base ** 2

     # Each of the 60 dimensions is processed independently (sub_dim == 1).
     sub_dim_list = []
     for i in xrange(60):
         sub_dim_list.append(1)

     sub_dim_start = 0
     for sub_dim in sub_dim_list:
         # Smoothing matrices for this dimension's (static, delta, acc)
         # variances -- see self.pre_wuw_wu for their meaning.
         wuw_mat, wu_mat = self.pre_wuw_wu(frame_number, sub_dim, var_base[sub_dim_start*3:sub_dim_start*3+sub_dim*3])

         obs_mu = obs_mat[sub_dim_start:sub_dim_start+sub_dim, :].reshape((frame_number*3*sub_dim, 1))
         wuwwu = gnp.dot(wuw_mat, wu_mat)
         # Smoothed trajectory for this dimension.
         mlpg_traj = gnp.dot(wuwwu, obs_mu)

         # De-normalisation stats for the static stream of this dim.
         sub_std_mat = std_matrix[:, static_indice].T
         sub_mu_mat  = mean_matrix[:, static_indice].T
         sub_std_mat = sub_std_mat[sub_dim_start:sub_dim_start+sub_dim, :].reshape((frame_number*sub_dim, 1))
         sub_mu_mat = sub_mu_mat[sub_dim_start:sub_dim_start+sub_dim, :].reshape((frame_number*sub_dim, 1))

         # Reference trajectory (static stream of test_set_y).
         ref_y = test_set_y[:, static_indice].T
         ref_y = ref_y[sub_dim_start:sub_dim_start+sub_dim, :].reshape((frame_number*sub_dim, 1))

         # De-normalise the reference, then take the raw error.
         ref_y = ref_y * sub_std_mat + sub_mu_mat
         traj_err = (mlpg_traj - ref_y)  #mlpg_traj ref_y

         traj_err_mat[sub_dim_start:sub_dim_start+sub_dim, :] = traj_err.reshape((sub_dim, frame_number))

         sub_dim_start = sub_dim_start + sub_dim

     # Per-frame loss: sqrt of squared errors summed over dims 1..59.
     validation_losses = gnp.sum(traj_err_mat[1:60, :].T ** 2, axis=1)
     validation_losses = validation_losses ** 0.5

     return  validation_losses.as_numpy_array()
예제 #17
0
 def activate(self, net_input):
     """Tanh activation applied elementwise to the net input."""
     output = gnp.tanh(net_input)
     return output
예제 #18
0
 def forward(self, A):
     """Compute the forward pass: tanh of the pre-activations A."""
     return gnp.tanh(A)