def prep_layer(self):
    """Initialize GRU gate weights/biases from the layer's input shape."""
    _, input_dim = self.input_shape
    # Gates operate on the concatenation of the hidden state and the input
    # vector, so each maps (h_units + vocabulary_size) -> h_units.
    z_dim = self.h_units + input_dim

    def _weights(shape):
        # Fresh initializer per matrix, mirroring the original call pattern.
        return init(self.init_method).initialize_weights(shape)

    # gate weights
    self.W_update = _weights((z_dim, self.h_units))
    self.W_reset = _weights((z_dim, self.h_units))
    self.W_cell = _weights((z_dim, self.h_units))
    self.W_states = _weights((z_dim, self.h_units))
    # gate hidden biases
    self.b_update = np.zeros((self.h_units, ))
    self.b_reset = np.zeros((self.h_units, ))
    self.b_cell = np.zeros((self.h_units, ))
    self.b_states = np.zeros((self.h_units, ))
    # final projection back to the nodes
    # (input_dim is the vocab size and also the output size)
    self.W_final = _weights((self.h_units, input_dim))
    self.b_final = np.zeros((input_dim, ))
def prep_layer(self):
    """Initialize LSTM gate, cell, and output-projection parameters."""
    _, input_dim = self.input_shape
    # Gates act on the concatenated [hidden state, input] vector,
    # hence the (h_units + vocabulary_size) fan-in.
    z_dim = self.h_units + input_dim

    def _weights(shape):
        # Fresh initializer per matrix, mirroring the original call pattern.
        return init(self.init_method).initialize_weights(shape)

    # input / forget / output gate weights
    self.W_input = _weights((z_dim, self.h_units))
    self.W_forget = _weights((z_dim, self.h_units))
    self.W_output = _weights((z_dim, self.h_units))
    # gate biases
    self.b_input = np.zeros((self.h_units, ))
    self.b_forget = np.zeros((self.h_units, ))
    self.b_output = np.zeros((self.h_units, ))
    # candidate cell-state parameters
    self.W_cell = _weights((z_dim, self.h_units))
    self.b_cell = np.zeros((self.h_units, ))
    # final projection back to the output dimension
    self.W_final = _weights((self.h_units, input_dim))
    self.b_final = np.zeros((input_dim, ))
def prep_layer(self):
    """Initialize vanilla-RNN input, output, and recurrent parameters."""
    _, input_dim = self.input_shape

    def _weights(shape):
        # Fresh initializer per matrix, mirroring the original call pattern.
        return init(self.init_method).initialize_weights(shape)

    self.W_input = _weights((self.h_units, input_dim))     # input  -> hidden
    self.W_output = _weights((input_dim, self.h_units))    # hidden -> output
    self.W_recur = _weights((self.h_units, self.h_units))  # hidden -> hidden
    self.b_output = np.zeros((input_dim, ))
    self.b_input = np.zeros((self.h_units, ))
def run(self, f, df, params=1, epochs=10, tol=1e-4, verbose=False):
    """Drive the configured optimizer on objective f with gradient df.

    Records per-epoch weights in ``self.weights`` and objective values in
    ``self.fsolve``; ``tol`` is stored but not otherwise used here.
    """
    # Random starting point, scaled by 3 to move it away from the origin.
    self.inputs = init(self.init_method).initialize_weights((params, 1)) * 3
    self.f0 = f(self.inputs)  # initial objective value, baseline for eps
    self.df = df
    self.epochs = epochs
    self.tol = tol
    # per-epoch histories
    self.fsolve = np.zeros((self.epochs, 1))
    self.weights = np.zeros((self.epochs, 1, params))
    for epoch in range(self.epochs):
        # NOTE(review): a fresh optimizer object is constructed every step,
        # exactly as the original did — any optimizer state is not carried over.
        self.inputs = optimize(self.optimizer).update(
            self.inputs, self.df(self.inputs))
        self.weights[epoch, :, :] = self.inputs.T
        f_now = f(self.inputs)
        self.fsolve[epoch, :] = f_now
        eps = self.f0 - f_now
        # progress report every 5th epoch when requested
        if verbose and epoch % 5 == 0:
            print('Epoch-{} weights: {:.20}'.format(
                epoch + 1, self.npstring(self.inputs.T)))
            print('Epoch-{} eps: {:.20}'.format(
                epoch + 1, self.npstring(eps)))
def __init__(self, epochs, loss='mean_squared_error', init_method='he_uniform',
             optimizer=None, penalty='ridge', penalty_weight=0.5, l1_ratio=0.5):
    """Configure the model's training components.

    Parameters
    ----------
    epochs : number of training iterations.
    loss : name of the objective function.
    init_method : weight initialization scheme.
    optimizer : optimizer configuration dict; a fresh empty dict is used
        when omitted.
    penalty : regularization penalty ('ridge', 'lasso', ...).
    penalty_weight : regularization strength.
    l1_ratio : elastic-net mixing parameter.
    """
    # Avoid the mutable-default-argument pitfall: a literal `optimizer={}`
    # would be shared across every call of this constructor.
    if optimizer is None:
        optimizer = {}
    self.epochs = epochs
    self.loss = objective(loss)
    self.init_method = init(init_method)
    self.optimizer = optimize(optimizer)
    self.regularization = regularize(penalty, penalty_weight, l1_ratio=l1_ratio)
def __init__(self, epochs, loss='binary_crossentropy', init_method='he_normal',
             optimizer=None, penalty='lasso', penalty_weight=0, l1_ratio=0.5):
    """Configure the classifier's training components.

    Parameters
    ----------
    epochs : number of training iterations.
    loss : name of the objective function.
    init_method : weight initialization scheme.
    optimizer : optimizer configuration dict; a fresh empty dict is used
        when omitted.
    penalty : regularization penalty ('ridge', 'lasso', ...).
    penalty_weight : regularization strength (0 disables the penalty).
    l1_ratio : elastic-net mixing parameter.
    """
    # Avoid the mutable-default-argument pitfall: a literal `optimizer={}`
    # would be shared across every call of this constructor.
    if optimizer is None:
        optimizer = {}
    self.epochs = epochs
    self.loss = objective(loss)
    self.init_method = init(init_method)
    self.optimizer = optimize(optimizer)
    # sigmoid activation maps logits to probabilities for the binary task
    self.activate = activation('sigmoid')
    self.regularization = regularize(penalty, penalty_weight, l1_ratio=l1_ratio)