def __init__(self, in_dim, h_dim, name=None, init='orthogonal', soft_init='normal'):
    """Store layer dimensions, resolve initializers by name, and build parameters.

    ``init`` / ``soft_init`` are registry keys looked up in ``initializations``.
    """
    self.in_dim = in_dim
    self.h_dim = h_dim
    if name is not None:
        self.set_name(name)
    self.name = name
    # Resolve initializer callables from their registry names.
    self.init = initializations.get(init)
    self.soft_init = initializations.get(soft_init)
    self.init_params()
def __init__(self, lid, bacth_size, num_lstm, epsilon=1e-10, axis=1, momentum=0.99, **kwargs):
    """Record the hyper-parameters of this normalization/LSTM block.

    NOTE(review): ``bacth_size`` is a typo for ``batch_size`` but it is part of
    the public signature, so it is kept for keyword-argument compatibility;
    internally it is stored as ``self.batch_size``.
    """
    self.id = lid
    self.batch_size = bacth_size
    self.num_lstm = num_lstm
    self.axis = axis
    self.epsilon = epsilon
    self.momentum = momentum
    # "one" initializer: scale parameters start at 1.
    self.init = initializations.get("one")
def __init__(self, in_dim, name=None, init='uniform', activation='linear'):
    """Store the input dimension, resolve initializer/activation, build params."""
    self.in_dim = in_dim
    if name is not None:
        self.set_name(name)
    self.name = name
    # Look up the initializer and activation callables by their registry names.
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.init_params()
def __init__(self, in_dim, h_dim, ctx_dim, pctx_dim, name=None, selector=True, init='orthogonal', soft_init='normal'):
    """Store dimensions for an attention-style layer and build its parameters.

    ``selector`` toggles an optional gating component — TODO confirm against
    ``init_params``/the forward pass, which are outside this view.
    """
    if name is not None:
        self.set_name(name)
    self.in_dim = in_dim
    self.h_dim = h_dim
    self.ctx_dim = ctx_dim
    self.pctx_dim = pctx_dim
    self.selector = selector
    # Resolve initializer callables from the registry.
    self.init = initializations.get(init)
    self.soft_init = initializations.get(soft_init)
    self.init_params()
def __init__(self, input_dim, output_dim, init='uniform', inner_init='orthogonal', activation='sigmoid', weights=None, truncate_gradient=-1, return_sequences=False):
    """Simple recurrent layer: one input-to-hidden and one hidden-to-hidden map.

    ``weights``, when given, overrides the freshly initialized parameters via
    ``set_weights`` (order: W, U, b).
    """
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.truncate_gradient = truncate_gradient
    self.return_sequences = return_sequences
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.activation = activations.get(activation)
    self.input = T.matrix()
    # W: input->hidden, U: hidden->hidden (recurrent), b: bias.
    self.W = self.init((self.input_dim, self.output_dim))
    self.U = self.inner_init((self.output_dim, self.output_dim))
    self.b = shared_zeros((self.output_dim))
    self.params = [self.W, self.U, self.b]
    if weights is not None:
        self.set_weights(weights)
def __init__(self, input_shape, epsilon=1e-6, weights=None):
    """Batch-normalization-style layer: learnable scale (gamma) and shift (beta).

    ``epsilon`` is presumably the variance-stabilizing constant used in the
    forward pass — confirm against the layer's output computation.
    """
    self.input_shape = input_shape
    self.epsilon = epsilon
    self.init = initializations.get("uniform")
    # gamma is randomly initialized, beta starts at zero.
    self.gamma = self.init((self.input_shape))
    self.beta = shared_zeros(self.input_shape)
    self.params = [self.gamma, self.beta]
    if weights is not None:
        self.set_weights(weights)
def __init__(self, input_dim, output_dim, init='uniform', weights=None):
    """Embedding-style layer: a single lookup matrix W of shape (input_dim, output_dim)."""
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.init = initializations.get(init)
    # imatrix: the layer consumes integer index matrices.
    self.input = T.imatrix()
    self.W = self.init((self.input_dim, self.output_dim))
    self.params = [self.W]
    if weights is not None:
        self.set_weights(weights)
def __init__(self, input_dim, output_dim=128, init='uniform', inner_init='orthogonal', activation='tanh', inner_activation='hard_sigmoid', truncate_gradient=-1, weights=None, return_sequences=False):
    """LSTM layer: input (i), forget (f), cell (c) and output (o) gates.

    Each gate owns an input projection W_*, a recurrent projection U_* and a
    bias b_*. ``weights``, when given, overrides them all via ``set_weights``.
    """
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.truncate_gradient = truncate_gradient
    self.return_sequences = return_sequences
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.input = T.matrix()
    # Create the per-gate parameters in the order i, f, c, o — the same
    # creation order as before, so any RNG draws inside the initializers
    # happen in an identical sequence.
    for gate in ('i', 'f', 'c', 'o'):
        setattr(self, 'W_' + gate, self.init((self.input_dim, self.output_dim)))
        setattr(self, 'U_' + gate, self.inner_init((self.output_dim, self.output_dim)))
        setattr(self, 'b_' + gate, shared_zeros((self.output_dim)))
    # NOTE: params order is i, c, f, o (not creation order) — kept exactly,
    # since set_weights presumably matches weights positionally.
    self.params = [
        self.W_i, self.U_i, self.b_i,
        self.W_c, self.U_c, self.b_c,
        self.W_f, self.U_f, self.b_f,
        self.W_o, self.U_o, self.b_o,
    ]
    if weights is not None:
        self.set_weights(weights)
def __init__(self, input_dim, output_dim, init='uniform', activation='linear', weights=None):
    """Dense layer: y = activation(x W + b)."""
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.input = T.matrix()
    # Weight matrix and zero-initialized bias.
    self.W = self.init((self.input_dim, self.output_dim))
    self.b = shared_zeros((self.output_dim))
    self.params = [self.W, self.b]
    if weights is not None:
        self.set_weights(weights)
def shared(shape, name):  # {{{
    """Create a named parameter for the given shape.

    1-D shapes (biases) become zero-valued theano shared variables; any
    higher-rank shape is produced by the 'glorot_uniform' initializer.
    """
    init = initializations.get('glorot_uniform')
    if len(shape) == 1:
        # Biases are initialized with zeros.
        value = np.zeros(shape)
        return theano.shared(value=value.astype(theano.config.floatX), name=name)
    # Dead code removed: the original computed `drange` and drew a
    # np.random.uniform sample here, then discarded both and returned the
    # initializer's output. Dropping the unused draw also stops perturbing
    # the global NumPy RNG state for no effect on the result.
    return init(shape=shape, name=name)
def __init__(self, in_dim, out_dim, name=None, init='xavier', activation='relu'):
    """Store layer dimensions, resolve initializer/activation, build params."""
    self.in_dim = in_dim
    self.out_dim = out_dim
    if name is not None:
        self.set_name(name)
    self.name = name
    # Registry lookups for the initializer and the nonlinearity.
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.init_params()
def __init__(self, input_n, output_n, init='glorot_uniform', activation='linear'):
    """Fully-connected layer: y = activation(x W + b)."""
    super(FullyConnected, self).__init__()
    self.input_n = input_n
    self.output_n = output_n
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.input = T.matrix()
    # Weight matrix and zero bias, registered as trainable parameters.
    self.W = self.init((self.input_n, self.output_n))
    self.b = shared_zeros((self.output_n))
    self.params = [self.W, self.b]
def __init__(self, lid, bacth_size, num_lstm, epsilon=1e-10, axis=1, momentum=0.99, **kwargs):
    """Keep the configuration of this normalization/LSTM block on the instance.

    NOTE(review): the misspelled parameter ``bacth_size`` cannot be renamed
    without breaking keyword callers; it is stored as ``self.batch_size``.
    """
    # Scale parameters are created by the "one" initializer.
    self.init = initializations.get("one")
    self.id = lid
    self.batch_size = bacth_size
    self.num_lstm = num_lstm
    self.epsilon = epsilon
    self.axis = axis
    self.momentum = momentum
def __init__(self, nb_filter, stack_size, nb_row, nb_col, init='uniform', activation='linear', weights=None, image_shape=None, border_mode='valid', subsample=(1,1)):
    """2-D convolution layer with nb_filter kernels of size nb_row x nb_col.

    ``weights``, when given, overrides W and b via ``set_weights``.
    """
    self.subsample = subsample
    self.border_mode = border_mode
    self.image_shape = image_shape
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    # 4-D input: presumably (batch, stack_size, rows, cols) — confirm in the
    # forward pass, which is outside this view.
    self.input = T.tensor4()
    self.W_shape = (nb_filter, stack_size, nb_row, nb_col)
    self.W = self.init(self.W_shape)
    self.b = shared_zeros((nb_filter,))
    self.params = [self.W, self.b]
    if weights is not None:
        self.set_weights(weights)
from collections import OrderedDict

from scipy import io as sio
from sklearn.decomposition import PCA


def unzip(zipped):
    """When we pickle the model. Needed for the GPU stuff."""
    fresh = OrderedDict()
    for key, var in zipped.items():
        fresh[key] = var.get_value()
    return fresh


# Module-level initializer lookups shared by the parameter builders.
f_init_ = initializations.get('glorot_uniform')
f_init = initializations_2.get('glorot_uniform')
f_init2 = initializations_2.get('uniform')
f_inner_init = initializations.get('orthogonal')
f_forget_bias_init = initializations.get('one')


def init_params(options):
    """Read model dimensions from ``options`` and start the parameter dict.

    NOTE(review): this chunk appears truncated here — the body ends right
    after creating ``params``; the remainder is outside this view.
    """
    dim_frame = options['dim_frame']
    att_frame = options['att_frame']
    steps = options['steps']
    hidden_dim = options['hidden_dim']
    # batch_size = options['batch_size']
    params = OrderedDict()
# NOTE(review): this chunk is a collapsed, truncated fragment. It begins
# mid-method (the `regularizers = [] / return regularizers` tail of a
# definition whose header is outside this view) and `LstmParams.__init__`
# appears to continue past the end of the chunk (only the four W_in_to_*
# input projections are visible). Left byte-identical rather than guessing
# at the missing parts. Also note `get('uniform2')` here vs `get('uniform')`
# elsewhere in this file — possibly a typo; verify against the
# initializations registry.
regularizers = [] return regularizers def get_updates(self): if hasattr(self, 'bn'): updates = self.bn.updates #print 'has updates',len(updates) else: #print 'no updates' updates = [] return updates f_init = initializations.get('glorot_uniform') f_init2 = initializations.get('uniform2') f_inner_init = initializations.get('orthogonal') f_forget_bias_init = initializations.get('one') class LstmParams(object): def __init__(self, num_lstm, dim_frame): self.num_lstm = num_lstm self.dim_frame = dim_frame self.W_in_to_ingate = f_init((self.dim_frame, self.num_lstm)) self.W_in_to_forgetgate = f_init((self.dim_frame, self.num_lstm)) self.W_in_to_cell = f_init((self.dim_frame, self.num_lstm)) self.W_in_to_outgate = f_init((self.dim_frame, self.num_lstm))