def __init__(self, sf0, ell0, Z0, U0, whiten=False, summ=False,
             fix_ell=True, fix_sf=True, fix_Z=True, fix_U=False):
    with tf.name_scope('Brownian'):
        Zg = Param(Z0, name="Z", summ=False, fixed=fix_Z)
        Ug = Param(U0, name="U", summ=False, fixed=fix_U)
        self.kern = OperatorKernel(sf0=sf0, ell0=ell0, ktype="id",
                                   name='Kernel', summ=summ,
                                   fix_ell=fix_ell, fix_sf=fix_sf)
        self.Zg = Zg()
        self.Ug = Ug()
    self.jitter = 1e-6
    self.whiten = whiten
    self.fix_Z = fix_Z
    self.fix_U = fix_U
def __init__(self, X, Y, kern, likelihood, mean_function=Zero(),
             num_latent=None):
    """
    X is a data matrix, size N x D
    Y is a data matrix, size N x R
    kern, likelihood, mean_function are appropriate GPflow objects

    This is the variational objective for the Variational Gaussian
    Process (VGP). The key reference is:

    @article{Opper:2009,
        title = {The Variational Gaussian Approximation Revisited},
        author = {Opper, Manfred and Archambeau, C{\'e}dric},
        journal = {Neural Comput.},
        year = {2009},
        pages = {786--792},
    }

    The idea is that the posterior over the function-value vector F is
    approximated by a Gaussian, and the KL divergence is minimised between
    the approximation and the posterior. It turns out that the optimal
    posterior precision shares off-diagonal elements with the prior, so
    only the diagonal elements of the precision need be adjusted.

    The posterior approximation is

        q(f) = N(f | K alpha, [K^-1 + diag(square(lambda))]^-1)
    """
    GPModel.__init__(self, X, Y, kern, likelihood, mean_function)
    self.num_data = X.shape[0]
    self.num_latent = num_latent or Y.shape[1]
    self.q_alpha = Param(np.zeros((self.num_data, self.num_latent)))
    self.q_lambda = Param(np.ones((self.num_data, self.num_latent)),
                          transforms.positive)
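
# A minimal numpy sketch (illustration only) of the q(f) parameterisation
# quoted in the docstring above; K, alpha, and lam here are made-up toy
# values, not attributes of the model.
import numpy as np

N = 4
A = np.random.randn(N, N)
K = A @ A.T + N * np.eye(N)        # toy prior covariance of f (positive definite)
alpha = np.random.randn(N)         # plays the role of q_alpha
lam = np.abs(np.random.randn(N))   # plays the role of q_lambda (positive)

# q(f) = N(f | K alpha, [K^-1 + diag(square(lambda))]^-1)
q_mean = K @ alpha
q_cov = np.linalg.inv(np.linalg.inv(K) + np.diag(lam ** 2))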
def __init__(self, X, Y, kern, likelihood, Z, mean_function=Zero(),
             num_latent=None, q_diag=False, whiten=True):
    """
    X is a data matrix, size N x D
    Y is a data matrix, size N x R
    kern, likelihood, mean_function are appropriate GPflow objects
    Z is a matrix of pseudo inputs, size M x D
    num_latent is the number of latent processes to use, defaults to
        Y.shape[1]
    q_diag is a boolean. If True, the covariance is approximated by a
        diagonal matrix.
    whiten is a boolean. If True, we use the whitened representation of
        the inducing points.
    """
    GPModel.__init__(self, X, Y, kern, likelihood, mean_function)
    self.q_diag, self.whiten = q_diag, whiten
    self.Z = Param(Z)
    self.num_latent = num_latent or Y.shape[1]
    self.num_inducing = Z.shape[0]
    self.q_mu = Param(np.zeros((self.num_inducing, self.num_latent)))
    if self.q_diag:
        self.q_sqrt = Param(
            np.ones((self.num_inducing, self.num_latent)),
            transforms.positive)
    else:
        self.q_sqrt = Param(
            np.array([
                np.eye(self.num_inducing)
                for _ in range(self.num_latent)
            ]).swapaxes(0, 2))
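
# Quick shape check for the non-diagonal q_sqrt initialisation above
# (sizes are made up): stacking num_latent identity matrices yields
# (num_latent, M, M), and swapaxes(0, 2) turns that into
# (M, M, num_latent), i.e. one M x M square-root slice per latent process.
import numpy as np

M, num_latent = 3, 2
q_sqrt = np.array([np.eye(M) for _ in range(num_latent)]).swapaxes(0, 2)
assert q_sqrt.shape == (M, M, num_latent)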
def InitUI(self):
    self.SetTitle('ResVisard')
    self.Centre()
    self.Show(True)
    self.Maximize(True)
    self.dx, self.dy = wx.DisplaySize()  # returns a tuple
    '''
    Display size logic
    | 20  | ..... | 20  | 250 | 20 |
    | Pad | Plot  | Pad | SL  | 20 |
    '''
    self.pnl_param = Param(self, (1000, 20), (350, self.dy))
    self.xfig, self.yfig = 900, self.dy - 40
    self.pnl_plot = wx.Panel(self, pos=(20, 20),
                             size=(self.xfig, self.yfig))
    # size=(self.dx - 180, self.dy - 40)
    # self.pnl_plot.SetForegroundColour('Blue')
    # Integer division: wx sizes must be ints (plain / breaks on Python 3).
    self.fitb = wx.Button(self, pos=(930, 20),
                          size=(60, self.dy // 2 - 100),
                          style=wx.BU_EXACTFIT, label='FIT')
    self.fitb.Bind(wx.EVT_BUTTON, self.on_fit)
    self.resetb = wx.Button(self, pos=(930, self.dy // 2 - 80),
                            size=(60, self.dy // 2 - 100),
                            style=wx.BU_EXACTFIT, label='RESET')
    self.resetb.Bind(wx.EVT_BUTTON, self.on_reset)
def __init__(self, model_path):
    Param.load(self, model_path / 'tagger_defs.txt')
    self.extractor = FeatureExtractor(model_path)
    self.in_dim = self.word_dim + self.char_dim
    super(BiaffineJaLSTMParser, self).__init__(
        emb_word=L.EmbedID(self.n_words, self.word_dim),
        emb_char=L.EmbedID(self.n_chars, 50, ignore_label=IGNORE),
        conv_char=L.Convolution2D(1, self.char_dim, (3, 50),
                                  stride=1, pad=(1, 0)),
        lstm_f=L.NStepLSTM(self.nlayers, self.in_dim,
                           self.hidden_dim, 0.32),
        lstm_b=L.NStepLSTM(self.nlayers, self.in_dim,
                           self.hidden_dim, 0.32),
        arc_dep=L.Linear(2 * self.hidden_dim, self.dep_dim),
        arc_head=L.Linear(2 * self.hidden_dim, self.dep_dim),
        rel_dep=L.Linear(2 * self.hidden_dim, self.dep_dim),
        rel_head=L.Linear(2 * self.hidden_dim, self.dep_dim),
        biaffine_arc=Biaffine(self.dep_dim),
        biaffine_tag=L.Bilinear(self.dep_dim, self.dep_dim,
                                len(self.targets)))
def __init__(self, lancer_exp=True, MatCode=False, db="Points",
             defaultPoint="Point0", setTimer=True):
    # Initialise variables
    self.db = filedb.fileDB(db=db)
    self.__lastpoint = Point.get_db_point(defaultPoint, self.db)
    self.__com = Communication('/dev/ttyACM0')
    self.__Oparam = Param()
    self.__Oparam.config()
    self.__Oparam.calib()
    self.__side = Switch.cote()
    if not self.__side:
        self.__lastpoint.mirror()
    self.__move = Move(self.__Oparam.odrv0)
    self.__MatCode = MatCode
    self.__traj = Trajectoire(param=self.__Oparam, move=self.__move,
                              initial_point=self.__lastpoint,
                              Solo=self.__MatCode)
    self.__com.waitEndMove(Communication.MSG["Initialisation"])
    if setTimer:
        self.__lidar = RPLidar('/dev/ttyUSB0')
        # self.__lidar = Lidar('/dev/ttyUSB0')
        self.__timer = RIR_timer(self.__com,
                                 (self.__Oparam, self.__move),
                                 self.__lidar, lancer_exp)
        # Test: placed before __init_physical
        self.__lidar.start_motor()
    self.set_ready()
def __init__(self, input_dim, variance=1.0, lengthscales=None,
             active_dims=None, ARD=False):
    """
    input_dim is the dimension of the input to the kernel
    variance is the (initial) value for the variance parameter
    lengthscales is the initial value for the lengthscales parameter
        -- defaults to 1.0 (ARD=False) or np.ones(input_dim) (ARD=True).
    active_dims is a list of length input_dim which controls which
        columns of X are used.
    ARD specifies whether the kernel has one lengthscale per dimension
        (ARD=True) or a single lengthscale (ARD=False).
    """
    Kern.__init__(self, input_dim, active_dims)
    self.variance = Param(variance, transforms.positive)
    if ARD:
        if lengthscales is None:
            lengthscales = np.ones(input_dim)
        else:
            # accepts float or array
            lengthscales = lengthscales * np.ones(input_dim)
        self.lengthscales = Param(lengthscales, transforms.positive)
        self.ARD = True
    else:
        if lengthscales is None:
            lengthscales = 1.0
        self.lengthscales = Param(lengthscales, transforms.positive)
        self.ARD = False
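
# For context: a stationary kernel uses these lengthscales to rescale each
# input column before computing distances. The squared-exponential form
# below is a hedged, self-contained sketch (an assumption for illustration,
# not code from this class).
import numpy as np

def rbf(X1, X2, variance=1.0, lengthscales=1.0):
    # Dividing by lengthscales broadcasts for both a scalar lengthscale
    # and a one-per-dimension ARD vector.
    X1s, X2s = X1 / lengthscales, X2 / lengthscales
    sq_dist = (np.sum(X1s ** 2, 1)[:, None] - 2.0 * X1s @ X2s.T
               + np.sum(X2s ** 2, 1)[None, :])
    return variance * np.exp(-0.5 * sq_dist)

X = np.random.randn(5, 3)
K_iso = rbf(X, X, lengthscales=1.0)               # single lengthscale
K_ard = rbf(X, X, lengthscales=np.ones(3) * 2.0)  # ARD: one per column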
def __init__(self, x0, t, Y, Z0, U0, sn0, kern, jitter=jitter0,
             summ=False, whiten=True, fix_Z=False, fix_U=False,
             fix_sn=False):
    """
    Constructor for the NPODE model

    Args:
        x0: Numpy matrix of size TxD of initial values. T is the number
            of input sequences and D is the problem dimensionality.
        t: Python array of T numpy vectors storing observation times
        Y: Python array of T numpy matrices storing observations.
            Observations are stored in rows.
        Z0: Numpy matrix of initial inducing points of size MxD, M
            being the number of inducing points.
        U0: Numpy matrix of initial inducing vectors of size MxD, M
            being the number of inducing points.
        sn0: Numpy vector of size 1xD for initial signal variance
        kern: Kernel object for GP interpolation
        jitter: Float of jitter level
        whiten: Boolean. Currently we perform the optimization only in
            the white domain
        summ: Boolean for Tensorflow summary
        fix_Z: Boolean - whether inducing locations are fixed or optimized
        fix_U: Boolean - whether inducing vectors are fixed or optimized
        fix_sn: Boolean - whether noise variance is fixed or optimized
    """
    self.name = 'npode'
    self.whiten = whiten
    self.kern = kern
    self.jitter = jitter
    with tf.name_scope("NPDE"):
        Z = Param(Z0, name="Z", summ=False, fixed=fix_Z)
        U = Param(U0, name="U", summ=False, fixed=fix_U)
        sn = Param(np.array(sn0), name="sn", summ=summ, fixed=fix_sn,
                   transform=transforms.Log1pe())
        self.Z = Z()
        self.U = U()
        self.sn = sn()
    self.D = U.shape[1]
    self.x0 = x0
    self.t = t
    self.Y = Y
    self.integrator = ODERK4(self, x0, t)
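
# Aside on the `whiten` flag: the whitened representation usually means
# optimising a variable v ~ N(0, I) and recovering U = L v, where L is the
# Cholesky factor of the inducing-point kernel matrix. A hedged numpy sketch
# of that reparameterisation (my illustration, not this model's own code):
import numpy as np

M, D = 8, 2
A = np.random.randn(M, M)
Kzz = A @ A.T + M * np.eye(M)   # toy kernel matrix on the inducing points Z
L = np.linalg.cholesky(Kzz)

v = np.random.randn(M, D)       # whitened inducing vectors, standard normal
U = L @ v                       # un-whitened inducing vectors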
def test(self, data, numbers):
    params = Param().convert_param(numbers)
    model = self.design_model(data, params)
    self.test_model(data, model)
    print('')
    print('best params : %s' % str(numbers))
    print('')
    for k, v in sorted(params.items()):
        print('%-13s: %s' % (k, v))
def __init__(self, A=np.ones((1, 1)), b=np.zeros(1)):
    """
    A is a matrix which maps each element of X to Y, b is an additive
    constant.

    If X has N rows and D columns, and Y is intended to have Q columns,
    then A must be D x Q, b must be a vector of length Q.
    """
    MeanFunction.__init__(self)
    self.A = Param(np.atleast_2d(A))
    self.b = Param(b)
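
# Shape sketch for the docstring above (toy sizes): with X of size N x D,
# A of size D x Q, and b of length Q, the mean function is X @ A + b.
import numpy as np

N, D, Q = 5, 3, 2
X = np.random.randn(N, D)
A = np.random.randn(D, Q)
b = np.random.randn(Q)
Y_mean = X @ A + b   # N x Q; b broadcasts across the N rows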
def get_tp(self):
    tp_f1 = Param(self.obj_tp_f1, self.sc_dist)
    # tp_f1_ft_result = tp_f1.get_ft_results()
    # print('tp_f1_ft_result', tp_f1_ft_result)
    tp_f1_alog_ys = tp_f1.get_alog_y()
    # print('tp_f1_alog_ys', tp_f1_alog_ys)
    tp_f2 = Param(self.obj_tp_f2, self.sc_dist)
    # tp_f2_ft_result = tp_f2.get_ft_results()
    # print('tp_f2_ft_result', tp_f2_ft_result)
    tp_f2_alog_ys = tp_f2.get_alog_y()
    # print('tp_f2_alog_ys', tp_f2_alog_ys)
    tp_f3 = Param(self.obj_tp_f3, self.sc_dist)
    # tp_f3_ft_result = tp_f3.get_ft_results()
    # print('tp_f3_ft_result', tp_f3_ft_result)
    tp_f3_alog_ys = tp_f3.get_alog_y()
    # print('tp_f3_alog_ys', tp_f3_alog_ys)
    sum_tp_f = []
    tp_arr = []
    for idx_cnt in range(2):
        sum_tp_f.append(tp_f1_alog_ys[idx_cnt] + tp_f2_alog_ys[idx_cnt]
                        + tp_f3_alog_ys[idx_cnt])
        if sum_tp_f[idx_cnt] == 0:
            raise SystemExit("ERROR: sum of tp anti log y are 0")
        # Scale by the cube root of the TNT-equivalent weight.
        tps = sum_tp_f[idx_cnt] * math.pow(self.TNT_EQ_WT, 0.3333)
        tp_arr.append(round(tps, 3))
    return tp_arr
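
# The math.pow(self.TNT_EQ_WT, 0.3333) factor above is cube-root
# (Hopkinson-Cranz) blast scaling: a value read off a unit-charge curve is
# scaled by W**(1/3) for a charge of weight W. A worked sketch with made-up
# numbers (0.3333 in the code approximates 1/3):
import math

TNT_EQ_WT = 125.0        # hypothetical TNT-equivalent charge weight
unit_curve_value = 4.2   # hypothetical value from a unit-charge curve
scaled = round(unit_curve_value * math.pow(TNT_EQ_WT, 1.0 / 3.0), 3)
# cube root of 125 is 5, so `scaled` works out to 21.0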
def build_param_store(self):
    # self.track.mixer_device.volume.add_value_listener(self.on_volume_changed)
    # self.track.mixer_device.panning.add_value_listener(self.on_panning_changed)
    for device in self.track.devices:
        if device.class_name != "Looper":
            for param in device.parameters:
                self.params.append(Param(self.parent, param, self.song))
    self.params.append(
        Param(self.parent, self.track.mixer_device.panning, self.song))
    self.params.append(
        Param(self.parent, self.track.mixer_device.volume, self.song))
    for send in self.track.mixer_device.sends:
        self.params.append(Param(self.parent, send, self.song))
def __init__(self, pi=None, learnPi=False, variance=1.0,
             name='SpikeAndSlabPrior', **kw):
    super(SpikeAndSlabPrior, self).__init__(name=name, **kw)
    self.variance = Param('variance', variance)
    self.learnPi = learnPi
    if learnPi:
        self.pi = Param('Pi', pi, Logistic(1e-10, 1. - 1e-10))
    else:
        self.pi = Param('Pi', pi, __fixed__)
    self.link_parameter(self.pi)
def __init__(self, Z0, U0, sn0, kern, jitter=jitter0, summ=False,
             whiten=True, fix_Z=False, fix_U=False, fix_sn=False):
    """
    Constructor for the NPODE model

    Args:
        Z0: Numpy matrix of initial inducing points of size MxD, M
            being the number of inducing points.
        U0: Numpy matrix of initial inducing vectors of size MxD, M
            being the number of inducing points.
        sn0: Numpy vector of size 1xD for initial signal variance
        kern: Kernel object for GP interpolation
        jitter: Float of jitter level
        whiten: Boolean. Currently we perform the optimization only in
            the white domain
        summ: Boolean for Tensorflow summary
        fix_Z: Boolean - whether inducing locations are fixed or optimized
        fix_U: Boolean - whether inducing vectors are fixed or optimized
        fix_sn: Boolean - whether noise variance is fixed or optimized
    """
    self.name = 'npode'
    self.whiten = whiten
    self.kern = kern
    self.jitter = jitter
    with tf.name_scope("NPDE"):
        Z = Param(Z0, name="Z", summ=False, fixed=fix_Z)
        U = Param(U0, name="U", summ=False, fixed=fix_U)
        sn = Param(np.array(sn0), name="sn", summ=summ, fixed=fix_sn,
                   transform=transforms.Log1pe())
        self.Z = Z()
        self.U = U()
        self.sn = sn()
    self.D = U.shape[1]
    self.integrator = ODERK4(self)
    self.fix_Z = fix_Z
    self.fix_sn = fix_sn
    self.fix_U = fix_U
def get_ps(self):
    ps = Param(self.obj_ps, self.sc_dist)
    ps_ft_results = ps.get_ft_results()
    # print('ps ft result', ps_ft_results)
    ps_alog_ys = ps.get_alog_y()
    # print('ps_alog_ys', ps_alog_ys)
    ps_arr = []
    for idx, ft_result in enumerate(ps_ft_results):
        if ft_result == 0:
            raise SystemExit("filtered result: 0")
        p_s = round(ps_alog_ys[idx], 3)
        ps_arr.append(p_s)
    return ps_arr
def __init__(self, input_dim, variance=1.0, active_dims=None, ARD=False):
    """
    input_dim is the dimension of the input to the kernel
    variance is the (initial) value for the variance parameter(s)
        -- if ARD=True, there is one variance per input
    active_dims is a list of length input_dim which controls which
        columns of X are used.
    """
    Kern.__init__(self, input_dim, active_dims)
    self.ARD = ARD
    if ARD:
        self.variance = Param(np.ones(self.input_dim) * variance,
                              transforms.positive)
    else:
        self.variance = Param(variance, transforms.positive)
    self.parameters = [self.variance]
def __init__(self, means=None, variances=None, name='latent space',
             *a, **kw):
    super(VariationalPosterior, self).__init__(name=name, *a, **kw)
    self.mean = Param("mean", means)
    self.variance = Param("variance", variances, Logexp())
    self.ndim = self.mean.ndim
    self.shape = self.mean.shape
    self.num_data, self.input_dim = self.mean.shape
    self.link_parameters(self.mean, self.variance)
    if self.has_uncertain_inputs():
        assert self.variance.shape == self.mean.shape, \
            "need one variance per sample and dimension"
def __init__(self, means, variances, binary_prob, name='latent space'):
    """
    binary_prob : the probability of the distribution on the slab part.
    """
    super(SpikeAndSlabPosterior, self).__init__(means, variances, name)
    self.gamma = Param("binary_prob", binary_prob)
    self.link_parameter(self.gamma)
def deserialise(self, t):
    if not isinstance(t, dict) or CLASS_KEY not in t:
        return t
    if JSON_DEBUG:
        print(type(t))
        print("deserialise to ( %s )" % (t[CLASS_KEY]))
        print("keys: %s" % t.keys())
    c = t[CLASS_KEY]
    for i in t.keys():
        # Recurse into nested dicts. The original compared the value to
        # the `dict` type itself (t[i] == dict), which is never true for
        # a dict instance; isinstance is the intended check.
        if isinstance(t[i], dict):
            t[i] = self.deserialise_map(t[i])
    # instantiation rules
    if c == str(Param):
        return Param(t)
    if c == str(Command):
        return Command(t)
    if c == str(CommandMode):
        return CommandMode(t)
    if c == str(ParamTree):
        return ParamTree(t)
    if c == str(CommandTree):
        return CommandTree(t)
    raise TypeError("No instantiation rule for %s %s" % (CLASS_KEY, c))
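
# The instantiation rules above key on str(SomeClass), i.e. strings like
# "<class 'module.Param'>". A self-contained sketch of the same dispatch
# pattern with a stand-in class (class name and CLASS_KEY value here are
# hypothetical, not taken from the original project):
CLASS_KEY = "__class__"

class Box:
    def __init__(self, d):
        self.d = d

RULES = {str(Box): Box}

def deserialise(t):
    if not isinstance(t, dict) or CLASS_KEY not in t:
        return t
    cls = RULES.get(t[CLASS_KEY])
    if cls is None:
        raise TypeError("No instantiation rule for %s" % t[CLASS_KEY])
    return cls(t)

box = deserialise({CLASS_KEY: str(Box), "x": 1})  # a Box wrapping the dict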
def get_ta(self):
    ta = Param(self.obj_ta, self.sc_dist)
    # ta_ft_result = ta.get_ft_results()
    # print('ta_ft_result', ta_ft_result)
    ta_alog_ys = ta.get_alog_y()
    # print('ta_alog_ys', ta_alog_ys)
    ta_arr = []
    for alog_y in ta_alog_ys:
        if alog_y == 0:
            raise SystemExit("filtered result: 0")
        # Scale by the cube root of the TNT-equivalent weight.
        t_a = alog_y * math.pow(self.TNT_EQ_WT, 0.3333)
        ta_arr.append(round(t_a, 3))
    return ta_arr
def get_pr(self):
    pr = Param(self.obj_pr, self.sc_dist)
    # pr_ft_result = pr.get_ft_results()
    # print('pr ft result', pr_ft_result)
    pr_alog_ys = pr.get_alog_y()
    # print('pr_alog_ys', pr_alog_ys)
    pr_arr = []
    for alog_y in pr_alog_ys:
        if alog_y == 0:
            raise SystemExit("filtered result: 0")
        pr_arr.append(round(alog_y, 3))
    return pr_arr
def get_ir(self):
    ir = Param(self.obj_ir, self.sc_dist)
    # ir_ft_result = ir.get_ft_results()
    # print('ir_ft_result', ir_ft_result)
    ir_alog_ys = ir.get_alog_y()
    # print('ir_alog_ys', ir_alog_ys)
    ir_arr = []
    for alog_y in ir_alog_ys:
        if alog_y == 0:
            raise SystemExit("filtered result: 0")
        # Scale by the cube root of the TNT-equivalent weight.
        i_r = alog_y * math.pow(self.TNT_EQ_WT, 0.3333)
        ir_arr.append(round(i_r, 3))
    return ir_arr
def get_u(self):
    u = Param(self.obj_u, self.sc_dist)
    # u_ft_result = u.get_ft_results()
    # print('u_ft_result', u_ft_result)
    u_alog_ys = u.get_alog_y()
    # print('u_alog_ys', u_alog_ys)
    u_arr = []
    # Loop over values directly; the original rebound `u` inside the loop,
    # shadowing the Param above.
    for alog_y in u_alog_ys:
        if alog_y == 0:
            raise SystemExit("filtered result: 0")
        u_arr.append(round(alog_y, 3))
    return u_arr
def __init__(self, model_path):
    Param.load(self, model_path / 'tagger_defs.txt')
    self.extractor = FeatureExtractor(model_path, length=True)
    self.in_dim = self.word_dim + 8 * self.afix_dim
    super(FastBiaffineLSTMParser, self).__init__(
        emb_word=L.EmbedID(self.n_words, self.word_dim,
                           ignore_label=IGNORE),
        emb_suf=L.EmbedID(self.n_suffixes, self.afix_dim,
                          ignore_label=IGNORE),
        emb_prf=L.EmbedID(self.n_prefixes, self.afix_dim,
                          ignore_label=IGNORE),
        lstm_f=FixedLengthNStepLSTM(self.nlayers, self.in_dim,
                                    self.hidden_dim, 0.32),
        lstm_b=FixedLengthNStepLSTM(self.nlayers, self.in_dim,
                                    self.hidden_dim, 0.32),
        arc_dep=Linear(2 * self.hidden_dim, self.dep_dim),
        arc_head=Linear(2 * self.hidden_dim, self.dep_dim),
        rel_dep=Linear(2 * self.hidden_dim, self.dep_dim),
        rel_head=Linear(2 * self.hidden_dim, self.dep_dim),
        biaffine_arc=Biaffine(self.dep_dim),
        biaffine_tag=Bilinear(self.dep_dim, self.dep_dim,
                              len(self.targets)))
def train(self, data, numbers):
    params = Param().convert_param(numbers)
    model = self.design_model(data, params)
    log = self.train_model(data, model, params,
                           save_model=True, log_train=True)
def get_population(self):
    # Make population
    pop = []
    for _ in range(N_POP):
        ind = Param().make_param(N_HIDDEN_LAYER)
        pop.append(ind)
    return pop
def __init__(self, sf0, ell0, name="kernel", learning_rate=0.01,
             summ=False, fix_sf=False, fix_ell=False):
    with tf.name_scope(name):
        sf = Param(sf0, transform=transforms.Log1pe(), name="sf",
                   learning_rate=learning_rate, summ=summ, fixed=fix_sf)
        ell = Param(ell0, transform=transforms.Log1pe(), name="ell",
                    learning_rate=learning_rate, summ=summ,
                    fixed=fix_ell)
        self.sf = sf()
        self.ell = ell()
    self.fix_sf = fix_sf
    self.fix_ell = fix_ell
def main(self, data, numbers):
    start = time.time()
    params = Param().convert_param(numbers)
    model = self.design_model(data, params)
    log = self.train_model(data, model, params)
    end = time.time()
    time_cost = (end - start) / 60
    return log['test_accuracy'], time_cost
def _removeTieParam(self, idx):
    """idx within tied_param"""
    new_buf = np.empty((self.tied_param.size - len(idx),))
    # np.bool was removed in NumPy 1.24; the builtin bool is equivalent.
    bool_list = np.ones((self.tied_param.size,), dtype=bool)
    bool_list[idx] = False
    new_buf[:] = self.tied_param.param_array[bool_list]
    self.remove_parameter(self.tied_param)
    self.tied_param = Param('tied', new_buf)
    self.add_parameter(self.tied_param)
    buf_idx_new = self._highest_parent_._raveled_index_for(self.tied_param)
    self._shrink_label_buf(self.buf_idx, buf_idx_new, bool_list)
    self.buf_idx = buf_idx_new
def get_is(self):
    is_f1 = Param(self.obj_is_f1, self.sc_dist)
    # is_f1_ft_result = is_f1.get_ft_results()
    # print('is_f1 ft result', is_f1_ft_result)
    is_f1_alog_ys = is_f1.get_alog_y()
    # print('is_f1_alog_ys', is_f1_alog_ys)
    is_f2 = Param(self.obj_is_f2, self.sc_dist)
    # is_f2_ft_result = is_f2.get_ft_results()
    # print('is_f2 ft result', is_f2_ft_result)
    is_f2_alog_ys = is_f2.get_alog_y()
    # print('is_f2_alog_ys', is_f2_alog_ys)
    sum_is_f = []
    is_arr = []
    for idx_cnt in range(2):
        sum_is_f.append(is_f1_alog_ys[idx_cnt] + is_f2_alog_ys[idx_cnt])
        if sum_is_f[idx_cnt] == 0:
            raise SystemExit("ERROR: sum of Is anti log y are 0")
        # Scale by the cube root of the TNT-equivalent weight.
        iss = sum_is_f[idx_cnt] * math.pow(self.TNT_EQ_WT, 0.3333)
        is_arr.append(round(iss, 3))
    return is_arr
def parse_schema(self, schema):
    for field in schema:
        param = Param()
        param.name = field['name']
        param.type = field['type']
        if 'required' in field:
            param.required = field['required']
        if 'allowed_values' in field:
            param.allowed_values = field['allowed_values']
        if 'min_value' in field:
            param.min_value = field['min_value']
        if 'max_value' in field:
            param.max_value = field['max_value']
        if 'regex' in field:
            param.regex = field['regex']
        self.params.append(param)
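
# A hypothetical schema the loop above would accept (field names and
# constraints are illustrative, not from the original project):
schema = [
    {'name': 'username', 'type': 'string', 'required': True,
     'regex': r'^[a-z0-9_]+$'},
    {'name': 'age', 'type': 'int', 'min_value': 0, 'max_value': 150},
    {'name': 'role', 'type': 'string',
     'allowed_values': ['admin', 'user']},
]
# parser.parse_schema(schema) would then append one Param per field.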