def __init__(self, X, likelihood, kernel, Z, X_variance=None, normalize_X=False):
    """
    Initialise the sparse GP: store the inducing inputs Z and optional
    input uncertainty, and apply to Z (and X_variance) the same
    normalisation the base class applied to X.

    :param Z: inducing inputs, one row per inducing point
    :param X_variance: per-input Gaussian variances (same shape as X),
        or None for certain inputs
    """
    GPBase.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
    self.Z = Z
    self.num_inducing = Z.shape[0]
    self.likelihood = likelihood

    # uncertain (Gaussian-distributed) inputs are signalled by X_variance
    self.has_uncertain_inputs = X_variance is not None
    if self.has_uncertain_inputs:
        assert X_variance.shape == X.shape
        self.X_variance = X_variance

    if normalize_X:
        # put the inducing inputs into the same normalised frame as X
        self.Z = (self.Z.copy() - self._Xoffset) / self._Xscale
        if self.has_uncertain_inputs:
            # variances scale with the square of the linear rescaling
            self.X_variance /= np.square(self._Xscale)
def setstate(self, state):
    """
    Restore the stochastic (SVI) sparse GP from a state list produced by
    ``getstate``.

    The list is consumed with ``pop()``, so attributes are restored in
    exactly the reverse of the order ``getstate`` appended them -- these
    two methods must be kept in sync.
    """
    # optimisation bookkeeping and traces
    self.iterations = state.pop()
    self._permutation = state.pop()
    self.Y = state.pop()
    self._grad_trace = state.pop()
    self._ll_trace = state.pop()
    self._vb_steplength_trace = state.pop()
    self._param_steplength_trace = state.pop()
    self._param_trace = state.pop()
    # minibatching configuration
    self.data_prop = state.pop()
    self.momentum = state.pop()
    self.epochs = state.pop()
    self.batchsize = state.pop()
    self.batchcounter = state.pop()
    # adaptive-steplength state is stored as one packed tuple
    steplength_params = state.pop()
    (self.hbar_t,
     self.tau_t,
     self.gbar_t,
     self.gbar_t1,
     self.gbar_t2,
     self.hbar_tp,
     self.tau_tp,
     self.gbar_tp,
     self.adapt_param_steplength,
     self.adapt_vb_steplength,
     self.vb_steplength,
     self.param_steplength) = steplength_params
    # sparse-GP attributes
    self.X_variance_batch = state.pop()
    self.X_batch = state.pop()
    self.X_variance = state.pop()
    self.has_uncertain_inputs = state.pop()
    self.num_inducing = state.pop()
    self.Z = state.pop()
    vb_param = state.pop()
    # hand the remainder to the base class, then re-install the
    # variational parameters (which recomputes dependent quantities)
    GPBase.setstate(self, state)
    self.set_vb_param(vb_param)
def plot_f(self, samples=0, plot_limits=None, which_data_rows='all', which_data_ycols='all', which_parts='all', resolution=None, full_cov=False, fignum=None, ax=None):
    """
    Plot the GP's view of the world, where the data is normalized.

    - In one dimension, the function is plotted with a shaded region
      identifying two standard deviations.
    - In two dimensions, a contour-plot shows the mean predicted function.
    - Not implemented in higher dimensions.

    :param samples: the number of a posteriori samples to plot
    :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D
        [[xmin,ymin],[xmax,ymax]]. Defaults to data limits
    :param which_data_rows: which of the training data to plot (default all)
    :type which_data_rows: 'all' or a slice object to slice self.X, self.Y
    :param which_parts: which of the kernel functions to plot (additively)
    :type which_parts: 'all', or list of bools
    :param resolution: the number of intervals to sample the GP on.
        Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D
    :type resolution: int
    :param full_cov:
    :type full_cov: bool
    :param fignum: figure to plot on.
    :type fignum: figure number
    :param ax: axes to plot on.
    :type ax: axes handle
    """
    if ax is None:
        fig = pb.figure(num=fignum)
        ax = fig.add_subplot(111)
        if fignum is None:
            # Remember the figure we just created so the base-class call
            # draws into the same one. (The original guard tested
            # ``ax is None`` after ax had been assigned, so it never ran.)
            fignum = fig.num
    # '==' rather than 'is': identity comparison on string literals is
    # an implementation detail of CPython interning.
    if which_data_rows == 'all':
        which_data_rows = slice(None)
    GPBase.plot_f(self, samples=samples, plot_limits=plot_limits, which_data_rows=which_data_rows, which_data_ycols=which_data_ycols, which_parts=which_parts, resolution=resolution, fignum=fignum, ax=ax)
    if self.X.shape[1] == 1:
        if self.has_uncertain_inputs:
            # NOTE self.X are the normalized values now
            Xu = self.X * self._Xscale + self._Xoffset
            # BUGFIX: was ``which_data`` (undefined in this method) --
            # the parameter here is ``which_data_rows``.
            ax.errorbar(Xu[which_data_rows, 0], self.likelihood.data[which_data_rows, 0], xerr=2 * np.sqrt(self.X_variance[which_data_rows, 0]), ecolor='k', fmt=None, elinewidth=.5, alpha=.5)
        # mark the inducing-input locations along the bottom of the axes
        Zu = self.Z * self._Xscale + self._Xoffset
        ax.plot(Zu, np.zeros_like(Zu) + ax.get_ylim()[0], 'r|', mew=1.5, markersize=12)
    elif self.X.shape[1] == 2:
        Zu = self.Z * self._Xscale + self._Xoffset
        ax.plot(Zu[:, 0], Zu[:, 1], 'wo')
    else:
        # call-style raise works on both Python 2 and 3
        raise NotImplementedError("Cannot define a frame with more than two input dimensions")
def setstate_counterpart_note___ = None  # (no-op marker removed)
def __init__(self, X, likelihood, kernel, Z, q_u=None, batchsize=10, X_variance=None):
    """
    Initialise the stochastic (SVI) sparse GP.

    :param Z: inducing inputs, one row per inducing point
    :param q_u: initial variational distribution over the inducing
        outputs, flattened; randomly initialised when None
    :param batchsize: number of data points per stochastic minibatch
    :param X_variance: per-input Gaussian variances (uncertain inputs),
        or None for certain inputs
    """
    GPBase.__init__(self, X, likelihood, kernel, normalize_X=False)
    self.batchsize=batchsize
    # keep a private copy of the targets: minibatching swaps
    # likelihood.data in and out during plotting/optimisation
    self.Y = self.likelihood.Y.copy()
    self.Z = Z
    self.num_inducing = Z.shape[0]
    # optimisation counters
    self.batchcounter = 0
    self.epochs = 0
    self.iterations = 0
    # initial steplengths for the variational (vb) and kernel/likelihood
    # (param) updates
    self.vb_steplength = 0.05
    self.param_steplength = 1e-5
    self.momentum = 0.9
    if X_variance is None:
        self.has_uncertain_inputs = False
    else:
        self.has_uncertain_inputs = True
        self.X_variance = X_variance
    if q_u is None:
        # random mean, and -0.5*I for the (canonical) covariance part
        q_u = np.hstack((np.random.randn(self.num_inducing*self.output_dim),-.5*np.eye(self.num_inducing).flatten()))
    self.set_vb_param(q_u)
    # random ordering for minibatch draws; load_batch consumes it
    self._permutation = np.random.permutation(self.num_data)
    self.load_batch()
    # optimisation traces
    self._param_trace = []
    self._ll_trace = []
    self._grad_trace = []
    #set the adaptive steplength parameters
    self.hbar_t = 0.0
    self.tau_t = 100.0
    self.gbar_t = 0.0
    self.gbar_t1 = 0.0
    self.gbar_t2 = 0.0
    self.hbar_tp = 0.0
    self.tau_tp = 10000.0
    self.gbar_tp = 0.0
    self.adapt_param_steplength = True
    self.adapt_vb_steplength = True
    self._param_steplength_trace = []
    self._vb_steplength_trace = []
    self.ensure_default_constraints()
def getstate(self):
    """
    Return the picklable state: the base-class state extended with the
    sparse-GP attributes (inducing inputs and input-uncertainty
    bookkeeping). Everything else can be recomputed.
    """
    sparse_state = [self.Z,
                    self.num_inducing,
                    self.has_uncertain_inputs,
                    self.X_variance]
    return GPBase.getstate(self) + sparse_state
def getstate(self):
    """
    Build the picklable state for this model: the inherited state plus
    the sparse-GP attributes, which ``setstate`` pops back off in
    reverse order.
    """
    extra = [self.Z, self.num_inducing, self.has_uncertain_inputs, self.X_variance]
    base = GPBase.getstate(self)
    return base + extra
def plot(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, levels=20, fignum=None, ax=None):
    """
    Plot the posterior of the GP, then overlay the inducing inputs and
    (for uncertain inputs) horizontal error bars on the data.

    :param samples: the number of a posteriori samples to plot
    :param plot_limits: plot limits; 1D [xmin,xmax], 2D [[xmin,ymin],[xmax,ymax]]
    :param which_data: 'all' or a slice object selecting training rows
    :param which_parts: 'all', or list of bools selecting kernel parts
    :param resolution: number of intervals to sample the GP on
    :param levels: number of contour levels (2D only)
    :param fignum: figure to plot on
    :param ax: axes to plot on
    """
    if ax is None:
        fig = pb.figure(num=fignum)
        ax = fig.add_subplot(111)
    # '==' rather than 'is': string identity is unreliable
    if which_data == 'all':
        which_data = slice(None)
    # BUGFIX: forward the caller's arguments to the base plot -- the
    # previous code passed hard-coded defaults (samples=0, which_data='all',
    # resolution=None, ...) and silently ignored what the caller asked for.
    GPBase.plot(self, samples=samples, plot_limits=plot_limits, which_data=which_data, which_parts=which_parts, resolution=resolution, levels=levels, ax=ax)
    if self.X.shape[1] == 1:
        if self.has_uncertain_inputs:
            # NOTE self.X are the normalized values now
            Xu = self.X * self._Xscale + self._Xoffset
            ax.errorbar(Xu[which_data, 0], self.likelihood.data[which_data, 0], xerr=2 * np.sqrt(self.X_variance[which_data, 0]), ecolor='k', fmt=None, elinewidth=.5, alpha=.5)
        # mark inducing-input locations along the bottom of the axes
        Zu = self.Z * self._Xscale + self._Xoffset
        ax.plot(Zu, np.zeros_like(Zu) + ax.get_ylim()[0], 'r|', mew=1.5, markersize=12)
    elif self.X.shape[1] == 2:
        Zu = self.Z * self._Xscale + self._Xoffset
        ax.plot(Zu[:, 0], Zu[:, 1], 'wo')
def plot(self, ax=None, fignum=None, Z_height=None, **kwargs):
    """
    Plot the SVI model: the base-class posterior over the full data,
    plus the current minibatch and the inducing inputs.

    :param Z_height: y-coordinate at which to draw the inducing-input
        markers (1D only); defaults to the bottom of the axes
    :param kwargs: forwarded to GPBase.plot
    """
    if ax is None:
        fig = pb.figure(num=fignum)
        ax = fig.add_subplot(111)
    #horrible hack here:
    # temporarily swap the full targets into the likelihood so the base
    # plot shows all the data, then restore the previous contents
    data = self.likelihood.data.copy()
    self.likelihood.data = self.Y
    GPBase.plot(self, ax=ax, **kwargs)
    self.likelihood.data = data
    Zu = self.Z * self._Xscale + self._Xoffset
    if self.input_dim==1:
        # NOTE(review): X_batch is a minibatch while likelihood.data was
        # just restored -- presumably it holds the matching batch targets
        # at this point; confirm against load_batch.
        ax.plot(self.X_batch, self.likelihood.data, 'gx',mew=2)
        if Z_height is None:
            Z_height = ax.get_ylim()[0]
        ax.plot(Zu, np.zeros_like(Zu) + Z_height, 'r|', mew=1.5, markersize=12)
    if self.input_dim==2:
        ax.scatter(self.X[:,0], self.X[:,1], 20., self.Y[:,0], linewidth=0, cmap=pb.cm.jet)
        ax.plot(Zu[:,0], Zu[:,1], 'w^')
def setstate(self, state):
    """
    Restore the base-class state, then run a parameter round-trip so
    that all derived/cached quantities are recomputed.
    """
    GPBase.setstate(self, state)
    current = self._get_params()
    self._set_params(current)
def setstate(self, state):
    """
    Inverse of ``getstate``: pop the sparse-GP attributes off the state
    list (in reverse of the order they were appended), then hand the
    remainder to the base class.
    """
    # the generator pops left-to-right during unpacking, preserving the
    # original pop order exactly
    (self.X_variance,
     self.has_uncertain_inputs,
     self.num_inducing,
     self.Z) = (state.pop() for _ in range(4))
    GPBase.setstate(self, state)
def __init__(self, X, likelihood, kernel, normalize_X=False):
    """
    Construct the model via the base class, then run a parameter
    round-trip to trigger the initial computation of derived quantities.
    """
    GPBase.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
    params = self._get_params()
    self._set_params(params)
def plot(self, plot_limits=None, which_data_rows='all', which_data_ycols='all', which_parts='all', fixed_inputs=[], plot_raw=False, levels=20, samples=0, fignum=None, ax=None, resolution=None):
    """
    Plot the posterior of the sparse GP.

    - In one dimension, the function is plotted with a shaded region
      identifying two standard deviations.
    - In two dimensions, a contour-plot shows the mean predicted function.
    - In higher dimensions, use fixed_inputs to plot the GP with some of
      the inputs fixed.

    Can plot only part of the data and part of the posterior functions
    using which_data_rows, which_data_ycols and which_parts.

    :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D
        [[xmin,ymin],[xmax,ymax]]. Defaults to data limits
    :type plot_limits: np.array
    :param which_data_rows: which of the training data to plot (default all)
    :type which_data_rows: 'all' or a slice object to slice self.X, self.Y
    :param which_data_ycols: when the data has several columns
        (independent outputs), only plot these
    :type which_data_ycols: 'all' or a list of integers
    :param which_parts: which of the kernel functions to plot (additively)
    :type which_parts: 'all', or list of bools
    :param fixed_inputs: a list of tuples [(i,v), (i,v)...], specifying
        that input index i should be set to value v.
    :type fixed_inputs: a list of tuples
    :param resolution: the number of intervals to sample the GP on.
        Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D
    :type resolution: int
    :param levels: number of levels to plot in a contour plot.
    :type levels: int
    :param samples: the number of a posteriori samples to plot
    :type samples: int
    :param fignum: figure to plot on.
    :type fignum: figure number
    :param ax: axes to plot on.
    :type ax: axes handle
    """
    #deal work out which ax to plot on
    #Need these because we use which_data_rows in this function not just base
    if which_data_rows == 'all':
        which_data_rows = slice(None)
    if which_data_ycols == 'all':
        which_data_ycols = np.arange(self.output_dim)
    if ax is None:
        fig = pb.figure(num=fignum)
        ax = fig.add_subplot(111)
    #work out what the inputs are for plotting (1D or 2D)
    fixed_dims = np.array([i for i, v in fixed_inputs])
    free_dims = np.setdiff1d(np.arange(self.input_dim), fixed_dims)
    # NOTE(review): plot_raw is accepted but never used here -- presumably
    # the base class should receive it; confirm against GPBase.plot.
    #call the base plotting
    # BUGFIX: forward the caller's ``levels`` (was hard-coded to 20)
    GPBase.plot(self, samples=samples, plot_limits=plot_limits, which_data_rows=which_data_rows, which_data_ycols=which_data_ycols, fixed_inputs=fixed_inputs, which_parts=which_parts, resolution=resolution, levels=levels, fignum=fignum, ax=ax)
    if len(free_dims) == 1:
        #plot errorbars for the uncertain inputs
        if self.has_uncertain_inputs:
            # NOTE self.X are the normalized values now
            Xu = self.X * self._Xscale + self._Xoffset
            ax.errorbar(Xu[which_data_rows, 0], self.likelihood.data[which_data_rows, 0], xerr=2 * np.sqrt(self.X_variance[which_data_rows, 0]), ecolor='k', fmt=None, elinewidth=.5, alpha=.5)
        #plot the inducing inputs
        Zu = self.Z * self._Xscale + self._Xoffset
        ax.plot(Zu, np.zeros_like(Zu) + ax.get_ylim()[0], 'r|', mew=1.5, markersize=12)
    elif len(free_dims) == 2:
        Zu = self.Z * self._Xscale + self._Xoffset
        ax.plot(Zu[:, 0], Zu[:, 1], 'wo')
    else:
        # call-style raise works on both Python 2 and 3
        raise NotImplementedError("Cannot define a frame with more than two input dimensions")
def __init__(self, X, likelihood, kernel, normalize_X=False):
    """
    Construct the GP through the base class, then compute the
    approximation to the (non-Gaussian) likelihood so the model is
    ready for inference.
    """
    GPBase.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
    self.update_likelihood_approximation()
def getstate(self):
    """
    This class adds nothing to the pickled state: delegate entirely to
    the base class.
    """
    state = GPBase.getstate(self)
    return state
def plot(self, plot_limits=None, which_data_rows='all', which_data_ycols='all', which_parts='all', fixed_inputs=[], plot_raw=False, levels=20, samples=0, fignum=None, ax=None, resolution=None):
    """
    Plot the posterior of the sparse GP.

    - In one dimension, the function is plotted with a shaded region
      identifying two standard deviations.
    - In two dimensions, a contour-plot shows the mean predicted function.
    - In higher dimensions, use fixed_inputs to plot the GP with some of
      the inputs fixed.

    Can plot only part of the data and part of the posterior functions
    using which_data_rows, which_data_ycols and which_parts.

    :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D
        [[xmin,ymin],[xmax,ymax]]. Defaults to data limits
    :type plot_limits: np.array
    :param which_data_rows: which of the training data to plot (default all)
    :type which_data_rows: 'all' or a slice object to slice self.X, self.Y
    :param which_data_ycols: when the data has several columns
        (independent outputs), only plot these
    :type which_data_ycols: 'all' or a list of integers
    :param which_parts: which of the kernel functions to plot (additively)
    :type which_parts: 'all', or list of bools
    :param fixed_inputs: a list of tuples [(i,v), (i,v)...], specifying
        that input index i should be set to value v.
    :type fixed_inputs: a list of tuples
    :param resolution: the number of intervals to sample the GP on.
        Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D
    :type resolution: int
    :param levels: number of levels to plot in a contour plot.
    :type levels: int
    :param samples: the number of a posteriori samples to plot
    :type samples: int
    :param fignum: figure to plot on.
    :type fignum: figure number
    :param ax: axes to plot on.
    :type ax: axes handle
    """
    #deal work out which ax to plot on
    #Need these because we use which_data_rows in this function not just base
    if which_data_rows == 'all':
        which_data_rows = slice(None)
    if which_data_ycols == 'all':
        which_data_ycols = np.arange(self.output_dim)
    if ax is None:
        fig = pb.figure(num=fignum)
        ax = fig.add_subplot(111)
    #work out what the inputs are for plotting (1D or 2D)
    fixed_dims = np.array([i for i, v in fixed_inputs])
    free_dims = np.setdiff1d(np.arange(self.input_dim), fixed_dims)
    # NOTE(review): plot_raw is accepted but never used here -- presumably
    # the base class should receive it; confirm against GPBase.plot.
    #call the base plotting
    # BUGFIX: forward the caller's ``levels`` (was hard-coded to 20)
    GPBase.plot(self, samples=samples, plot_limits=plot_limits, which_data_rows=which_data_rows, which_data_ycols=which_data_ycols, fixed_inputs=fixed_inputs, which_parts=which_parts, resolution=resolution, levels=levels, fignum=fignum, ax=ax)
    if len(free_dims) == 1:
        #plot errorbars for the uncertain inputs
        if self.has_uncertain_inputs:
            # NOTE self.X are the normalized values now
            Xu = self.X * self._Xscale + self._Xoffset
            ax.errorbar(Xu[which_data_rows, 0], self.likelihood.data[which_data_rows, 0], xerr=2 * np.sqrt(self.X_variance[which_data_rows, 0]), ecolor='k', fmt=None, elinewidth=.5, alpha=.5)
        #plot the inducing inputs
        Zu = self.Z * self._Xscale + self._Xoffset
        ax.plot(Zu, np.zeros_like(Zu) + ax.get_ylim()[0], 'r|', mew=1.5, markersize=12)
    elif len(free_dims) == 2:
        Zu = self.Z * self._Xscale + self._Xoffset
        ax.plot(Zu[:, 0], Zu[:, 1], 'wo')
    else:
        # call-style raise works on both Python 2 and 3
        raise NotImplementedError("Cannot define a frame with more than two input dimensions")