def mutate(self):
    # Mutate sigma (log-normal self-adaptation of the step size)
    self.sigma[0] = self.sigma[0] * math.exp(self.tau_prim * norm.rvs() +
                                             self.tau * norm.rvs())
    # Mutate l, clamping the result to [linf, lsup] where bounds are set
    new_l = self.genes[0] + norm.rvs(scale=self.sigma[0])
    if not self.lsup and not self.linf:
        self.genes[0] = new_l
    elif not self.lsup:
        if new_l < self.linf:
            self.genes[0] = self.linf
        else:
            self.genes[0] = new_l
    elif not self.linf:
        if new_l > self.lsup:
            self.genes[0] = self.lsup
        else:
            self.genes[0] = new_l
    else:
        if self.linf <= new_l <= self.lsup:
            self.genes[0] = new_l
        elif new_l > self.lsup:
            self.genes[0] = self.lsup
        elif new_l < self.linf:
            self.genes[0] = self.linf
def mutate(self):
    # Mutate sigma: one global draw shared by all step sizes, plus a
    # per-component draw (log-normal self-adaptation)
    random_number = norm.rvs()
    for i in range(len(self.sigma)):
        self.sigma[i] = self.sigma[i] * math.exp(self.tau_prim * random_number +
                                                 self.tau * norm.rvs())
    # Mutate gammas, clamping each to [linf, lsup] where bounds are set
    # (note: falsy bounds such as 0 are treated as unset here)
    for i in range(len(self.genes)):
        new_gamma = self.genes[i] + norm.rvs(scale=self.sigma[i])
        if not self.lsup and not self.linf:
            self.genes[i] = new_gamma
        elif not self.lsup:
            if new_gamma < self.linf:
                self.genes[i] = self.linf
            else:
                self.genes[i] = new_gamma
        elif not self.linf:
            if new_gamma > self.lsup:
                self.genes[i] = self.lsup
            else:
                self.genes[i] = new_gamma
        else:
            if self.linf <= new_gamma <= self.lsup:
                self.genes[i] = new_gamma
            elif new_gamma > self.lsup:
                self.genes[i] = self.lsup
            elif new_gamma < self.linf:
                self.genes[i] = self.linf
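# The mutate variants above implement log-normal self-adaptation of the
# mutation step sizes, as used in evolution strategies. A minimal standalone
# sketch of the same update rule follows; the function name and the
# conventional learning rates tau = 1/sqrt(2*sqrt(n)) and tau' = 1/sqrt(2*n)
# are assumptions, not taken from the snippets above.
import math

from scipy.stats import norm


def self_adaptive_mutation(genes, sigma, linf=None, lsup=None):
    n = len(genes)
    tau = 1.0 / math.sqrt(2.0 * math.sqrt(n))   # per-component learning rate
    tau_prim = 1.0 / math.sqrt(2.0 * n)         # global learning rate
    global_draw = norm.rvs()                    # shared by all step sizes
    for i in range(n):
        sigma[i] *= math.exp(tau_prim * global_draw + tau * norm.rvs())
        new_gene = genes[i] + norm.rvs(scale=sigma[i])
        # clamp to [linf, lsup] where bounds are given
        if lsup is not None and new_gene > lsup:
            new_gene = lsup
        if linf is not None and new_gene < linf:
            new_gene = linf
        genes[i] = new_gene
    return genes, sigma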
def _gen_noisy_sequence(self, pattern):
    source = np.zeros((np.shape(pattern)[0], np.shape(pattern)[1],
                       np.shape(pattern)[2], self.inputs.number_of_blocks * 2))
    # g = self._make_gaussian(self.inputs.fwhm * 2)
    for i in range(self.inputs.number_of_blocks):
        # first half of the blocks: smoothed pattern-plus-noise volumes
        source[:, :, :, i] = gaussian_filter(
            norm.rvs(size=np.shape(pattern)) + pattern, self.inputs.sigma)
        # second half: smoothed pure-noise volumes
        source[:, :, :, i + self.inputs.number_of_blocks] = gaussian_filter(
            norm.rvs(size=np.shape(pattern)), self.inputs.sigma)
    # shift/scale into a positive intensity range
    return (source - source.min()) * 100 + 1750
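# For reference, _gen_noisy_sequence stacks number_of_blocks noisy copies of
# the activation pattern followed by the same number of pure-noise volumes
# along a fourth axis, then rescales to a positive intensity range. A hedged
# standalone usage sketch (the free-function signature is an assumption):
import numpy as np
from scipy.ndimage import gaussian_filter
from scipy.stats import norm


def gen_noisy_sequence(pattern, number_of_blocks, sigma):
    source = np.zeros(pattern.shape + (number_of_blocks * 2,))
    for i in range(number_of_blocks):
        # "active" blocks: smoothed pattern-plus-noise
        source[:, :, :, i] = gaussian_filter(
            norm.rvs(size=pattern.shape) + pattern, sigma)
        # "rest" blocks: smoothed noise only
        source[:, :, :, i + number_of_blocks] = gaussian_filter(
            norm.rvs(size=pattern.shape), sigma)
    return (source - source.min()) * 100 + 1750


seq = gen_noisy_sequence(np.zeros((8, 8, 4)), number_of_blocks=3, sigma=1.0)
print(seq.shape)  # (8, 8, 4, 6)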
def mutate(self):
    # Mutate sigma (log-normal self-adaptation)
    self.sigma[0] = self.sigma[0] * math.exp(self.tau_prim * norm.rvs() +
                                             self.tau * norm.rvs())
    # Mutate h: the integer gene moves by at most one step, in the direction
    # of the Gaussian draw when it exceeds one standard deviation
    alpha = norm.rvs(scale=self.sigma[0])
    inc = 0
    if alpha < -self.sigma[0]:
        inc = -1
    elif alpha > self.sigma[0]:
        inc = 1
    new_h = self.genes[0] + inc
    if self.linf <= new_h <= self.lsup:
        self.genes[0] = new_h
def __random_gen(self):
    # Unbounded: draw from a standard normal
    if self.lsup is None and self.linf is None:
        return norm.rvs()
    # Only a lower bound: uniform on [linf, linf + 2*(linf + 1)]
    elif self.lsup is None:
        return uniform.rvs(self.linf, 2.0 * (self.linf + 1))
    # Only an upper bound: uniform on [0, lsup]
    elif self.linf is None:
        return uniform.rvs(0.0, self.lsup)
    # Both bounds: uniform on [linf, lsup]; scipy's uniform.rvs takes
    # (loc, scale) and samples from [loc, loc + scale], so pass the width
    return uniform.rvs(self.linf, self.lsup - self.linf)
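# Note that scipy.stats.uniform.rvs(loc, scale) samples from
# [loc, loc + scale], not [loc, scale]; that convention is why the bounded
# branch above passes the interval width as the second argument. A quick
# sanity check:
from scipy.stats import uniform

linf, lsup = 2.0, 5.0
draws = uniform.rvs(loc=linf, scale=lsup - linf, size=1000)
assert linf <= draws.min() and draws.max() <= lsup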
def test_model2_table(self):
    """Tests model2 table is created correctly."""
    # seed R at 0 by altering the command str
    exp = array([[33., 19., 53., 32., 4., 14., 17., 10., 9., 7.],
                 [17., 29., 10., 34., 7., 30., 5., 14., 68., 51.],
                 [10., 19., 16., 21., 54., 33., 43., 26., 12., 17.],
                 [40., 33., 21., 13., 35., 23., 35., 50., 11., 25.]])
    obs = model2_table([1, 1.1, 1.4, 1.5], 10, 100, 10)
    assert_array_almost_equal(exp, obs)
    # test with a random array to make sure sequencing depth (col sums) is
    # preserved
    obs = model2_table(abs(norm.rvs(size=10)), 50, 1000, 100)
    self.assertTrue(all(obs.sum(0) == 1000))
def run(self, specification, coefficients, dataset, index=None, chunk_specification=None,
        years=4, data_objects=None, run_config=None, debuglevel=0):
    """ For info on the arguments see RegressionModel.
    """
    if data_objects is not None:
        self.dataset_pool.add_datasets_if_not_included(data_objects)
    if self.filter_attribute is not None:
        res = Resources({"debug": debuglevel})
        index = dataset.get_filtered_index(self.filter_attribute, threshold=0, index=index,
                                           dataset_pool=self.dataset_pool, resources=res)
    init_outcome = RegressionModelWithAdditionInitialResiduals.run(
        self, specification, coefficients, dataset, index,
        chunk_specification=chunk_specification, run_config=run_config,
        debuglevel=debuglevel)
    initial_error_name = "_init_error_%s" % self.outcome_attribute.get_alias()
    initial_error = dataset[initial_error_name][index]
    mean = init_outcome - initial_error
    rmse = dataset.compute_variables("paris.establishment.rmse_ln_emp_ratio")
    # convert lump prediction to annual prediction
    _epsilon = norm.rvs(loc=0, scale=rmse) / years
    _epsilon_name = "_epsilon_%s" % self.outcome_attribute.get_alias()
    if _epsilon_name not in dataset.get_known_attribute_names():
        dataset.add_primary_attribute(name=_epsilon_name,
                                      data=zeros(dataset.size(), dtype="float32"))
    dataset.set_values_of_one_attribute(_epsilon_name, _epsilon, index)
    outcome = mean + _epsilon[index]
    if (outcome is None) or (outcome.size <= 0):
        return outcome
    if index is None:
        index = arange(dataset.size())
    if re.search("^ln_", self.outcome_attribute.get_alias()):
        # if the outcome attr. name starts with 'ln_',
        # the results will be exponentiated
        outcome_attribute_name = self.outcome_attribute.get_alias()[3:]
        outcome = exp(outcome)
    else:
        outcome_attribute_name = self.outcome_attribute.get_alias()
    if outcome_attribute_name in dataset.get_known_attribute_names():
        values = dataset.get_attribute(outcome_attribute_name).copy()
        dataset.delete_one_attribute(outcome_attribute_name)
    else:
        values = zeros(dataset.size(), dtype='f')
    values[index] = outcome.astype(values.dtype)
    dataset.add_primary_attribute(name=outcome_attribute_name, data=values)
    self.correct_infinite_values(dataset, outcome_attribute_name,
                                 clip_all_larger_values=True)
    return outcome
def __mutation(self, factor=7):
    """ Mutation Phase. """
    self.__offspring = []
    # target offspring count, drawn around factor * number_of_parents
    offspring_star = norm.rvs(loc=factor * len(self.__parents))
    total_fitness = 0.0
    for i in range(len(self.__parents)):
        total_fitness += self.__parents[i].score
    for i in range(len(self.__parents)):
        p = self.__parents[i]
        offspring_factor = 7
        if total_fitness != 0.0:
            if self.__problem == "max":
                # fitter parents get a larger share of the offspring
                offspring_factor = offspring_star * (p.score / total_fitness)
            else:
                # for minimisation, invert the share
                offspring_factor = offspring_star * (1 - (p.score / total_fitness))
        for j in range(int(offspring_factor)):
            self.__id_count += 1
            o = p.mutate()
            o.id = self.__id_count
            o.update_score()
            self.__offspring.append(o)
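# The allocation above draws a target population size offspring_star around
# factor * len(parents) and splits it across parents in proportion to their
# fitness share (inverted for minimisation). A small numeric sketch of the
# same rule, with made-up scores:
from scipy.stats import norm

scores = [4.0, 1.0, 5.0]   # assumed parent fitness values (maximisation)
factor = 7
offspring_star = norm.rvs(loc=factor * len(scores))  # about 21 in total
total = sum(scores)
counts = [int(offspring_star * (s / total)) for s in scores]
print(counts)  # e.g. [8, 2, 10]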
import numpy as np
from scipy.stats import norm
from statsmodels.base.distributed_estimation import DistributedModel


# _exog_gen, the analogous partitioner for the exog data, is assumed to be
# defined just above this excerpt.
def _endog_gen(endog, partitions):
    """Partitions endog data."""
    n_endog = endog.shape[0]
    n_part = np.ceil(n_endog / partitions)
    ii = 0
    while ii < n_endog:
        jj = int(min(ii + n_part, n_endog))
        yield endog[ii:jj]
        ii += int(n_part)


# Next we generate some random data to serve as an example.

X = np.random.normal(size=(1000, 25))
beta = np.random.normal(size=25)
beta *= np.random.randint(0, 2, size=25)

y = norm.rvs(loc=X.dot(beta))
m = 5

# This is the most basic fit, showing all of the defaults, which are to
# use OLS as the model class, and the debiasing procedure.

debiased_OLS_mod = DistributedModel(m)
debiased_OLS_fit = debiased_OLS_mod.fit(
    zip(_endog_gen(y, m), _exog_gen(X, m)), fit_kwds={"alpha": 0.2})

# Then we run through a slightly more complicated example which uses the
# GLM model class.

from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod.families import Gaussian
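# The excerpt stops right after the GLM imports. A plausible continuation,
# mirroring the OLS call above and passing the model class and family through
# DistributedModel's model_class/init_kwds arguments:
debiased_GLM_mod = DistributedModel(m, model_class=GLM,
                                    init_kwds={"family": Gaussian()})
debiased_GLM_fit = debiased_GLM_mod.fit(
    zip(_endog_gen(y, m), _exog_gen(X, m)), fit_kwds={"alpha": 0.2})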
def test_0(size_N, mu_0, sgm_0):
    x = spnorm.rvs(size=size_N, loc=mu_0, scale=sgm_0)
    inv_x_0 = spnorm.ppf(x, loc=mu_0, scale=sgm_0)
    opencl_kernel = """
def segment(image, n_segments=2, burn_in=1000, samples=1000, lag=5):
    """
    Return image segment samples.

    Parameters
    ----------
    image : (N,M,3) ndarray
        Pixel array with per-channel values (e.g. RGB)

    Returns
    -------
    labels : (samples,N,M) ndarray
        The image segment label array
    emission_params : (samples,K,6) ndarray
        The Gaussian emission distribution parameters
        (mean, precision) for each colour channel
    log_probs : (samples,) ndarray
    """
    # allocate arrays
    res_labels = zeros((samples, image.shape[0], image.shape[1]), dtype=int)
    res_emission_params = zeros((samples, n_segments, 6))
    res_log_prob = zeros((samples,))

    padded_labels = ones((image.shape[0] + 2, image.shape[1] + 2), dtype=int) * -1
    labels = padded_labels[1:-1, 1:-1]
    emission_params = zeros((n_segments, 6))
    log_prob = None
    conditional = zeros((n_segments,))

    # init emission_params
    sample_mean_r = image[:, :, 0].mean()
    sample_mean_g = image[:, :, 1].mean()
    sample_mean_b = image[:, :, 2].mean()
    sample_var_r = image[:, :, 0].var()
    sample_var_g = image[:, :, 1].var()
    sample_var_b = image[:, :, 2].var()
    sample_prec_r = 1. / sample_var_r
    sample_prec_g = 1. / sample_var_g
    sample_prec_b = 1. / sample_var_b
    for k in xrange(n_segments):
        """
        emission_params[k,0] = norm.rvs(sample_mean_r, sqrt(sample_var_r/n_segments))
        emission_params[k,1] = sample_prec_r
        emission_params[k,2] = norm.rvs(sample_mean_g, sqrt(sample_var_g/n_segments))
        emission_params[k,3] = sample_prec_g
        emission_params[k,4] = norm.rvs(sample_mean_b, sqrt(sample_var_b/n_segments))
        emission_params[k,5] = sample_prec_b
        """
        emission_params[k, 0] = norm.rvs(0.5, 0.1)
        emission_params[k, 1] = 1 / (0.25 ** 2)
        emission_params[k, 2] = norm.rvs(0.5, 0.1)
        emission_params[k, 3] = 1 / (0.25 ** 2)
        emission_params[k, 4] = norm.rvs(0.5, 0.1)
        emission_params[k, 5] = 1 / (0.25 ** 2)

    # init labels
    for n in xrange(image.shape[0]):
        for m in xrange(image.shape[1]):
            labels[n, m] = randint(0, n_segments)

    try:
        # gibbs
        for i in xrange(burn_in + samples * lag - (lag - 1)):
            for n in xrange(image.shape[0]):
                for m in xrange(image.shape[1]):
                    # resample label
                    for k in xrange(n_segments):
                        labels[n, m] = k
                        conditional[k] = 0.
                        conditional[k] += phi_blanket(
                            memoryview(padded_labels), n, m, memoryview(FS))
                        """
                        for x in xrange(max(n-2,0), min(n+3,image.shape[0])):
                            for y in xrange(max(m-2,0), min(m+3, image.shape[1])):
                                clique = padded_labels[x:x+3,y:y+3]
                                conditional[k] += phi(clique)
                        """
                        mean_r = emission_params[k, 0]
                        var_r = 1. / emission_params[k, 1]
                        mean_g = emission_params[k, 2]
                        var_g = 1. / emission_params[k, 3]
                        mean_b = emission_params[k, 4]
                        var_b = 1. / emission_params[k, 5]
                        conditional[k] += log(norm.pdf(image[n, m, 0], mean_r, sqrt(var_r)))
                        conditional[k] += log(norm.pdf(image[n, m, 1], mean_g, sqrt(var_g)))
                        conditional[k] += log(norm.pdf(image[n, m, 2], mean_b, sqrt(var_b)))
                    labels[n, m] = sample_categorical(conditional)

            for k in xrange(n_segments):
                mask = (labels == k)

                # resample label mean red
                mean_r = emission_params[k, 0]
                prec_r = emission_params[k, 1]
                numer_r = TAU_0 * MU_0 + prec_r * sum(image[mask][:, 0])
                denom_r = TAU_0 + prec_r * sum(mask)
                post_mean_r = numer_r / denom_r
                post_var_r = 1. / denom_r
                emission_params[k, 0] = norm.rvs(post_mean_r, sqrt(post_var_r))

                # resample label var red
                post_alpha_r = ALPHA_0 + sum(mask) / 2.
                post_beta_r = BETA_0 + sum((image[mask][:, 0] - emission_params[k, 0]) ** 2) / 2.
                post_r = gamma(post_alpha_r, scale=1. / post_beta_r)
                emission_params[k, 1] = post_r.rvs()

                # resample label mean green
                mean_g = emission_params[k, 2]
                prec_g = emission_params[k, 3]
                numer_g = TAU_0 * MU_0 + prec_g * sum(image[mask][:, 1])
                denom_g = TAU_0 + prec_g * sum(mask)
                post_mean_g = numer_g / denom_g
                post_var_g = 1. / denom_g
                emission_params[k, 2] = norm.rvs(post_mean_g, sqrt(post_var_g))

                # resample label var green
                post_alpha_g = ALPHA_0 + sum(mask) / 2.
                post_beta_g = BETA_0 + sum((image[mask][:, 1] - emission_params[k, 2]) ** 2) / 2.
                post_g = gamma(post_alpha_g, scale=1. / post_beta_g)
                emission_params[k, 3] = post_g.rvs()

                # resample label mean blue
                mean_b = emission_params[k, 4]
                prec_b = emission_params[k, 5]
                numer_b = TAU_0 * MU_0 + prec_b * sum(image[mask][:, 2])
                denom_b = TAU_0 + prec_b * sum(mask)
                post_mean_b = numer_b / denom_b
                post_var_b = 1. / denom_b
                emission_params[k, 4] = norm.rvs(post_mean_b, sqrt(post_var_b))

                # resample label var blue
                post_alpha_b = ALPHA_0 + sum(mask) / 2.
                post_beta_b = BETA_0 + sum((image[mask][:, 2] - emission_params[k, 4]) ** 2) / 2.
                post_b = gamma(post_alpha_b, scale=1. / post_beta_b)
                emission_params[k, 5] = post_b.rvs()

            log_prob = 0.
            for n in xrange(image.shape[0]):
                for m in xrange(image.shape[1]):
                    # clique = padded_labels[n:n+3,m:m+3]
                    label = labels[n, m]
                    mean_r = emission_params[label, 0]
                    var_r = 1. / emission_params[label, 1]
                    mean_g = emission_params[label, 2]
                    var_g = 1. / emission_params[label, 3]
                    mean_b = emission_params[label, 4]
                    var_b = 1. / emission_params[label, 5]
                    # log_prob += phi(clique)
                    log_prob += log(norm.pdf(image[n, m, 0], mean_r, sqrt(var_r)))
                    log_prob += log(norm.pdf(image[n, m, 1], mean_g, sqrt(var_g)))
                    log_prob += log(norm.pdf(image[n, m, 2], mean_b, sqrt(var_b)))

            # prior on theta?
            log_prob += phi_all(memoryview(padded_labels), memoryview(FS))

            sys.stdout.write('\riter {} log_prob {}'.format(i, log_prob))
            sys.stdout.flush()

            if i < burn_in:
                pass
            elif not (i - burn_in) % lag:
                # index of this draw among the retained samples
                res_i = (i - burn_in) // lag
                res_emission_params[res_i] = emission_params[:]
                res_labels[res_i] = labels
                res_log_prob[res_i] = log_prob
        sys.stdout.write('\n')
        return res_labels, res_emission_params, res_log_prob

    except KeyboardInterrupt:
        return res_labels, res_emission_params, res_log_prob
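# A usage sketch for segment on a small synthetic RGB image. It assumes the
# module-level names the function relies on (FS, phi_blanket, phi_all,
# sample_categorical, and the TAU_0/MU_0/ALPHA_0/BETA_0 priors) are defined
# alongside it, and uses short chain settings so it finishes quickly:
import numpy as np

image = np.zeros((16, 16, 3))
image[:, 8:, :] = 0.8                                # two ground-truth segments
image += np.random.normal(scale=0.05, size=image.shape)

labels, emission_params, log_probs = segment(
    image, n_segments=2, burn_in=100, samples=10, lag=2)
print(labels.shape)  # (10, 16, 16)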
import os

import numpy as np
import pywt
import SimpleITK as sitk
from scipy.stats import norm
from skimage.transform import resize


def _threshold_image(_im, _thresh):
    # NOTE: header inferred for this excerpt; the name is an assumption
    _mask = _im > _thresh  # TODO: mask image data

    # compute ratio of non-zero coefficients
    _ratio = np.sum(_mask) / _mask.size

    return _mask * _im, _ratio


if __name__ == "__main__":
    # define test image
    im = sitk.ReadImage(os.path.abspath('image_head.dcm'))
    im = sitk.GetArrayViewFromImage(im).squeeze().astype(float)

    # add noise to input image
    im += norm.rvs(scale=2, size=im.shape)  # TODO: add noise to image

    print(pywt.wavelist())

    # define LOW and HIGH pass filter kernels (for decomposition and reconstruction)
    wl_name = 'haar'  # TODO: vary type of wavelet
    lp_d, hp_d, lp_r, hp_r = map(np.array, pywt.Wavelet(wl_name).filter_bank)

    # number of decomposition levels
    num_levels = 2  # TODO: vary number of decomposition levels

    # padding to avoid border effects
    padding = num_levels * 2 * len(lp_d)

    # resize the image (convenience for testing the code)
    sz = (1024, 1024)
    im = resize(im, output_shape=tuple(map(lambda x: x - 2 * padding, sz)))
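# The script above sets up raw filter-bank kernels for a manual transform.
# For comparison, the same hard-threshold denoising can be sketched directly
# with pywt's multilevel 2-D API (threshold value chosen ad hoc here):
import numpy as np
import pywt
from scipy.stats import norm

im = np.zeros((64, 64))
im[16:48, 16:48] = 100.0
im += norm.rvs(scale=2, size=im.shape)              # same noise level as above

coeffs = pywt.wavedec2(im, 'haar', level=2)         # [cA2, (cH2,cV2,cD2), (cH1,cV1,cD1)]
thresh = 3 * 2                                      # ~3 sigma of the added noise
den_coeffs = [coeffs[0]] + [
    tuple(pywt.threshold(c, thresh, mode='hard') for c in detail)
    for detail in coeffs[1:]
]
im_denoised = pywt.waverec2(den_coeffs, 'haar')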