Example #1
 def test_property(self):
     M = self.M
     for universe in [self.cube_universe, self.infinite_universe]:
         for p in [M.AtomProperty(universe, "mass", "amu",
                      IN.array(NR.normal(size=(universe.number_of_atoms,)))),
                   M.SiteProperty(universe, "foo", "",
                      IN.array(NR.normal(size=(universe.number_of_sites, 3)))),
                   M.TemplateAtomProperty(universe, "mass", "amu",
                      IN.array(NR.normal(size=(universe.number_of_template_atoms,
                                               1)))),
                   M.TemplateSiteProperty(universe, "foo", "bar",
                      IN.array(NR.normal(size=(universe.number_of_template_sites,
                                               2, 2)))),
                   ]:
             self.assertTrue(is_valid(p))
             with TestHDF5Store('w') as store:
                 self.assertRaises(IOError,
                                   lambda: store.store("property", p))
                 store.store("universe", universe)
                 store.store("property", p)
                 self.assertTrue(store.retrieve("property") is p)
             with TestHDF5Store('r') as store:
                 p_r = store.retrieve("property")
             self.assertFalse(p_r is p)
             self.assertTrue(p_r.is_equivalent(p))
             self.assertTrue(is_valid(p_r))
Example #2
def friedman(n_samples=100, n_features=10, noise_std=1):
    """Function creating simulated data with non linearities

    cf. Friedman 1993

    X = np.random.normal(0, 1)

    y = 10 * sin(X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
            + 10 * X[:, 3] + 5 * X[:, 4]

    The number of features must be at least 5.

    Parameters
    ----------
    n_samples : int
        number of samples (default is 100).

    n_features : int
        number of features (default is 10).

    noise_std : float
        std of the noise, which is added as noise_std * nr.normal(0, 1)

    Returns
    -------
    X : numpy array of shape (n_samples, n_features) for input samples
    y : numpy array of shape (n_samples,) for labels
    """
    X = nr.normal(loc=0, scale=1, size=(n_samples, n_features))
    y = 10 * np.sin(X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
            + 10 * X[:, 3] + 5 * X[:, 4]
    y += noise_std * nr.normal(loc=0, scale=1, size=n_samples)
    return X, y
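A minimal usage sketch (not part of the original source), assuming numpy is imported as np and its random module as nr, as the function body expects:

import numpy as np
import numpy.random as nr

nr.seed(0)  # make the draws reproducible
X, y = friedman(n_samples=200, n_features=10, noise_std=0.5)
print(X.shape, y.shape)  # -> (200, 10) (200,)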
Example #3
def generate_misclassifications(top_words):
    log("Generating artificial misclassification rate ..")
    from numpy.random import normal
    w = len(top_words)
    mis = np.zeros((w, w))
    for i in xrange(w):
        for j in xrange(i+1):
            distance = edit_distance(top_words[i], top_words[j])
            mis[i][j] = max(0.0, normal(0.4 ** distance, 0.05))
            mis[j][i] = max(0.0, normal(0.4 ** distance, 0.05))
    normalize_matrix(mis)
    mostly_wrong = list(sorted([(mis[i][i], i) for i in xrange(w)]))
    log("Top 10 words likely to be wrong:")
    for prob, idx in mostly_wrong[:10]:
        log("    %s (%.3lf%%) => %s", top_words[idx], prob*100.0,
                " ".join(["%s (%.3lf%%)" % (top_words[cand],
                    mis[idx][cand]*100.0)
                    for cand in reversed(np.argsort(mis[idx])[-4:])]))
    log("Top 10 words likely to be right:")
    for prob, idx in mostly_wrong[-10:]:
        log("    %s (%.3lf%%) => %s", top_words[idx], prob*100.0,
                " ".join(["%s (%.3lf%%)" % (top_words[cand],
                    mis[idx][cand]*100.0)
                    for cand in reversed(np.argsort(mis[idx])[-4:])]))
    return mis
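The snippet above leans on log, edit_distance and normalize_matrix helpers that are not shown. Purely as an assumption, minimal stand-ins could look like the following (an edit_distance is also available as nltk.metrics.distance.edit_distance):

import numpy as np

def log(fmt, *args):
    # print-style logger matching the calls above
    print(fmt % args if args else fmt)

def edit_distance(a, b):
    # classic Levenshtein distance via dynamic programming
    d = np.zeros((len(a) + 1, len(b) + 1), dtype=int)
    d[:, 0] = np.arange(len(a) + 1)
    d[0, :] = np.arange(len(b) + 1)
    for i in range(1, len(a) + 1):
        for j in range(1, len(b) + 1):
            d[i, j] = min(d[i - 1, j] + 1, d[i, j - 1] + 1,
                          d[i - 1, j - 1] + (a[i - 1] != b[j - 1]))
    return d[len(a), len(b)]

def normalize_matrix(m):
    # normalize each row in place so it sums to 1
    m /= m.sum(axis=1, keepdims=True)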
Example #4
    def test_configuration(self):
        M = self.M

        conf = M.Configuration(self.cube_universe,
                               IN.array(NR.normal(size=(40,3))),
                               IN.array(10., N.float64))
        self.assertTrue(is_valid(conf))
        with TestHDF5Store('w') as store:
            self.assertRaises(IOError,
                              lambda : store.store("configuration", conf))
            store.store("universe", self.cube_universe)
            store.store("configuration", conf)
            self.assertTrue(store.retrieve("configuration") is conf)
        with TestHDF5Store('r') as store:
            conf_r = store.retrieve("configuration")
        self.assertFalse(conf_r is conf)
        self.assertTrue(conf_r.is_equivalent(conf))
        self.assertTrue(is_valid(conf_r))

        conf = M.Configuration(self.infinite_universe,
                               IN.array(NR.normal(size=(40,3))),
                               None)
        self.assertTrue(is_valid(conf))
        with TestHDF5Store('w') as store:
            self.assertRaises(IOError,
                              lambda : store.store("configuration", conf))
            store.store("universe", self.infinite_universe)
            store.store("configuration", conf)
            self.assertTrue(store.retrieve("configuration") is conf)
        with TestHDF5Store('r') as store:
            conf_r = store.retrieve("configuration")
        self.assertFalse(conf_r is conf)
        self.assertTrue(conf_r.is_equivalent(conf))
        self.assertTrue(is_valid(conf_r))
Example #5
def sparse_uncorrelated(n_samples=100, n_features=10):
    """Function creating simulated data with sparse uncorrelated design

    cf. Celeux et al. 2009, Bayesian regularization in regression

    X = nr.normal(0, 1)
    Y = nr.normal(X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3])
    The number of features is at least 10.

    Parameters
    ----------
    n_samples : int
        number of samples (default is 100).
    n_features : int
        number of features (default is 10).

    Returns
    -------
    X : numpy array of shape (n_samples, n_features) for input samples
    y : numpy array of shape (n_samples,) for labels
    """
    X = nr.normal(loc=0, scale=1, size=(n_samples, n_features))
    y = nr.normal(loc=X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3],
                  scale=np.ones(n_samples))
    return X, y
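A hedged usage sketch: only the first four features carry signal, which a quick correlation check makes visible (the seed and variable names here are assumptions):

import numpy as np
import numpy.random as nr

nr.seed(42)
X, y = sparse_uncorrelated(n_samples=1000)
corr = [round(float(np.corrcoef(X[:, f], y)[0, 1]), 2) for f in range(6)]
print(corr)  # features 0-3 correlate with y, the rest hover near 0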
Example #6
def posterior(steps=10):
    # sequentially sample a Gaussian-process path at the points in zs
    zs = interval(steps)
    f_z_list = [normal(0, 1)]

    for cnt in xrange(1, steps):
        z_cnt = zs[cnt]

        # covariance between the new point and all previously sampled points
        k_x = np.asarray([[kernel(z_cnt, x) for x in zs[:cnt]]])
        # covariance matrix of the previously sampled points
        K = np.zeros((cnt, cnt))
        for i in xrange(cnt):
            for j in xrange(cnt):
                K[i][j] = kernel(zs[i], zs[j])

        f = np.transpose(np.asarray([f_z_list]))

        # conditional (posterior) mean and variance of the process at z_cnt
        k_x_Kinv = np.dot(k_x, np.linalg.inv(K))
        mu = np.dot(k_x_Kinv, f)
        var = 1 - np.dot(k_x_Kinv, np.transpose(k_x))

        f_z_list.append(normal(mu, var ** .5))

    return zs, f_z_list
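posterior() assumes interval and kernel helpers that are not shown. A plausible minimal pair, consistent with the unit prior variance implied by var = 1 - k K^-1 k^T above, might be:

import numpy as np

def interval(steps, lo=0.0, hi=1.0):
    # evenly spaced sample locations on [lo, hi]
    return np.linspace(lo, hi, steps)

def kernel(a, b, length_scale=0.2):
    # squared-exponential covariance with unit variance, so kernel(z, z) = 1
    return np.exp(-0.5 * ((a - b) / length_scale) ** 2)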
Example #7
    def updateParticles(self):
        # Update positions with velocity
        self.particles[:, 0:2] += self.particles[:, 4:6]
        #np.clip(self.particles[:,0], 0, self.bounds[0], self.particles[:,0])
        #np.clip(self.particles[:,1], 0, self.bounds[1], self.particles[:,1])

        # Add noise to w,h
        if self.SIGMA_size > 0.0001:
            self.particles[:, 2:4] += random.normal(0, self.SIGMA_size, (self.particles.shape[0], 2))
        #np.clip(self.particles[:,2], 1, self.bounds[0], self.particles[:,2])
        #np.clip(self.particles[:,3], 1, self.bounds[1], self.particles[:,3])
        # Add noise to velocities and clip
        self.particles[:, 4:6] += random.normal(
            0, self.SIGMA_velocity, (self.particles.shape[0], 2))
        #np.clip(self.particles[:,4:6], -MAX_velocity,MAX_velocity, self.particles[:,4:6])

        lb = [0, 0, 1, 1, -MAX_velocity, -MAX_velocity, 0]
        ub = [self.bounds[1],
              self.bounds[0],
              self.bounds[1],
              self.bounds[0],
              MAX_velocity,
              MAX_velocity,
              1]
        np.clip(self.particles, lb, ub, self.particles)
        if np.max(self.particles[:, 0]) > self.bounds[1]:
            print "Not clipped"
        self.iterations += 1
Example #8
def generate_swirl(N, L, zmin, zmax, cycle_rate, bias_x, bias_y, roll_num=0,
                   noise=0, fromcentre=True):
    """Generate N points on a 3D 'swirl' (a cyclic trajectory on a
    1 x L "swiss roll" manifold).

    cycle_rate is measured in radians.
    roll_num is the number of full rolls in the swiss roll.

    The origin is at the centre of the set unless fromcentre=False.
    """
    X = zeros((N, 3), float)
    r = 0.5
    assert L > 0
    assert zmax >= 1
    assert zmin > 0
    assert zmax > zmin
    zscale = (zmax - zmin)/L
    def roll(x):
        r = zmax - x*zscale
        rho = x/L*(roll_num*2*pi)
        return (r*cos(rho), r*sin(rho))
    theta = 0
    for i in xrange(N):
        theta += cycle_rate
        x = r*cos(theta)+bias_x*i
        x_rolled, z_rolled = roll(x)
        X[i,:] = array([x_rolled + random.normal(0,noise),
                        r*sin(theta) + bias_y*i + random.normal(0,noise),
                        z_rolled + random.normal(0,noise)])
    return X
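A hedged usage sketch, under Python 2 (where the xrange above is defined) and assuming the star-import style the function relies on for zeros, cos, sin, array, random and pi:

from numpy import pi

X = generate_swirl(N=500, L=1.0, zmin=0.1, zmax=2.0, cycle_rate=0.05,
                   bias_x=0.001, bias_y=0.001, roll_num=2, noise=0.01)
print(X.shape)  # -> (500, 3)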
Example #9
def makeNoise(x, y, name=""):
	#stuff for red/green mask
	npts = x * y
		
	random.seed()
	rednoise=normal(0,255,npts)  #mean, std dev, num pts
	red = rednoise.reshape(x, y)

	greennoise=normal(0,255,npts)
	green = greennoise.reshape(x, y)

	blue = zeros(npts)
	blue = blue.reshape(x, y)

	#print rednoise
	#print greennoise

	pixels = array([red, green, blue])

	#print pixels
	pixels = pixels.T

	arr = pixels.__array_interface__
	shape = arr['shape']
	ndim = len(shape)	
	print ndim
	

	img = Image.fromarray(pixels, mode="RGB")
	if name:
		img.save("%s.BMP" % name)
	else:
		img.save("mask.BMP")
Example #10
    def setup_method(self, method):

        import matplotlib as mpl
        from pandas.plotting._matplotlib import compat
        mpl.rcdefaults()

        self.mpl_ge_2_2_3 = compat._mpl_ge_2_2_3()
        self.mpl_ge_3_0_0 = compat._mpl_ge_3_0_0()
        self.mpl_ge_3_1_0 = compat._mpl_ge_3_1_0()

        self.bp_n_objects = 7
        self.polycollection_factor = 2
        self.default_figsize = (6.4, 4.8)
        self.default_tick_position = 'left'

        n = 100
        with tm.RNGContext(42):
            gender = np.random.choice(['Male', 'Female'], size=n)
            classroom = np.random.choice(['A', 'B', 'C'], size=n)

            self.hist_df = DataFrame({'gender': gender,
                                      'classroom': classroom,
                                      'height': random.normal(66, 4, size=n),
                                      'weight': random.normal(161, 32, size=n),
                                      'category': random.randint(4, size=n)})

        self.tdf = tm.makeTimeDataFrame()
        self.hexbin_df = DataFrame({"A": np.random.uniform(size=20),
                                    "B": np.random.uniform(size=20),
                                    "C": np.arange(20) + np.random.uniform(
                                        size=20)})
Example #11
 def setUp(self):
     # ensure reproducibility by seeding numpy's global RNG
     np.random.seed(1234)
     nsize = 100
     self._x = normal(0, 1, nsize)
     self._y = normal(0.5, 1, nsize)
     self._z = normal(0.1, 2, nsize)
Example #12
 def _em_one_pass(self, centered=None, numcmpt=1, thresh=1e-16, out=None):
     """
     With numcmpt = 1, computes the first principal component
     of the data. Otherwise computes an unnormalized, non-orthogonal
     spanning set for the first numcmpt principal components. Assumes
     rows are variables, columns are data points.
     """
     csize = (self.ndim, numcmpt)
     if out is not None:
         assert out.shape == csize
         comp = out
         comp[:] = random.normal(size=csize)
     else:
         comp = random.normal(size=csize)
     
     # Initialize 'old' array to infinity
     comp_old = np.empty(csize) + np.inf
     
     if centered is None:
         # Center the data with respect to the dataset mean
         centered = self._data - self._mean
         
     # Compensate for the shape of the data
     if not self._rowvar:
         centered = centered.T
     
     while linalg.norm(comp_old - comp, np.inf) > thresh:
         pinvc_times_data = np.dot(linalg.pinv(comp), centered)
         comp_old[:] = comp
         comp[:] = np.dot(centered, linalg.pinv(pinvc_times_data))
     
     # Normalize the eigenvectors we obtained.
     comp /= np.apply_along_axis(linalg.norm, 0, comp)[np.newaxis, :]
Example #13
    def _generate_individual(self):
        # parent selection: anti-proportional (roulette-wheel) selection
        # using the intervals between [0, 1]
        parents = []
        while(len(parents) < 2): 
            x = random.random()
            for individual, start, end in self._current_population:
                if(start <= x < end):
                    parents.append(individual) 

        child = 0.5 * (parents[0] + parents[1])

        # mutation of sigma
        self._global_sigma_mutation = exp(self._tau0 * normal(0, 1))
        child[SIGMA] = self._mat_mutate_sig(child[SIGMA])
        child[SIGMA] = self._global_sigma_mutation * child[SIGMA]

        if(self._infeasibles % self._pi == 0):
            self._delta *= self._theta
 
        # minimum step size
        child[SIGMA] = self._mat_reducer(child[SIGMA])

        # mutation of position with new step size
        X = normal(0, child[SIGMA], size=(1, self._d))
        child[POS] = child[POS] + (self._new_basis * matrix(X).T).T

        return child
Example #14
 def test_grouped_hist_layout(self):
     import matplotlib.pyplot as plt
     n = 100
     df = DataFrame({'gender': np.array(['Male',
                                         'Female'])[random.randint(2,
                                                                   size=n)],
                     'height': random.normal(66, 4, size=n),
                     'weight': random.normal(161, 32, size=n),
                     'category': random.randint(4, size=n)})
     self.assertRaises(ValueError, df.hist, column='weight', by=df.gender,
                       layout=(1, 1))
     self.assertRaises(ValueError, df.hist, column='weight', by=df.gender,
                       layout=(1,))
     self.assertRaises(ValueError, df.hist, column='height', by=df.category,
                       layout=(1, 3))
     self.assertRaises(ValueError, df.hist, column='height', by=df.category,
                       layout=(2, 1))
     self.assertEqual(df.hist(column='height', by=df.gender,
                              layout=(2, 1)).shape, (2,))
     plt.close('all')
     self.assertEqual(df.hist(column='height', by=df.category,
                              layout=(4, 1)).shape, (4,))
     plt.close('all')
     self.assertEqual(df.hist(column='height', by=df.category,
                              layout=(4, 2)).shape, (4, 2))
Example #15
    def test_axis_shared(self):
        # GH4089
        import matplotlib.pyplot as plt
        def tick_text(tl):
            return [x.get_text() for x in tl]

        n = 100
        df = DataFrame({'gender': np.array(['Male', 'Female'])[random.randint(2, size=n)],
                        'height': random.normal(66, 4, size=n),
                        'weight': random.normal(161, 32, size=n)})
        ax1, ax2 = df.hist(column='height', by=df.gender, sharex=True)
        self.assert_(ax1._shared_x_axes.joined(ax1, ax2))
        self.assertFalse(ax1._shared_y_axes.joined(ax1, ax2))
        self.assert_(ax2._shared_x_axes.joined(ax1, ax2))
        self.assertFalse(ax2._shared_y_axes.joined(ax1, ax2))
        plt.close('all')

        ax1, ax2 = df.hist(column='height', by=df.gender, sharey=True)
        self.assertFalse(ax1._shared_x_axes.joined(ax1, ax2))
        self.assert_(ax1._shared_y_axes.joined(ax1, ax2))
        self.assertFalse(ax2._shared_x_axes.joined(ax1, ax2))
        self.assert_(ax2._shared_y_axes.joined(ax1, ax2))
        plt.close('all')

        ax1, ax2 = df.hist(column='height', by=df.gender, sharex=True,
                           sharey=True)
        self.assert_(ax1._shared_x_axes.joined(ax1, ax2))
        self.assert_(ax1._shared_y_axes.joined(ax1, ax2))
        self.assert_(ax2._shared_x_axes.joined(ax1, ax2))
        self.assert_(ax2._shared_y_axes.joined(ax1, ax2))
Example #16
    def _generate_measured_outputs(self):

        # Add measurement noise to the kinematic data.
        if np.allclose(0.0, self.platform_accel_noise_std):
            self.measured['a'] = self.actual['a']
        else:
            self.measured['a'] = self.actual['a'] + normal(
                scale=self.platform_accel_noise_std,
                size=self.actual['a'].shape)

        x = self.actual['x']
        if np.allclose(0.0, self.coordinate_noise_std):
            coord_noise = np.zeros_like(x[:, :2])
        else:
            coord_noise = normal(scale=self.coordinate_noise_std,
                                 size=x[:, :2].shape)
        if np.allclose(0.0, self.speed_noise_std):
            speed_noise = np.zeros_like(x[:, 2:])
        else:
            speed_noise = normal(scale=self.speed_noise_std,
                                 size=x[:, 2:].shape)
        x_noise = np.hstack((coord_noise, speed_noise))
        self.measured['x'] = x + x_noise

        # Add measurement noise to the joint torques.
        u = self.actual['u']
        if np.allclose(0.0, self.torque_noise_std):
            u_meas = u
        else:
            u_meas = u + normal(scale=self.torque_noise_std, size=u.shape)
        self.measured['u'] = u_meas
Example #17
    def test_axis_shared(self):
        # GH4089
        import matplotlib.pyplot as plt

        def tick_text(tl):
            return [x.get_text() for x in tl]

        n = 100
        df = DataFrame(
            {
                "gender": np.array(["Male", "Female"])[random.randint(2, size=n)],
                "height": random.normal(66, 4, size=n),
                "weight": random.normal(161, 32, size=n),
            }
        )
        ax1, ax2 = df.hist(column="height", by=df.gender, sharex=True)
        self.assert_(ax1._shared_x_axes.joined(ax1, ax2))
        self.assertFalse(ax1._shared_y_axes.joined(ax1, ax2))
        self.assert_(ax2._shared_x_axes.joined(ax1, ax2))
        self.assertFalse(ax2._shared_y_axes.joined(ax1, ax2))
        plt.close("all")

        ax1, ax2 = df.hist(column="height", by=df.gender, sharey=True)
        self.assertFalse(ax1._shared_x_axes.joined(ax1, ax2))
        self.assert_(ax1._shared_y_axes.joined(ax1, ax2))
        self.assertFalse(ax2._shared_x_axes.joined(ax1, ax2))
        self.assert_(ax2._shared_y_axes.joined(ax1, ax2))
        plt.close("all")

        ax1, ax2 = df.hist(column="height", by=df.gender, sharex=True, sharey=True)
        self.assert_(ax1._shared_x_axes.joined(ax1, ax2))
        self.assert_(ax1._shared_y_axes.joined(ax1, ax2))
        self.assert_(ax2._shared_x_axes.joined(ax1, ax2))
        self.assert_(ax2._shared_y_axes.joined(ax1, ax2))
Example #18
def point_source_r(origin=(0.,0.,0.),direction=(0.,0.,0),span=pi/8,num_rays=100,wavelength=0.58929, label=""):
    """Point source, with a ranrom beam distribution
    
    This function creates a point source, where the rays are organized in a 
    random grid. 
    
    Parameters:
   
    
    *origin*
        Tuple with the coordinates of the central ray origin 
    *direction*
        Tuple with the rotation of the beam around the XYZ axes.
    *span*
        Tuple angular size of the ray pencil.
    *num_rays*
        Number of rays used to create the beam
    *label*
        String used to identify the ray source
    """

    ret_val=[]
    
    for n_ in range(num_rays):
        rx=normal(0,span)
        ry=normal(0,span)
        temp_ray=Ray(pos=(0,0,0),dir=(0,0,1),wavelength=wavelength, label=label).ch_coord_sys_inv((0,0,0),(rx,ry,0))
        ret_val.append(temp_ray.ch_coord_sys_inv(origin,direction))
    return ret_val
Example #19
    def __init__(self,
                 model,
                 tst_X_mean_shift=3,
                 tr_X_mean=0,
                 tr_X_sd=2,
                 tst_X_sd=1,
                 n_samples=200,
                 noise_sd=1,
                 tst_ratio=0.2):
        self.model = model

        n_tst = int(round(tst_ratio * n_samples))
        n_tr = n_samples - n_tst
        tr_X = rnd.normal(tr_X_mean, tr_X_sd, (n_tr, 1))
        tst_X_mean = tr_X_mean + tst_X_mean_shift
        tst_X = rnd.normal(tst_X_mean, tst_X_sd, (n_tst, 1))

        def model_noisy(x):
            return model(x) + rnd.normal(0, noise_sd, (x.shape[0], 1))

        self.tr = DataSet.from_X(tr_X, model_noisy)
        self.tst = DataSet.from_X(tst_X, model_noisy)
        self.X = np.vstack((tr_X, tst_X))
        self.X_range = [np.min(self.X), np.max(self.X)]

        self.y_ = self.model(self.X)
        self.y = np.vstack((self.tr.y, self.tst.y))

        colors = color.get_N_by_hue(3)
        self.color, self.tr.color, self.tst.color = colors
Example #20
    def test_nooutput(self):
        logging.debug('')
        logging.debug('test_nooutput')

        # Create cases with missing output 'dc.sum_z'.
        cases = []
        for i in range(2):
            inputs = [('driven.x', numpy_random.normal(size=4)),
                      ('driven.y', numpy_random.normal(size=10))]
            outputs = [('driven.rosen_suzuki', None),
                       ('driven.sum_z', None)]
            cases.append(Case(inputs, outputs))

        self.model.driver.iterator = ListCaseIterator(cases)
        results = ListCaseRecorder()
        self.model.driver.recorders = [results]
        self.model.driver.error_policy = 'RETRY'

        self.model.run()

        self.assertEqual(len(results), len(cases))
        msg = "driver: Exception getting case outputs: " \
            "driven: 'DrivenComponent' object has no attribute 'sum_z'"
        for case in results.cases:
            self.assertEqual(case.msg, msg)
Example #21
def GenerateTwoScreens(nfft, r0):
    """

      Generate phase screens with a Kolmogorov spectrum of atmospheric
      disturbances [c.f. Tatarski 1961,1971], such that the phase structure
      function is given by

           D(r) = <[phi(r')-phi(r'+r)]**2>
                = 6.88*(r/r0)**(5/3)

       where r0 is the Fried parameter.

       This version returns two screens, because it's easier to do it that
       way.
    """
    C = sqrt(0.0229*(float(nfft)/r0)**(5.0/3.0))
    # Generate a 2-d array populated with rsquared=xsquared+ysquared
    r = arange(nfft)
    r[nfft//2:] = nfft - r[nfft//2:]
    rsq = r**2
    rsq = add.outer(rsq,rsq)
    rsq[0, 0] = 1.0 # To solve pole at origin problem
    sample = random.normal(size=(nfft, nfft))+1j*random.normal(size=(nfft, nfft))
    sample *= C*rsq**(-11.0/12.0)
    result = fft.fft2(sample)
    return(result.real, result.imag)
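A hedged usage sketch (the parameter values are assumptions): generate two 256x256 Kolmogorov screens with a Fried parameter of 32 pixels, supplying the numpy names the function body expects in scope:

from numpy import sqrt, arange, add
from numpy import random, fft

s1, s2 = GenerateTwoScreens(256, 32.0)
print(s1.shape, s1.std(), s2.std())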
Example #22
    def test_ave_snr_noise(self):
        with self.context:
            #Test that the average snr in noise is 2
            from numpy.random import normal

            noise = normal(0.0,2,4096*64)
            nplus= TimeSeries(noise,dtype=float32,delta_t=1.0/4096)
            ntilde = make_frequency_series(nplus) / nplus.delta_t
            # Calculate a Faux psd for normalization, replace with better algorithm
            psd = (ntilde).squared_norm()  / float(len(nplus)) * nplus.delta_t *2.0

            snr = matched_filter(self.filt, nplus, psd=psd)

            ave = snr.squared_norm().sum() /len(snr)
            self.assertAlmostEqual(2,ave,places=5)

            noise = normal(0.0,2,4096*64)
            nplus= TimeSeries(noise,dtype=float64,delta_t=1.0/4096)
            ntilde = make_frequency_series(nplus) / nplus.delta_t
            # Calculate a Faux psd for normalization, replace with better algorithm
            psd = (ntilde).squared_norm()  / float(len(nplus)) * nplus.delta_t *2.0

            snr = matched_filter(self.filtD,nplus,psd=psd)
            ave = snr.squared_norm().sum() /len(snr)
            self.assertAlmostEqual(2,ave,places=5)
Example #23
    def sample_topics(self):
        # user topics
        for i in xrange(self.N):
            for k in xrange(self.K_U):
                z_sum = 0
                mean_sum = 0
                for j in self.I_U[i]:
                    z_sum += self.z_U[i,j]
                    resid = X[(i,j)] - self.chi_0 - self.d[j,k] - np.dot(self.U[i,:], self.V[:,j])
                    mean_sum += self.z_U[i,j]*resid
                std = 1./(1./self.sigmaSqd_0 + z_sum/self.Sqd)
                mean = (self.c_0/self.sigmaSqd_0 + mean_sum/self.Sqd)*std
                self.c[i,k] = normal(mean, std)
        # item topics
        for j in xrange(self.M):
            for k in xrange(self.K_V):
                z_sum = 0
                mean_sum = 0
                for i in self.I_V[j]:
                    z_sum += self.z_V[i,j]
                    resid = X[(i,j)] - self.chi_0 - self.c[i,k] - np.dot(self.U[i,:], self.V[:,j])
                    mean_sum += self.z_V[i,j]*resid
                std = 1./(1./self.sigmaSqd_0 + z_sum/self.Sqd)
                mean = (self.c_0/self.sigmaSqd_0 + mean_sum/self.Sqd)*std
                self.d[j,k] = normal(mean, std)
Example #24
 def test_heaviside(self):
     """Test yaplf heaviside()."""
     from numpy import random
     f = HeavisideActivationFunction()
     self.assertEqual(f.compute(random.normal()**2), 1)
     self.assertEqual(f.compute(-1*random.normal()**2), 0)
     self.assertEqual(f.compute(0), 1)
Example #25
def generate_veq(R=1.3, dR=0.1, Prot=6, dProt=0.1, nsamples=10000, plot=False,
                 R_samples=None, Prot_samples=None):
    """ Returns the mean and std equatorial velocity given R,dR,Prot,dProt

    Assumes all distributions are normal.  This will be used mainly for
    testing purposes; I can use MC-generated v_eq distributions when we go for real.
    """
    if R_samples is None:
        R_samples = R*(1 + rand.normal(size=nsamples)*dR)
    else:
        inds = rand.randint(len(R_samples),size=nsamples)
        R_samples = R_samples[inds]

    if Prot_samples is None:
        Prot_samples = Prot*(1 + rand.normal(size=nsamples)*dProt)
    else:
        inds = rand.randint(len(Prot_samples),size=nsamples)
        Prot_samples = Prot_samples[inds]

    veq_samples = 2*np.pi*R_samples*RSUN/(Prot_samples*DAY)/1e5
    
    if plot:
        plt.hist(veq_samples,histtype='step',color='k',bins=50,normed=True)
        d = stats.norm(scale=veq_samples.std(),loc=veq_samples.mean())
        vs = np.linspace(veq_samples.min(), veq_samples.max(), 10000)
        plt.plot(vs,d.pdf(vs),'r')
    
    return veq_samples.mean(),veq_samples.std()
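A hedged usage sketch; RSUN (solar radius in cm) and DAY (seconds per day) are module constants the function assumes, so plausible cgs values are supplied here:

import numpy as np
import numpy.random as rand

RSUN = 6.957e10   # solar radius [cm], an assumed constant
DAY = 86400.0     # seconds per day, an assumed constant

mean_veq, std_veq = generate_veq(R=1.3, dR=0.1, Prot=6, dProt=0.1,
                                 nsamples=10000)
print(mean_veq, std_veq)  # equatorial velocity in km/s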
Example #26
 def draw_connection(self, p, w, noise=0):
     """
     Decide whether a connection is drawn, given the probability p.
     w : the mean weight for the type of connection to be drawn.
     noise : optional; the sigma of the normal distribution the weight is
     drawn from, e.g. pass noise = 0.1 * w for a relative sigma of 10%
     """
     if (p > rnd.random()):
         if (noise != 0 and noise > 0):
             weight = rnd.normal(w, noise)
             # check if sign of weight changed
             # if so, return 0
             if (np.sign(weight) != np.sign(w)):
                 return 0
             return weight
         elif (noise < 0): # stupid user, noise should be > 0
             print "WARNING, negative noise given to draw_connection(p, w, noise)!"
             noise *= (-1.0)
             weight = rnd.normal(w, noise)
             if (np.sign(weight) != np.sign(w)):
                 return 0
             return weight
         elif (noise == 0):
             return w
     return 0
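A hedged usage sketch, with conn standing in as a hypothetical instance of the class that defines this method:

import numpy as np
import numpy.random as rnd

# conn is a hypothetical object exposing draw_connection()
weight = conn.draw_connection(p=0.8, w=0.005, noise=0.1 * 0.005)
print(weight)  # a weight near 0.005, or 0 if no connection was drawn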
Example #27
    def __init__(self, unparsed):
        super().__init__()

        args = self.do_cmdline(unparsed)

        # plotter
        betaplot = BetaPlot(args)

        # get the complex-valued Zernike polynomials object
        czern = betaplot.psfplot.cpsf.czern

        # beta (diffraction-limited), N_beta = czern.nk
        beta = np.zeros(czern.nk, complex)
        beta[0] = 1.0

        # set the beta coefficients randomly
        if args.random:
            beta = normal(size=beta.size) + 1j*normal(size=beta.size)
            beta = (args.rms/norm(beta))*beta  # sort of
            beta[0] = 1

        self.rms = args.rms
        self.beta = beta
        self.betaplot = betaplot

        # make gui
        self.make_gui()
Example #28
 def r(self, sid):
     if self.exp == 0:
         self.hist = [np.zeros(0)]
         return
     l = self.mlat + sid * self.prec
     r = np.unique(npr.normal(l, self.sd, self.exp).astype(np.int64))
     self.hist = [np.unique(npr.normal(l, self.sd, self.exp).astype(np.int64))]
Example #29
def sgd(alpha=0.1, eta0=0.01, power_t=0.25, epochs=3, latent_dimensions=10):
    """ stochastic gradient descent """

    n_users = df['user_id'].nunique()
    n_items = df['hotel_cluster'].nunique()

    U = DataFrame(normal(size=(latent_dimensions, n_users)),
                  columns=df['user_id'].unique())

    V = DataFrame(normal(size=(latent_dimensions, n_items)),
                  columns=df['hotel_cluster'].unique())

    t = 1.0
    index = df.index.values
    random.shuffle(index)

    for epoch in xrange(epochs):

        for count, pos in enumerate(index):

            i, j, rij = df.ix[pos] 
            eta =  eta0 / (t ** power_t)

            rhat = dot(U[i], V[j])

            U[i] = U[i] - eta * ((rhat - rij) * V[j] + alpha * U[i])
            V[j] = V[j] - eta * ((rhat - rij) * U[i] + alpha * V[j])

            if isnan(U.values).any() or isnan(V.values).any():
                raise ValueError('overflow')
            
            t += 1

    return U, V
Example #30
def findOptimalRegulizers(trainingSet, trainingLabels, testSet, testLabels, conv, maxIter):
  logL1 = 0
  logL2 = 0
  currentLoss = float("inf")
  numRejects = 0
  while numRejects < 10:
    changeL1 = (R.normal() > 0)
    newLogL1 = logL1
    newLogL2 = logL2
    if (changeL1): newLogL1 = logL1 + R.normal()
    else: newLogL2 = logL2 + R.normal()
    
    L1 = math.exp(newLogL1)
    L2 = math.exp(newLogL2)
    
    params = batchCompute(trainingSet, trainingLabels, L1, L2, conv, maxIter, False)
    avgLoss = computeLossForDataset(testSet, testLabels, params)
    
    accept = avgLoss < currentLoss
    logging.debug("New " + ("L1" if changeL1 else "L2") + ": L1 = " + str(L1) + ", L2 = " + str(L2) + ", loss: " + str(avgLoss) + ", " + ("ACCEPT" if accept else "REJECT"))
    if (accept):
      currentLoss = avgLoss
      logL1 = newLogL1
      logL2 = newLogL2
      numRejects = 0
    else:
      numRejects += 1
  return (math.exp(logL1), math.exp(logL2))
Example #31
import src.good_radius as gr
import src.good_center as gc
from numpy.random import randint, normal
import numpy as np
import matplotlib.pyplot as plt
import time
from numpy import round
from scipy.spatial.distance import euclidean

sample_number, k, r = 2**12, 2, 4
center = 100
data_2d = round(normal(center, 50, (sample_number, 2)))
domain_end = max(abs(np.min(data_2d)), np.max(data_2d))

start_time = time.time()

domain, desired_amount_of_points = (domain_end, 1), 2000
approximation, failure, eps, delta, promise = 0.1, 0.1, 0.5, 2**-20, 100
radius = gr.find(data_2d, domain, desired_amount_of_points, failure, eps)
print "the radius: %d" % radius
middle_time = time.time()
print "good-radius run-time: %.2f seconds" % (middle_time - start_time)
center = gc.find(data_2d, sample_number, 2, radius, desired_amount_of_points,
                 failure, approximation, eps, delta)
print "the center: %s" % str(center)
print "good-center run-time: %.2f seconds" % (time.time() - middle_time)
ball = [p for p in data_2d if euclidean(p, center) <= radius]
print "number of points in the resulting ball: %d" % len(ball)

zipped_data = zip(*data_2d)
plt.scatter(*zipped_data, c='b')
Example #32
from numpy.random import normal
from numpy import arange
from scipy.stats import gaussian_kde
from matplotlib.pyplot import figure, show


def violin_plot(ax, data, pos, bp=False):
    '''
    create violin plots on an axis
    '''
    dist = max(pos) - min(pos)
    w = min(0.15 * max(dist, 1.0), 0.5)
    for d, p in zip(data, pos):
        k = gaussian_kde(d)  #calculates the kernel density
        m = k.dataset.min()  #lower bound of violin
        M = k.dataset.max()  #upper bound of violin
        x = arange(m, M, (M - m) / 100.)  # support for violin
        v = k.evaluate(x)  #violin profile (density curve)
        v = v / v.max() * w  #scaling the violin to the available space
        ax.fill_betweenx(x, p, v + p, facecolor='y', alpha=0.3)
        ax.fill_betweenx(x, p, -v + p, facecolor='y', alpha=0.3)
    if bp:
        ax.boxplot(data, notch=1, positions=pos, vert=1)


#tests
if __name__ == "__main__":
    pos = range(5)
    data = [normal(size=100) for i in pos]
    fig = figure()
    ax = fig.add_subplot(111)
    violin_plot(ax, data, pos, bp=1)
    show()
Example #33
 def random(self):
     """Draw random value from QuadPotential."""
     return normal(size=self.s.shape) * self.inv_s
Example #34
 def random(self):
     """Draw random value from QuadPotential."""
     vals = normal(size=self._n).astype(self.dtype)
     return self._inv_stds * vals
Example #35
eta_0 = 7*10**-2

N = 1000


def measure_mc(W, WI):
    return memory_capacity(W, WI,
                           memory_max=int(1.1*WI.shape[0]),
                           iterations=1200,
                           iterations_coef_measure=1000,
                           use_input=False,
                           target_later=True,
                           calc_lyapunov=False)

mc = np.zeros([INSTANCES, ORTHOPROCESS_ITERATIONS + 1])

for inst in range(INSTANCES):
    print(inst)
    W = random.normal(0, 1, [N, N])
    W = W * (rho / np.max(np.abs(np.linalg.eig(W)[0])))
    WI = random.uniform(-tau, tau, N)

    mc[inst, 0], _ = measure_mc(W, WI)

    eta = eta_0
    for ix in range(ORTHOPROCESS_ITERATIONS):
        W = learn_orthonormal(W, eta)
        eta = eta * 0.9
        mc[inst, ix + 1], _ = measure_mc(W, WI)
        np.save('mc', mc)
Example #36
 def normal_random(self):
     return rn.normal(self.mu, self.sigma)
Example #37
def run_viterbi_test():
    """A simple tester for Viterbi algorithm.

    This function generates a bunch of random emission and transition scores,
    and computes the best sequence by performing a brute force search over all
    possible sequences and scoring them. It then runs Viterbi code to see what
    is the score and sequence returned by it.

    Compares both the best sequence and its score to make sure Viterbi is correct.
    """
    from viterbi import run_viterbi
    from numpy import random
    import numpy as np
    from itertools import product

    maxN = 7  # maximum length of a sentence (min is 1)
    maxL = 4  # maximum number of labels (min is 2)
    num_tests = 1000  # number of sentences to generate
    random.seed(0)
    tolerance = 1e-5  # how close do the scores have to be?

    emission_var = 1.0  # variance of the gaussian generating emission scores
    trans_var = 1.0  # variance of the gaussian generating transition scores

    passed_y = 0  # how many times the correct sequence was predicted
    passed_s = 0  # how many times the correct score was returned

    for t in range(num_tests):
        N = random.randint(1, maxN + 1)
        L = random.randint(2, maxL + 1)

        # Generate the scores
        emission_scores = random.normal(0.0, emission_var, (N, L))
        trans_scores = random.normal(0.0, trans_var, (L, L))
        start_scores = random.normal(0.0, trans_var, L)
        end_scores = random.normal(0.0, trans_var, L)

        # run viterbi
        (viterbi_s, viterbi_y) = run_viterbi(emission_scores, trans_scores,
                                             start_scores, end_scores)
        # print "Viterbi", viterbi_s, viterbi_y

        # compute the best sequence and score
        best_y = []
        best_s = -np.inf
        for y in product(range(L), repeat=N):  # all possible ys
            # compute its score
            score = 0.0
            score += start_scores[y[0]]
            for i in range(N - 1):
                score += trans_scores[y[i], y[i + 1]]
                score += emission_scores[i, y[i]]
            score += emission_scores[N - 1, y[N - 1]]
            score += end_scores[y[N - 1]]
            # update the best
            if score > best_s:
                best_s = score
                best_y = list(y)
        # print "Brute", best_s, best_y

        # mismatch if any label prediction doesn't match
        match_y = True
        for i in range(len(best_y)):
            if viterbi_y[i] != best_y[i]:
                match_y = False
        if match_y:
            passed_y += 1
        # the scores should also be very close
        if abs(viterbi_s - best_s) < tolerance:
            passed_s += 1

    print("Passed(y)", passed_y * 100.0 / num_tests)
    print("Passed(s)", passed_s * 100.0 / num_tests)
    assert passed_y == num_tests
    assert passed_s == num_tests
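The run_viterbi being tested is not included here; a compact dynamic-programming implementation that matches the brute-force scoring above would look like this sketch:

import numpy as np

def run_viterbi(emission_scores, trans_scores, start_scores, end_scores):
    """Return (best_score, best_label_sequence) under additive scores.

    emission_scores: (N, L); trans_scores: (L, L), indexed [from, to];
    start_scores and end_scores: (L,).
    """
    N, L = emission_scores.shape
    dp = np.empty((N, L))          # best score of any prefix ending in label l
    back = np.zeros((N, L), int)   # argmax backpointers
    dp[0] = start_scores + emission_scores[0]
    for i in range(1, N):
        for l in range(L):
            cand = dp[i - 1] + trans_scores[:, l]
            back[i, l] = np.argmax(cand)
            dp[i, l] = cand[back[i, l]] + emission_scores[i, l]
    final = dp[N - 1] + end_scores
    best = int(np.argmax(final))
    seq = [best]
    for i in range(N - 1, 0, -1):  # walk the backpointers to recover labels
        seq.append(int(back[i, seq[-1]]))
    seq.reverse()
    return float(final[best]), seq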
Example #38
def apply_gaussian_noise(motor_position):
    if motor_position==0: return 0.
    return motor_position + random.normal(0,power(absolute(motor_position),5)/400)
Example #39
            accel_bias, accel_cov, bias_drift_covariance = 0.0) 
    magnetometer = NoisyDeviceDecorator(ReferenceVectorGauge(np.array([1, 0, 0])), 
            mag_bias, mag_cov, bias_drift_covariance = 0.0) 
    real_measurement = np.array([0.0, 0.0, 0.0])
    time_delta = 0.005
    true_orientation = model.Model(Quaternion(axis = [1, 0, 0], angle=0))
    dead_reckoning_estimate = model.Model(Quaternion(axis = [1, 0, 0], angle=0))
    true_rotations = []
    dead_reckoning_rotation_estimates = []
    filtered_rotation_estimates = []

    kalman = kalman.Kalman(true_orientation.orientation, 1.0, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 1.0, 0.1)
    for i in range(4000):

        if (i % 10 == 0):
          real_measurement = npr.normal(0.0, 1.0, 3)

        gyro_measurement = gyro.measure(time_delta, real_measurement)

        dead_reckoning_estimate.update(time_delta, gyro_measurement)
        dead_reckoning_rotation_estimates.append(dead_reckoning_estimate.orientation)

        true_orientation.update(time_delta, real_measurement)
        true_rotations.append(true_orientation.orientation)
        
        measured_acc = accelerometer.measure(time_delta, true_orientation.orientation)
        measured_mag = magnetometer.measure(time_delta, true_orientation.orientation)

        kalman.update(gyro_measurement, measured_acc, measured_mag, time_delta)
        filtered_rotation_estimates.append(kalman.estimate)
Example #40
 def sample_values(self, mu_min, mu_max, sigma_min, sigma_max, size):
     ''' Get a sampling of values from a normal distribution '''
     mu = random.uniform(mu_min, mu_max)
     sigma = random.uniform(sigma_min, sigma_max)
     return list(random.normal(mu, sigma, size))
Example #41
        list(reject_objs))

    #Create containers
    metal_matrix = empty((len(objects), MC_iterations))
    Y_matrix = empty((len(objects), MC_iterations))
    m_vector, n_vector = empty(MC_iterations), empty(MC_iterations)
    m_vectorlmfit, n_vectorlmfit = empty(MC_iterations), empty(MC_iterations)
    lmfit_matrix = empty([2, MC_iterations])
    lmfit_error = empty([2, MC_iterations])
    curvefit_matrix = empty([2, MC_iterations])
    kapteyn_matrix = empty([2, MC_iterations])

    #Generate the distributions
    for j in range(len(objects)):
        metal_matrix[j, :] = random.normal(x[j].nominal_value,
                                           x[j].std_dev,
                                           size=MC_iterations)
        Y_matrix[j, :] = random.normal(y[j].nominal_value,
                                       y[j].std_dev,
                                       size=MC_iterations)

    #Run the fits
    for k in range(MC_iterations):
        x_i = metal_matrix[:, k]
        y_i = Y_matrix[:, k]

        m, n, r_value, p_value, std_err = stats.linregress(x_i, y_i)
        m_vector[k], n_vector[k] = m, n

        #Lmfit
        result_lmfit = lmod.fit(y_i, x=x_i, m=0.005, n=0.24709)
Example #42
def slembixy(n):
    x = 1 + 5*npr.random(n)
    e = 0.7*npr.normal(size=n)
    y = 1.5*x + 0.3 + e
    return x, y
Example #43
    y_samples = list()  # generated observations from model
    # model parameters
    pred_var = 1
    obs_var = 1
    
    # KF initial distribution
    mean = 1
    var = 1
    
    # generate initial samples for PF
    particles = list()
    particle_weights = list()
    normalized_weights = list()
    N = 10000  # number of particles
    for particle in range(1, N+1):
        particles.append(nprand.normal(loc=1, scale=1))
        # initialize with uniform weight
        particle_weights.append(1.0 / N)
    
    print(len(particles))
    print(particles)
    print(particle_weights)
    for idx in range(1, 50 + 1):
        # Generate observations
        x = pxx(x, np.sqrt(pred_var))
        y = pyx(x, np.sqrt(obs_var))
        y_samples.append(y)

        # Kalman filter
        mean = update_mean(y, mean, var, pred_var, obs_var)
        var = update_var(var, pred_var, obs_var)
Example #44
 def __init__(self):
     self.age = random.randint(0, 35)
     self.gender = features.gender[random.randint(0, 2)]
     self.height = random.normal(loc=160, scale=7)
     if self.gender == 'female':
         self.weight = random.normal(loc=85, scale=12.5)
     else:
         self.weight = random.normal(loc=135, scale=32.5)
     self.habitant_area = features.habitant_area[random.randint(0, 3)]
     self.habitant_height = random.normal(loc=2000, scale=800)
     self.born_weight = random.normal(loc=325, scale=38)
     self.parent_alive = random.choice(features.parent_alive,
                                       1,
                                       p=[0.6, 0.4])
     self.claw_size = random.normal(loc=4, scale=1)
     self.injury = random.choice(features.injury, 1, p=[0.1, 0.2, 0.4, 0.3])
     self.sickness = random.choice(features.sickness,
                                   1,
                                   p=[0.1, 0.2, 0.4, 0.3])
     tmp = random.normal(loc=5, scale=2)
     if tmp < 1.5:
         self.olfactory_sensation = 'bad'
     elif tmp > 8.5:
         self.olfactory_sensation = 'good'
     else:
         self.olfactory_sensation = 'average'
     self.tail_length = random.normal(loc=8, scale=1)
     self.ear_size = random.normal(loc=10, scale=1)
     self.head_size = random.normal(loc=50, scale=5)
     self.front_paw = random.normal(loc=12, scale=1.5)
     self.rear_paw = random.normal(loc=20, scale=1.5)
     self.fur_length = random.normal(loc=8, scale=2)
     self.fur_color = features.fur_color[random.randint(0, 2)]
     self.shoulder_width = random.normal(loc=65, scale=2)
     self.food_tend = features.food_tend[random.randint(0, 3)]
     self.generate_label()
Example #45
    x = sqrt((xr - param.Lx * x0)**2 + (yr - param.Ly * y0)**2)
    y = 4. / (1 + exp(x / (sigma / 2)))
    #y = cos(x/sigma*pi/2)
    #y[x>sigma]=0.
    return y


# 2/ set an initial shear
sigma = 0.05
yy = (yr / param.Ly - 0.5)

# comment/uncomment choice A vs. B

# choice A/ corresponding to a gaussian shaped jet
#vor[:] = (yy/sigma)*exp( - (yy/sigma)**2/2 )

# choice B/ or a cosine shaped jet
vor[:] = sin(yy * pi / sigma)
vor[abs(yy / sigma) > 1] = 0.

# add noise to trigger the instability
noise = random.normal(size=shape(yr)) * grid.msk
noise -= grid.domain_integration(noise) * grid.msk / grid.area
grid.fill_halo(noise)

vor += noise * 1e-3

model.set_psi_from_vorticity()

f2d.loop()
Example #46
    def double_population(self, pop: dict, epoch: int):
        ## Sort population based on fronts
        fronts, max_rank = self.fast_non_dominated_sort(pop)
        pop_new = {}
        for front in fronts:
            for stt in front:
                idx = list(pop.keys())[stt]
                _idx = uuid4().hex
                pop_new[_idx] = deepcopy(pop[idx])
                pop_new[_idx][self.ID_IDX] = _idx
        n1, n2 = len(fronts[0]), len(fronts[-1])
        if n1 == 0 or n1 == self.pop_size:
            n1 = int(self.PD * self.pop_size)
        if n2 == 0 or n2 == self.pop_size:
            n2 = int(self.SD * self.pop_size)

        r2 = uniform()  # R2 in [0, 1], the alarm value, random value
        # Using equation (3) update the sparrow’s location;
        for i in range(0, n1):
            while True:
                if r2 < self.ST:
                    x_new = pop_new[list(
                        pop_new.keys())[i]][self.ID_POS] * exp(
                            (epoch + 1) / self.epoch)
                else:
                    x_new = pop_new[list(pop_new.keys())[i]][
                        self.ID_POS] + normal() * ones(self.problem["shape"])
                x_new = self.amend_position_random(x_new)
                schedule = matrix_to_schedule(self.problem, x_new)
                if schedule.is_valid():
                    fit = self.Fit.fitness(schedule)
                    idx = uuid4().hex
                    break
            child = [idx, x_new, fit]
            pop_new[idx] = child

        idx_best, idx_worst = self.get_current_best_worst(pop_new)
        current_best, current_worst = pop_new[list(
            pop_new.keys())[idx_best]], pop_new[list(
                pop_new.keys())[idx_worst]]

        # Using equation (4) update the sparrow’s location;
        for i in range(n1, self.pop_size):
            while True:
                if i > int(self.pop_size / 2):
                    x_new = normal() * exp(
                        (current_worst[self.ID_POS] -
                         pop_new[list(pop_new.keys())[i]][self.ID_POS]) /
                        (i + 1)**2)
                else:
                    x_new = current_best[self.ID_POS] + abs(
                        pop_new[list(pop_new.keys())[i]][self.ID_POS] -
                        current_best[self.ID_POS]) * normal()
                x_new = self.amend_position_random(x_new)
                schedule = matrix_to_schedule(self.problem, x_new)
                if schedule.is_valid():
                    fit = self.Fit.fitness(schedule)
                    idx = uuid4().hex
                    break
            child = [idx, x_new, fit]
            pop_new[idx] = child

        #  Using equation (5) update the sparrow’s location;
        n2_list = choice(list(range(0, self.pop_size)), n2, replace=False)
        for i in n2_list:
            while True:
                child = pop_new[list(pop_new.keys())[i]]
                if i in fronts[0]:
                    x_new = current_best[self.ID_POS] + normal() * abs(
                        child[self.ID_POS] - current_best[self.ID_POS])
                else:
                    dist = sum(
                        sqrt((child[self.ID_FIT] -
                              current_worst[self.ID_FIT])**2))
                    x_new = child[self.ID_POS] + uniform(-1, 1) * (
                        abs(child[self.ID_POS] - current_best[self.ID_POS]) /
                        (dist + self.EPSILON))
                x_new = self.amend_position_random(x_new)
                schedule = matrix_to_schedule(self.problem, x_new)
                if schedule.is_valid():
                    fit = self.Fit.fitness(schedule)
                    idx = uuid4().hex
                    break
            child = [idx, x_new, fit]
            pop_new[idx] = child

        return {**pop, **pop_new}
Example #47
                l=2*z*n_y
                if l<0:
                    z*=-1
                elif rnd.rand()<np.exp(P*-l):
                    z*=-1
                Q[x,y]=z
    return Q

#Now I need to define the magnetization function. This gives the Magnetization of any given spin state:
def Magnetization(Q):
    magnetization=np.sum(Q)
    return magnetization

# Centre the temperature points on temp_mid, where we expect disorder to set in,
# and draw random points around it, keeping those in the range 0.5 < T < 5
temp_mid = 2.25
Temp=rnd.normal(temp_mid,0.5,temp_points)
Temp=Temp[(Temp>0.5)&(Temp<5)]   
temp_points=np.size(Temp)

# Define zero arrays for each of the 4 physical quantities I wish to find:
Magnetization=np.zeros(temp_points)
SpecificHeat=np.zeros(temp_points)  
Energy=np.zeros(temp_points)
Susceptibility=np.zeros(temp_points)

# Now I need to implement each function above by creating an Ising Function with them:
for i in range(len(Temp)):
    E_a=M_a=0
    E_b=M_b=0
    Q=startspin(num)
    init_Temp_a=1/Temp[i] 
Example #48
"""Simple use of lmfit to fit data."""
# pylint: disable=invalid-name
from Stoner import Data
from numpy import linspace, exp, random

# Make some data
x = linspace(0, 10.0, 101)
y = 2 + 4 * exp(-x / 1.7) + random.normal(scale=0.2, size=101)

d = Data(x, y, column_headers=["Time", "Signal"], setas="xy")

# Do the fitting and plot the result
func = lambda x, A, B, C: A + B * exp(-x / C)
fit = d.lmfit(
    func,
    result=True,
    header="Fit",
    A=1,
    B=1,
    C=1,
    residuals=True,
    output="report",
)

# Reset labels
d.labels = []

# Make nice two panel plot layout
ax = d.subplot2grid((3, 1), (2, 0))
d.setas = "x..y"
d.plot(fmt="g+")
Example #49
import cv2
import numpy as np
from numpy.random import normal
from matplotlib import pyplot as plt

path = '/Users/RohanSaxena/Documents/projects/cv'
image = cv2.cvtColor(cv2.imread(path + '/pic.jpeg'), cv2.COLOR_BGR2RGB)

row = image[60, :, 0]

plt.subplot(2, 1, 1)
plt.plot(row)
plt.xlabel('Column')
plt.ylabel('Pixel')
plt.title('Original row')

noise = normal(scale=10, size=np.shape(row))

noisy_row = row + noise
plt.subplot(2, 1, 2)
plt.plot(noisy_row)
plt.xlabel('Column')
plt.ylabel('Pixel')
plt.title('Noisy row')

plt.tight_layout()
plt.show()
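One caveat (an addition, not in the original): the noisy values can leave the valid 0-255 pixel range, so clip and cast before writing the row back into a uint8 image:

noisy_u8 = np.clip(noisy_row, 0, 255).astype(np.uint8)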
Example #50
from numpy import hstack
from numpy.random import normal
import numpy as np

# Generate two lists of values with different scale and loc using normal distribution
X1 = np.round(normal(loc=10, scale=2.2, size=6), 2)
X2 = np.round(normal(loc=70, scale=2.5, size=6), 2)
X = hstack((X1, X2))
X = X.reshape((len(X), 1))


#Compute the likelihood that each point belongs to a group
def likelihoodMeasureByGaussian(sd, m, X):
    p_xbym = np.zeros(len(X))
    i = 0
    for xi in X:
        p_xbym[i] = (1 / np.sqrt(2 * np.pi * sd * sd)) * (np.exp(-(
            (xi - m)**2 / (2 * sd * sd))))
        i = i + 1
    return p_xbym


#Compute the posterior probability
def posteriorProbability(X, likelihoodMForA, likelihoodMForB, priorProbForA,
                         priorProbForB):

    posteriorProbB = np.zeros(len(X))
    for i in range(len(X)):
        posteriorProbB[i] = (likelihoodMForB[i] * priorProbForB) / (
            likelihoodMForB[i] * priorProbForB +
            likelihoodMForA[i] * priorProbForA)
Example #51
def random_step_size():
    #if negative returns 0
    #1 number from a norm dist with mean 2 and std dev of 4
    return max([0, round(float(normal(2, 4, 1)))])
Example #52
 def __call__(self):
     return nr.normal(scale=self.s)
Example #53
class Root:
    """ This is root of all Algorithms """

    ID_MIN_PROB = 0  # min problem
    ID_MAX_PROB = -1  # max problem

    ID_POS = 0  # Position
    ID_FIT = 1  # Fitness

    EPSILON = 10E-10

    def __init__(self,
                 obj_func=None,
                 lb=None,
                 ub=None,
                 problem_size=50,
                 batch_size=10,
                 verbose=True):
        """
        Parameters
        ----------
        obj_func : function
        lb : list
        ub : list
        problem_size : int, optional
        batch_size: int, optional
        verbose : bool, optional
        """
        self.obj_func = obj_func
        if (lb is None) or (ub is None):
            if problem_size is None:
                print("Problem size must be an int number")
                exit(0)
            elif problem_size <= 0:
                print("Problem size must > 0")
                exit(0)
            else:
                self.problem_size = int(ceil(problem_size))
                self.lb = -1 * ones(problem_size)
                self.ub = 1 * ones(problem_size)
        else:
            if isinstance(lb, list) and isinstance(
                    ub, list) and not (problem_size is None):
                if (len(lb) == len(ub)) and (problem_size > 0):
                    if len(lb) == 1:
                        self.problem_size = problem_size
                        self.lb = lb[0] * ones(problem_size)
                        self.ub = ub[0] * ones(problem_size)
                    else:
                        self.problem_size = len(lb)
                        self.lb = array(lb)
                        self.ub = array(ub)
                else:
                    print(
                        "Lower bound and Upper bound need to be same length. Problem size must > 0"
                    )
                    exit(0)
            else:
                print(
                    "Lower bound and Upper bound need to be a list. Problem size is an int number"
                )
                exit(0)
        self.batch_size = batch_size
        self.verbose = verbose
        self.epoch, self.pop_size = None, None
        self.solution, self.loss_train = None, []

    def create_solution(self, minmax=0):
        """ Return the position position with 2 element: position of position and fitness of position

        Parameters
        ----------
        minmax
            0 - minimum problem, else - maximum problem

        """
        position = uniform(self.lb, self.ub)
        fitness = self.get_fitness_position(position=position, minmax=minmax)
        return [position, fitness]

    def get_fitness_position(self, position=None, minmax=0):
        """     Assumption that objective function always return the original value
        :param position: 1-D numpy array
        :param minmax: 0- min problem, 1 - max problem
        :return:
        """
        return self.obj_func(position) if minmax == 0 else 1.0 / (
            self.obj_func(position) + self.EPSILON)

    def get_fitness_solution(self, solution=None, minmax=0):
        return self.get_fitness_position(solution[self.ID_POS], minmax)

    def get_global_best_solution(self, pop=None, id_fit=None, id_best=None):
        """ Sort a copy of population and return the copy of the best position """
        sorted_pop = sorted(pop, key=lambda temp: temp[id_fit])
        return deepcopy(sorted_pop[id_best])

    def get_global_best_global_worst_solution(self,
                                              pop=None,
                                              id_fit=None,
                                              id_best=None):
        sorted_pop = sorted(pop, key=lambda temp: temp[id_fit])
        if id_best == self.ID_MIN_PROB:
            return deepcopy(sorted_pop[id_best]), deepcopy(
                sorted_pop[self.ID_MAX_PROB])
        elif id_best == self.ID_MAX_PROB:
            return deepcopy(sorted_pop[id_best]), deepcopy(
                sorted_pop[self.ID_MIN_PROB])

    def update_global_best_global_worst_solution(self,
                                                 pop=None,
                                                 id_best=None,
                                                 id_worst=None,
                                                 g_best=None):
        """ Sort the copy of population and update the current best position. Return the new current best position """
        sorted_pop = sorted(pop, key=lambda temp: temp[self.ID_FIT])
        current_best = sorted_pop[id_best]
        g_best = deepcopy(current_best) if current_best[self.ID_FIT] < g_best[
            self.ID_FIT] else deepcopy(g_best)
        return g_best, sorted_pop[id_worst]

    def get_sorted_pop_and_global_best_solution(self,
                                                pop=None,
                                                id_fit=None,
                                                id_best=None):
        """ Sort population and return the sorted population and the best position """
        sorted_pop = sorted(pop, key=lambda temp: temp[id_fit])
        return sorted_pop, deepcopy(sorted_pop[id_best])

    def amend_position(self, position=None):
        return maximum(self.lb, minimum(self.ub, position))

    def amend_position_faster(self, position=None):
        return clip(position, self.lb, self.ub)

    def amend_position_random(self, position=None):
        for t in range(self.problem_size):
            if position[t] < self.lb[t] or position[t] > self.ub[t]:
                position[t] = uniform(self.lb[t], self.ub[t])
        return position

    def amend_position_random_faster(self, position=None):
        return where(logical_and(self.lb <= position, position <= self.ub),
                     position, uniform(self.lb, self.ub))
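
    # Illustrative comment (not part of the source): for lb = [0, 0],
    # ub = [1, 1] and position = [1.4, -0.2], the clipping variants above
    # return [1.0, 0.0], while the random variants resample each
    # out-of-bounds coordinate uniformly within [lb, ub]; clipping piles
    # solutions onto the boundary, random repair preserves diversity.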

    def update_global_best_solution(self, pop=None, id_best=None, g_best=None):
        """ Sort the copy of population and update the current best position. Return the new current best position """
        sorted_pop = sorted(pop, key=lambda temp: temp[self.ID_FIT])
        current_best = sorted_pop[id_best]
        return deepcopy(current_best) if current_best[self.ID_FIT] < g_best[
            self.ID_FIT] else deepcopy(g_best)

    def update_sorted_population_and_global_best_solution(
            self, pop=None, id_best=None, g_best=None):
        """ Sort the population and update the current best position. Return the sorted population and the new current best position """
        sorted_pop = sorted(pop, key=lambda temp: temp[self.ID_FIT])
        current_best = sorted_pop[id_best]
        g_best = deepcopy(current_best) if current_best[self.ID_FIT] < g_best[
            self.ID_FIT] else deepcopy(g_best)
        return sorted_pop, g_best

    def create_opposition_position(self, position=None, g_best=None):
        return self.lb + self.ub - g_best[
            self.ID_POS] + uniform() * (g_best[self.ID_POS] - position)

    def levy_flight(self,
                    epoch=None,
                    position=None,
                    g_best_position=None,
                    step=0.001,
                    case=0):
        """
        Parameters
        ----------
        epoch (int): current iteration
        position : 1-D numpy array
        g_best_position : 1-D numpy array
        step (float, optional): 0.001
        case (int, optional): 0, 1, 2

        """
        beta = 1
        # muy and v are two random variables following normal distributions
        # sigma_muy : standard deviation of muy
        sigma_muy = power(
            gamma(1 + beta) * sin(pi * beta / 2) / (gamma(
                (1 + beta) / 2) * beta * power(2, (beta - 1) / 2)), 1 / beta)
        # sigma_v : standard deviation of v
        sigma_v = 1
        muy = normal(0, sigma_muy)  # normal() takes the std, not the variance
        v = normal(0, sigma_v)
        s = muy / power(abs(v), 1 / beta)
        levy = uniform(self.lb,
                       self.ub) * step * s * (position - g_best_position)

        if case == 0:
            return levy
        elif case == 1:
            return position + 1.0 / sqrt(epoch + 1) * sign(random() -
                                                           0.5) * levy
        elif case == 2:
            return position + normal(0, 1, len(self.lb)) * levy
        elif case == 3:
            return position + 0.01 * levy
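
# A standalone sketch (an assumption, not part of the source class) of the
# Mantegna procedure that levy_flight uses above: draw u ~ N(0, sigma_u^2)
# and v ~ N(0, 1); then s = u / |v|**(1/beta) is a Levy-stable step.
from math import gamma, pi, sin
import numpy as np

def mantegna_step(beta=1.5, size=None):
    """Draw a Levy-stable step via Mantegna's algorithm (sketch)."""
    sigma_u = (gamma(1 + beta) * sin(pi * beta / 2)
               / (gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2))) ** (1 / beta)
    u = np.random.normal(0.0, sigma_u, size)   # scale argument is the std
    v = np.random.normal(0.0, 1.0, size)
    return u / np.abs(v) ** (1 / beta)
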
# Report the results: accuracy and computation time of the two solvers
import numpy as np
from numpy.random import uniform, normal, poisson


def table(rewardM):
    n_sample = rewardM.shape[0]
    game_size = rewardM.shape[1]

    LP_record = np.zeros((n_sample, 2), dtype=float)
    NN_record = np.zeros((n_sample, 2), dtype=float)
    for i in range(n_sample):
        # LP_NE and CNN_NE (defined elsewhere in the source) each return
        # (game value, solve time) for one reward matrix
        LP_record[i, 0], LP_record[i, 1] = LP_NE(rewardM[i])
        NN_record[i, 0], NN_record[i, 1] = CNN_NE(rewardM[i])

    LP_mv, LP_mt = LP_record[:, 0].mean(), LP_record[:, 1].mean()
    NN_mv, NN_mt = NN_record[:, 0].mean(), NN_record[:, 1].mean()
    difference_NN = np.abs(LP_record[:, 0] - NN_record[:, 0]).mean()
    gap_NN = np.abs(
        (LP_record[:, 0] - NN_record[:, 0]) / NN_record[:, 0]).mean()

    print(f"** number sample: {n_sample}, game size: {game_size} **\n")
    print(
        f"Linear programming         : mean time: {LP_mt:.4f}, mean value: {LP_mv:.4f} "
    )
    print(
        f"Convolutinal Neural Network: mean time: {NN_mt:.4f}, mean value: {NN_mv:.4f},  mean gap: {gap_NN*100:.2f}%"
    )


rewardM = (uniform(-10, 100, (100, 35, 35))
           + normal(25, 3, (100, 35, 35))
           + poisson(35, (100, 35, 35)))
table(rewardM)
Example #55
0
def residual(pars, x, data=None):
    # (the opening of this function is cut off in the source; the signature
    # and the first two parameter lookups are reconstructed from the names
    # used below)
    amp = pars['amp'].value
    per = pars['period'].value
    shift = pars['shift'].value
    decay = pars['decay'].value

    if abs(shift) > pi/2:
        shift = shift - sign(shift)*pi

    model = amp*sin(shift + x/per) * exp(-x*x*decay*decay)
    if data is None:
        return model
    return (model - data)

n = 1500
xmin = 0.
xmax = 250.0
random.seed(0)
noise = random.normal(scale=2.80, size=n)
x     = linspace(xmin, xmax, n)
data  = residual(p_true, x) + noise   # p_true: Parameters holding the true values (defined elsewhere in the source)

fit_params = Parameters()
fit_params.add('amp', value=13.0, max=20, min=0.0)
fit_params.add('period', value=2, max=10)
fit_params.add('shift', value=0.0, max=pi/2., min=-pi/2.)
fit_params.add('decay', value=0.02, max=0.10, min=0.00)

out = minimize(residual, fit_params, args=(x,), kws={'data':data})

fit = residual(fit_params, x)   # with a modern lmfit, use out.params here

print('# N_func_evals, N_free = ', out.nfev, out.nfree)
print('# chi-square, reduced chi-square = % .7g, % .7g' % (out.chisqr, out.redchi))
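
# A possible follow-up (assuming a recent lmfit; not part of the source):
# summarize the fit with lmfit's fit_report helper.
from lmfit import fit_report
print(fit_report(out))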
Example #56
0
 def prop(x):
     # random-walk proposal (presumably for a Metropolis-type sampler)
     return random.normal(x, 1)
 def x_ar(*x_past, loc=black):
     # AR(1)-style update; alpha, sigma and black come from an enclosing
     # scope not shown in this snippet (cf. render_maze below)
     x_curr = alpha * np.sum(x_past) + (1 - len(x_past) * alpha) * normal(loc=loc, scale=sigma)
     return x_curr
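
# A small self-contained check (not from the source) that the x_ar update
# keeps samples centred on loc: with a single parent the recursion is
# x_curr = alpha * x_past + (1 - alpha) * N(loc, sigma), whose fixed point
# in expectation is loc. The alpha, sigma and black values are assumptions.
from numpy.random import normal

alpha, sigma, black = 0.9, 0.1, 0.0

def x_ar_demo(x_past, loc=black):
    return alpha * x_past + (1 - alpha) * normal(loc=loc, scale=sigma)

x = black
for _ in range(1000):
    x = x_ar_demo(x)
print(round(x, 2))   # stays near 0.0 (= black)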
Example #58
0
        beta: [0-2]
            + 0-1: small range --> exploit
            + 1-2: large range --> explore
        case: 0, 1, -1
            + 0: return multiplier * s * uniform()
            + 1: return multiplier * s * normal(0, 1)
            + -1: return multiplier * s
        """
        # u and v are two random variables following normal distributions
        # sigma_u : standard deviation of u
        sigma_u = power(
            gamma(1 + beta) * sin(pi * beta / 2) / (gamma(
                (1 + beta) / 2) * beta * power(2, (beta - 1) / 2)), 1 / beta)
        # sigma_v : standard deviation of v
        sigma_v = 1
        u = normal(0, sigma_u)  # normal() takes the std, not the variance
        v = normal(0, sigma_v)
        s = u / power(abs(v), 1 / beta)
        if case == 0:
            step = multiplier * s * uniform()
        elif case == 1:
            step = multiplier * s * normal(0, 1)
        else:
            step = multiplier * s
        return step

    def get_index_roulette_wheel_selection(self, list_fitness=None):
        """ It can handle negative also. Make sure your list fitness is 1D-numpy array"""
        scaled_fitness = (list_fitness - min(list_fitness)) / ptp(list_fitness)
        minimized_fitness = 1.0 - scaled_fitness
        total_sum = sum(minimized_fitness)
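        # (The snippet is truncated here. A standard roulette-wheel draw --
        # an assumed completion, not the original code -- would continue:)
        #     r = uniform() * total_sum
        #     for idx, f in enumerate(minimized_fitness):
        #         r = r - f
        #         if r <= 0:
        #             return idx
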
import numpy as np
from numpy.random import normal


def render_maze(root, children, alpha, std, black=0.0, white=1.0, history=True):
    """
    Render maze walls and cells according to an autoregressive model.

    :param root: Coordinates of root cell.
    :param children: Nested list where children cells are indexed by their root's coordinates.
    :param alpha: Parameter of the AR(1) process
    :param std: Standard deviation of the AR(1) process.
    :param black: Value of the black color associated to walls.
    :param white: Value of the white color associated to cells.
    :param history: Boolean indicating whether to keep a frame-by-frame record of the rendering.
    :return: Rendered maze. If history is set, a frame-by-frame mask of the rendering is also returned.
    """

    sigma = std * ar_gain(alpha)   # ar_gain is defined elsewhere in the source

    def x_ar(*x_past, loc=black):
        x_curr = alpha * np.sum(x_past) + (1 - len(x_past) * alpha) * normal(loc=loc, scale=sigma)
        return x_curr

    edge_height = len(children) * 2
    edge_width = len(children[0]) * 2

    current_branches = [root]

    rendered_maze = np.zeros(shape=(edge_height, edge_width), dtype=float)
    maze_mask = np.zeros(shape=rendered_maze.shape, dtype=bool) if history else None

    i_root = 2 * root[0]
    j_root = 2 * root[1]

    for i in range(-1, 2):
        for j in range(-1, 2):
            if history:
                maze_mask[i_root + i, j_root + j] = True
            if i == 0 and j == 0:
                rendered_maze[i_root, j_root] = normal(loc=white, scale=sigma)
            else:
                rendered_maze[i_root + i, j_root + j] = normal(loc=black, scale=sigma)

    maze_history = maze_mask[None, ] if history else None

    while len(current_branches) > 0:
        future_branches = []
        for branch in current_branches:
            for child in children[branch[0]][branch[1]]:

                # Calculate position of cells to render
                direction = (child[0] - branch[0], child[1] - branch[1])

                mid_cell = (branch[0] + child[0], branch[1] + child[1])
                end_cell = (2 * child[0], 2 * child[1])

                void1a = (2 * child[0] + direction[1], 2 * child[1] + direction[0])
                void1b = (2 * child[0] - direction[1], 2 * child[1] - direction[0])
                void2a = (2 * child[0] + direction[0] + direction[1], 2 * child[1] + direction[1] + direction[0])
                void2b = (2 * child[0] + direction[0] - direction[1], 2 * child[1] + direction[1] - direction[0])
                end_void = (2 * child[0] + direction[0], 2 * child[1] + direction[1])

                # Fetch value of already rendered cells
                w0 = rendered_maze[2 * branch[0], 2 * branch[1]]

                b0a = rendered_maze[branch[0] + child[0] + direction[1], branch[1] + child[1] + direction[0]]
                b0b = rendered_maze[branch[0] + child[0] - direction[1], branch[1] + child[1] - direction[0]]

                # Calculate value of newly rendered cells
                w1 = x_ar(w0, loc=white)
                w2 = x_ar(w1, loc=white)

                b1a = x_ar(b0a, loc=black)
                b1b = x_ar(b0b, loc=black)
                b2a = x_ar(b1a, loc=black)
                b2b = x_ar(b1b, loc=black)
                b3 = x_ar(b2a, b2b, loc=black)

                # Assign rendered values
                rendered_maze[mid_cell] = w1
                rendered_maze[end_cell] = w2

                rendered_maze[void1a] = b1a
                rendered_maze[void1b] = b1b
                rendered_maze[void2a] = b2a
                rendered_maze[void2b] = b2b
                rendered_maze[end_void] = b3

                if history:

                    maze_mask[mid_cell] = True
                    maze_mask[end_cell] = True

                    maze_mask[void1a] = True
                    maze_mask[void1b] = True
                    maze_mask[void2a] = True
                    maze_mask[void2b] = True
                    maze_mask[end_void] = True

                future_branches += [child]

        current_branches = future_branches
        if history:
            maze_history = np.append(maze_history, maze_mask[None, ], axis=0)

    if history:
        return rendered_maze, maze_history
    else:
        return rendered_maze
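
# A minimal usage sketch (an assumption, not from the source). ar_gain is not
# defined in the snippet above, so a hypothetical stand-in is supplied; the
# children structure below encodes a small spanning tree over a 3x3 grid of
# cells, rooted at (1, 1).
def ar_gain(alpha):
    # hypothetical stand-in; the real definition lives elsewhere in the source
    return np.sqrt(1.0 - alpha ** 2)

children = [[[], [], []],
            [[], [(1, 2), (2, 1)], [(2, 2)]],
            [[], [], []]]
maze = render_maze(root=(1, 1), children=children, alpha=0.9, std=0.1,
                   history=False)
print(maze.shape)   # (6, 6)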
Example #60
0
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jun  1 14:57:57 2018

@author: mitcdud
"""
import numpy as np
import matplotlib.pyplot as plt
import numpy.random as npr

plt.close("all")
xs = np.linspace(1, 10, 100)
ys = np.linspace(20, 40, 100)
sigma = 1.
newy = ys + npr.normal(0, sigma, 100)  # add Gaussian noise
hello = newy[:-10]
ywithsys = newy[-10:] + 3.0            # systematic +3.0 offset on the last 10 points
ywsyslist = ywithsys.tolist()
hello2 = hello.tolist()
toty = hello2 + ywsyslist              # recombine into a single list
plt.figure(1)
plt.scatter(xs, toty, c='b')
plt.title('Plot with Systematic and Gaussian Error Added')
plt.xlabel('x')
plt.ylabel('y')

#==============================================================================
# Fit data using a forward fit (i.e. linear, minimize residuals in y)
forw = np.polyfit(xs, toty, 1)
forslope, foryint = forw[0], forw[1]
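
# A short continuation sketch (not in the source): evaluate the forward fit
# with np.polyval and overlay it on the scatter plot.
plt.plot(xs, np.polyval(forw, xs), 'r-',
         label='forward fit: y = %.3f x + %.2f' % (forslope, foryint))
plt.legend()
plt.show()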