Example no. 1
def newModelAtRandom(data, P, K, featVar, latFeatVar, vocabPrior=VocabPrior, dtype=DTYPE):
    '''
    Creates a new CtmModelState for the given training set and
    the given number of topics. Everything is instantiated purely
    at random. This contains all parameters independent of
    the dataset (e.g. learnt priors)
    
    Param:
    data - the dataset of words, features and links of which only words and
           features are used in this model
    P - The size of the latent feature-space P << F
    K - the number of topics
    featVar - the prior variance of the feature-space: this is a
              scalar used to scale an identity matrix
    latFeatVar - the prior variance of the latent feature-space: this
                 is a scalar used to scale an identity matrix
    
    Return:
    A ModelState object
    '''
    assert K > 1, "There must be at least two topics"
    
    base = newCtmModelAtRandom(data, K, vocabPrior, dtype)
    _,F = data.feats.shape
    Y = rd.random((K,P)).astype(dtype) * 50
    R_Y = latFeatVar * np.eye(P,P, dtype=dtype)
    
    V = rd.random((P,F)).astype(dtype) * 50
    A = Y.dot(V)
    R_A = featVar * np.eye(F,F, dtype=dtype)
    
    return ModelState(F, P, K, A, R_A, featVar, Y, R_Y, latFeatVar, V, base.sigT, base.vocab, base.vocabPrior, base.A, dtype, MODEL_NAME)
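
# A minimal usage sketch (hypothetical: `corpus` stands in for any dataset
# object exposing `words` and `feats` as described in the docstring above;
# the parameter values are illustrative, and ModelState is assumed to expose
# its fields by name):
model = newModelAtRandom(corpus, P=10, K=25, featVar=0.1, latFeatVar=0.1)
print(model.K, model.P, model.F)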
Example no. 2
 def PSO_global(self):
     _list = [rd.uniform(self.low_bound, self.up_bound, self.dimension) for x in range(0, self.particles_count, 1)]
     g_best = _list[0]
     position = _list
     vel = []
     for x in position:
         if self.function(x) < self.function(g_best):
             g_best = x
         vel.append(rd.uniform(-(abs(self.up_bound - self.low_bound)), abs(self.up_bound - self.low_bound), self.dimension))
     count = 0
     while count < self.stop_case:
         for x in range(len(_list)):
             for y in range(self.dimension):
                 r_g, r_p = rd.random(1), rd.random(1)
                 vel[x][y] = (self.w * vel[x][y]
                              + self.especial_param_p * r_p * (position[x][y] - _list[x][y])
                              + self.especial_param_g * r_g * (g_best[y] - _list[x][y]))
             _list[x] += vel[x]
             if self.function(_list[x]) < self.function(position[x]):
                 position[x] = _list[x]
                 if self.function(position[x]) < self.function(g_best):
                     g_best = position[x]
         count += 1
     return g_best
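
# For reference, the update inside the nested loops above is the standard PSO
# velocity rule; a standalone sketch for one scalar component (the inertia and
# acceleration coefficients below are illustrative, not taken from the class):
import numpy.random as rd

def pso_velocity(v, x, p_best, g_best, w=0.7, c_p=1.5, c_g=1.5):
    r_p, r_g = rd.random(), rd.random()
    return w * v + c_p * r_p * (p_best - x) + c_g * r_g * (g_best - x)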
Example no. 3
def spline_iterator():
  from modules.sandSpline import SandSpline

  splines = []
  for _ in range(30):
    guide = f(0.5,0.5)
    pnum = randint(15,100)

    a = random()*TWOPI + linspace(0, TWOPI, pnum)
    # a = linspace(0, TWOPI, pnum)
    path = column_stack((cos(a), sin(a))) * (0.1+random()*0.4)

    scale = arange(pnum).astype('float')*STP

    s = SandSpline(
        guide,
        path,
        INUM,
        scale
        )
    splines.append(s)

  itt = 0
  while True:
    for w, s in enumerate(splines):
      xy = next(s)
      itt += 1
      yield itt, w, xy
Example no. 4
def randimage(pertsize):
   a0 = random.random([3,3])
   for iter in range(len(pertsize)):
      a0[0,:] = 0.0
      a0[-1,:] = 0.0
      a0[:,0] = 0.0
      a0[:,-1] = 0.0
   
      newsize = [a0.shape[0]*2-1, a0.shape[1]*2-1]
   
      p = random.random(newsize) - 0.5
   
      a = zeros(newsize)
      a[0::2, 0::2] = a0
      a[0::2, 1::2] = (a[0::2, 0:-1:2] + a[0::2, 2::2]) / 2.0
      a[1::2, 0::2] = (a[0:-1:2, 0::2] + a[2::2, 0::2]) / 2.0
      a[1::2, 1::2] = (a[1::2, 0:-1:2] + a[1::2, 2::2]) / 2.0
      a = a + p * 0.5**iter * pertsize[iter]
      a[0::2, 0::2] = a0
      a0 = a
   
   a = (a - a.min()) / (a.max() - a.min())
   return a
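
# A minimal usage sketch (assumes `zeros` and `random` are imported from
# numpy as in the function above): each entry of pertsize sets the
# perturbation amplitude at one refinement level, and n levels grow the
# 3x3 seed to (2**(n+1) + 1) pixels per side.
img = randimage([1.0, 0.6, 0.3, 0.1])     # four levels -> 33x33 image
print(img.shape, img.min(), img.max())    # values normalised to [0, 1]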
Example no. 5
def _create_plot_component():

    # Create some data
    numpts = 5000
    x = sort(random(numpts))
    y = random(numpts)

    # Create a plot data object and give it this data
    pd = ArrayPlotData()
    pd.set_data("index", x)
    pd.set_data("value", y)

    # Create the plot
    plot = Plot(pd)
    plot.plot(("index", "value"),
              type="scatter",
              marker="circle",
              index_sort="ascending",
              color="orange",
              marker_size=3,
              bgcolor="white")

    # Tweak some of the plot properties
    plot.title = "Scatter Plot"
    plot.line_width = 0.5
    plot.padding = 50

    # Attach some tools to the plot
    plot.tools.append(PanTool(plot, constrain_key="shift"))
    zoom = ZoomTool(component=plot, tool_mode="box", always_on=False)
    plot.overlays.append(zoom)

    return plot
Example no. 6
def polynomial_mutation(problem, individual, configuration):
    from numpy.random import random
    eta_m_ = configuration["NSGAIII"]["ETA_M_DEFAULT_"]
    distributionIndex_ = eta_m_
    output = jmoo_individual(problem, individual.decisionValues)

    probability = 1.0 / len(problem.decisions)
    for var in xrange(len(problem.decisions)):
        if random() <= probability:
            y = individual.decisionValues[var]
            yU = problem.decisions[var].up
            yL = problem.decisions[var].low
            delta1 = (y - yL)/(yU - yL)
            delta2 = (yU - y)/(yU - yL)
            rnd = random()

            mut_pow = 1.0/(eta_m_ + 1.0)
            if rnd < 0.5:
                xy = 1.0 - delta1
                val = 2.0 * rnd + (1 - 2 * rnd) * (xy ** (distributionIndex_ + 1.0))
                deltaq = val ** mut_pow - 1
            else:
                xy = 1.0 - delta2
                val = 2.0 * (1.0-rnd) + 2.0 * (rnd-0.5) * (xy ** (distributionIndex_+1.0))
                deltaq = 1.0 - (val ** mut_pow)


            y += deltaq * (yU - yL)
            if y < yL: y = yL
            if y > yU: y = yU

            output.decisionValues[var] = y

    return output
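
# A standalone sketch of the bounded polynomial-mutation step used above for
# a single variable (yL, yU and eta play the same roles as in the function;
# this is an illustration, not part of the original module):
import numpy.random as rd

def mutate_one(y, yL, yU, eta=20.0):
    rnd = rd.random()
    mut_pow = 1.0 / (eta + 1.0)
    if rnd < 0.5:
        xy = 1.0 - (y - yL) / (yU - yL)
        deltaq = (2.0 * rnd + (1.0 - 2.0 * rnd) * xy ** (eta + 1.0)) ** mut_pow - 1.0
    else:
        xy = 1.0 - (yU - y) / (yU - yL)
        deltaq = 1.0 - (2.0 * (1.0 - rnd) + 2.0 * (rnd - 0.5) * xy ** (eta + 1.0)) ** mut_pow
    return min(max(y + deltaq * (yU - yL), yL), yU)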
Example no. 7
def _create_plot_component():
    pd = ArrayPlotData(x=random(100), y=random(100))

    # Create some line plots of some of the data
    plot = Plot(pd)

    # Create a scatter plot and get a reference to it (separate from the
    # Plot object) because we'll need it for the regression tool below.
    scatterplot = plot.plot(("x", "y"), color="blue", type="scatter")[0]

    # Tweak some of the plot properties
    plot.padding = 50

    # Attach some tools to the plot
    plot.tools.append(PanTool(plot, drag_button="right"))
    plot.overlays.append(ZoomTool(plot))

    # Add the regression tool and overlay.  These need to be added
    # directly to the scatterplot instance (and not the Plot instance).
    regression = RegressionLasso(scatterplot,
        selection_datasource=scatterplot.index)
    scatterplot.tools.append(regression)
    scatterplot.overlays.append(RegressionOverlay(scatterplot,
                                                  lasso_selection=regression))
    return plot
Example no. 8
 def test_hfft(self):
     x = random(14) + 1j*random(14)
     x_herm = np.concatenate((random(1), x, random(1)))
     x = np.concatenate((x_herm, x[::-1].conj()))
     assert_array_almost_equal(np.fft.fft(x), np.fft.hfft(x_herm))
     assert_array_almost_equal(np.fft.hfft(x_herm) / np.sqrt(30),
                               np.fft.hfft(x_herm, norm="ortho"))
Example no. 9
 def test_ifftn(self):
     x = random((30, 20, 10)) + 1j*random((30, 20, 10))
     assert_array_almost_equal(
         np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0),
         np.fft.ifftn(x))
     assert_array_almost_equal(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10),
                               np.fft.ifftn(x, norm="ortho"))
Example no. 10
def _create_plot_component():

    # Create some data
    numpts = 400
    x = sort(random(numpts))
    y = random(numpts)

    xs = ArrayDataSource(x, sort_order='ascending')
    ys = ArrayDataSource(y)

    vectorlen = 15
    vectors = array((random(numpts)*vectorlen,random(numpts)*vectorlen)).T

    vector_ds = MultiArrayDataSource(vectors)
    xrange = DataRange1D()
    xrange.add(xs)
    yrange = DataRange1D()
    yrange.add(ys)
    quiverplot = QuiverPlot(index = xs, value = ys,
                    vectors = vector_ds,
                    index_mapper = LinearMapper(range=xrange),
                    value_mapper = LinearMapper(range=yrange),
                    bgcolor = "white")

    add_default_axes(quiverplot)
    add_default_grids(quiverplot)

    # Attach some tools to the plot
    quiverplot.tools.append(PanTool(quiverplot, constrain_key="shift"))
    zoom = ZoomTool(quiverplot)
    quiverplot.overlays.append(zoom)

    container = OverlayPlotContainer(quiverplot, padding=50)

    return container
Example no. 11
 def test_toeplitz_real_sym(self):
     src = random(50) - 0.5
     rsp = random(50) - 0.5
     toep = scipy.linalg.toeplitz(src)
     x = np.dot(scipy.linalg.inv(toep), rsp)  # compare to scipy.linalg
     x2 = rf.deconvolve._toeplitz_real_sym(src, rsp)
     np.testing.assert_array_almost_equal(x, x2, decimal=3)
Example no. 12
  def wrap(render):

    global np_coords
    global np_vert_coords
    global grains

    ## if fn is a path each image will be saved to that path

    if not render.steps%3:
      f = fn.name()
    else:
      f = None

    grains += (-1)**floor(2*random())
    print(grains)
    if grains<0:
      grains = 0

    res = steps(DF)
    render.set_front(FRONT)

    coord_num = DF.np_get_edges_coordinates(np_coords)
    sandstroke(render,np_coords[:coord_num,:],grains,f)

    if not random()<0.1:
      vert_num = DF.np_get_vert_coordinates(np_vert_coords)
      dots(render,np_vert_coords[:vert_num,:],None)

    return res
Example no. 13
def newQueryState(data, modelState, withLdaTopics=None):
    '''
    Creates a new CTM Query state object. This contains all
    parameters and random variables tied to individual
    datapoints.
    
    Param:
    data - the dataset of words, features and links of which only words are used in this model
    modelState - the model state object
    withLdaTopics - if not None, this is used to instantiate the
    initial topics. IT IS ALSO USED TO MUTATE THE MODEL
    
    Return:
    A CtmQueryState object
    '''
    if INIT_WITH_CTM:
        return _newQueryStateFromCtm(data, modelState)
    elif withLdaTopics is not None:
        return _newQueryStateFromLda(data, modelState, withLdaTopics)

    K, vocab, dtype =  modelState.K, modelState.vocab, modelState.dtype
    
    D,T = data.words.shape
    assert T == vocab.shape[1], "The number of terms in the document-term matrix (" + str(T) + ") differs from that in the model-states vocabulary parameter " + str(vocab.shape[1])
    docLens = np.squeeze(np.asarray(data.words.sum(axis=1)))
    
    outMeans = normalizerows_ip(rd.random((D,K)).astype(dtype))
    outVarcs = np.ones((D,K), dtype=dtype)

    inMeans = normalizerows_ip(outMeans + 0.1 * rd.random((D,K)).astype(dtype))
    inVarcs = np.ones((D,K), dtype=dtype)

    inDocCov  = np.ones((D,), dtype=dtype)
    
    return QueryState(outMeans, outVarcs, inMeans, inVarcs, inDocCov, docLens)
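
# A minimal usage sketch (hypothetical `corpus` as in Example no. 1; `model`
# is the ModelState returned by newModelAtRandom, and QueryState is assumed
# to expose its fields by name):
query = newQueryState(corpus, model)
print(query.docLens.shape)  # one entry per document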
Example no. 14
 def push_back_students(self):
     more_pushing = True
     students_to_kick = {}
     while more_pushing:
         more_pushing = False
         self.sorted_section_indices.sort(key = lambda sec_ind: -self.section_capacities[sec_ind])
         for sec_ind in self.sorted_section_indices:
             students_to_push = numpy.transpose(numpy.nonzero((self.enroll_orig[:, sec_ind, self.section_schedules[sec_ind,:]].any(axis=1) * (True - self.enroll_final[:,:,self.section_schedules[sec_ind,:]].any(axis=(1,2))))))
             more_pushing |= bool(len(students_to_push))
             for student_ind in students_to_push:
                 self.enroll_final[student_ind, sec_ind, self.section_schedules[sec_ind,:]] = True
                 if self.section_capacities[sec_ind] > 0:
                     self.section_capacities[sec_ind] -= 1
                 else:
                     if not sec_ind in students_to_kick.keys():
                         students_to_kick[sec_ind] = numpy.transpose(numpy.nonzero((self.enroll_final*self.request)[:, sec_ind, self.section_schedules[sec_ind,:]].any(axis=1)))
                         pq = Queue.PriorityQueue()
                         random.shuffle(students_to_kick[sec_ind])
                         for [student] in students_to_kick[sec_ind]:
                             old_sections = numpy.transpose(numpy.nonzero(self.enroll_orig[student, :, self.section_schedules[sec_ind,:]].any(axis=1)))
                             if not len(old_sections):
                                 pq.put((0, random.random(), student), False)
                             for [old_section] in old_sections:
                                 pq.put((self.section_scores[old_section], random.random(), student), False)
                         students_to_kick[sec_ind] = pq
                     try:
                         self.enroll_final[students_to_kick[sec_ind].get(False)[2], sec_ind, self.section_schedules[sec_ind,:]] = False
                     except Queue.Empty:
                         pass
Example no. 15
def ConnectIzhikevichNetworkLayers(CIJ, NExcitoryLayer, NInhibitoryLayer):
  Dmax = 20 # Maximum Delay
  network = IzNetwork([NExcitoryLayer, NInhibitoryLayer], Dmax)

  NTotalNeurons = NExcitoryLayer + NInhibitoryLayer

  # Set neuron parameters for excitatory layer
  rand = rn.rand(NExcitoryLayer)
  network.layer[0].N = NExcitoryLayer
  network.layer[0].a = 0.02 * np.ones(NExcitoryLayer)
  network.layer[0].b = 0.20 * np.ones(NExcitoryLayer)
  network.layer[0].c = -65 + 15*(rand**2)
  network.layer[0].d = 8 - 6*(rand**2)
  
  ## Factor and delay
  network.layer[0].factor[0] = 17
  network.layer[0].factor[1] = 2
  network.layer[0].delay[0] = rn.randint(1,21,size=[NExcitoryLayer,NExcitoryLayer])
  network.layer[0].delay[1] = np.ones([NExcitoryLayer, NInhibitoryLayer])
 
  ## Connectivity matrix (synaptic weights)
  # layer[i].S[j] is the connectivity matrix from layer j to layer i
  # S(i,j) is the strength of the connection from neuron j to neuron i
  # excitatory-to-excitatory synaptic weights
  network.layer[0].S[0] = CIJ[0]
  # inhibitory-to-excitatory synaptic weights
  network.layer[0].S[1] = CIJ[1]
  # scale the inhibitory-to-excitatory weights by random negative factors
  rand_array = -1 * rn.random(NInhibitoryLayer*NExcitoryLayer).reshape(NExcitoryLayer,NInhibitoryLayer)
  network.layer[0].S[1] = np.multiply(network.layer[0].S[1],rand_array)

  # Set neuron parameters for inhibitory layer
  rand = rn.rand(NInhibitoryLayer)
  network.layer[1].N = NInhibitoryLayer
  network.layer[1].a = 0.02 + 0.08*rand
  network.layer[1].b = 0.25 - 0.05*rand
  network.layer[1].c = -65 * np.ones(NInhibitoryLayer)
  network.layer[1].d = 2 * np.ones(NInhibitoryLayer)
  
  ## Factor and delay
  network.layer[1].factor[0] = 50
  network.layer[1].factor[1] = 1
  network.layer[1].delay[0] = np.ones([NInhibitoryLayer, NExcitoryLayer])
  network.layer[1].delay[1] = np.ones([NInhibitoryLayer, NInhibitoryLayer])
 
  ## Connectivity matrix (synaptic weights)
  # layer[i].S[j] is the connectivity matrix from layer j to layer i
  # S(i,j) is the strength of the connection from neuron j to neuron i
  # excitatory-to-inhibitory synaptic weights
  network.layer[1].S[0] = CIJ[2]
  # inhibitory-to-inhibitory synaptic weights
  network.layer[1].S[1] = CIJ[3]
  # scale the excitatory-to-inhibitory weights by random factors
  rand_array = rn.random(NInhibitoryLayer*NExcitoryLayer).reshape(NInhibitoryLayer,NExcitoryLayer)
  network.layer[1].S[0] = np.multiply(network.layer[1].S[0],rand_array)

  # scale the inhibitory-to-inhibitory weights by random negative factors
  rand_array = -1 * rn.random(NInhibitoryLayer*NInhibitoryLayer).reshape(NInhibitoryLayer,NInhibitoryLayer)
  network.layer[1].S[1] = np.multiply(network.layer[1].S[1],rand_array)
  return(network)
Example no. 16
def prob4():
  result = np.array([])
  full_data = np.array([])
  n = 5000
  data = r.random((n, 2)) * 2 - 1
  y = np.sin(data * np.pi)

  for i in range(5000):
    d = np.mat(r.random(2) * 2 - 1)
    full_data = np.append(full_data, d)
    d = d.T
    y = np.sin(d * np.pi)
    #print(y)
    a = np.linalg.inv(d.T * d) * d.T * y
    result = np.append(result, a)
    #print(a)
  #print(result)
  gbar = np.mean(result)

  var_array = np.array([])
  for i, d in enumerate(np.array_split(full_data, 5000)):
    y = result[i]
    dvar = np.mean((d * y - d * gbar) ** 2)
    var_array = np.append(var_array, dvar)

  expected_bias = np.mean((gbar * full_data - np.sin(full_data * np.pi)) ** 2)
  expected_var = np.mean(var_array)

  print("bias {} var {} gbar {}".format(expected_bias,expected_var, gbar))
Example no. 17
    def sample1d(self, nrand):
        """
        Get random |g| from the 1d distribution

        parameters
        ----------
        nrand: int
            Number to generate
        """

        if not hasattr(self,'maxval1d'):
            self.set_maxval1d()

        g = zeros(nrand)

        ngood=0
        nleft=nrand
        while ngood < nrand:

            # generate |g| in [0,1)
            grand = random.random(nleft)

            # now finally the height from [0,maxval)
            h = self.maxval1d*random.random(nleft)

            pvals = self.prior1d(grand)

            w,=where(h < pvals)
            if w.size > 0:
                g[ngood:ngood+w.size] = grand[w]
                ngood += w.size
                nleft -= w.size
   
        return g
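
# The accept/reject loop above is plain rejection sampling; a compact generic
# version for any vectorised density `pdf` bounded by `pmax` on [0, 1)
# (illustrative, not part of the original class):
import numpy as np

def rejection_sample(pdf, pmax, nrand):
    out = np.empty(0)
    while out.size < nrand:
        x = np.random.random(nrand - out.size)   # candidate points
        keep = np.random.random(x.size) * pmax < pdf(x)
        out = np.concatenate((out, x[keep]))
    return out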
Example no. 18
    def test_random_overdet(self):
        for dtype in REAL_DTYPES:
            for (n,m) in ((20,15), (200,2)):
                for lapack_driver in TestLstsq.lapack_drivers:
                    for overwrite in (True, False):
                        a = np.asarray(random([n,m]), dtype=dtype)
                        for i in range(m):
                            a[i,i] = 20 * (0.1 + a[i,i])
                        for i in range(4):
                            b = np.asarray(random([n,3]), dtype=dtype)
                            # Store values in case they are overwritten later
                            a1 = a.copy()
                            b1 = b.copy()
                            try:
                                out = lstsq(a1, b1,
                                            lapack_driver=lapack_driver,
                                            overwrite_a=overwrite,
                                            overwrite_b=overwrite)
                            except LstsqLapackError:
                                if lapack_driver is None:
                                    mesg = ('LstsqLapackError raised with '
                                            'lapack_driver being None.')
                                    raise AssertionError(mesg)
                                else:
                                    # can't proceed, skip to the next iteration
                                    continue

                            x = out[0]
                            r = out[2]
                            assert_(r == m, 'expected efficient rank %s, got '
                                             '%s' % (m, r))
                            assert_allclose(x, direct_lstsq(a, b, cmplx=0),
                                            rtol=25 * _eps_cast(a1.dtype),
                                            atol=25 * _eps_cast(a1.dtype),
                                            err_msg="driver: %s" % lapack_driver)
Example no. 19
 def test_random_complex_overdet(self):
     for dtype in COMPLEX_DTYPES:
         for (n, m) in ((20, 15), (200, 2)):
                 for lapack_driver in TestLstsq.lapack_drivers:
                     for overwrite in (True, False):
                         a = np.asarray(random([n, m]) + 1j*random([n, m]),
                                        dtype=dtype)
                         for i in range(m):
                             a[i, i] = 20 * (0.1 + a[i, i])
                         for i in range(2):
                             b = np.asarray(random([n, 3]), dtype=dtype)
                             # Store values in case they are overwritten
                             # later
                             a1 = a.copy()
                             b1 = b.copy()
                             out = lstsq(a1, b1, lapack_driver=lapack_driver,
                                         overwrite_a=overwrite,
                                         overwrite_b=overwrite)
                             x = out[0]
                             r = out[2]
                             assert_(r == m, 'expected efficient rank %s, got '
                                              '%s' % (m, r))
                             assert_allclose(x, direct_lstsq(a, b, cmplx=1),
                                             rtol=25 * _eps_cast(a1.dtype),
                                             atol=25 * _eps_cast(a1.dtype),
                                       err_msg="driver: %s" % lapack_driver)
Example no. 20
 def __init__(self):
     self.NUM_CIRCLES = 4
     lux.register(Parameter( name = "simple_rate",
                             description = "0..1   controls the rate of spinning cubes",
                             default_value = 1.0 ))
     self.scale = 2.0
     self.max_segments = 20 #tweak according to openlase calibration parameters; too high can cause ol to crash
     self.max_cycles = 2 # set high (~50) for maximum glitch factor
     self.time_scale = 0.4
     self.R = 0.25 # big steps
     self.R_frequency = 1.0 / 100
     self.r = 0.08 # little steps
     self.r_frequency = 1.0 / 370
     self.p = 0.5 # size of the ring
     self.p_frequency = 1.0 / 2000
     self.color_time_frequency = 1.0 / 10
     self.color_length_frequency = 0  # 3.0/240; set to 0 to calibrate color
     self.color_angle_frequency = 0.1
     self.spatial_resonance = 3 #ok why is this 5 and not 4?
     self.spatial_resonance_amplitude = 0.1 
     self.spatial_resonance_offset = 0.25
     self.color_phases = random(self.NUM_CIRCLES)*6
     self.z_rotations = random(self.NUM_CIRCLES) * 0.22
     self.y_rotations = random(self.NUM_CIRCLES) * 0.24
     self.x_rotations = random(self.NUM_CIRCLES) * 0.33
     
     self.r_prime = 3 #37 
     self.g_prime = 2 #23 
     self.b_prime = 1 #128 
     
     self.scale = 2
     self.width = self.scale
     self.height = self.scale
     self.bass = 1 # plz hack this to do fft power binning kthx
Example no. 21
 def test_random_complex_exact(self):
     for dtype in COMPLEX_DTYPES:
         for n in (20, 200):
             for lapack_driver in TestLstsq.lapack_drivers:
                 for overwrite in (True, False):
                     a = np.asarray(random([n, n]) + 1j*random([n, n]),
                                    dtype=dtype)
                     for i in range(n):
                         a[i, i] = 20 * (0.1 + a[i, i])
                     for i in range(2):
                         b = np.asarray(random([n, 3]), dtype=dtype)
                         # Store values in case they are overwritten later
                         a1 = a.copy()
                         b1 = b.copy()
                         out = lstsq(a1, b1, lapack_driver=lapack_driver,
                                     overwrite_a=overwrite,
                                     overwrite_b=overwrite)
                         x = out[0]
                         r = out[2]
                         assert_(r == n, 'expected efficient rank %s, got '
                                          '%s' % (n, r))
                         if dtype is np.complex64:
                             assert_allclose(dot(a, x), b,
                                       rtol=400 * _eps_cast(a1.dtype),
                                       atol=400 * _eps_cast(a1.dtype),
                                       err_msg="driver: %s" % lapack_driver)
                         else:
                             assert_allclose(dot(a, x), b,
                                       rtol=1000 * _eps_cast(a1.dtype),
                                       atol=1000 * _eps_cast(a1.dtype),
                                       err_msg="driver: %s" % lapack_driver)
Example no. 22
  def sandstroke_orthogonal(self,xys,height=None,steps=10,grains=10):
    pix = self.pix
    rectangle = self.ctx.rectangle
    fill = self.ctx.fill

    if not height:
      height = pix*10

    dx = xys[:,2] - xys[:,0]
    dy = xys[:,3] - xys[:,1]

    aa = arctan2(dy,dx)
    directions = column_stack([cos(aa),sin(aa)])
    dd = sqrt(square(dx)+square(dy))

    aa_orth = aa + pi*0.5
    directions_orth = column_stack([cos(aa_orth),sin(aa_orth)])

    for i,d in enumerate(dd):

      xy_start = xys[i,:2] + \
          directions[i,:]*random((steps,1))*d

      for xy in xy_start:
        points = xy + \
            directions_orth[i,:]*random((grains,1))*height
        for x,y in points:
          rectangle(x,y,pix,pix)
          fill()
Example no. 23
 def __init__(self):
     
     self.big_geoip_noise_fraction = 0.2
     self.big_geoip_noise_distance = 10
     
     self.wifi_fraction = 0.10
     
     self.wifi_latency_mu = 10
     self.wifi_latency_sigma = 200
     self.wifi_granularity = 18
     self.max_normal_wifi_latency = 900
     
     self.router_router_speed = 7.5e4
     self.peer_router_speed = 2.0e3
     
     self.wi_latency_mu = 5
     self.wi_latency_sigma = 10
     
     self.isp_number = 7
     self.end_router_per_isp = 2000
     #self.global_router_number = 1000
     
     self.end_router_map = numpy.zeros([self.isp_number * self.end_router_per_isp,self.isp_number * self.end_router_per_isp])
     self.router_coordinates = []
     
     for _ in xrange(self.isp_number * self.end_router_per_isp):
         lat_ = 180 * random.random() - 90
         long_ = 360 * random.random() -180
         self.router_coordinates.append((lat_,long_))
Example no. 24
  def spawn(self, ratio, age=None):

    num = self.num
    self.potential[:num,0] = self.tmp[:num,0]<self.max_capacity

    inds = self.potential[:num,0].nonzero()[0]

    if age is not None:
      mask = self.age[inds,0]>self.itt-age
      inds = inds[mask]

    selected = inds[random(len(inds))<ratio]

    new_num = len(selected)
    if new_num>0:
      new_xy = self.xy[selected,:]
      theta = random(new_num)*TWOPI
      offset = column_stack([cos(theta), sin(theta)])*self.node_rad*0.5
      self.xy[num:num+new_num,:] = new_xy+offset
      self.num += new_num
      self.age[num:num+new_num] = self.itt

    if age is not None:
      self.decay(age)

    return 0
Example no. 25
def sample_sphere3d(radius=1., n_samples=1):
    """
    Sample points from 3D sphere.

    @param radius: radius of the sphere
    @type radius: float

    @param n_samples: number of samples to return
    @type n_samples: int

    @return: n_samples times random cartesian coordinates inside the sphere
    @rtype: numpy array
    """
    from numpy.random  import random
    from numpy import arccos, transpose, cos, sin, pi, power

    r = radius * power(random(n_samples), 1 / 3.)
    theta = arccos(2. * (random(n_samples) - 0.5))
    phi = 2 * pi * random(n_samples)

    x = cos(phi) * sin(theta) * r
    y = sin(phi) * sin(theta) * r
    z = cos(theta) * r

    return transpose([x, y, z])
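
# Usage sketch: draw 1000 points and check they all fall inside the sphere.
# The cube root applied to the radius is what makes the density uniform in
# the ball rather than clustered at the centre (assumes only numpy):
import numpy as np
pts = sample_sphere3d(radius=2.0, n_samples=1000)
assert pts.shape == (1000, 3)
assert np.all(np.linalg.norm(pts, axis=1) <= 2.0)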
Example no. 26
File: misc.py Project: neerajg/sdap
def init_params(K, L, M, N, X1, X2, no_obs, train_I, train_J):
    # TODO: initialise sigma so that, early on, r is neither evened out nor
    # neglected (the update-r log/exp problem)
    alphas = [random(K,),random(L,)]
    alphas[0] = alphas[0]/np.sum(alphas[0])
    alphas[1] = alphas[1]/np.sum(alphas[1])
    gammas = [randint(low = 50, high = 500, size = (M,K)) + random((M,K)), randint(low = 1.46, high = 3, size = (N,L)) + random((N,L))]
    beta_shape = (K,L,1 + X1.shape[1] + X2.shape[1])
    sigmaY_shape = (K,L)
    #randint(low = -1, high = 1, size = beta_shape) + 
    betas = [random(beta_shape), randint(low = 10, high = 50, size = sigmaY_shape) + random(sigmaY_shape)]  
    
    r1 = dirichlet(alphas[0], no_obs)
    r1[r1<1e-4] = 1e-4
    #r1[r1>0.99] = 0.9
    r2 = dirichlet(alphas[1], no_obs)
    r2[r2<1e-6] = 1e-6
    #r2[r2>0.9] = 0.9    
    r = [r1,r2]
    ones = np.ones((len(train_I),))
    mu = sp.csr_matrix((ones, (train_I,train_J)), shape=(M,N)).sum(1)
    mv = sp.csr_matrix((ones, (train_I,train_J)), shape=(M,N)).sum(0)
    mu[mu<1] = 1
    mv[mv<1] = 1
    
    for k in range(K):
        gammas[0][:,k] = alphas[0][k] + np.array(np.divide(sp.csr_matrix((r1[:,k],(train_I,train_J)),shape=(M,N)).sum(1),mu).flatten())[0] # M x K
    for l in range(L):    
        gammas[1][:,l] = alphas[1][l] + np.array(np.divide(sp.csr_matrix((r2[:,l],(train_I,train_J)),shape=(M,N)).sum(0),mv).transpose().flatten())[0] # N x L
                  
    return alphas, gammas, betas, r
Example no. 27
def second_try(size, nbcolors, board, solution):
	indices1 = board[0][1] 
	essai1 = board[0][0]
	essai2 = [-1] * size
	noirs = indices1.count(2)
	blancs = indices1.count(1)

	for i in xrange(noirs):
		essai2[i] = essai1[i]

	for i in xrange(noirs, noirs + blancs):
		if i + 1 > (size - 1):
			next_indice = noirs
		else:
			next_indice = i + 1
		essai2[next_indice] = essai1[i]

	for a in essai2:
		if a == -1:
			if size < nbcolors:
				essai2[essai2.index(a)] = int((nbcolors - size) * random.random() + size + 1)
			else:
				essai2[essai2.index(a)] = int((nbcolors - 1) * random.random() + 1)

	indices2 = generate_indices(size, nbcolors, essai2, solution)

	return [essai2, indices2];
Example no. 28
    def next_generation(self):
        # limit selection to best fraction of pop
        survivor_count = int(self.survival_rate * self.pop_size)
        self.population = self.population[:survivor_count]
        # get elite and remove from old population
        new_pop = [self.population.pop(0)]
        ind_generator = self.selector(self.population)
        # ind_generator = tournament_selector(self.population)
        while len(new_pop) < self.pop_size:
            pa = ind_generator.next()
            ma = ind_generator.next()
            if nprand.random() < self.xover_rate:
                # get it on!
                new_ind = crossover(pa, ma)
            else:
                # insert ma or pa, unmodified
                if nprand.rand() < .5:
                    new_ind = pa
                else:
                    new_ind = ma
            if nprand.random() < self.mut_rate:
                # mutate new individual
                new_ind = mutate(new_ind)

            new_pop.append(new_ind)
            #steady_state_reinserter(new_ind, self.population, new_pop)

        # update population to new generation
        self.population = new_pop
Example no. 29
 def __init__(self,X,k=20):  
     '''
         k is the length of the latent feature vectors
     '''
     self.X=np.array(X)  
     self.k=k  
     self.ave=np.mean(self.X[:,2])  
     print "the input data size is ",self.X.shape  
     self.bi={}  
     self.bu={}  
     self.qi={}  
     self.pu={}  
     self.movie_user={}  
     self.user_movie={}  
     for i in range(self.X.shape[0]):  
         uid=self.X[i][0]  
         mid=self.X[i][1]  
         rat=self.X[i][2]  
         self.movie_user.setdefault(mid,{})  
         self.user_movie.setdefault(uid,{})  
         self.movie_user[mid][uid]=rat  
         self.user_movie[uid][mid]=rat  
         self.bi.setdefault(mid,0)  
         self.bu.setdefault(uid,0)  
         self.qi.setdefault(mid,random((self.k,1))/10*(np.sqrt(self.k)))  
         self.pu.setdefault(uid,random((self.k,1))/10*(np.sqrt(self.k)))  
Example no. 30
	def _mapCols(e, params):
		from numpy import random
		from disco.core import Params
		m, n = params.m, params.n
		output = []
		if n > 0:
			elems = e.split(",")
			l = range(0, m)
			random.shuffle(l)
			for elem in elems:
				retVal = []
				j = int(elem)
				nnz = m * (1.0 - params.sparsity)
				stepSize = int(m / nnz)
				k = int(random.random() * (m % nnz))
				while k<m:
					i = l[k]
					k += stepSize
					val = params.lb + (params.ub-params.lb) * random.random()
					retVal.append("%d,%d,%.14f" % (i,j,val))
					# break output into tuples so reduce can distribute the load
					if len(retVal) > 1000:
						output += [(";".join(retVal), "")]
						retVal = []
				if len(retVal) > 0:
					output += [(";".join(retVal), "")]
		return output
Example no. 31
#print(audData)
#for p in audData: print p[0]
signalOne = []
signalTwo = []
signalThree = []
signalFour = []
for x in range(5000, 8000):
    #print audData[x][0]
    signalOne.append(audDataOne[x][0])
    signalTwo.append(audDataTwo[x][0])
    signalThree.append(audDataThree[x][0])
    signalFour.append(audDataFour[x][0])

#training_set_inputs = array([[0, 0, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]])
training_set_inputs = array([signalOne, signalTwo, signalFour])

training_set_outputs = array([[1, 0, 1]]).T

random.seed(1)

synaptic_weights = 2 * random.random((3000, 1)) - 1

for iteration in xrange(10000):

    output = 1 / (1 + exp(-(dot(training_set_inputs, synaptic_weights))))

    synaptic_weights += dot(training_set_inputs.T,
                            (training_set_outputs - output) * output *
                            (1 - output))

print 1 / (1 + exp(-(dot(array(signalThree), synaptic_weights))))
Example no. 32
def test_dict(tmpdir):
    for _ in range(100):
        helper({str(idx): round(random(), 5) for idx in range(100)},
               'dict', tmpdir)
Example no. 33
def test_float(tmpdir):
    """floats should be handled correctly."""
    for _ in range(100):
        helper(round(random(), 5), 'float', tmpdir)
Example no. 34
Copyright (C)  lockheedphoenix

"""
from __future__ import division
import graph_tool.all as gt
import numpy.random as npr
import scipy as sp

File = 'RG.xml.gz'
G = gt.load_graph(File)
for k in range(-20,1):
    p = sp.power(10, k/5)
    for e in G.edges():
        if npr.random() < p:
            v = e.source()
            G.remove_edge(e)
            j = npr.randint(0, G.num_vertices())
            while j == G.vertex_index[v] or gt.shortest_distance(G,v,G.vertex(j)) == 1:  # no parallel edges
                j = npr.randint(0, G.num_vertices())

            G.add_edge(v, G.vertex(j))
    G.save(str(p)+'SW.xml.gz')
Example no. 35
trainingLabels = trainingLabels[:320]

# Set up the network parameters
layer = [2, 3, 1]  # number of layers and number of nodes per layer
Lambda = 0.005  # regularisation coefficient
alpha = 0.2  # learning rate
num_passes = 10000  # number of iterations
m = len(trainingSet)  # number of samples

# Build the network
# Each layer's structure is stored in a list, so the number of layers and
# the number of nodes per layer can be set freely
b = []  # biases: len(layer)-1 elements; b[0] is the bias (vector form) of the first hidden layer
W = []
for i in range(len(layer) - 1):
    # W[i] is the transfer matrix (a NumPy array) from layer i to layer i+1;
    # the input layer is layer 0
    W.append(random.random(size=(layer[i + 1], layer[i])))
    b.append(np.array([0.1] * layer[i + 1]))  # bias b[i] has shape 1 x (number of nodes in hidden layer i+1)
a = [np.array(0)] * (len(W) + 1)  # a[0] = x (the input), a[1] = f(z[0]), a[len(W)] = the final output
z = [np.array(0)] * len(W)  # z[0] is the output of the input layer, i.e. the un-activated first hidden layer

W = np.array(W)


def costfunction(predict, labels):
    # Cost function without the regularisation term
    # The inputs are NumPy vectors
    return sum((predict - labels)**2)

Example no. 36
def ApplyDistort(in_img):
    prob = npr.random()
    if prob > 0.5:
        # Do random brightness distortion.
        out_img = RandomBrightness(in_img, cfg.TRAIN.brightness_prob,
                                   cfg.TRAIN.brightness_delta)
        # cv2.imshow('0 RandomBrightness',out_img.astype(np.uint8))

        # Do random contrast distortion.
        out_img = RandomContrast(out_img, cfg.TRAIN.contrast_prob,
                                 cfg.TRAIN.contrast_lower,
                                 cfg.TRAIN.contrast_upper)
        # cv2.imshow('1 RandomContrast',out_img.astype(np.uint8))

        # Do random saturation distortion.
        out_img = RandomSaturation(out_img, cfg.TRAIN.saturation_prob,
                                   cfg.TRAIN.saturation_lower,
                                   cfg.TRAIN.saturation_upper)
        # cv2.imshow('2 RandomSaturation',out_img.astype(np.uint8))

        # Do random exposure distortion.
        out_img = RandomExposure(out_img, cfg.TRAIN.exposure_prob,
                                 cfg.TRAIN.exposure_lower,
                                 cfg.TRAIN.exposure_upper)
        # cv2.imshow('3 RandomExposure',out_img.astype(np.uint8))

        # Do random hue distortion.
        out_img = RandomHue(out_img, cfg.TRAIN.hue_prob, cfg.TRAIN.hue_delta)
        # cv2.imshow('4 RandomHue',out_img.astype(np.uint8))

        # Do random reordering of the channels.
        out_img = RandomOrderChannels(out_img, cfg.TRAIN.random_order_prob)
        # cv2.imshow('5 RandomOrderChannels',out_img.astype(np.uint8))
    else:
        # Do random brightness distortion.
        out_img = RandomBrightness(in_img, cfg.TRAIN.brightness_prob,
                                   cfg.TRAIN.brightness_delta)
        # cv2.imshow('0 RandomBrightness',out_img.astype(np.uint8))

        # Do random saturation distortion.
        out_img = RandomSaturation(out_img, cfg.TRAIN.saturation_prob,
                                   cfg.TRAIN.saturation_lower,
                                   cfg.TRAIN.saturation_upper)
        # cv2.imshow('1 RandomSaturation',out_img.astype(np.uint8))

        # Do random exposure distortion.
        out_img = RandomExposure(out_img, cfg.TRAIN.exposure_prob,
                                 cfg.TRAIN.exposure_lower,
                                 cfg.TRAIN.exposure_upper)
        # cv2.imshow('2 RandomExposure',out_img.astype(np.uint8))

        # Do random hue distortion.
        out_img = RandomHue(out_img, cfg.TRAIN.hue_prob, cfg.TRAIN.hue_delta)
        # cv2.imshow('3 RandomHue',out_img.astype(np.uint8))

        # Do random contrast distortion.
        out_img = RandomContrast(out_img, cfg.TRAIN.contrast_prob,
                                 cfg.TRAIN.contrast_lower,
                                 cfg.TRAIN.contrast_upper)
        # cv2.imshow('4 RandomContrast',out_img.astype(np.uint8))

        # Do random reordering of the channels.
        out_img = RandomOrderChannels(out_img, cfg.TRAIN.random_order_prob)
        # cv2.imshow('5 RandomOrderChannels',out_img.astype(np.uint8))

    return out_img
Example no. 37
def Fluctuation(f, t):
    return np.sin(2*np.pi*(f*t+rd.random()))/np.sqrt(f)
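
# A minimal sketch of how Fluctuation can be used: summing components over a
# frequency grid gives a 1/f-like signal, since each term's power scales as
# 1/f (the grid and time axis below are illustrative assumptions):
import numpy as np

t = np.linspace(0.0, 10.0, 1000)
signal = sum(Fluctuation(f, t) for f in np.arange(1.0, 100.0))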
Example no. 38
 def genetic_operators(population, sample_size, prob_of_mutation=.3):
     return sorted(
         imap(apply, imap(operators.__getitem__, random(sample_size) < prob_of_mutation), repeat((population,))),
         key=fitness_function,
         reverse=True
     )
Example no. 39
# subplots.py
# -------------------------------------------------------------------------
# Create four plots in the same figure.
# -------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import random

t = np.linspace(0, 1, 101)

plt.figure()
plt.subplot(2, 2, 1)
plt.hist(random(20))
plt.subplot(2, 2, 2)
plt.plot(t, t**2, t, t**3 - t)
plt.subplot(2, 2, 3)
plt.plot(random(20), random(20), 'r*')
plt.subplot(2, 2, 4)
plt.plot(t * np.cos(10 * t), t * np.sin(10 * t))
plt.show()
Example no. 40
 def anderson(site):
     if random() * 100 <= c:
         return U * (random() - 0.5)
     return 0.0
Example no. 41
    def __init__(self,
                 f,
                 x0,
                 ranges=None,
                 accelerator=StandardPSO,
                 emax=1e-5,
                 imax=1000):
        '''
        Initializes the optimizer.

        :Parameters:
          f
            A multivariable function to be evaluated. It must receive only one
            parameter, a multidimensional line-vector with the same dimensions
            of the range list (see below) and return a real value, a scalar.

          x0
            A population of first estimates. This is a list, array or tuple of
            one-dimension arrays, each one corresponding to an estimate of the
            position of the minimum. The population size of the algorithm will
            be the same as the number of estimates in this list. Each component
            of the vectors in this list are one of the variables in the function
            to be optimized.

          ranges
            A range of values might be passed to the algorithm, but it is not
            necessary. If this parameter is not supplied, then the ranges will
            be computed from the estimates, but be aware that this might not
            represent the complete search space. If supplied, this parameter
            should be a list of ranges for each variable of the objective
            function. It is specified as a list of tuples of two values,
            ``(x0, x1)``, where ``x0`` is the start of the interval, and ``x1``
            its end. Obviously, ``x0`` should be smaller than ``x1``. It can
            also be given as a list with a simple tuple in the same format. In
            that case, the same range will be applied for every variable in the
            optimization.

          accelerator
            An acceleration method, please consult the documentation on ``acc``
            module. Defaults to StandardPSO, that is, velocities change based on
            local and global bests.

          emax
            Maximum allowed error. The algorithm stops as soon as the error is
            below this level. The error is absolute.

          imax
            Maximum number of iterations, the algorithm stops as soon this
            number of iterations are executed, no matter what the error is at
            the moment.
        '''
        list.__init__(self, [])
        self.__fx = []
        for x in x0:
            x = array(x).ravel()
            self.append(x)
            self.__fx.append(f(x))
        self.__f = f

        # Determine ranges of the variables
        if ranges is None:
            ranges = zip(amin(self, axis=0), amax(self, axis=0))
        else:
            ranges = list(ranges)
            if len(ranges) == 1:
                ranges = array(ranges * len(x0[0]))
            else:
                ranges = array(ranges)
        self.ranges = ranges
        '''Holds the ranges for every variable. Although it is a writable
        property, care should be taken in changing parameters before ending the
        convergence.'''

        # Randomly computes the initial velocities
        s = len(self)
        d = len(x0[0])
        r = self.ranges
        self.__v = (random((s, d)) - 0.5) * (r[:, 1] - r[:, 0]) / 10.

        # Verifies the validity of the acceleration method
        try:
            issubclass(accelerator, Accelerator)
            accelerator = accelerator(self)
        except TypeError:
            pass
        if not isinstance(accelerator, Accelerator):
            raise TypeError, 'not a valid acceleration method'
        else:
            self.__acc = accelerator

        self.__emax = emax
        self.__imax = imax
Example no. 42
    def anneal(self, state, Tmax, Tmin, steps, updates=0):
        """Minimizes the energy of a system by simulated annealing.
        
        Keyword arguments:
        state -- an initial arrangement of the system
        Tmax -- maximum temperature (in units of energy)
        Tmin -- minimum temperature (must be greater than zero)
        steps -- the number of steps requested
        updates -- the number of updates to print during annealing
        
        Returns the best state and energy found."""

        step = 0
        start = time.time()

        def update(T, E, acceptance, improvement):
            """Prints the current temperature, energy, acceptance rate,
            improvement rate, elapsed time, and remaining time.
            
            The acceptance rate indicates the percentage of moves since the last
            update that were accepted by the Metropolis algorithm.  It includes
            moves that decreased the energy, moves that left the energy
            unchanged, and moves that increased the energy yet were reached by
            thermal excitation.
            
            The improvement rate indicates the percentage of moves since the
            last update that strictly decreased the energy.  At high
            temperatures it will include both moves that improved the overall
            state and moves that simply undid previously accepted moves that
            increased the energy by thermal excitation.  At low temperatures
            it will tend toward zero as the moves that can decrease the energy
            are exhausted and moves that would increase the energy are no longer
            thermally accessible."""

            elapsed = time.time() - start
            if step == 0:
                print ' Temperature        Energy    Accept   Improve     Elapsed   Remaining'
                print '%12.2f  %12.2f                      %s            ' % \
                    (T, E, time_string(elapsed) )
            else:
                remain = (steps - step) * (elapsed / step)
                print '%12.2f  %12.2f  %7.2f%%  %7.2f%%  %s  %s' % \
                    (T, E, 100.0*acceptance, 100.0*improvement,
                        time_string(elapsed), time_string(remain))

        # Precompute factor for exponential cooling from Tmax to Tmin
        if Tmin <= 0.0:
            print 'Exponential cooling requires a minimum temperature greater than zero.'
            sys.exit()
        Tfactor = -math.log(float(Tmax) / Tmin)

        # Note initial state
        T = Tmax
        E = self.energy(state)
        #prevState = copy.deepcopy(state)
        prevState = state[:]
        prevEnergy = E
        #bestState = copy.deepcopy(state)
        bestState = state[:]
        bestEnergy = E
        trials, accepts, improves = 0, 0, 0
        if updates > 0:
            updateWavelength = float(steps) / updates
            update(T, E, None, None)

        # Attempt moves to new states
        while step < steps:
            step += 1
            T = Tmax * math.exp(Tfactor * step / float(steps))
            self.move(state)
            E = self.energy(state)
            dE = E - prevEnergy
            trials += 1
            if dE > 0.0 and math.exp(-dE / T) < random.random():
                # Restore previous state
                #state = copy.deepcopy(prevState)
                state = prevState[:]
                E = prevEnergy
            else:
                # Accept new state and compare to best state
                accepts += 1
                if dE < 0.0:
                    improves += 1
                #prevState = copy.deepcopy(state)
                prevState = state[:]
                prevEnergy = E
                if E < bestEnergy:
                    #bestState = copy.deepcopy(state)
                    bestState = state[:]
                    bestEnergy = E
            if updates > 1:
                if step // updateWavelength > (step - 1) // updateWavelength:
                    update(T, E,
                           float(accepts) / trials,
                           float(improves) / trials)
                    trials, accepts, improves = 0, 0, 0

        # Return best state and energy
        return bestState, bestEnergy
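
# A minimal usage sketch. The enclosing class is not shown in this fragment,
# so `Annealer` below is an assumed base class providing the `anneal` method
# above; a concrete problem only has to define `energy` and `move`:
import random

class SwapAnnealer(Annealer):
    def energy(self, state):
        # example energy: total difference between adjacent elements
        return sum(abs(a - b) for a, b in zip(state, state[1:]))
    def move(self, state):
        # mutate the state in place by swapping two random positions
        i, j = random.sample(range(len(state)), 2)
        state[i], state[j] = state[j], state[i]

# best_state, best_e = SwapAnnealer().anneal([3, 1, 4, 1, 5, 9], 10.0, 0.1, 5000)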
Example no. 43
    epsilon = INITIAL_EPSILON
    t = 0
else:  ####### if you continue to run:
    with open('mem.pickle', 'rb') as f:
        (t, epsilon, mem) = pickle.load(f)
    from keras.models import load_model
    model = load_model('model.h5')
##################################

g = jump_API(MIN_MS, MAX_MS, ACTIONS, MASK)  #initialize an API to the game
s_t = g.first_step()

while True:  # start to loop
    print('*********************************')
    print('t=%i,epsilon=%f' % (t, epsilon), end='  ')
    if random.random() <= epsilon:
        print('RANDOM MOVE!')
        a_t = random.choice(ACTIONS)
    else:
        print('Move by model.')
        qs = model.predict(s_t)
        a_t = np.argmax(qs)
    # forward one step
    print('Moving...', end=' ')
    s_t1, r_t, die = g.next_step(a_t)
    print('Done.')

    # save it to memory
    print('=========')
    print('NEW Memory: \na_t=%i,r_t=%i,die=%i' % (a_t, r_t, die))
    if die:
Example no. 44
import pylab
from numpy import zeros, random, arange
import os  #Imports the modules and functions required

m = zeros(10, float)  #Sets up an array of zeros, but as floats instead of integers
for i in range(1000):  #Iterates the code 1000 times - chooses 1000 random floats from 0 - 1
    x = random.random()  #Randomly generates a float between 0 and 1
    if 0.0 <= x and x < 0.1: m[0] = m[0] + 1
    if 0.1 <= x and x < 0.2: m[1] = m[1] + 1
    if 0.2 <= x and x < 0.3: m[2] = m[2] + 1
    if 0.3 <= x and x < 0.4: m[3] = m[3] + 1
    if 0.4 <= x and x < 0.5: m[4] = m[4] + 1
    if 0.5 <= x and x < 0.6: m[5] = m[5] + 1
    if 0.6 <= x and x < 0.7: m[6] = m[6] + 1
    if 0.7 <= x and x < 0.8: m[7] = m[7] + 1
    if 0.8 <= x and x < 0.9: m[8] = m[8] + 1
    if 0.9 <= x and x < 1.0: m[9] = m[9] + 1  #IF statements check which limits the float fits in and add 1 to the appropriate element in the array

print m  #Prints the array full of values of the count of each bin

pylab.bar(arange(0, 1, 0.1), m, width=0.1)  #Plots the bars

pylab.xlabel('Bin Number')
pylab.ylabel('Count')  #Axis labels

pylab.title('Count of each Bin Number')  #Title of graph
Example no. 45
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 19 19:55:30 2018

@author: alfred_mac
"""

import numpy as np
import matplotlib.pyplot as plt
import numpy.linalg as la
import numpy.random as rand
import pFit
import pEval

d = 100
x = 2 * rand.random(d) - 1

# Test 1 - Function is 1/(1+x)
f = 1 / (1 + x)
p = pFit.pFit(x, f)
fp = pEval.pEval(x, p)

# Test 2 - Function is e^x
f = np.exp(x)
p = pFit.pFit(x, f)
fp = pEval.pEval(x, p)

# Test 3 - Function is tan(x^3)
f = np.tan(x**3)
p = pFit.pFit(x, f)
fp = pEval.pEval(x, p)
Example no. 46
            i = i + 1
        return x, e


class PSO(ParticleSwarmOptimizer):
    '''
    PSO is an alias to ``ParticleSwarmOptimizer``
    '''
    pass


################################################################################
# Test
if __name__ == "__main__":

    def f(xy):
        x, y = xy
        return (1 - x)**2 + (y - x * x)**2

    i = 0
    x0 = random((5, 2)) * 2
    #p = ParticleSwarmOptimizer(f, x0, [ (0., 2.), (0., 2.) ])
    p = ParticleSwarmOptimizer(f, x0)
    while p.fbest > 5e-7:
        print p
        print p.best
        print p.fbest
        p.step()
        i = i + 1
        print '-' * 50
    print i, p.best, p.fbest
Example no. 47
import numpy as np
import numpy.random as nr
import matplotlib.pyplot as plt

n = 500000

ls = nr.random(n)
lt = nr.random(n)

lr = []
for x in ls:
    lr.append(np.sqrt(-2 * np.log(x)))

lp = []
for i in lt:
    lp.append(2 * np.pi * i)

#print(lr)
#print(lp)

lx1 = []
lx2 = []
for i in range(n):
    lx1.append(-5.6 + 2.5 * lr[i] * np.cos(lp[i]))
    lx2.append(-5.6 + 2.5 * lr[i] * np.sin(lp[i]))

lx = lx1 + lx2

#print(lx)

hist = []
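
# The loops above implement the Box-Muller transform (pairs of uniforms ->
# normal deviates), here targeting mean -5.6 and sigma 2.5; a vectorised
# sketch of the same computation using the imports of this example:
r = np.sqrt(-2 * np.log(nr.random(n)))
p = 2 * np.pi * nr.random(n)
lx_vec = np.concatenate((-5.6 + 2.5 * r * np.cos(p), -5.6 + 2.5 * r * np.sin(p)))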
Example no. 48
def mytext(x, y, textstr):
    text(x,
         y,
         text=textstr,
         angle=0,
         text_color="#449944",
         text_align="center",
         text_font_size="10pt")


N = 10

hold()

myscatter(random(N) + 2, random(N) + 1, "circle")
myscatter(random(N) + 4, random(N) + 1, "square")
myscatter(random(N) + 6, random(N) + 1, "triangle")
myscatter(random(N) + 8, random(N) + 1, "asterisk")

myscatter(random(N) + 2, random(N) + 4, "circle_x")
myscatter(random(N) + 4, random(N) + 4, "square_x")
myscatter(random(N) + 6, random(N) + 4, "inverted_triangle")
myscatter(random(N) + 8, random(N) + 4, "x")

myscatter(random(N) + 2, random(N) + 7, "circle_cross")
myscatter(random(N) + 4, random(N) + 7, "square_cross")
myscatter(random(N) + 6, random(N) + 7, "diamond")
myscatter(random(N) + 8, random(N) + 7, "cross")

mytext([2.5], [0.5], "circle / o")
Example no. 49
    def blow(self, n, xy):
        a = random(size=n) * TWOPI
        dxy = column_stack((cos(a), sin(a)))

        new_nodes = self._add_nodes(xy)
        self._add_fracs(dxy, new_nodes)
Example no. 50
    colors_dic[counter] = i
    counter += 1

print('********Pixel Canvas********')
print('Colors :')
for i in sorted(colors_dic):
    print(str(i) + ' --> ' + str(colors_dic[i]))

color = int(input('Pick a color : '))
x_dimension = input('Enter x dimension (Default = 10):')
y_dimension = input('Enter y dimension (Default = 10):')
save_file = input('Do you want to save the image? [Y/n] :')


if(x_dimension != '' and y_dimension != ''):
    Z = random.random((int(x_dimension),int(y_dimension)))
else:
    Z = random.random((10,10))
fig = plt.figure(frameon=False)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
if(save_file == 'Y' or save_file == 'y' or save_file == ''):
    file_format = input('Enter file format : ')
    path = input('Enter Path to save file : ')
    dpi = int(input('Enter dpi : '))
    imshow(Z, cmap=get_cmap(colors_dic[color]), interpolation='nearest')
    fig.savefig('{}.{}'.format(path,file_format), format='{}'.format(file_format), dpi=dpi)
    show()
else:
    imshow(Z, cmap=get_cmap(colors_dic[color]), interpolation='nearest')
Example no. 51
 def eval(self, values, x):
     values[0] = 1.0 * random() + 0.25
     values[1] = 1.0 * random() + 0.25
Example no. 52
    def frac_front(self, factor, angle, dbg=False):
        inds = (random(self.anum) < factor).nonzero()[0]

        n = len(inds)
        if n < 1:
            return 0

        cand_aa = self.active[inds, 0]
        cand_ii = self.fid_node[cand_aa, 1]

        num = self.num
        fnum = self.fnum
        anum = self.anum

        xy = self.xy[:num, :]
        visited = self.visited[:num, 0]
        new = arange(fnum, fnum + n)
        orig_dxy = self.dxy[cand_aa, :]

        diff_theta = (-1)**randint(2, size=n) * HPI + (0.5 - random(n)) * angle
        theta = arctan2(orig_dxy[:, 1], orig_dxy[:, 0]) + diff_theta

        fid_node = column_stack((new, cand_ii))
        cand_dxy = column_stack((cos(theta), sin(theta)))

        nactive = arange(n)

        tmp_dxy = self.tmp_dxy[:n, :]

        self.update_zone_map()

        self.cuda_calc_stp(
            npint(self.nz),
            npint(self.zone_leap),
            npint(num),
            npint(n),
            npint(n),
            npfloat(self.frac_dot),
            npfloat(self.frac_dst),
            npfloat(self.frac_stp),
            npint(self.ignore_fracture_sources),
            drv.In(visited),
            drv.In(fid_node),
            drv.In(nactive),
            drv.Out(self.tmp[:n, :]),
            drv.In(xy),
            drv.In(cand_dxy),
            drv.Out(tmp_dxy),
            drv.In(self.zone_num),
            drv.In(self.zone_node),
            block=(self.threads, 1, 1),
            grid=(int(n // self.threads + 1), 1)  # this can't be a numpy int for some reason
        )

        mask = tmp_dxy[:, 0] >= -1.0
        n = mask.sum()

        if n < 1:
            return 0

        nodes = cand_ii[mask]
        self._add_fracs(cand_dxy[mask, :], nodes)

        if dbg:
            self.print_debug(num, fnum, anum, meta='new: {:d}'.format(n))
        return n
Example no. 53
    winnerNo = np.argsort(drawRmses)[0]
    winner = draw[winnerNo]

    # append winnerIdx into mate pool, and delete it from single pool
    try:
        winnerIdx = singlepool.index(winner)
        singlepool = list(np.delete(singlepool, winnerIdx))
    except ValueError:
        raise ValueError('The winner {} is not in pre-defined list: {}'.format(
            winner, singlepool))
    return [winner] + tournament(rmses, singlepool, tagetSize - 1, tourSize)[:]


if __name__ == '__main__':
    random.seed(0)
    rmses = [round(random.random(), 3) for i in range(10)]
    singleNum = len(rmses)

    print 'rms ranking ...'
    for i in range(11):
        print rms_ranking(rmses, range(singleNum), i)

    print '\nlinear ranking ...'
    for i in range(11):
        random.seed(0)
        print linear_ranking(rmses, range(singleNum), i, 1.5)

    print '\ntournament ranking ...'
    for i in range(11):
        random.seed(0)
        print tournament(rmses, range(singleNum), i, 2)
Example no. 54
import numpy.random as random
import numpy as np
from ase import Atoms
from ase.neighborlist import NeighborList
from ase.build import bulk

atoms = Atoms(numbers=range(10),
              cell=[(0.2, 1.2, 1.4), (1.4, 0.1, 1.6), (1.3, 2.0, -0.1)])
atoms.set_scaled_positions(3 * random.random((10, 3)) - 1)


def count(nl, atoms):
    c = np.zeros(len(atoms), int)
    R = atoms.get_positions()
    cell = atoms.get_cell()
    d = 0.0
    for a in range(len(atoms)):
        i, offsets = nl.get_neighbors(a)
        for j in i:
            c[j] += 1
        c[a] += len(i)
        d += (((R[i] + np.dot(offsets, cell) - R[a])**2).sum(1)**0.5).sum()
    return d, c


for sorted in [False, True]:
    for p1 in range(2):
        for p2 in range(2):
            for p3 in range(2):
                # print(p1, p2, p3)
                atoms.set_pbc((p1, p2, p3))
Example no. 55
import numpy as np
import numpy.random as rand

a, b, c = 1, 30, 2

us = rand.random(100 * 1000)
vs = rand.random(100 * 1000)

ths = 2 * np.pi * us
phis = np.arccos(2 * vs - 1)

def r(th, phi):
    return np.sqrt(1 / (np.cos(th)**2*np.sin(phi)**2 / a**2 +
                        np.sin(th)**2*np.sin(phi)**2 / b**2 + 
                        np.cos(phi)**2 / c**2))
    

print 4 * np.pi / 3 * a * b * c
rs = r(ths, phis)
rs = rs**3
print 4 * np.pi / 3 * np.mean(rs)
Example no. 56
 def __init__(self, number_of_neurons, number_of_inputs_per_neuron):
     self.synaptic_weights = 2 * random.random(
         (number_of_inputs_per_neuron, number_of_neurons)) - 1
Example no. 57
encode_input = Reshape((int(adjusted_hd / embed_dim), embed_dim))(net)

# Transformer Block (Output 1D segment prediction tensor)
model = get_model_ne(token_num=unique_segments,
                     embed_dim=embed_dim,
                     encode_input=encode_input,
                     encode_start=acous_input,
                     encoder_num=2,
                     decoder_num=2,
                     head_num=4,
                     hidden_dim=100,
                     attention_activation='relu',
                     feed_forward_activation='relu',
                     dropout_rate=0.05,
                     embed_trainable=True,
                     embed_weights=random((unique_segments, embed_dim)))

reflatten = [item for sublist in collapsed for item in sublist]
class_weights = [1, 1, 1] + class_weight.compute_class_weight(
    'balanced', classes=unique(reflatten), y=reflatten).tolist()


# Custom function to compute sparse categorical cross-entropy with class imbalance
def weighted_loss(weights):
    def loss(y_true, y_pred):
        class_weights = tf.constant(weights)
        loss_weights = tf.gather(class_weights, tf.cast(y_true, 'int32'))
        unweighted_losses = tf.keras.losses.sparse_categorical_crossentropy(
            y_true, y_pred)
        weighted_losses = unweighted_losses * loss_weights[:, :, 0]
        return weighted_losses
#print("Shruti Kaushik Class Exercise Lecture 5")

#Part 1 Create your data:

#2 - Work only with these imports:
import numpy as np
from numpy import matrix, array, min, max, random
from matplotlib import pylab as plb, pyplot as plt
#from math import pi

#3 - create a list A with 600 random numbers bound between (0:10)
#A = random.randint(0,10,600) # my solution will only generate integer numbers
#but we will need to make it float later for avg.

A = list(random.random(600) * 10)  #AIlieve solution
#print(A)

#4 - create an array B with 500 elements bound in the range(-3*pi : 2*pi)
#B = np.random.uniform(-3*plb.pi,2*plb.pi,500) # my solution
B = plb.linspace(-plb.pi * 3, plb.pi * 2, 500)  #AIlieve solution

#print(B)


#5 - using if/for/while loops, create a function that overwrites every element
#in A that falls outside of the interval [2:9] with the average of the
#smallest and largest elements in A
def overwrite_A(A):
    #A = A.astype(float) # convert A to float as the average is a float number
    #print(A)
Example no. 59

if __name__ == "__main__":
    from numpy.random import random

    Ns = list(range(1, 22))
    adolc_times = []
    adolc_taping_times = []
    adolc_num_operations = []
    adolc_num_locations = []

    algopy_times = []

    for N in Ns:
        print('N=', N)
        A = random((N, N))
        #A = array([
        #[0.018 ,0.0085 ,0.017 ,0.017],
        #[0.02 ,0.0042 ,0.0072 ,0.016],
        #[0.006 ,0.012 ,0.01 ,0.014],
        #[0.0078 ,0.011 ,0.02 ,0.02]], dtype= float64)

        # with ADOL-C
        # -----------
        N = shape(A)[0]

        t_start = time()
        aA = array([[adouble(A[n, m]) for m in range(N)] for n in range(N)])

        trace_on(0)
        for n in range(N):
Example no. 60
def main():

    ensure_dir(PATH)

    rds = redis.Redis(host=HOST, port=PORT)

    MC = MultiCanvas(redis.Redis(host=HOST,port=PORT),\
                     CANVAS_SIZE,GRID_SIZE,PATH,BACK)

    X = zeros(NUM, 'float')
    Y = zeros(NUM, 'float')
    SX = zeros(NUM, 'float')
    SY = zeros(NUM, 'float')
    R = zeros((NUM, NUM), 'float')
    A = zeros((NUM, NUM), 'float')
    F = zeros((NUM, NUM), 'byte')
    C = get_colors(COLOR_PATH)

    for i in xrange(NUM):
        the = random() * TWOPI
        x = RAD * sin(the)
        y = RAD * cos(the)
        X[i] = 0.5 + x
        Y[i] = 0.5 + y

    try:

        for itt in xrange(STEPS):

            try:

                set_distances(X, Y, A, R)

                SX[:] = 0.
                SY[:] = 0.

                for i in xrange(NUM):
                    xF = logical_not(F[i, :])
                    d = R[i, :]
                    a = A[i, :]
                    near = d > NEARL
                    near[xF] = False
                    far = d < FARL
                    far[near] = False
                    near[i] = False
                    far[i] = False
                    speed = FARL - d[far]

                    SX[near] += cos(a[near])
                    SY[near] += sin(a[near])
                    SX[far] -= speed * cos(a[far])
                    SY[far] -= speed * sin(a[far])

                X += SX * STP
                Y += SY * STP

                if random() < FRIENDSHIP_INITIATE_PROB:

                    k = randint(NUM)
                    make_friends(k, F, R)

                connections(MC, itt, C, X, Y, F, A, R)

                if not itt % 100:
                    print 'iteration:', itt

                if not itt % DRAW_ITT:
                    if itt > 0:
                        MC.write_all(itt)

            except KeyboardInterrupt:
                MC.stop_now(itt)
                break

    except Exception, e:
        raise