Example #1
def elastic_transform(image, kernel=(11,11), sigma=10, alpha=8):
    # Elastic deformation: smooth a random displacement field, then sample
    # the image at the displaced coordinates with bilinear interpolation.
    import math
    import cv2
    import numpy as np
    from numpy.random import ranf
    # random displacements in [-alpha, alpha), smoothed below
    displacement_field_x = (2 * ranf((image.shape[1], image.shape[0])) - 1) * alpha
    displacement_field_y = (2 * ranf((image.shape[1], image.shape[0])) - 1) * alpha
    displacement_field_x = cv2.GaussianBlur(displacement_field_x, kernel, sigma)
    displacement_field_y = cv2.GaussianBlur(displacement_field_y, kernel, sigma)
    result = np.zeros(image.shape)

    for row in range(image.shape[1]):
        for col in range(image.shape[0]):
            low_ii = row + math.floor(displacement_field_x[row, col])
            high_ii = row + math.ceil(displacement_field_x[row, col])
            
            low_jj = col + math.floor(displacement_field_y[row, col])
            high_jj = col + math.ceil(displacement_field_y[row, col])

            if low_ii < 0 or low_jj < 0 or high_ii >= image.shape[1] - 1 \
               or high_jj >= image.shape[0] - 1:
                continue
            
            # fractional parts are the bilinear interpolation weights
            x = displacement_field_x[row, col] - math.floor(displacement_field_x[row, col])
            y = displacement_field_y[row, col] - math.floor(displacement_field_y[row, col])

            B = np.array([[image[low_ii, low_jj], image[low_ii, high_jj]],
                          [image[high_ii, low_jj], image[high_ii, high_jj]]])
            A = np.array([1 - x, x], dtype=float)
            C = np.array([[1 - y], [y]], dtype=float)

            result[row, col] = np.dot(A, np.dot(B, C))
            
    return result
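
A minimal usage sketch (added here, not from the original source), assuming NumPy and OpenCV are installed and the input is a square grayscale image as a 2-D float array:

import numpy as np

img = np.random.ranf((64, 64)) * 255   # hypothetical test image
warped = elastic_transform(img, kernel=(11, 11), sigma=10, alpha=8)
print(warped.shape)                    # (64, 64)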
Example #2
def ranvec():
    px = ranf() 
    py = ranf()
    pz = ranf()
    mod = (px**2 + py**2 + pz**2)**(0.5)
    px /= mod
    py /= mod
    pz /= mod
    return px, py, pz
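
A quick sanity check (a sketch, not from the source): the result is a unit vector. Note that ranf() draws from [0, 1), so ranvec only samples directions in the positive octant.

from numpy.random import ranf   # import assumed by ranvec

px, py, pz = ranvec()
print(px**2 + py**2 + pz**2)    # ~1.0 up to floating-point rounding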
Example #3
def random_dtw(dimension, lenght_1, lenght_2, distance_for_cdist, nb_try):
    distance_obtained = []
    for i in range(nb_try):
        x = rd.ranf((lenght_1, dimension))
        y = rd.ranf((lenght_2, dimension))
        x = x / x.sum(axis=1)[:, None]
        y = y / y.sum(axis=1)[:, None]

        d = compute_dtw(x, y, dist_for_cdist=distance_for_cdist)
        distance_obtained.append(d)
    return np.mean(np.asarray(distance_obtained))
Example #4
def mnarHH(param, pedlist, target, tp2A, tp2B):
    for ped in pedlist:                                   #for each ped
        for i in range(0,2+ped.getSibNum()):              # for each person
            for j in range(0, param["numMarkers"]):       #   for each marker
                if sum(ped.getGeno(i,j)) == 1.0:          #      if het is in the ped
                    if ranf() < tp2A:                     #         use the target specific parameter
                        ped.makeMissing(i,j)              #         one gender will have more missing data.
                else:
                    if ranf() < tp2B:
                        ped.makeMissing(i,j)
    return(pedlist)
Example #5
def marPop(param, pedlist, target, tp2A, tp2B):
    for ped in pedlist:                                   #for each ped
        for i in range(0,2+ped.getSibNum()):              # for each person
            for j in range(0, param["numMarkers"]):       #   for each marker
                if ped.getPop() == target:            #      if they are in the target:
                    if ranf() < tp2A:                     #         use the target specific parameter
                        ped.makeMissing(i,j)              #         one gender will have more missing data.
                else:
                    if ranf() < tp2B:
                        ped.makeMissing(i,j)
    return(pedlist)
Example #6
def ranortvec(px, py, pz):
    # Subtract the projection onto the (assumed unit) vector (px, py, pz)
    # from a random vector, then normalize: a random orthogonal unit vector.
    qx = ranf()
    qy = ranf()
    qz = ranf()
    dot = qx*px + qy*py + qz*pz
    qx -= dot*px
    qy -= dot*py
    qz -= dot*pz
    mod = (qx**2 + qy**2 + qz**2)**(0.5)
    qx /= mod
    qy /= mod
    qz /= mod
    return qx, qy, qz
Example #7
def CData( N, L, scale = 1, K=-1 ):
    # N = number of data vectors
    # L = length of data vectors
    # create random seeds
    if K==-1:
        K = int( random.rand()*N/20 + N/20)
    seeds = random.ranf( (K,L) )
    # create random data based on deviations from seeds
    data = zeros( (N,L), float )
    for i in range( N ):
        pick = int( random.ranf() * K )
        data[i] = seeds[pick] +scale*(0.5*random.ranf(L)-0.25)
    return data
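
A hedged usage sketch: CData assumes the numpy names random and zeros are in scope, e.g. via the imports below.

from numpy import zeros, random   # names assumed by CData

data = CData(200, 8, scale=0.5)   # 200 noisy copies drawn around K random seeds
print(data.shape)                 # (200, 8)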
Example #8
    def testing_walk_extractor(self,
                               feature_extractor,
                               seed_position,
                               iterations=100,
                               stepsize=4,
                               confined_range=None):
        '''
        Start random walk with seed position.
        Input:
            ECG signal feature extractor.
        Output:
            zip(pos_list, results):
                walk path & predict probability at each position.
        '''
        results = list()
        pos_list = list()
        pos_dict = dict()
        # Benchmarking
        feature_collecting_time_cost = 0
        predict_time_cost = 0

        pos = seed_position
        for pi in xrange(0, iterations):
            # Boundary
            if pos < 0:
                pos = 0
            elif pos >= len(feature_extractor.signal_in):
                pos = len(feature_extractor.signal_in) - 1

            pos_list.append(pos)

            if pos not in pos_dict:
                start_time = time.time()
                feature_vector = np.array(feature_extractor.frompos(pos))
                feature_vector = feature_vector.reshape(1, -1)
                feature_collecting_time_cost += time.time() - start_time
                start_time = time.time()
                value = self.regressor.predict(feature_vector)
                predict_time_cost += time.time() - start_time
                pos_dict[pos] = value
            else:
                value = pos_dict[pos]
            results.append(value)

            # Update next position
            threshold = (value + 1.0) / 2.0
            # threshold = self.sigmod_function(threshold)
            direction = -1.0 if random.ranf() >= threshold else 1.0
            pos += int(direction * stepsize)

            # Next position confined range
            if confined_range is not None:
                if pos > confined_range[1]:
                    pos = confined_range[1]
                elif pos < confined_range[0]:
                    pos = confined_range[0]

        # print 'Feature collecting time cost: %f secs.' % feature_collecting_time_cost
        # print 'Prediction time cost %f seconds.' % predict_time_cost
        return zip(pos_list, results)
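
The position-update rule at the heart of the walk, in isolation (a sketch, not from the source): the regressor's output in [-1, 1] is mapped to the probability of stepping right.

import numpy.random as random

def walk_step(pos, value, stepsize=4):
    threshold = (value + 1.0) / 2.0                          # [-1, 1] -> [0, 1]
    direction = -1.0 if random.ranf() >= threshold else 1.0  # left or right
    return pos + int(direction * stepsize)

print(walk_step(100, 0.8))  # 104 with probability 0.9, otherwise 96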
Example #9
def generate_matrix(size=10):
	mat = []
	for i in range(size):
		line = random.ranf(size)
		line = line/sum(line)
		mat.append(line)
	return mat
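
Each row is normalized to sum to one, so the result is a row-stochastic matrix (a usage sketch, assuming numpy.random is imported as random):

import numpy.random as random   # name assumed by generate_matrix

mat = generate_matrix(4)
print([row.sum() for row in mat])   # [1.0, 1.0, 1.0, 1.0] up to rounding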
Example #10
def uniform_motion(map_width, map_height, posX=None, posY=None, velX=None, velY=None):
    if posX is None or posY is None:
        posX = map_width / 2 + 0.5
        posY = map_height / 2 + 0.5
    if velX is None or velY is None:
        start_velocity = 1.0
        angle = RNG.ranf() * 2 * NP.pi
        velX = start_velocity * NP.cos(angle)
        velY = start_velocity * NP.sin(angle)
    X = posX
    Y = posY
    while True:
        a = yield (X, Y)
        if a is not None:
            return  # raising StopIteration in a generator is an error in Python 3.7+ (PEP 479)
        X += velX
        Y += velY
        while (X < 0) or (X > map_width):
            if X < 0:
                X = -X
                velX = -velX
            if X > map_width:
                X = 2 * map_width - X
                velX = -velX
        while (Y < 0) or (Y > map_height):
            if Y < 0:
                Y = -Y
                velY = -velY
            if Y > map_height:
                Y = 2 * map_height - Y
                velY = -velY
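
Driving the generator (a sketch, not from the source): each next() call advances the position by one velocity step, reflecting off the map edges.

import numpy as NP
import numpy.random as RNG   # names assumed by uniform_motion

walker = uniform_motion(100.0, 100.0)
for _ in range(3):
    print(next(walker))      # starts at the map centre, then drifts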
Example #11
    def skip_test_bma(self):
        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(table_name='dataset',
                            table_data={
                                "id":
                                arange(100) + 1,
                                "attr1":
                                concatenate((random.randint(0, 10, 50),
                                             random.randint(20, 40, 50))),
                                "attr2":
                                random.ranf(100),
                                "outcome":
                                array(50 * [0] + 50 * [1])
                            })

        ds = Dataset(in_storage=storage, in_table_name='dataset', id_name="id")
        specification = EquationSpecification(
            variables=array(["constant", "attr2", "attr1"]),
            coefficients=array(["constant", "ba2", "ba1"]))

        filename = 'bma_output.pdf'
        model = RegressionModel(
            estimate_config={'bma_imageplot_filename': filename})
        model.estimate(specification,
                       ds,
                       "outcome",
                       procedure="opus_core.bma_for_linear_regression_r")
Example #12
def create_samples(m3dprefix, argv):
	# debug
	print "create samples ..."
	# retrieve mass
	mass = float(sys.argv[1])
	# debug
	print "using mass ...", mass, "mmol P m^(-3)"
	# retrieve tracers
	tracers = sys.argv[2:]
	ntracer = len(tracers)	
	# debug
	print "using tracer(s) ...", tracers
	# read data
	lsm, vol = create_samples_read_data()
	# normalize volumes
	volnorm = vol / sum(vol)
	# divide mass by no of tracers
	masstracer = mass / ntracer
	# loop over tracers
	for itracer in range(ntracer):
		# get random values
		val = 0.5 + 0.25 * npr.ranf(vol.shape)
		# scale
		val = masstracer * val / sum(val * volnorm)
		# debug
# 		print val
# 		print min(val), max(val)
# 		print sum(val * volnorm)
		# save vector
		filepath = "init.%s.petsc" % tracers[itracer]
		write_PETSc_vec(val, filepath)
Example #13
    def shootPosAngle(self, n):
        angleList = []
        val = random.ranf(n)
        for v in val:  # loop over the required cosine values
            for i in range(0, FirstOrder.cosTeta.size):
                if self.p_probability[i] >= v:
                    # linear interpolation between neighbouring probability bins
                    deltaX = v - self.p_probability[i]
                    y = self.p_energyBin[i] - self.p_energyBin[i-1]
                    x = self.p_probability[i] - self.p_probability[i-1]
                    angleList.append(deltaX*y/x + self.p_energyBin[i])
                    if deltaX*y/x + self.p_energyBin[i] < -0.999995:
                        print 'Yes, you got it!'
                    if deltaX*y/x + self.p_energyBin[i] > 0.999995:
                        print 'Ups, you got it!'
                    break

        return asarray(angleList)
Example #14
def choosePop(param):
    # choose between the two populations according to the configured proportion
    x = ranf()
    if x < param["popProportions"]:
        return(0)
    else:
        return(1)
Example #15
    def GenerateRandom(cls,
                       nodes_nmb,
                       edges_nmb,
                       capacity_min=0,
                       capacity_max=0):
        """
        Create a graph with the specified numbers of nodes and arcs, connecting random nodes to random nodes.
        The graph contains no self-loops.
        Every arc is assigned a random capacity between capacity_min and capacity_max.
        """

        network_graph = nx.gnm_random_graph(nodes_nmb,
                                            edges_nmb,
                                            directed=True)

        nx.set_edge_attributes(network_graph, {
            edge: np.around(
                (capacity_max - capacity_min) * nrnd.ranf() + capacity_min, 3)
            for edge in nx.edges(network_graph)
        },
                               name='capacity')
        self = cls(network_graph)
        self.capacity_min = capacity_min
        self.capacity_max = capacity_max
        return self
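
The capacity-assignment idiom from GenerateRandom on its own, as a runnable sketch (networkx assumed installed; capacities here in [1, 5)):

import networkx as nx
import numpy as np
import numpy.random as nrnd

g = nx.gnm_random_graph(10, 30, directed=True)
nx.set_edge_attributes(
    g,
    {e: np.around(4 * nrnd.ranf() + 1, 3) for e in nx.edges(g)},
    name='capacity')
print(list(nx.get_edge_attributes(g, 'capacity').items())[:3])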
Example #16
    def GenerateRandomSrcDst(self, pair_nmb):
        """Create specified number of the random tuples: (source, destination). """

        nodes_nmb = nx.number_of_nodes(self.network_graph)
        nodes_array = np.arange(0, nodes_nmb)
        # the number of pairs can't exceed the number of arcs, excluding self loops
        pair_nmb = np.amin([pair_nmb, nodes_nmb * nodes_nmb - nodes_nmb])
        src_dst_list = pair_nmb * [None]

        # repeatedly shuffle the nodes array and pick a pair of nodes from it
        for i in range(len(src_dst_list)):

            # seed with None so the uniqueness loop below runs at least once
            src_dest_cand = None
            src_dst_list[i] = src_dest_cand

            while src_dest_cand in src_dst_list:
                nrnd.shuffle(nodes_array)
                src_node = nodes_array[0]
                dst_node = nodes_array[1]
                src_dest_cand = (src_node, dst_node)
            src_dst_list[i] = src_dest_cand

            #add arc to the graph if it doesn't exist
            if not nx.has_path(self.network_graph, src_node, dst_node):
                nx.add_path(
                    self.network_graph,
                    src_dst_list[i],
                    capacity=np.around(
                        (self.capacity_max - self.capacity_min) * nrnd.ranf() +
                        self.capacity_min, 3))
        self.SetSrcDst(*src_dst_list)
Example #17
def realization(vs,H_XXZ,basis,real):
	"""
	This function computes the entropies for a single disorder realisation.
	--- arguments ---
	vs: vector of ramp speeds
	H_XXZ: time-dep. Heisenberg Hamiltonian with driven zz-interactions
	basis: spin_basis_1d object containing the spin basis
	real: index of the current disorder realisation; used only for timing
	"""
	ti = time() # get start time
	#
	global v # declare ramp speed v a global variable
	#
	seed() # the random number needs to be seeded for each parallel process
	#
	# draw random field uniformly from [-1.0,1.0] for each lattice site
	unscaled_fields=-1+2*ranf((basis.L,))
	# define z-field operator site-coupling list
	h_z=[[unscaled_fields[i],i] for i in range(basis.L)]
	# static list
	disorder_field = [["z",h_z]]
	# compute disordered z-field Hamiltonian
	no_checks={"check_herm":False,"check_pcon":False,"check_symm":False}
	Hz=hamiltonian(disorder_field,[],basis=basis,dtype=np.float64,**no_checks)
	# compute the MBL and ETH Hamiltonians for the same disorder realisation
	H_MBL=H_XXZ+h_MBL*Hz
	H_ETH=H_XXZ+h_ETH*Hz
	#
	### ramp in MBL phase ###
	v=1.0 # reset ramp speed to unity
	# calculate the energy at infinite temperature for initial MBL Hamiltonian
	eigsh_args={"k":2,"which":"BE","maxiter":1E4,"return_eigenvectors":False}
	Emin,Emax=H_MBL.eigsh(time=0.0,**eigsh_args)
	E_inf_temp=(Emax+Emin)/2.0
	# calculate nearest eigenstate to energy at infinite temperature
	E,psi_0=H_MBL.eigsh(time=0.0,k=1,sigma=E_inf_temp,maxiter=1E4)
	psi_0=psi_0.reshape((-1,))
	# calculate the eigensystem of the final MBL hamiltonian
	E_final,V_final=H_MBL.eigh(time=(0.5/vs[-1]))
	# evolve states and calculate entropies in MBL phase
	run_MBL=[_do_ramp(psi_0,H_MBL,basis,v,E_final,V_final) for v in vs]
	run_MBL=np.vstack(run_MBL).T
	#
	###  ramp in ETH phase ###
	v=1.0 # reset ramp speed to unity
	# calculate the energy at infinite temperature for initial ETH hamiltonian
	Emin,Emax=H_ETH.eigsh(time=0.0,**eigsh_args)
	E_inf_temp=(Emax+Emin)/2.0
	# calculate nearest eigenstate to energy at infinite temperature
	E,psi_0=H_ETH.eigsh(time=0.0,k=1,sigma=E_inf_temp,maxiter=1E4)
	psi_0=psi_0.reshape((-1,))
	# calculate the eigensystem of the final ETH hamiltonian
	E_final,V_final=H_ETH.eigh(time=(0.5/vs[-1]))
	# evolve states and calculate entropies in ETH phase
	run_ETH=[_do_ramp(psi_0,H_ETH,basis,v,E_final,V_final) for v in vs]
	run_ETH=np.vstack(run_ETH).T # stack vertically elements of list run_ETH
	# show time taken
	print "realization {0}/{1} took {2:.2f} sec".format(real+1,n_real,time()-ti)
	#
	return run_MBL,run_ETH
Example #18
 def randomHalf(self, n):
     leftCount = 0
     # Loops through each ball in the current position
     for i in range(int(n)):
         if rand.ranf() < 0.5:  # 50% chance of bouncing left
             leftCount += 1     # adds 1 ball to the total balls bouncing left
     return leftCount
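
Counting successes of n fair coin flips is a binomial draw, so the same distribution is available vectorized or in one call (a sketch):

import numpy.random as rand

n = 1000
left = (rand.ranf(n) < 0.5).sum()    # vectorized equivalent of randomHalf(n)
print(left, rand.binomial(n, 0.5))   # both ~ Binomial(n, 0.5)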
Example #19
def genmonster(level):
    if ranf() <= 0.25:
        rang = 3
        kind = "ranged"
    else:
        rang = 1
        kind = "melee"
    return Monster(level=level, r=rang, kind=kind)
Example #20
 def generate_initial_estimates(self):
     # deltas is S-by-K matrix
     k = self.count_data.num_categories
     non_zero_means = (self.count_data.arr > 0).sum(axis=0).astype('float64') / self.count_data.N_all
     deltas = non_zero_means.repeat(self.num_segments).reshape([self.num_segments, k], order='F')
     deltas += (random.ranf(size=deltas.size)*0.05).reshape(deltas.shape)
     deltas[deltas >= 1] = 0.95
     return deltas
Example #21
def genmonster(level):
    if ranf() <= 0.25:
        rang = 3
        kind = "ranged"
    else:
        rang = 1
        kind = "melee"
    return Monster(level=level, r=rang, kind=kind)
Example #22
def map_base_kernel(s, dim, init_hyper_fixed):
    if init_hyper_fixed:
        # TODO: ARD
        # if dim == 1:
        if s == 's':
            k = RBF(dim, lengthscales=1, variance=0.5)
        elif s == 'r':
            k = RQ(dim, lengthscales=1, variance=0.5, alpha=0.5)
        elif s == 'p':
            k = Per(dim, period=1, lengthscales=0.1, variance=0.5)
        else:
            k = Lin(dim, variance=0.5)
        # else:
        #     if s == 's':
        #         k = RBF(dim, lengthscales=1 * np.ones(dim), variance=0.5)
        #     elif s == 'r':
        #         k = RQ(dim, lengthscales=1 * np.ones(dim), variance=0.5, alpha=0.5)
        #     elif s == 'p':
        #         k = Per(dim, period=1, lengthscales=0.1, variance=0.5)
        #     else:
        #         k = Lin(dim, variance=0.5 * np.ones(dim))
    else:
        if dim == 1:
            # this is for reusing hypers of trained models
            if s == 's':
                k = RBF(dim, lengthscales=rnd.ranf() * 5)
            elif s == 'r':
                k = RQ(dim, lengthscales=rnd.ranf() * 5)
            elif s == 'p':
                k = Per(dim,
                        period=rnd.ranf() * 5,
                        lengthscales=rnd.ranf() * 5)
            else:
                k = Lin(dim, variance=rnd.ranf() * 10)
        else:
            if s == 's':
                k = RBF(dim, lengthscales=rnd.ranf(dim) * 5)
            elif s == 'r':
                k = RQ(dim, lengthscales=rnd.ranf(dim) * 5)
            elif s == 'p':
                k = Per(dim,
                        period=rnd.ranf() * 5,
                        lengthscales=rnd.ranf() * 5)
            else:
                k = Lin(dim, variance=rnd.ranf(dim) * 10)
    return k
Example #23
def mcar(param, pedlist):
    percMissing = float(param["missingParam1"])
    for ped in pedlist:
        for i in range(0,2+ped.getSibNum()):
            for j in range(0, param["numMarkers"]):
                # if we roll the dice and it comes up missing, choose a missing var.
                if ranf() < percMissing:
                    ped.makeMissing(i,j)
    return(pedlist)
Example #24
    def _trial(self, person_transition_func, n_days, transition_cdf, transition_cdf_kwargs={}):
        '''Generic trial function of event to create a state transition'''

        transition_performed = False
        if rnd.ranf() < transition_cdf(n_days, **transition_cdf_kwargs):
            person_transition_func()
            transition_performed = True

        return transition_performed
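
The pattern in isolation (a sketch with a hypothetical geometric CDF, not from the source): the transition fires when a uniform draw falls below the CDF evaluated at the elapsed time.

import numpy.random as rnd

def geometric_cdf(n_days, p=0.1):
    # hypothetical CDF: probability the event has occurred by day n_days
    return 1.0 - (1.0 - p) ** n_days

print(rnd.ranf() < geometric_cdf(5))   # True with probability ~0.41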
Example #25
def Test3():
    '''Test case 3: random walk.'''
    qt = QTloader()
    record_name = 'sel46'
    sig = qt.load(record_name)
    raw_sig = sig['sig']

    random_forest_config = dict(max_depth=10)
    walker = RandomWalker(target_label='P',
                          random_forest_config=random_forest_config)

    print 'training...'
    start_time = time.time()
    walker.do_training_on_qt(record_name='sel103')
    print 'training used %.3f seconds' % (time.time() - start_time)

    seed_position = 100000
    import matplotlib.pyplot as plt
    plt.figure(1)
    plt.plot(sig['sig'], label=record_name)
    for ti in xrange(0, 20):
        seed_position += int(200.0 * random.ranf())
        print 'testing...(%d)' % seed_position
        start_time = time.time()
        results = walker.testing_walk(sig['sig'],
                                      seed_position,
                                      iterations=100,
                                      stepsize=10)
        print 'testing used %.3f seconds' % (time.time() - start_time)

        pos_list, values = zip(*results)
        predict_pos = np.mean(pos_list[len(pos_list) / 2:])

        # amp_list = [raw_sig[int(x)] for x in pos_list]
        amp_list = []
        bias = raw_sig[pos_list[0]]
        for pos in pos_list:
            amp_list.append(bias)
            bias -= 0.01

        plt.plot(predict_pos,
                 raw_sig[int(predict_pos)],
                 'ro',
                 markersize=14,
                 label='predict position')
        plt.plot(pos_list,
                 amp_list,
                 'r',
                 label='walk path',
                 markersize=3,
                 linewidth=8,
                 alpha=0.3)
        plt.grid(True)
        plt.xlim(min(pos_list) - 100, max(pos_list) + 100)
        plt.legend()
        plt.show(block=False)
        pdb.set_trace()
Example #26
def rand_unit_complex(shape, ftype='double'):
    """Return random complex values in the [0, 1) interval.

    Parameters
    ----------
    shape : int or sequence of ints
        Output shape. For instance, if the shape is ``(m, n)``, then ``m * n``
        samples are drawn.
    ftype : str or dtype, optional
        Output float type. Choose between 'single', 'double', or 'longdouble'.
        Default is 'double', in which case the output array will be composed of
        128-bit complex floats.

    Returns
    -------
    out : ndarray of complex floats
    """
    from numpy.random import ranf
    return ranf(shape).astype(ftype) + 1j * ranf(shape).astype(ftype)
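
Usage sketch: real and imaginary parts are independent draws from [0, 1).

z = rand_unit_complex((2, 3))
print(z.dtype, z.shape)                       # complex128 (2, 3)
print(rand_unit_complex(4, 'single').dtype)   # complex64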
Example #27
def findFlows(flowModel, flowMeasurements):
    flowGuess = 0.1 + 5.0 * ranf(
        flowModel.nFlows + flowModel.nsched)  # includes timeoffsets
    result = lsq(optFunc,
                 flowGuess,
                 bounds=(flowModel.lowerBounds, flowModel.upperBounds),
                 loss='huber',
                 args=(flowMeasurements, flowModel))
    plotResids = formatResids(flowMeasurements, result.fun)
    return result
Example #28
 def model_PO(self, graph: Graph) -> Union[Graph, GraphView]:
     graph_copy = graph
     if self.save:
         self.graph.save("graph.xml.gz")
         print("stored graph and modeled graph")
         return graph
     if self.parameters["Observable"]["Problem"] == "Total":
         return graph_copy
     elif self.parameters["Observable"]["Problem"] == "Partial":
         # Different methods of partial observability
         if "Sample" in self.parameters["Observable"]:
             # "NProbability" = 0.4 means that each node has a 40% chance to be pruned
             if self.parameters["Observable"]["Sample"]["Nodes"] == "Random":
                 graph_copy = GraphView(graph_copy, vfilt=lambda v: ranf() <= self.parameters["Observable"]["Sample"]["NProbability"])
             # elif self.parameters["Observable"]["Sample"]["Nodes"] == "MaxDeg":
             #     graph_copy = GraphView()
             if self.parameters["Observable"]["Sample"]["Edges"] == "Random":
                 graph_copy = GraphView(graph_copy, efilt=lambda v: ranf() <= self.parameters["Observable"]["Sample"]["EProbability"])
     return graph_copy
Example #29
    def generate_initial_estimates(self):
        N, K = self.data.arr.shape

        # mus is S-by-K matrix
        mus = self.data.arr.mean(axis=0)
        mus = mus.repeat(self.num_segments).reshape([self.num_segments, K], order='F')
        mus += random.ranf(size=mus.size).reshape(mus.shape)

        # sigmas is S-by-K-by-K array
        sigmas = np.eye(K).repeat(self.num_segments).reshape([self.num_segments, K, K], order='F')
        return mus, sigmas
Example #30
def Test1():
    '''Test case 1'''
    qt = QTloader()
    record_name = 'sel103'
    sig = qt.load(record_name)
    raw_sig = sig['sig']
    walker = RandomWalker()
    print 'training...'
    walker.do_training_on_qt(record_name=record_name)

    print 'testing...'
    results = walker.testing_n(sig['sig'], 100)

    left_pos_list = [x[0] for x in results if x[1] <= 0]
    left_value_list = [x[1] for x in results if x[1] <= 0]

    right_pos_list = [x[0] for x in results if x[1] > 0]
    right_value_list = [x[1] for x in results if x[1] > 0]

    import matplotlib.pyplot as plt
    plt.figure(1)
    plt.plot(sig['sig'], label=record_name)
    amp_list = [raw_sig[x] for x in left_pos_list]
    plt.plot(left_pos_list, amp_list, 'r<', label='left', markersize=15)
    # Annotate
    for x, y, score in zip(left_pos_list, amp_list, left_value_list):
        plt.annotate('%.3f' % score,
                     xy=(x, y),
                     xytext=(x + random.ranf() * 2.0 - 1, y + random.ranf()),
                     arrowprops=dict(arrowstyle='->', ))
    amp_list = [raw_sig[x] for x in right_pos_list]
    plt.plot(right_pos_list, amp_list, 'b>', label='right', markersize=15)
    # Annotate
    for x, y, score in zip(right_pos_list, amp_list, right_value_list):
        plt.annotate('%.3f' % score,
                     xy=(x, y),
                     xytext=(x + random.ranf() * 2.0 - 1, y - random.ranf()),
                     arrowprops=dict(arrowstyle='->', ))
    plt.grid(True)
    plt.xlim(100000, 110000)
    plt.show()
Example #31
def TransformTrace (links, fail_links, mttf, mttr, stable, end_time, bootstrap):
  new_trace = []
  ctime = 0.0
  for link in links:
    new_trace.append("%f %s up"%(ctime, link))
  if bootstrap:
    new_trace.append("1.0 compute_and_update") 
  ctime += stable
  up_links = set(links)
  down_links = set()
  set_up_at = defaultdict(list)

  while ctime < end_time: 
    ctime = ctime + mttf * random.ranf()
    set_up = list()
    for t in sorted(set_up_at.keys()):
      if t < ctime:
        for l in set_up_at[t]:
          new_trace.append("%f %s up"%(t, l))
          set_up.append(l)
        del(set_up_at[t])

    to_fail = None
    failable_links = set(fail_links) - set(down_links)

    if len(failable_links) == 0:
      # Nothing to fail
      min_time = sorted(set_up_at.keys())[0]
      ctime = min_time 
    else: 
      to_fail = random.choice(list(failable_links))
      up_links.remove(to_fail)
      down_links.add(to_fail)
      new_trace.append("%f %s down"%(ctime, to_fail))
      recovery_time = random.exponential(mttr)
      assert recovery_time > 0
      set_up_at[ctime + recovery_time].append(to_fail)

    # these links were already logged as up at the top of the loop;
    # only update the bookkeeping sets here
    for l in set_up:
      up_links.add(l)
      down_links.remove(l)

  for t in sorted(set_up_at.keys()):
    for l in set_up_at[t]:
      new_trace.append("%f %s up"%(t, l))
      up_links.add(l)
      down_links.remove(l)
    ctime = t

  # Otherwise end instantly
  new_trace.append("%f end"%end_time)
  return (end_time, new_trace)
Example #32
def ScaleMutation(individual, magnitude = 0.1):

	mutated = np.copy(individual)
	# index of the first zero (padding); fall back to the full length if none
	valid_layers = np.argmax(mutated == 0) or len(mutated)
	sign = 1 if ranf() > 0.5 else -1
	mutated[:valid_layers] += sign * int((np.sum(mutated) * magnitude) // valid_layers)
	# If there are invalid layer sizes (<= 0), avoid a crash by
	# making them the mean of the valid elements.
	if len(mutated[:valid_layers][mutated[:valid_layers] <= 0]) > 0:
		mean = np.mean(mutated[:valid_layers][mutated[:valid_layers] > 0])
		mutated[:valid_layers][mutated[:valid_layers] <= 0] = int(mean)
	return mutated
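
Usage sketch (numpy and ranf assumed imported as in the example): all valid layer sizes are grown or shrunk together by a fraction of their total.

import numpy as np
from numpy.random import ranf   # name assumed by ScaleMutation

ind = np.array([64, 32, 16, 0, 0])   # hypothetical zero-padded layer sizes
print(ScaleMutation(ind, magnitude=0.1))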
Example #33
def InitializePopulation(pop_size, input_size, max_hidden):

	# Matrix of individuals
	pop = np.zeros((pop_size,max_hidden),dtype=np.int32)
	# Initialize their first layer (used as a base for subsequent ones).
	pop[:,0] = randint(input_size,int(input_size*1.75),
							size=pop.shape[0],dtype=np.int32)
	# Choose how many layers each individual will have.
	active_layers = choice(np.arange(1,max_hidden+1),size=pop_size)

	for i in range(pop_size):

		if active_layers[i] > 1:
			middle = int(active_layers[i]/2.0)+1 # Middle layer is the biggest.
			for j in range(1,middle): # Go from smaller to bigger layers.
				pop[i,j] = pop[i,j-1] + pop[i,j-1] * ranf()
			for j in range(middle,active_layers[i]): # From bigger to smaller.
				pop[i,j] = max(int(pop[i,0]/2.0),
							   int(pop[i,j-1] - pop[i,j-1] * ranf()))

	return pop
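
Usage sketch (the function assumes numpy plus randint, choice and ranf from numpy.random):

import numpy as np
from numpy.random import randint, choice, ranf   # names assumed above

pop = InitializePopulation(pop_size=5, input_size=10, max_hidden=4)
print(pop)   # rows of layer sizes rising to a middle peak, zero-padded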
Example #34
    def process(self):
        u = self.data_handler.get_node(self.inputs).get_value()
        for i, val in enumerate(u):
            if val > 0:
                self.active[i] = True
            u[i] = 0 if self.active[i] else val

        out = np.zeros([self.n_extruders, self.n_sensors])

        for e in range(self.n_extruders):
            for s in range(self.n_sensors):
                out[e,
                    s] = self.x1[e] + self.ambient_temp + (ranf() - 0.5) * 0.5

            self.x3[e] = (self.alpha *
                          (u[e] - self.ambient_temp - self.x1[e]) -
                          self.beta * self.x2[e]) * self.dt
            self.x2[e] += self.x3[e]
            self.x1[e] += self.x2[e] + (ranf() - 0.5)

        self.data_handler.get_node(self.outputs).set_value(out.tolist())
Example #35
    def set_up(g, params) -> None:
        weight = g.new_edge_property("double")
        weight.set_value(1.0)
        g.edge_properties["weight"] = weight

        threshold = g.new_vertex_property("double", vals=ranf(len(g.get_vertices())))
        g.vertex_properties["threshold"] = threshold

        for v in g.vertices():
            for e in v.out_edges():
                g.edge_properties["weight"][e] = 1 / v.out_degree() if 1 / v.out_degree() < g.edge_properties["weight"][e] else g.edge_properties["weight"][e]
        return g
Example #36
def callPheno(peds, params):
    for ped in peds:
        for j in range(0, ped.getSibNum()):
            thisPheno = ped.getSibPheno(j)
            prob = min(1.0,exp(thisPheno))  # cap the probability
            if params['modelType'] == 'random':
                ped.setSibPhenoCall(j, float(random.randint(1,3,1)[0]))
            else:
                if ranf() < prob:
                    ped.setSibPhenoCall(j, 2.0)
                else:
                    ped.setSibPhenoCall(j, 1.0)
    return()
Example #37
 def genOneMarker(self, param, i):
     probs = param["markerFreqs"]
     popProbs = probs[self.popStr]
     y = ranf(2)
     if y[0] < popProbs[i]:
         a = 1
     else:
         a = 0
     if y[1] < popProbs[i]:
         b = 1
     else:
         b = 0
     return(a,b)
Example #38
def main(msg):
  print 'Probability:', PROBABILITY
  print

  while True:
    for addr in RECIPIENTS:
      A = random.ranf()
      if A < PROBABILITY:
        print
        print '%s : Sending to "%s"' % (time.strftime(DATE_FORMAT), addr),
        sendStopTo((addr,))
    sys.stdout.write('.')
    sys.stdout.flush()
    time.sleep(TIMESTEP)
Example #39
 def attack(self,atk,defend):
     """"""
     if ranf() <= atk.accuracy:
         hit = atk.attack - defend.defense
         if type(atk) == Player and self.player.equipped["Weapon"].stat.get("backstab",0) == 1:
             hit = atk.attack*3
         if ranf < 0.05:
             hit = hit*2
         if hit < 0:
             hit = 0
         defend.hp -= hit
         self.outputfunc(" A Hit! {} has {} hp remaining".format(defend.name,defend.hp))
     else:
         self.outputfunc("{} missed!".format(atk.name))
Example #40
def create_fake_posterior_gram(same, randomi, long, mult_compare_to_noise,
                               lenght_1, lenght_2, dimension):
    if lenght_1 > lenght_2:
        lenght_1, lenght_2 = lenght_2, lenght_1

    if same:
        to_activate = np.random.randint(dimension, size=lenght_1)
        x = rd.ranf((lenght_1, dimension))
        y = rd.ranf((lenght_1, dimension))

        for i in range(lenght_1):
            x[i, to_activate[i]] = mult_compare_to_noise
            y[i, to_activate[i]] = mult_compare_to_noise

        x = x / x.sum(axis=1)[:, None]
        y = y / y.sum(axis=1)[:, None]
        return x, y
    if randomi:
        to_activate_1 = np.random.randint(dimension, size=lenght_1)
        to_activate_2 = np.random.randint(dimension, size=lenght_2)
        x = rd.ranf((lenght_1, dimension))
        y = rd.ranf((lenght_2, dimension))

        for i in range(lenght_1):
            x[i, to_activate_1[i]] = mult_compare_to_noise
        for i in range(lenght_2):
            y[i, to_activate_2[i]] = mult_compare_to_noise

        x = x / x.sum(axis=1)[:, None]
        y = y / y.sum(axis=1)[:, None]
        return x, y
    if long:
        to_activate = np.random.randint(dimension, size=lenght_1)
        x = rd.ranf((lenght_1, dimension))
        y = rd.ranf((lenght_2, dimension))
        for i in range(lenght_1):
            x[i, to_activate[i]] = mult_compare_to_noise
        iter = 0
        to_copy = 0
        to_double = [
            random.randint(0, lenght_1 - 1) for i in range(lenght_2 - lenght_1)
        ]
        to_double = sorted(to_double)
        begin = 0
        while iter < lenght_2:
            if to_copy in to_double[begin:]:
                y[iter, :] = x[to_copy, :]
                begin += 1
                iter += 1
            else:
                y[iter, :] = x[to_copy, :]
                to_copy += 1
                iter += 1

        x = x / x.sum(axis=1)[:, None]
        y = y / y.sum(axis=1)[:, None]
        return x, y
Example #41
def simul_gauss(x_sample, y_sample, GN=20):
   # simulate the gaussians:

   gaussians = deque()
   for i in xrange(GN):
       x = rd.randint(1, M-1)
       y = rd.randint(1, M-1)
    
       dx = sqrt(M) * rd.ranf()
       dy = sqrt(M) * rd.ranf()
       
       align = 0e0 #pi * rd.ranf() 
       
       gaussians.append([[x, y], [dx, dy], align])

   theta_zero = ones(N)
       
   for m, d, theta in gaussians:
       print(m, d, theta)
       x = rd.normal(m[0], d[0], N)
       y = rd.normal(m[1], d[1], N)

       if theta != 0e0:
          print(theta_zero[x < m[0]].shape, theta_zero[x >= m[0]].shape)
          vecl = sqrt((x - m[0])**2 + (y - m[1])**2)
          theta_zero[x < m[0]]  = 2e0*pi - arccos((x - m[0])/vecl)[x < m[0]]
          theta_zero[x >= m[0]] = arccos((x - m[0])/vecl)[x >= m[0]]
          theta = theta + theta_zero

          x_sample.append( vecl * cos(theta) + m[0])
          y_sample.append( vecl * sin(theta) + m[1])
       
       else:
          x_sample.append(x)
          y_sample.append(y)

   return x_sample, y_sample
Example #42
def realization(H_Heis, psi_0, basis, diag_op, times, real):
    """
        This function computes the imbalance for a single disorder realisation.
        --- arguments ---
        H_Heis: static Heisenberg Hamiltonian
        psi_0: initial state
        basis: spin_basis_1d object containing the spin basis
        diag_op: diagonal operator used to measure the imbalance
        times: times at which the evolved state is evaluated
        real: index of the current disorder realisation; used only for timing
        """
    ti = time()  # get start time
    #
    seed()  # the random number needs to be seeded for each parallel process
    #
    # draw random field uniformly from [-1.0,1.0] for each lattice site
    unscaled_fields = -1 + 2 * ranf((basis.L, ))
    # define z-field operator site-coupling list
    h_z = [[unscaled_fields[i], i] for i in range(basis.L)]
    # static list
    disorder_field = [["z", h_z]]
    # compute disordered z-field Hamiltonian
    no_checks = {"check_herm": False, "check_pcon": False, "check_symm": False}
    Hz = hamiltonian(disorder_field, [],
                     basis=basis,
                     dtype=np.float64,
                     **no_checks)
    # compute the MBL and ETH Hamiltonians for the same disorder realisation
    H_MBL = H_Heis + h_MBL * Hz
    #
    expO = exp_op(H_MBL)
    psi_t = psi_0
    time_old = 0
    imb = []
    #        Sent=[]
    #        sub_sizes=[1,2,3]
    for a_time in times:
        expO.set_a(-1j * (a_time - time_old))
        time_old = a_time
        psi_t = expO.dot(psi_t)
        imb.append(np.einsum('i,i,i', np.conj(psi_t), diag_op, psi_t))
#                Sent.append([map(lambda x: ent_entropy(psi_t,basis,chain_subsys=range(x,x+size))["Sent"],range(basis.L-size+1)) for size in sub_sizes])
#       imb_at_t = lambda time: diag_op_exp(expO,psi_0,diag_op,time)
#       imb = list(map(imb_at_t,times))
# show time taken
    print("realization {0}/{1} took {2:.2f} sec".format(
        real + 1, n_real,
        time() - ti))
    #
    return np.real(imb)
Example #43
def TransformTrace (trace, mean, stable):
  new_trace = []
  ctime = stable
  for t in trace:
    parts = t.strip().split()
    if parts[-1] == 'up' or parts[-1] == 'down':
      if len(parts) == 3:
        new_trace.append(t.strip())
      else:
        ctime = ctime + mean * random.ranf()
        new_trace.append("%f %s"%(ctime, t.strip()))
    elif parts[-1] == 'end':
      ctime = ctime + float(parts[0])
      new_trace.append("%f end"%ctime)
  return (ctime, new_trace)
Example #44
def hyperbolic_motion(map_width, map_height, posX=None, posY=None, velX=None, velY=None, accX=None, accY=None):
    if posX is None or posY is None:
        posX = map_width / 2 + 0.5
        posY = map_height / 2 + 0.5
    if velX is None or velY is None:
        start_velocity = 1.0
        angle = RNG.ranf() * 2 * NP.pi
        velX = start_velocity * NP.cos(angle)
        velY = start_velocity * NP.sin(angle)
    if accX is None or accY is None:
        accX = (NP.floor(RNG.ranf() * 5) - 2.0) / 10.0
        accY = 0
    X = posX
    Y = posY
    while True:
        a = yield (X, Y)
        if a is not None:
            return  # raising StopIteration in a generator is an error in Python 3.7+ (PEP 479)
        X += velX
        Y += velY
        velX += accX
        velY += accY
        while (X < 0) or (X > map_width):
            if X < 0:
                X = -X
                velX = -velX
            if X > map_width:
                X = 2 * map_width - X
                velX = -velX
        while (Y < 0) or (Y > map_height):
            if Y < 0:
                Y = -Y
                velY = -velY
            if Y > map_height:
                Y = 2 * map_height - Y
                velY = -velY
Example #45
def UniformCrossover(parent1, parent2, max_features, prob=0.5):

    offspring = np.copy(parent1)
    for i in range(len(parent1)):
        if parent1[i] != parent2[i]:
            offspring[i] = parent1[i] if ranf() <= prob else parent2[i]

    offspring_ones = offspring[offspring > 0]
    if len(offspring_ones) > max_features:
        offspring_ones[choice(len(offspring_ones),
                              replace=False,
                              size=len(offspring_ones) - max_features)] = 0
        offspring[offspring > 0] = offspring_ones
    elif len(offspring_ones) < 1:  # For this rather unlikely situation,
        offspring = np.copy(parent2)  # just avoid increasing the running time.
    return offspring
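
Usage sketch with boolean feature masks (numpy plus ranf and choice from numpy.random assumed, as in the example):

import numpy as np
from numpy.random import ranf, choice   # names assumed by UniformCrossover

p1 = np.array([1, 0, 1, 1, 0], dtype=bool)
p2 = np.array([0, 1, 1, 0, 0], dtype=bool)
child = UniformCrossover(p1, p2, max_features=2)
print(child, child.sum() <= 2)   # at most max_features features survive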
Example #46
 def attack(self, atk, defend):
     """"""
     if ranf() <= atk.accuracy:
         hit = atk.attack - defend.defense
         if type(atk) == Player and self.player.equipped["Weapon"].stat.get(
                 "backstab", 0) == 1:
             hit = atk.attack * 3
         if ranf < 0.05:
             hit = hit * 2
         if hit < 0:
             hit = 0
         defend.hp -= hit
         self.outputfunc(" A Hit! {} has {} hp remaining".format(
             defend.name, defend.hp))
     else:
         self.outputfunc("{} missed!".format(atk.name))
Example #47
def CreateOffspring(parents,
                    crossover,
                    mutation,
                    max_features,
                    crossover_prob=0.9,
                    mutation_prob=0.1):

    n_crossovers = round(parents.shape[0] * crossover_prob)
    offspring = np.empty((n_crossovers, parents.shape[1]), dtype=bool)
    for n in range(n_crossovers):
        p1, p2 = choice(parents.shape[0], replace=False, size=2)
        offspring[n] = crossover(parents[p1], parents[p2], max_features)
        if ranf() <= mutation_prob:
            offspring[n] = mutation(offspring[n], max_features)

    return offspring
Example #48
def float_generator_func(min_val=0,max_val=100,shape=None):
    """Generate random float numbers

    Generate a random float number in the range of min_val and max_val.

    Args:
        min_val (float): lower bound for random numbers
        max_val (float): upper bound for random numbers
        shape (int or tuple, optional): shape of each yielded array; a scalar
            is yielded if None

    Yields:
        float or ndarray: random number(s) in [min_val, max_val)

    """
    delta = max_val-min_val

    while True:
        yield min_val+delta*random.ranf(size=shape)
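
Usage sketch: the generator yields a fresh draw on every next(); with a shape it yields arrays.

import numpy.random as random   # name assumed by float_generator_func

gen = float_generator_func(0, 10, shape=3)
print(next(gen))   # e.g. array([4.1, 9.7, 0.3]): three floats in [0, 10)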
Example #49
 def _simulate(self, t) -> Graph:
     g = self.graph
     g_ = GraphView(g,
                    vfilt=lambda v: g.vertex_properties["active"][v] == t)
     while g_.num_vertices() != 0:
         for n in g_.vertices():
             # Go through the neighbours
             v = g.vertex(n)
             for e_neighbour, n_neighbour in zip(v.out_edges(),
                                                 v.out_neighbors()):
                 # Check if the node is activated and make sure not to give it a second life
                 if g.edge_properties["weight"][e_neighbour] >= ranf() \
                         and g.vertex_properties["active"][n_neighbour] == 0:
                     g.vertex_properties["active"][n_neighbour] = t + 1
         t += 1
         g_ = GraphView(
             g, vfilt=lambda v: g.vertex_properties["active"][v] == t)
     return g
Example #50
 def __init__(self, x_start, y_start, generation_number=-1, behavior_graph=None, name=None):
     super().__init__(x_start, y_start)
     # TODO: Create some kind of individual-level memory
     # TODO: Create bot fields that can control signal movement, size, and investment
     self.behavior = behavior_graph
     self.speed = 1
     self.child_investment = 600
     self.max_age = 5000
     self.peak_energy = 0
     self.generation_number = generation_number
     self.target_point = x_start, y_start
     self.signal = None
     self.signal_direction = ranf()*2*math.pi
     self.message_signal_type = 0
     Bot.counter += 1
     if name is None:
         self.name = "Bot_" + str(Bot.counter)
     else:
         self.name = name
Example #51
def TransformTrace (links, fail_links, mttf, mttr, stable, end_time, bootstrap):
  new_trace = []
  ctime = 0.0
  for link in links:
    new_trace.append("%f %s up"%(ctime, link))
  if bootstrap:
    new_trace.append("1.0 compute_and_update") 
  ctime += stable
  up_links = set(links)
  down_links = set()
  set_up_at = defaultdict(list)

  while ctime < end_time: 
    ctime = ctime + mttf * random.ranf()
    recovery_time = random.exponential(mttr)
    new_trace.append("%f down_active %f"%(ctime, recovery_time)) 
  # Otherwise end instantly
  new_trace.append("%f end"%end_time)
  return (end_time, new_trace)
Example #52
 def estimate(test,pars,X,Y,N):
     ''' Estimate the covariance of the region allowed by test(z, *pars):
     draw points from the box defined by X and Y until N points pass
     the test.
     '''
     N = int(N)
     Z = np.empty((N,2))
     i = 0
     while i<N:
         # draw N trial points uniformly in the box
         z = nr.ranf(2*N).reshape((N,2))*[X[1]-X[0],Y[1]-Y[0]] + [X[0],Y[0]]
         t = test(z,*pars) # t is a vector of N Boolean values
         for j in xrange(N):
             if t[j]:
                 Z[i,:] = z[j,:]
                 i += 1
                 if i >= N:
                     break
     return np.dot(Z.T,Z)/N
Example #53
def noise_padding(x,dt,Tpad,amp=1e-8):
    """
    Adds random noise at the beginning of a timeseries
    to avoid division by zero in rmean.
    """

    # Create the random noise padding
    Npad=int(Tpad/dt)
    padding=zeros(Npad)
    for i in range(Npad):
        padding[i]=(ranf() - 0.5) * amp


    # insert the padding before the timeseries
    Nxx=len(x) + Npad
    xx=empty(Nxx)
    xx[0:Npad]=padding
    xx[Npad:]=x

    return(xx)
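
Usage sketch (the function assumes zeros, empty and ranf in scope, as below):

from numpy import zeros, empty     # names assumed by noise_padding
from numpy.random import ranf

x = zeros(100) + 1.0               # a flat hypothetical trace
xx = noise_padding(x, dt=0.5, Tpad=5.0)
print(len(xx))                     # 100 + int(5.0 / 0.5) = 110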
Example #54
def TransformTrace (links, fail_links, nfailures, mttf, mttr, stable, end_time):
  new_trace = []
  ctime = 0.0
  for link in links:
    new_trace.append("%f %s up"%(ctime, link))
  ctime += stable
  up_links = set(links)
  set_up_at = defaultdict(list)
  for fail in xrange(nfailures):
    ctime = ctime + mttf * random.ranf()
    set_up = list()
    for t in sorted(set_up_at.keys()):
      if t < ctime:
        for l in set_up_at[t]:
          new_trace.append("%f %s up"%(t, l))
          set_up.append(l)
        del(set_up_at[t])
    if len(up_links) == 0:
      # Nothing to fail
      continue
    while True:
      to_fail = random.choice(list(fail_links))
      if to_fail in up_links:
        up_links.remove(to_fail)
        break
    new_trace.append("%f %s down"%(ctime, to_fail))
    recovery_time = random.exponential(mttr)
    assert recovery_time > 0
    set_up_at[ctime + recovery_time].append(to_fail)
    for l in set_up:
      up_links.add(l)
  for t in sorted(set_up_at.keys()):
    for l in set_up_at[t]:
      new_trace.append("%f %s up"%(t, l))
      up_links.add(l)
    ctime = t
  if ctime < end_time:
    ctime = end_time
  # Otherwise end instantly
  new_trace.append("%f end"%ctime)
  return (ctime, new_trace)
Example #55
    def skip_test_bma(self):
        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(
            table_name='dataset',
            table_data={
                "id":arange(100)+1,
                "attr1":concatenate((random.randint(0,10, 50), random.randint(20,40, 50))),
                "attr2":random.ranf(100),
                "outcome": array(50*[0]+50*[1])
                }
            )

        ds = Dataset(in_storage=storage, in_table_name='dataset', id_name="id")
        specification = EquationSpecification(
                          variables=array(["constant", "attr2", "attr1"]),
                          coefficients=array(["constant", "ba2", "ba1"]))

        filename = 'bma_output.pdf'
        model = RegressionModel(estimate_config={'bma_imageplot_filename': filename})
        model.estimate(specification, ds, "outcome", procedure="opus_core.bma_for_linear_regression_r")
Example #56
def rpca_learn(datamat,dicts=500,learnstep=1000,iterperstep=50,stepdict=None,stepcode=None,l1_recon=1.0,l1_code=1.0,l2_dict=1.0,eps=1e-10):
    "Robust PCA算法字典学习(离线)"
    dictshape = (dicts,datamat.shape[1])
    if stepdict is None:
        stepdict = npr.ranf(dictshape)
    codeshape = (datamat.shape[0],dicts)
    CODELEN = np.prod(codeshape)
    if stepcode is None:
        stepcode = np.zeros(codeshape)

    def fmin(code_dict):
        code = code_dict[:CODELEN].reshape(codeshape)
        idict = code_dict[CODELEN:].reshape(dictshape)
        recons = dot(code,idict)
        error = recons - datamat

        loss_recon = np.sqrt(error * error + eps)
        loss_code = np.sqrt(code * code + eps)
        loss_dict = idict * idict
        loss = l1_recon * loss_recon.sum() + l1_code * loss_code.sum() + l2_dict * loss_dict.sum()

        grad_recon = l1_recon * error / loss_recon
        grad_code = l1_code * code / loss_code
        grad_dict = l2_dict * idict * 2

        grad_code_2 = dot(grad_recon,idict.T)
        grad_dict_2 = dot(code.T,grad_recon)

        grad_code = grad_code + grad_code_2
        grad_dict = grad_dict + grad_dict_2
        print loss_recon.sum(), loss_code.sum(), loss_dict.sum(), loss
        return loss, np.concatenate((grad_code.flatten(),grad_dict.flatten()))

    for itx in range(learnstep):
        print "ITERATION",itx
        mres = minimize(fmin, np.concatenate((stepcode.flatten(),stepdict.flatten())), method='L-BFGS-B', jac=True, options={'maxiter':iterperstep, 'disp':False})
        stepcode = mres.x[:CODELEN].reshape(codeshape)
        stepdict = mres.x[CODELEN:].reshape(dictshape)

        yield stepdict, stepcode
Example #57
    def shootPosAngle(self, n):
        """
        cosine of positron angel generator
        :param n: number of generated angles
        :return: numpy.array of cos positron angels
        """
        angleList = []
        val = random.ranf(n)
        for v in val:  # loop over the required cosine values
            for i in range(0, FirstOrder.cosTeta.size):
                if self.p_probability[i] >= v:
                    deltaX = v - self.p_probability[i]
                    y = self.p_energyBin[i] - self.p_energyBin[i - 1]
                    x = self.p_probability[i] - self.p_probability[i - 1]
                    angleList.append(deltaX * y / x + self.p_energyBin[i])
                    if deltaX * y / x + self.p_energyBin[i] < -0.999995:
                        print('Yes, you got it!')
                    if deltaX * y / x + self.p_energyBin[i] > 0.999995:
                        print('Ups, you got it!')
                    break

        return asarray(angleList)
Example #58
def point_generator(n = 10, bound = 5.0):
    plist = []
    for i in range(n):
        plist.append(point(rd.ranf() * bound, rd.ranf() * bound))
    return plist
Example #59
import os
import numpy as np
import numpy.fft as fft

whiteconfs = {}  # cache of whitening parameters, keyed by name

def white(arr4d, cachename):
    # NOTE: the head of this function is truncated in the source; the
    # signature is reconstructed from the call `white(d, 'test')` below.
    subterm = np.mean(arr4d,(2,3),keepdims = True)
    divterm = 1.0/(arr4d.std(axis=(2,3),keepdims=True)+1e-10)
    arr4d = (arr4d - subterm) * divterm
    farr4d = fft.fft2(arr4d)
    fftterm = 1.0/(np.sqrt(np.mean(np.abs(farr4d)**2,axis=0))+1e-10)
    farr4d = farr4d * fftterm
    arr4d = np.real(fft.ifft2(farr4d))
    np.savez(cachename+'.npz',sub=subterm,div=divterm,fft=fftterm)
    whiteconfs[cachename] = [subterm,divterm,fftterm]
    return arr4d

def unwhite(arr4d, cachename):
    assert cachename in whiteconfs
    subterm, divterm, fftterm = whiteconfs[cachename]
    farr4d = fft.fft2(arr4d)
    farr4d = farr4d / fftterm
    arr4d = np.real(fft.ifft2(farr4d))
    return arr4d

if __name__=="__main__":
    import numpy.random as npr
    d = npr.ranf((10,1,20,20))
    dw = white(d,'test')
    d2 = unwhite(dw,'test')
    os.unlink('test.npz')
    print(d)
    print(dw)
    print(np.sum(np.abs(d2-d)))