def get_distance(n, balloon, offset, Leader, var_measurement, rssiAll, gps, gpsAll):
    distance_matrix = np.zeros((n, n))
    sigma = np.zeros((n, n))
    for i in np.arange(start=0, stop=n, step=1):
        if i == Leader:
            X = np.array([balloon[i].X, balloon[i].Y]) + offset
        else:
            X = np.array([balloon[i].X, balloon[i].Y])
        for jj in np.arange(start=i, stop=n, step=1):
            Y = np.array([balloon[jj].X, balloon[jj].Y])
            if i in GlobalVals.REAL_BALLOON_LIST and jj in GlobalVals.REAL_BALLOON_LIST:
                distance_matrix[i, jj] = distance2D([gps, gpsAll[jj], GlobalVals.GPS_REF, rssiAll[i][jj].distance])
                sigma[i, jj] = var_measurement
                sigma[jj, i] = var_measurement
            else:
                # CHECK THIS!! SHOULD WE ADD NOISE TO MEASUREMENT?
                # (the original also stored the same expression in an unused
                # `test` variable, recomputing the noise; removed here so the
                # stored distance is the only draw)
                distance_matrix[i, jj] = np.linalg.norm(X - Y) + random.uniform(-10, 10)
                sigma[i, jj] = 5
                sigma[jj, i] = 5
            distance_matrix[jj, i] = distance_matrix[i, jj]
    return distance_matrix, sigma
def task_5():
    def f(x):
        #return exp(cos(x[0] ** 2) ** 2 - sin(x[1]) ** 2)
        return -(sin(x[0])**2 + cos(x[1])**2) / (5 + x[0]**2 + x[1]**2)

    def neg_f(x):
        return -f(x)

    x0 = (1, 1)
    x_min = fmin(neg_f, x0)
    print(x_min)
    delta = 4
    x_knots = linspace(x_min[0] - delta, x_min[0] + delta, 41)
    y_knots = linspace(x_min[1] - delta, x_min[1] + delta, 41)
    X, Y = meshgrid(x_knots, y_knots)
    Z = zeros(X.shape)
    for i in range(Z.shape[0]):
        for j in range(Z.shape[1]):
            Z[i][j] = f([X[i, j], Y[i, j]])
    # note: despite the "max" names, this tracks the smallest value found
    tab_max = x0
    value_max = f(x_min)
    for i in range(100):
        xa = [random.uniform(-3, 3), random.uniform(-3, 3)]
        print("a", xa)
        s = fmin(f, xa)
        print("s", s)
        z = f(s)
        if z < value_max:
            tab_max = s
            value_max = z
    print(tab_max)
    print(value_max)
    ax = Axes3D(figure(figsize=(8, 5)))
    ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.4)
    ax.plot([x0[0]], [x0[1]], [f(x0)], color='g', marker='o', markersize=5, label='initial')
    ax.plot([x_min[0]], [x_min[1]], [f(x_min)], color='b', marker='o', markersize=5, label='final')
    ax.plot([tab_max[0]], [tab_max[1]], [f(tab_max)], color='r', marker='o', markersize=10, label='best')
    ax.legend()
    show()
def test_regress(x):
    # note: the x argument is immediately overwritten below
    stats = importr('stats')
    x = random.uniform(0, 1, 100).reshape([100, 1])
    y = 1 + x + random.uniform(0, 1, 100).reshape([100, 1])
    x_in_r = create_r_matrix(x, x.shape[1])
    y_in_r = create_r_matrix(y, y.shape[1])
    formula = robjects.Formula('y~x')
    env = formula.environment
    env['x'] = x_in_r
    env['y'] = y_in_r
    fit = stats.lm(formula)
    coeffs = stats.coef(fit)
    resids = stats.residuals(fit)
    fitted_vals = stats.fitted(fit)
    modsum = base.summary(fit)
    rsquared = modsum.rx2('r.squared')
    se = modsum.rx2('coefficients')[2:4]
    print("coeffs:", coeffs)
    print("resids:", resids)
    print("fitted_vals:", fitted_vals)
    print("rsquared:", rsquared)
    print("se:", se)
    return (coeffs, resids, fitted_vals, rsquared, se)
def test_multigibbs():
    d = 18
    Mu = random.uniform(-9, 9, size=d)
    A = random.uniform(-33, 33, size=(d, d))
    A = dot(A, A.T)  # crap way of making a positive semidef matrix
    n = 99999
    Sd = multinormal(n, Mu, A)
    j = d // 3 + 1
    Sg = multigibbs(n, Mu, A, j=j)
    try:
        # these should be approximately the same
        print("Expected mean:")
        print(Mu)
        print("Expected variance:")
        print(A)
        print("built-in multinormal sampler")
        print("mean (largest absolute deviation):")
        print(abs(Mu - Sd.mean(axis=0)).max())
        print("variance (largest absolute deviation):")
        print(abs(A - cov(Sd.T)).max())
        print("gibbs sampler at j=", j)
        print("mean (largest absolute deviation):")
        print(abs(Mu - Sg.mean(axis=0)).max())
        print("variance (largest absolute deviation):")
        print(abs(A - cov(Sg.T)).max())
        test_compare2d(Sd, Sg, 0, 1)
        # import IPython; IPython.embed()  #DEBUG
    finally:
        return Sd, Sg  # so that python -i can do fun things
def gen_uniform(seedxl, seedxh, seedyl, seedyh):
    ''' Generate uniform coordinates '''
    return int(random.uniform(seedxl, seedxh)), int(random.uniform(seedyl, seedyh))
def main2():
    ITERATIONS = 10
    q = 100
    for it in range(ITERATIONS):
        W = random.uniform(-0.1, 0.1, [q, q])
        WI = random.uniform(-.1, .1, [q, 1])
        mc = sum(memory_capacity(W, WI, memory_max=200, runs=1)[0])
        og = matrix_orthogonality(W)
        print(og)
def reset(self):
    """ re-initializes the environment, setting the cart back in a
        random position.
    """
    if self.randomInitialization:
        angle = random.uniform(-0.2, 0.2)
        pos = random.uniform(-0.5, 0.5)
    else:
        angle = -0.2
        pos = 0.2
    self.sensors = (angle, 0.0, pos, 0.0)
def gen_master(x_l, x_h, y_l, y_h):
    ''' Decide exponential or uniform '''
    # numpy's randint has an exclusive upper bound, so the original
    # randint(1, 2, 1) could only return 1 and never chose the
    # exponential branch; (1, 3) makes it a fair coin
    if random.randint(1, 3) > 1:
        return gen_exponential(random.uniform(x_l, x_h), random.uniform(y_l, y_h))
    else:
        return gen_uniform(x_l, x_h, y_l, y_h)
def __init__(self, vocab_size, hidden_size, bptt_truncate=4, clip=5, save_path=None):
    self.vocab_size = vocab_size  # The size of the dictionary.
    self.hidden_size = hidden_size  # The size of the hidden layer.
    # Initialize the input weights
    self.Wz = sprd.uniform(-sp.sqrt(1. / vocab_size), sp.sqrt(1. / vocab_size), (hidden_size, vocab_size))
    self.Wi = sprd.uniform(-sp.sqrt(1. / vocab_size), sp.sqrt(1. / vocab_size), (hidden_size, vocab_size))
    self.Wf = sprd.uniform(-sp.sqrt(1. / vocab_size), sp.sqrt(1. / vocab_size), (hidden_size, vocab_size))
    self.Wo = sprd.uniform(-sp.sqrt(1. / vocab_size), sp.sqrt(1. / vocab_size), (hidden_size, vocab_size))
    # Initialize the recurrent weights
    self.Rz = sprd.uniform(-sp.sqrt(1. / hidden_size), sp.sqrt(1. / hidden_size), (hidden_size, hidden_size))
    self.Ri = sprd.uniform(-sp.sqrt(1. / hidden_size), sp.sqrt(1. / hidden_size), (hidden_size, hidden_size))
    self.Rf = sprd.uniform(-sp.sqrt(1. / hidden_size), sp.sqrt(1. / hidden_size), (hidden_size, hidden_size))
    self.Ro = sprd.uniform(-sp.sqrt(1. / hidden_size), sp.sqrt(1. / hidden_size), (hidden_size, hidden_size))
    # Initialize the peephole weights
    self.pi = sp.zeros(hidden_size)
    self.pf = sp.zeros(hidden_size)
    self.po = sp.zeros(hidden_size)
    # Initialize the bias weights
    self.bz = sp.zeros(hidden_size)
    self.bi = sp.zeros(hidden_size)
    self.bf = sp.zeros(hidden_size)
    self.bo = sp.zeros(hidden_size)
    # Initialize the output layer
    self.V = sprd.uniform(-sp.sqrt(1. / hidden_size), sp.sqrt(1. / hidden_size), (vocab_size, hidden_size))
    self.c = sp.zeros(self.vocab_size)
    self.param_names = ['Wz', 'Wi', 'Wf', 'Wo', 'Rz', 'Ri', 'Rf', 'Ro',
                        'pi', 'pf', 'po', 'bz', 'bi', 'bf', 'bo', 'V', 'c']
    self.bptt_truncate = bptt_truncate
    self.clip = clip
    self.save_path = save_path
def reset(self):
    if self.randomInitialization:
        angle = random.uniform(-0.2, 0.2)
        pos = random.uniform(-0.5, 0.5)
    else:
        angle = -0.2
        pos = 0.2
    self.sensors_sequence = E.tools.RingBuffer(self.critic_model.setting.n_time_steps,
                                               ivalue=[0.0] * self.outdim)
    self.actions_sequence = E.tools.RingBuffer(self.critic_model.setting.n_time_steps,
                                               ivalue=[0.0])
    self.sensors = [angle, 0.0, pos, 0.0]
    self.sensors_sequence.append(self.sensors)
def monte_carlo_volume(f, x1, x2, y1, y2, z1, z2, N):
    counter = 0
    for i in range(N):
        x = random.uniform(x1, x2)
        y = random.uniform(y1, y2)
        z = random.uniform(z1, z2)
        if f(x, y, z):
            counter += 1
    return (x2 - x1) * (y2 - y1) * (z2 - z1) * (counter / N)
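A quick sanity check for monte_carlo_volume is a region with a known volume. The sketch below is my addition, assuming numpy.random is imported as random (as the function itself implies): an indicator for the unit ball, whose volume is 4*pi/3 ~ 4.19.

# Usage sketch (hypothetical): estimate the volume of the unit ball.
def in_unit_ball(x, y, z):
    return x * x + y * y + z * z <= 1.0

vol = monte_carlo_volume(in_unit_ball, -1, 1, -1, 1, -1, 1, 100000)
print(vol)  # should be close to 4*pi/3 ~ 4.19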
def generate_lin_regression_nikolaenko(n, d):
    """
    Generates a synthetic linear regression instance as in
    "Privacy-Preserving Ridge Regression on Hundreds of Millions of Records"
    """
    X = random.uniform(low=-1, high=1, size=(n, d))
    beta = random.uniform(low=-1, high=1, size=d)
    mu, sigma = 0, 1  # mean and standard deviation
    e = numpy.array(random.normal(mu, sigma, n))
    y = X.dot(beta) + e.T
    return (X, y, beta, e)
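A minimal check of this generator, assuming numpy is imported under that name as above: fit ordinary least squares and compare the estimate with the beta used to generate the data.

# Usage sketch (hypothetical): OLS should roughly recover beta for large n.
X, y, beta, e = generate_lin_regression_nikolaenko(n=1000, d=5)
beta_hat = numpy.linalg.lstsq(X, y, rcond=None)[0]
print(numpy.abs(beta_hat - beta).max())  # small when n >> d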
def generate_lin_system(n, d, filepath=None):
    X = random.randn(n, d)
    A = 1. / (d * n) * X.T.dot(X)
    y = random.uniform(low=-1, high=1, size=d)
    b = A.dot(y)
    # -10 and 10 are an arbitrary choice here
    mask_A = random.uniform(low=-10, high=10, size=(d, d))
    mask_b = random.uniform(low=-10, high=10, size=d)
    if filepath:
        write_system(A, b, y, filepath)
    return (A, mask_A, b, mask_b, y)
def reset(self):
    if self.randomInitialization:
        angle = random.uniform(-0.2, 0.2)
        pos = random.uniform(-0.5, 0.5)
    else:
        angle = -0.2
        pos = 0.2
    self.t = 0
    self.sensors_sequence = RingBuffer(N_CTIME_STEPS, ivalue=[0.0] * 4)
    self.actions_sequence = RingBuffer(N_CTIME_STEPS, ivalue=[0.0])
    self.sensors = (angle, 0.0, pos, 0.0)
    self.sensors_sequence.append(self.sensors)
def __init__(self, n_features, n_hiddens, bptt_truncate=4):
    self.n_features = n_features  # The size of the dictionary.
    self.n_hiddens = n_hiddens
    # Initialize the hidden layer
    self.U = sprd.uniform(-sp.sqrt(1. / n_features), sp.sqrt(1. / n_features),
                          (n_hiddens, n_features))
    self.W = sprd.uniform(-sp.sqrt(1. / n_hiddens), sp.sqrt(1. / n_hiddens),
                          (n_hiddens, n_hiddens))
    self.b = sp.zeros(self.n_hiddens)
    # Initialize the output layer
    self.V = sprd.uniform(-sp.sqrt(1. / n_hiddens), sp.sqrt(1. / n_hiddens),
                          (n_features, n_hiddens))
    self.c = sp.zeros(self.n_features)
    self.bptt_truncate = bptt_truncate
def disperseRand(self, n):
    a = []
    c = []
    i = 0
    # rejection-sample n points inside the unit disc
    while i < n:
        xy = [random.uniform(-1, 1), random.uniform(-1, 1)]
        if (xy[0]**2 + xy[1]**2) <= 1:
            a.append(xy)
            i += 1
    # lift each point onto the unit sphere
    for i in range(0, len(a)):
        c.append([a[i][0], a[i][1], sqrt(1 - (a[i][0]**2 + a[i][1]**2))])
    for i in range(0, len(c)):
        # randomly change sign of the z coord
        if random.randint(0, 1):
            c[i][2] = c[i][2] * -1
    return c
def getMaxAction(self, state):
    """ Return the action with the maximal value for the given state. """
    #print argmax(self.getActionValues(state))
    # return argmax(self.getActionValues(state))
    return random.uniform(-50, 50)
def get_distance(n, balloon, offset, Leader, var_measurement, distanceRSSI):
    distance_matrix = np.zeros((n, n))
    sigma = np.zeros((n, n))
    for i in np.arange(start=0, stop=n, step=1):
        if i == Leader:
            X = np.array([balloon[i].X, balloon[i].Y]) + offset
        else:
            X = np.array([balloon[i].X, balloon[i].Y])
        for jj in np.arange(start=i, stop=n, step=1):
            Y = np.array([balloon[jj].X, balloon[jj].Y])
            if i == 0 and jj == 1:
                distance_matrix[i, jj] = distanceRSSI
                sigma[i, jj] = var_measurement
                sigma[jj, i] = var_measurement
            else:
                # CHECK THIS!! SHOULD WE ADD NOISE TO MEASUREMENT?
                distance_matrix[i, jj] = np.linalg.norm(X - Y) + random.uniform(-10, 10)
                sigma[i, jj] = 5
                sigma[jj, i] = 5
            distance_matrix[jj, i] = distance_matrix[i, jj]
    return distance_matrix, sigma
def main():
    ITERATIONS = 100
    mc = zeros(ITERATIONS)
    og = zeros(ITERATIONS)
    # farby =   ("farby" is Slovak for "colors")
    QS = 10
    colors = [
        [0, 0, 0],
        [0, 0, 1],
        [0, 1, 0],
        [1, 0, 0],
        [0, 1, 1],
        [1, 0, 1],
        [0, 1, 1],
    ]
    for qpre in range(QS):
        q = qpre + 2
        for it in range(ITERATIONS):
            W = random.normal(0, 0.1, [q, q])
            WI = random.uniform(-.1, .1, [q, 1])
            mc[it] = sum(memory_capacity(W, WI, memory_max=200, runs=1,
                                         iterations_coef_measure=5000)[0][:q + 2])
            og[it] = matrix_orthogonality(W)
            print(qpre, QS, it, ITERATIONS)
        plt.scatter(og, mc, marker='+', label=q, c=(colors[qpre % len(colors)]))
    plt.xlabel("orthogonality")
    plt.ylabel("memory capacity")
    plt.grid(True)
    plt.legend()
    plt.show()
def nPointSeed(image, n):
    "Seed according to the distribution in the image."
    # Compute a CDF function across the flattened image
    imageCDF = image.flatten().cumsum()
    imageCDF /= 1.0 * imageCDF.max()
    # Function to turn a random point in the CDF into a random index in the image
    indexInterpolator = interpolate.interp1d(imageCDF, arange(imageCDF.size))
    # Set to collect the UNIQUE indices
    indexContainer = set()
    while len(indexContainer) < n:
        # Generate at most the number of points remaining
        maxToGenerate = n - len(indexContainer)
        randomCDFValues = random.uniform(np.finfo(np.float32).eps,
                                         1.0 - np.finfo(np.float32).eps,
                                         maxToGenerate)
        # Back them into indices
        iInterp = indexInterpolator(randomCDFValues)
        iInterp = np.round(iInterp).astype(uint32)
        # Add them to the set
        indexContainer.update(iInterp)
    # Break them out of the set
    iInterp = array(list(indexContainer))
    # Compute the equivalent xy
    xCoords = (iInterp // image.shape[1]).astype(int32)
    yCoords = (iInterp % image.shape[1]).astype(int32)
    # Return them glued together.
    return np.c_[xCoords, yCoords].T
def initialize_x(N, init_type='normal'):
    '''
    Initialize an array of length N filled with probability parameters.

    init_type: str
        Accepts 'equal', 'normal', and 'uniform'.
    '''
    # p, pmin, pmax are assumed to be module-level constants
    x = np.zeros(N)
    initialize = True
    while initialize:
        if init_type == 'normal':
            x = random.normal(p, min(p, (1 - p)) / 2, N)
        elif init_type == 'uniform':
            x = random.uniform(pmin, pmax, N)
        elif init_type == 'equal':
            x = np.full(N, p)
        else:
            print('init_type is wrong')
            exit()
        # resample until every entry falls inside [pmin, pmax]
        if np.all(x <= pmax) and np.all(x >= pmin):
            initialize = False
        else:
            initialize = True
    return x
def perturbation(self):
    """ produce a parameter perturbation """
    deltas = random.uniform(-self.epsilon, self.epsilon, self.numParameters)
    # reduce epsilon by factor gamma
    self.epsilon *= self.gamma
    return deltas
def nRandomImagePoint(image, n):
    # Compute a CDF function across the flattened image
    imageCDF = image.flatten().cumsum()
    imageCDF /= 1.0 * imageCDF.max()
    # Function to turn a random point in the CDF into a random index in the image
    indexInterpolator = interpolate.interp1d(imageCDF, arange(imageCDF.size))
    # Set to collect the UNIQUE indices
    indexContainer = set()
    while len(indexContainer) < n:
        # Generate at most the number of points remaining
        maxToGenerate = n - len(indexContainer)
        randomCDFValues = random.uniform(0, 1.0, maxToGenerate)
        # Back them into indices
        iInterp = indexInterpolator(randomCDFValues)
        iInterp = np.round(iInterp).astype(uint32)
        # Add them to the set
        indexContainer.update(iInterp)
    # Break them out of the set
    iInterp = array(list(indexContainer))
    # Compute the equivalent xy
    xCoords = (iInterp // image.shape[0]).astype(int32)
    yCoords = (iInterp % image.shape[0]).astype(int32)
    # Return them glued together.
    return c_[xCoords, yCoords]
def random_mean_and_matrix_semidefinite(self, random_means, random_covariances,
                                        array=True, num_points=1):
    '''
    Parameters
    ----------
    random_means: matrix
        Each row is a two-element vector [low, high].
        Example: means = [[0,1],[2,3],[6,7]]
        Each row gives the range from which the uniform random generator draws values.
    random_covariances: matrix
        A square matrix where each entry is a two-element vector [low, high].
        Example, for a 3x3 matrix:
        covariance = [[[a11,b11],[a12,b12],[a13,b13]],
                      [[a21,b21],[a22,b22],[a23,b23]],
                      [[a31,b31],[a32,b32],[a33,b33]]]
        Each entry gives the range from which the uniform random generator draws values:
        a = [[[1,2],[-5,5],[-100,6]],[[135,683],[2,285],[-135,13]],[[58,135],[16,35],[5,68478]]]
    '''
    mean = []
    for random_mean in random_means:
        mean.append(random.uniform(low=random_mean[0], high=random_mean[1]))
    if array == True:
        covariance = random.rand(random_means.shape[0], random_means.shape[0]) * (
            random_covariances[1] - random_covariances[0]) + random_covariances[0]
        # note: '*' is elementwise here, not the usual A.dot(A.T)
        # positive-semidefinite construction
        covariance = covariance * covariance.transpose()
    else:
        covariance = []
        for random_covariance in random_covariances:
            covariance.append([])
            for points in random_covariance:
                covariance[-1].append(random.uniform(low=points[0], high=points[1]))
        covariance = np.array(covariance)
        # A*A' is intended as a semidefinite matrix; again '*' is elementwise
        covariance = covariance * covariance.transpose()
    return [mean, multivariate_normal(mean, covariance, num_points)]
def reset(self):
    """ re-initializes the environment, setting the ship to rest
        at a random orientation.
    """
    # [h, hdot, v]
    self.sensors = [random.uniform(-30., 30.), 0.0, 0.0]
    if self.render:
        if self.server.clients > 0:
            # If there are clients, send them the reset signal
            self.server.send(["r", "r", "r"])
def reset(self):
    # note: uniform(2, 2, ...) has a degenerate range and always returns 2.0
    self.state = random.uniform(2, 2, self.dim)
    # if self.hasRenderer():
    #     self.renderer.reset()
    #     self.renderer.updateData((self.state, self.f(self.state)))
    self.action = zeros(self.dim, float)
    self.updated = True
def __init__(self, input_length, hidden_length, out_lenght):
    self.input_length = input_length
    self.out_lenght = out_lenght
    self.hidden_length = hidden_length
    self.centers = []
    for i in range(hidden_length):  # xrange in the Python 2 original
        self.centers.append(random.uniform(-1, 1, input_length))
    self.variance = 1
    self.W = random.random((self.hidden_length, self.out_lenght))
def monte_carlo_integral(f, x1, x2, y1, y2, N):
    counter = 0
    for i in range(N):
        x = random.uniform(x1, x2)
        y = random.uniform(y1, y2)
        if 0 < y < f(x):
            counter += 1
        if f(x) < y < 0:
            counter -= 1
    result = (x2 - x1) * (y2 - y1) * (counter / N)
    # correct for the part of the box below y1 / above y2
    if y1 > 0:
        result += y1 * (x2 - x1)
    if y2 < 0:
        result += y2 * (x2 - x1)
    return result
def keplerSim(tau, e, T0, K, w, sig, tlo, thi, n):
    dt = (thi - tlo) / (n - 1)
    data = zeros((n, 2), Float)  # `Float` is the old Numeric typecode
    data[:, 0] = r.uniform(tlo, thi, (n))
    # for i in range(n):
    #     data[i,0] = tlo + i*dt
    data[:, 1] = v_rad(K, w, tau, e, T0, data[:, 0]) + r.normal(0., sig, (n))
    print("Created data.")
    return data
def integrate(f, a, b, N):
    xrand = random.uniform(a, b, N)
    sum = 0.
    for x in xrand:
        sum += f(x)
    return sum * (b - a) / N
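As a reference point, a sketch of calling integrate on a known integral; the three-argument random.uniform(a, b, N) already implies NumPy's generator, so numpy is assumed available. The integral of sin over [0, pi] is exactly 2.

# Usage sketch (hypothetical): the estimate should approach 2.0.
import numpy as np
print(integrate(np.sin, 0.0, np.pi, 100000))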
def nRandomImagePoint(image, n):
    # Column-wise CDF of the image
    imageCDF = 1.0 * image.cumsum(axis=0)
    colSum = image.sum(axis=0)
    # Avoid zero division
    colSum[colSum == 0] = 1
    imageCDF /= colSum
    prob = (1.0 * colSum.cumsum()) / colSum.sum()
    pointContainer = []
    xInterp = interpolate.interp1d(prob, linspace(0.0, 1.0, prob.shape[0]))
    while len(pointContainer) < n:
        xN = random.uniform(0.0, 1.0)
        x = xInterp(xN) * imageCDF.shape[1]
        print(xN, x)
        # Values of the columns straddling the point
        # (indices cast to int; floor/ceil return floats)
        colBelow = imageCDF[:, int(floor(x))]
        colAbove = imageCDF[:, int(ceil(x))]
        delta = ceil(x) - x
        # Weighted average of the two CDFs
        colCDF = (colBelow * delta) + (colAbove * (1.0 - delta))
        if colCDF.sum() == 0:
            continue
        yInterp = interpolate.interp1d(colCDF, linspace(0.0, 1.0, colCDF.shape[0]))
        y = yInterp(random.uniform(0.0, 1.0)) * colCDF.shape[0]
        if (y == 0) or (y == colCDF.shape[0]):
            continue
        pointContainer.append((x, y))
    print(array(pointContainer).shape)
    return array(pointContainer).T
def gopt_min(fun, bounds, n_warmup=1000, n_local=10):
    """
    Global optimization (minimization) based on:
    1. Sampling 'n_warmup' uniformly random points
    2. Local optimization (L-BFGS-B) at 'n_local' + 1 points
       - 'n_local' uniformly random starting points
       - best location from step (1) as starting point

    Input:
    fun      : vectorized function
    bounds   : list of tuples [(min, max), (min, max), ... ]
    n_warmup : number of warmup steps
    n_local  : number of local optimizations
    """
    # Input dimension
    dim = len(bounds)
    # Warm-up samples
    x_warmup = scale_to_bounds(random.uniform(size=(n_warmup, dim)), bounds)
    # Best from warmup
    y = fun(x_warmup)
    idx = y.argmin()
    y_best = y[idx]
    x_best = x_warmup[idx]
    # Run local optimization
    # (note: unlike the docstring's "+1", the warmup optimum is not
    # actually used as a starting point here)
    if n_local > 0:
        # Starting points for local optimization
        x_local = scale_to_bounds(random.uniform(size=(n_local, dim)), bounds)
        for x in x_local:
            res = minimize(fun=fun, x0=x, bounds=bounds, method='L-BFGS-B')
            if res.success:
                if res.fun[0] < y_best:
                    y_best = res.fun[0]
                    x_best = res.x
    return x_best, y_best
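scale_to_bounds is not shown in the source; the sketch below supplies one plausible implementation (unit-cube samples mapped affinely into the box) plus a call on a toy objective. The res.fun[0] indexing above implies fun returns an array, which some SciPy versions reject for scalar minimization, so treat this strictly as an illustration.

# Hypothetical helper and usage; names and shapes are assumptions.
import numpy as np
from scipy.optimize import minimize

def scale_to_bounds(x, bounds):
    lo = np.array([b[0] for b in bounds])
    hi = np.array([b[1] for b in bounds])
    return lo + x * (hi - lo)  # map [0,1]^dim samples into the box

def sphere(x):
    x = np.atleast_2d(x)
    return (x ** 2).sum(axis=1)  # returns an array, matching res.fun[0] above

x_best, y_best = gopt_min(sphere, bounds=[(-2, 2), (-2, 2)])
print(x_best, y_best)  # near (0, 0) and 0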
def reset(self):
    """ re-initializes the environment, setting the plane back at a
        random distance from the center of the thermal
    """
    if self.randomInitialization:
        # The distance the plane is from the center of the thermal
        planeDist = random.uniform(0, self.maxPlaneStartDist)
    else:
        planeDist = self.maxPlaneStartDist
    # Initialize sensors
    self.sensors = planeDist
def perturbate(self):
    """ perturb the parameters. """
    # perturb the parameters and store the deltas in the dataset
    deltas = random.uniform(-self.epsilon, self.epsilon, self.module.paramdim)
    # reduce epsilon by factor gamma
    self.epsilon *= self.gamma
    self.ds.append('deltas', deltas)
    # change the parameters in module (params is a pointer!)
    params = self.module.params
    params[:] = self.original + deltas
def generate_lin_regression(n, d, sigma):
    """ See cgd.pdf """
    X = random.randn(n, d)
    # scale each column so its maximum absolute value is 1
    for i in range(d):  # xrange in the Python 2 original
        X[:, i] /= numpy.max(numpy.abs(X[:, i]))
    beta = random.uniform(low=0, high=1, size=d)
    e = numpy.array(random.normal(0, sigma, n))
    y = X.dot(beta) + e.T
    return (X, y, beta, e)
def monte_carlo_basic(f, x0, xN, N):
    xrand = np.zeros(N)
    for i in range(len(xrand)):
        xrand[i] = random.uniform(x0, xN)
    integral = 0.0
    for i in range(N):
        integral += f(xrand[i])
    return (xN - x0) / float(N) * integral
def perform_kmeans(init, iteration, seed):
    init_array = []
    im_width = len(image[0])
    im_height = len(image) + len(image[0]) / 2
    for i in range(0, 9):
        init_array.append([random.uniform(0, im_width - 1),
                           random.uniform(0, im_height - 1)])
    init_array = np.array(init_array)

    kmeans_part = KMeans(n_clusters=9, init=init_array, max_iter=1, n_init=1)
    data_copy = df.iloc[:, 0:2]  # df.ix is removed in modern pandas
    kmeans_part.fit(data_copy)
    init_array_part = kmeans_part.cluster_centers_

    target_data = df.iloc[:, 0:2]
    target_kmeans = KMeans(n_clusters=9, init='random')
    target_kmeans.fit(target_data)

    data = df.iloc[:, 0:2]
    #print(im_width, ' ', im_height)
    if init == 'random':
        kmeans = KMeans(n_clusters=9, init=init_array, max_iter=iteration, n_init=1, random_state=seed)
    elif init == 'forgy':
        kmeans = KMeans(n_clusters=9, init='random', max_iter=iteration, random_state=seed)
    elif init == 'kmeans++':
        kmeans = KMeans(n_clusters=9, init='k-means++', max_iter=iteration, n_init=1, random_state=seed)
    elif init == 'randompartition':
        kmeans = KMeans(n_clusters=9, init=init_array_part, max_iter=iteration, n_init=1, random_state=seed)
    kmeans.fit(data)
    print('score for init:', init, 'is', v_measure_score(target_kmeans.labels_, kmeans.labels_))
def randomweights(self):
    """ Randomize weights according to Bottou's proposition. """
    nofw = len(self.conec)
    weights = zeros(nofw, 'd')
    for w in range(nofw):  # xrange in the Python 2 original
        trg = self.conec[w, 1]
        n = len(self.graph.predecessors(trg))
        bound = 2.38 / sqrt(n)
        weights[w] = random.uniform(-bound, bound)
    self.weights = weights
    self.trained = False
def initialize(input):  # initialize the MLE parameters
    # pick three random columns of the input as the initial means
    # (indices must be integers; the original random.uniform returned floats)
    a = random.randint(0, 150)
    b = random.randint(0, 150)
    c = random.randint(0, 150)
    # set the initial mu to randomly chosen inputs from the original inputs
    miu = [input[:, a], input[:, b], input[:, c]]
    # set the initial covariances to be identities
    cov = [np.matrix(np.eye(4)), np.matrix(np.eye(4)), np.matrix(np.eye(4))]
    a = random.random()
    b = random.random()
    c = random.random()
    # set the initial pi to three normalized random figures summing to 1
    # (note the parentheses: the original a / a + b + c evaluates as 1 + b + c)
    total = a + b + c
    a1 = a / total
    b1 = b / total
    c1 = c / total
    pai = [a1, b1, c1]
    return [pai, miu, cov]
def query(self, state, bins):
    # just return a random action
    if self.burn:
        return random.uniform(-100, 100)
    #print state
    # note: StateC reuses state[1]; state[2] may have been intended
    evidence = dict(StateA=state[0], StateB=state[1], StateC=state[1], StateD=state[3])
    #evidence = dict(theta=state[0],thetaV=state[1],s=state[2],sV=state[3],Reward=0)
    #evidence = dict(theta=state[0], thetaPrime=state[1],s=state[2], sPrime=state[3])
    # sample the network given evidence
    result = self.net.randomsample(10, evidence)
    evidence["Reward"] = -1
    # result2 = self.net.randomsample(10, evidence)
    #print evidence, result[0],"\n"
    # return result[0]["Action"]
    #bins = array([0.0, 1.0, 2.0, 3.0])
    # t = pd.cut(ac, 4,labels=False)
    # counts = bincount(t)
    # r = argmax(counts)
    a = []
    av = []
    for x in result:
        a.append(x["Reward"])
        av.append(x["Action"])
    # av2 = []
    # for x in result2:
    #     av.append(x["Action"])
    i = argmax(a)  # position of highest reward
    avg = mean(av)
    #avg2 = mean(av2)
    #print abs(avg-avg2), avg, avg2
    # so the action with highest reward is
    action = result[i]["Action"]
    return avg
    #print avg,action,"yo"
    #print "action:", action
    return action  # unreachable; kept from the original
def drop_object(self):
    """Drops a random object (box, sphere) into the scene."""
    # choose between boxes and spheres
    if random.uniform() > 0.5:
        (body, geom) = self._create_sphere(self.space, 10, 0.4)
    else:
        (body, geom) = self._create_box(self.space, 10, 0.5, 0.5, 0.5)
    # randomize position slightly
    body.setPosition((random.normal(-6.5, 0.5), 6.0, random.normal(-6.5, 0.5)))
    # body.setPosition( (0.0, 3.0, 0.0) )
    # randomize orientation slightly
    #theta = random.uniform(0,2*pi)
    #ct = cos (theta)
    #st = sin (theta)
    # rotate body and append to (body,geom) tuple list
    # body.setRotation([ct, 0., -st, 0., 1., 0., st, 0., ct])
    self.body_geom.append((body, geom))
def reset(self):
    self.state = random.uniform(2, 2, self.dim)
    self.action = zeros(self.dim, float)
    self.updated = True
#!/usr/bin/env python
import sys
sys.path.append("../..")

import scipy as sp
from scipy import io
from scipy import random

import psc585
from psc585 import ps4

foo = ps4.FinalModel.from_mat("FinalModel.mat", "FinalData.mat")

random.seed(4403205)

# Initial Conditional Choice probabilities
# Government
Pg = random.uniform(0, 1, (foo.n, foo.k))
Pg /= Pg.sum(1)[:, sp.newaxis]  # bare `newaxis` was undefined with these imports
# Provinces
Pp0 = random.uniform(0, 1, (foo.n, foo.k))
Pp = sp.concatenate([sp.vstack((Pp0[:, i], 1 - Pp0[:, i])).T
                     for i in range(Pp0.shape[1])], 1)
# Initial Parameters
theta0 = random.normal(0, 1, (5, 1))
theta = foo.npl(Pp, Pg, verbose=True)
# fragment: assumed to run inside a loop over incoming rows k of A
underblock = [0, 0, 0]  # initiate a new block under construction
for i in range(1, L + 1):
    # if the number of blocks in level i exceeds ll, pop out the two oldest blocks
    if len(block['levelX'.replace('X', str(i))]) > ll:
        block1 = block['levelX'.replace('X', str(i))].pop(0)
        block2 = block['levelX'.replace('X', str(i))].pop(0)
        blockmerged = normmerge(block1, block2)  # merge the two blocks
        if i == L:
            L += 1
            block['levelX'.replace('X', str(L))] = []
        block['levelX'.replace('X', str(i + 1))].append(blockmerged)  # put the merged block in level i+1
if k > N:
    # only start removing blocks once more rows have passed than the predefined window size
    normremove(k, N, L)
wi = linalg.norm(A[k - 1, :m]) ** 2  # squared norm of row k
if wi == 0:  # if row k is all zero
    continue
ui = random.uniform(0, 1, l)  # generate l uniformly distributed random numbers
pi = ui ** (1 / wi)  # set p; row k gets one pi for each level i
for i in range(1, l + 1):  # for each level i from 1 to l
    if len(priority['copyX'.replace('X', str(i))]) == 0:
        priority['copyX'.replace('X', str(i))].append((k, pi[i - 1], A[k - 1].reshape(1, m)))  # (start time, p, row)
    else:
        for item in priority['copyX'.replace('X', str(i))]:
            # remove rows that are out of the window or have a p smaller than
            # that of row k in this level
            # (note: removing from a list while iterating over it can skip items)
            if k - item[0] > N or item[1] < pi[i - 1]:
                priority['copyX'.replace('X', str(i))].remove(item)
        priority['copyX'.replace('X', str(i))].append((k, pi[i - 1], A[k - 1].reshape(1, m)))  # append row k to level i
if k >= N and np.remainder(k, jg) == 0:
    # measure err between window and sketch
    B = max(priority['copy1'], key=lambda x: x[1])[2]  # sketch
    num = len(priority['copy1'])  # number of rows stored
    for i in range(2, l + 1):
        B = np.append(B, max(priority['copyX'.replace('X', str(i))], key=lambda x: x[1])[2], axis=0)
        num = num + len(priority['copyX'.replace('X', str(i))])
def main(): """ Today's agenda: i) generate initial matrices ii) simulate iii) learn gauss iv) simulate v) plot & compare """ W = random.normal(0, sigma, [q, q]) WI = random.uniform(-sigma, sigma, [q, 1]) X = random.uniform(-1, 1, [q]) a = ones([q]) b = zeros([q]) S = zeros([q, ITERATIONS]) S2 = zeros([q, ITERATIONS]) ahist = zeros([q, ITERATIONS]) # i?) simulate U = random.uniform(-1, 1, [ITERATIONS]) for it in range(ITERATIONS): net = dot(WI, U[it].reshape(1)) + dot(W, X) X = tanh( a * net + b) S[:, it] = X # iii) learn U = random.uniform(-1, 1, [ITERATIONS]) for it in range(ITERATIONS): net = dot(WI, U[it].reshape(1)) + dot(W, X) Y = tanh( a * net + b) a, b = ipgauss(net, Y, a, b) ahist[:, it] = a X = Y # i?) simulate2 U = random.uniform(-1, 1, [ITERATIONS]) for it in range(ITERATIONS): net = dot(WI, U[it].reshape(1)) + dot(W, X) X = tanh( a * net + b) S2[:, it] = X # iv) view histogram BINCNT=10 def show_histograms(): for yplt in range(gridy): print("\r {}/{}".format(yplt, gridy), end="") for xplt in range(gridx): indx = yplt*gridx + xplt pyplot.subplot(gridy, gridx, indx) std1 = std(S[indx, :]) std2 = std(S2[indx, :]) pyplot.hist(S[indx,:], bins=BINCNT, normed=True, label="{}:bfr std1={:6.4f}".format(indx,std1)) pyplot.hist(S2[indx,:], bins=BINCNT, normed=True, label="{}:atr std2={:6.4f}".format(indx,std2)) pyplot.grid(True) pyplot.legend() print("std1={0}, std2={1}".format(std1, std2)) #pyplot.legend() pyplot.show() show_histograms() for ciara in range(q): pyplot.plot(range(ITERATIONS), ahist[ciara, :], label="%d"%ciara) pyplot.legend() pyplot.show() print("a = %s" % a) print("b = %s" % b) print("done.")
# Initial state.
x = 0
f_x = pdf(x)
# Scale parameter of the uniform proposal distribution.
s = float(sys.argv[1])
# Vector to store the output.
chain = zeros(Ttot)
chain[0] = x
# Number of misses.
miss = 0
# Start the loop to produce each sample of the chain.
for t in range(1, Ttot):
    # propose a step uniform in (-s, s)
    # (the original had `x + s * random.uniform(-s, s)`, which scales by s twice)
    y = x + random.uniform(-s, s)
    f_y = pdf(y)
    alpha = 1.0 if f_y > f_x else f_y / f_x
    if alpha > 1.0 or (random.uniform() < alpha):
        # Make the transition.
        x = y
        f_x = f_y
    else:
        # The trial was not accepted. Stay at the same state,
        # and increment the "miss" counter.
        miss += 1
    # Store the current state.
    chain[t] = x

mu = mean(chain)
sig2 = var(chain)
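The script assumes a target pdf and a chain length Ttot defined earlier; a minimal, hypothetical pair that makes it runnable is sketched below. An unnormalized standard normal suffices, since Metropolis only uses density ratios.

# Hypothetical definitions the script relies on (not in the source):
from numpy import exp, zeros, mean, var, random

def pdf(x):
    return exp(-0.5 * x * x)  # unnormalized N(0, 1); only ratios matter

Ttot = 100000  # chain length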
def run_delayed_ssa(system):
    """ SSA with delays and custom event functions.
    NOTE: relies on exec/eval creating and mutating function locals,
    which only works under Python 2 semantics.
    """
    # vars used in the simulation
    time = 0  # unitless
    end_time = system['sim-time']
    species = system['participants']
    parameters = system['parameters']
    events = system['events']
    prop_funcs = {}
    exec_funcs = {}
    props = {}
    delays = {}
    last_exec_time = {}

    # return values
    time_array = []
    species_array = []

    # populate results array
    time_array = [time]
    row = [0] * len(species)
    species_names = [''] * len(species)

    # create species vars so that rate code can be executed
    i = 0
    for name in species:
        species_names[i] = name
        exec(name + '=' + str(species[name]))
        row[i] = species[name]
        i += 1
    species_array.append(row)

    # create parameter vars so that rate code can be executed
    for name in parameters:
        exec(name + '=' + str(parameters[name]))

    # create (compile) functions from input strings for rates and events
    for name in events:
        if events[name].get('delay'):
            delays[name] = events[name]['delay']
        else:
            delays[name] = 0.0
        last_exec_time[name] = -1
        props[name] = 0.0
        prop_funcs[name] = compile("props['" + name + "'] = " + str(events[name]['propensity']),
                                   'prop_funcs_' + name, 'exec')
        exec_funcs[name] = compile(events[name]['consequence'], 'exec_funcs_' + name, 'exec')

    # MAIN LOOP
    while time < end_time:
        # calculate propensities
        for name in props:
            exec(prop_funcs[name])
            if delays[name] > 0 and delays[name] + last_exec_time[name] < time:
                print(name)
                props[name] = 0.0

        # calculate total of all propensities
        total_prop = 0
        for name in props:
            total_prop += props[name]

        # pick the reaction to fire in proportion to its propensity
        u = random.uniform(0, total_prop)
        usum = 0
        lucky = None
        for name in props:
            usum += props[name]
            if usum > u:
                lucky = name
                break

        # fire that reaction
        if lucky:
            last_exec_time[lucky] = time
            exec(exec_funcs[lucky])
            row = [0] * len(species)
            i = 0
            for name in species:
                row[i] = eval(name)
                i += 1
            time_array.append(time)
            species_array.append(row)

        # update next time using exp distrib
        if total_prop == 0.0:
            # jump to next delay
            lowest_delay = inf
            for name in props:
                if delays[name] > 0 and delays[name] < lowest_delay:
                    lowest_delay = delays[name]
            time += lowest_delay
        else:
            dt = random.exponential(1.0 / total_prop)
            time += dt
    # END MAIN LOOP

    result = {'time': time_array, 'participants': species_array, 'headers': species_names}
    return result
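The expected system schema can be read off the fields the function accesses. A hypothetical minimal input (a birth-death process; all names and rates are illustrative) might look like the sketch below, keeping in mind that the exec/eval use above ties the function to Python 2 semantics.

# Hypothetical input for run_delayed_ssa, inferred from the keys it reads.
system = {
    'sim-time': 10.0,                      # end time of the simulation
    'participants': {'X': 10},             # initial copy numbers
    'parameters': {'kb': 2.0, 'kd': 0.1},  # rate constants (illustrative)
    'events': {
        'birth': {'propensity': 'kb', 'consequence': 'X += 1'},
        'death': {'propensity': 'kd * X', 'consequence': 'X -= 1'},
    },
}
result = run_delayed_ssa(system)
print(result['headers'], result['participants'][-1])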