def display(*args):
    global x, y, move_x, move_y, NUMDOTS, NUMDOTS2, MAX_AGE, age
    glClearColor(0.0, 0.0, 0.0, 0.0)
    glClear(GL_COLOR_BUFFER_BIT)
    glColor3f(1.0, 1.0, 0.0)
    x = x + move_x
    y = y + move_y
    age = age + 1
    which = greater(age, MAX_AGE)
    x = choose(which, (x, random(NUMDOTS)))
    y = choose(which, (y, random(NUMDOTS)))
    age = choose(which, (age, 0))
    x = choose(greater(x, 1.0), (x, x - 1.0))  # very cool - wraparound
    y = choose(greater(y, 1.0), (y, y - 1.0))
    x2 = random(NUMDOTS2)
    y2 = random(NUMDOTS2)
    v = concatenate((transpose(array([x, y])),
                     transpose(array([x - .005, y + .005])),
                     transpose(array([x + .005, y - .005])),
                     transpose(array([x2, y2]))))
    glVertexPointerd(v)
    glEnableClientState(GL_VERTEX_ARRAY)
    glDrawArrays(GL_POINTS, 0, len(v))
    glDisableClientState(GL_VERTEX_ARRAY)
    glFlush()
    glutSwapBuffers()
def _testSpeednD(self, nr_points, dim):
    # numpy.random is a module, not a callable; numpy.random.rand assumed for
    # the uniform draws
    points = 100 * numpy.random.rand(dim, nr_points)
    vals = 100 * numpy.random.rand(nr_points)

    # build the model
    lut_ss = LutStrategy()
    lut_model = LutFactory().build(points, vals, lut_ss)

    # test the model
    target_points = points + 0.1
    cnt = 0
    starttime = time.time()
    while cnt < 2:
        yhat = lut_model.simulate(target_points)
        cnt = cnt + 1
    elapsed = time.time() - starttime
    nb_lookups = nr_points * cnt
    lookups_per_sec = nb_lookups / elapsed
    print("%d simulations (%d-D) of %d points took %f seconds (%d lookups/sec)" %
          (cnt, dim, nr_points, elapsed, lookups_per_sec))
def initNetwork():
    global network, imW, imH, iWeights, netDepth, maskResH, maskResW
    global layerLen, iBias, maxPixVal
    for i in range(netDepth):
        if i == 0:
            iWeights.append([[random() * 2 - 1
                              for x in range(round(1 / (maskResH * maskResW)))]
                             for y in range(layerLen[i])])
            #iWeights.append([[((np.random.random_sample()*2)-1) for x in range(round(1/(maskResH*maskResW)))] for y in range(layerLen[i])])
            #iWeights.append([[0 for x in range(round(1/(maskResH*maskResW)))] for y in range(layerLen[i])])
        else:
            iWeights.append(
                [[random() * 2 - 1 for x in range(layerLen[i - 1])]
                 for y in range(layerLen[i])])
            #iWeights.append([[((np.random.random_sample()*2)-1) for x in range(layerLen[i-1])] for y in range(layerLen[i])])
            #iWeights.append([[0 for x in range(layerLen[i-1])] for y in range(layerLen[i])])
        iBias.append([random() * 2 - 1 for x in range(layerLen[i])])
        #iBias.append([(np.random.random_sample()*2-1) for x in range(layerLen[i])])
        #iBias.append([0 for x in range(layerLen[i])])
        #iErrors.append([0 for x in range(layerLen[i])])
        tempLayer = {
            'layer': None,
            'weights': iWeights[i],
            'bias': iBias[i],
            'layerType': 'flat',
            'error': [0 for x in range(layerLen[i])],
            'prevLayer': None,
        }
        network.append(tempLayer)
def on_epoch_end(self):
    'make X-train sample list'
    """
    1. go over each class
    2. select randomly #n_sample samples of each class
    3. add selection list to dict with class as key
    """
    self.class_selection_path = np.array([])
    self.labels = np.array([])
    for class_i in self.classes:
        # random.sample assumed: pick this class's samples without replacement
        samples_class_i = random.sample(
            range(0, self.samples[class_i]),
            self.number_samples_per_class_to_pick)
        self.class_selection_path = np.append(self.class_selection_path, [
            self.filename_dict[f"{self.class_indices[class_i]}_{i}"]
            for i in samples_class_i
        ])
        self.labels = np.append(
            self.labels,
            [self.class_indices[class_i] for i in samples_class_i])
    self.list_IDs = self.class_selection_path

    'Updates indexes after each epoch'
    self.indexes = np.arange(len(self.list_IDs))
    if self.shuffle == True:
        if self.seed:
            # np.random is not callable; seed the global NumPy RNG instead
            np.random.seed(self.seed)
        np.random.shuffle(self.indexes)
def init_args(args, **kwargs):
    cfg = kwargs
    params = Map({})

    assert os.path.isdir(args.dump_dir), 'Dump dir must be a valid dir'
    args.ckpt_save_dir = os.path.join(args.dump_dir, 'checkpoints')
    args.log_dir = os.path.join(args.dump_dir, 'logs')
    for dir in [args.ckpt_save_dir, args.log_dir]:
        if not os.path.isdir(dir):
            os.makedirs(dir, exist_ok=True)

    args.is_cuda = torch.cuda.is_available()
    args.device = torch.device('cuda' if args.is_cuda else 'cpu')
    if args.is_cuda:
        torch.cuda.empty_cache()
        cudnn.deterministic = True
        # os.environ["CUDA_VISIBLE_DEVICES"]

    if args.seed is not None:
        random.seed(args.seed)
        np.random.seed(args.seed)  # np.random.seed, not np.random(...)
        torch.manual_seed(args.seed)
        if args.is_cuda:
            torch.cuda.manual_seed(args.seed)

    # don't pass it as args since it can't be serialized
    # https://discuss.pytorch.org/t/how-to-debug-saving-model-typeerror-cant-pickle-swigpyobject-objects/66304
    params.tensorboard = SummaryWriter(log_dir=args.log_dir)

    cfg.update(vars(args))
    return Map(cfg), params
def __init__(self, center, radius, threshold, maxDepth):
    self.isLeaf = False
    self.center = center
    self.radius = radius
    self.threshold = threshold
    self.maxDepth = maxDepth
    # self.color = (1., 0., 0.)
    # self.color = (randint(0,255), randint(0, 255), randint(0, 255))
    self.color = (random(), random(), random())
    # print("self.color = ", self.color)
    self.children = [None, None, None, None, None, None, None, None]
    # self.boundsOffsetTable = array([[-0.5, -0.5, -0.5],
    #                                 [+0.5, -0.5, -0.5],
    #                                 [-0.5, +0.5, -0.5],
    #                                 [+0.5, +0.5, -0.5],
    #                                 [-0.5, -0.5, +0.5],
    #                                 [+0.5, -0.5, +0.5],
    #                                 [-0.5, +0.5, +0.5],
    #                                 [+0.5, +0.5, +0.5]])
    self.boundsOffsetTable = array([
        [-0.5, -0.5, -0.5],
        [+0.5, -0.5, -0.5],
        [-0.5, -0.5, +0.5],
        [+0.5, -0.5, +0.5],
        [-0.5, +0.5, -0.5],
        [+0.5, +0.5, -0.5],
        [-0.5, +0.5, +0.5],
        [+0.5, +0.5, +0.5],
    ])
def setdata(self, X, V):
    A = self.bialtprodeye(2*self.F.J_coords)
    """Note: p, q <= min(n,m)"""
    self.data.Brand = 2*(random((A.shape[0], self.data.p))-0.5)
    self.data.Crand = 2*(random((A.shape[1], self.data.q))-0.5)
    self.data.B = zeros((A.shape[0], self.data.p), float)
    self.data.C = zeros((A.shape[1], self.data.q), float)
    self.data.D = zeros((self.data.q, self.data.p), float)

    U, S, Vh = linalg.svd(A)
    self.data.b = U[:, -1:]
    self.data.c = transpose(Vh)[:, -1:]

    if self.update:
        self.data.B[:, 1] = self.data.b
        self.data.C[:, 1] = self.data.c

        U2, S2, Vh2 = linalg.svd(c_[r_[A, transpose(self.data.C[:, 1])],
                                    r_[self.data.B[:, 1], [[0]]]])
        self.data.B[:, 2] = U2[0:A.shape[0], -1:]
        self.data.C[:, 2] = transpose(Vh2)[0:A.shape[1], -1:]
        self.data.D[0, 1] = U2[A.shape[0], -1]
        self.data.D[1, 0] = transpose(Vh2)[A.shape[1], -1]
    else:
        # self.data.B = eye(self.data.Brand.shape)
        # self.data.C = eye(self.data.Crand.shape)
        # USE OF RANDOM
        self.data.B = self.data.Brand
        self.data.C = self.data.Crand
def __init__(self, num_bias_weights, input_weights):
    self.values = []
    self.biases = []
    self.output_weights = []
    for i in range(num_bias_weights):
        # np.random is not callable; np.random.uniform(0, 1) assumed
        self.biases.append(np.random.uniform(0, 1))
    for i in range(num_bias_weights):
        self.output_weights.append(np.random.uniform(0, 1))
    self.input_weights = input_weights
def init_weights(self):
    self.weights = {}
    # np.random is not callable and takes no dtype argument;
    # np.random.random already returns float64 samples in [0, 1)
    self.weights['W'] = np.random.random((self.n_visible, self.n_hidden))
    self.weights['A'] = np.random.random((self.n_condition, self.n_visible))
    self.weights['B'] = np.random.random((self.n_condition, self.n_hidden))
    self.weights['a_stat'] = np.zeros((self.n_visible), dtype=np.float64)
    self.weights['b_stat'] = np.zeros((self.n_hidden), dtype=np.float64)
    return
def default_weight_initializer(self):
    """Initialize weights using a Gaussian distribution, mu=0, std=1.
    The first layer is the input layer, so we don't set any biases for it."""
    # np.random.randn gives standard-normal samples, matching the docstring
    self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]
    self.weights = [
        np.random.randn(y, x) / np.sqrt(x)
        for x, y in zip(self.sizes[:-1], self.sizes[1:])
    ]
def randPointUnitBall(d):
    if d == 1:
        return 2 * random(size=(1, )) - 1
    r = random()**(1 / d)
    theta = 2 * pi * random()
    p = randPointUnitBall(d - 1)
    return r_[r * cos(theta), r * sin(theta) * p / numpy.linalg.norm(p)]
def __init__(self, number_of_classes):
    self.input_images = tf.placeholder(
        shape=[None, 227, 227, 3],
        dtype=tf.float64,
        name="input_images",
    )

    # NOTE: np.random is a module, so np.random.sample(shape) is used for all
    # initial values. The strides/padding arguments and the in-channel counts
    # of conv_4/conv_5 below are assumptions (AlexNet-like) added so the
    # layers actually chain; tf.nn.conv2d and tf.nn.max_pool require them.
    conv_1_kernel = tf.Variable(np.random.sample((11, 11, 3, 96)),
                                dtype=tf.float64,
                                name="conv_1_kernel")
    conv_1 = tf.nn.conv2d(self.input_images, conv_1_kernel,
                          strides=(1, 4, 4, 1), padding="VALID",
                          name="conv_1")
    conv_1_activation = tf.nn.relu(conv_1, name="conv_1_activation")
    conv_1_max_pool = tf.nn.max_pool(conv_1_activation, ksize=(1, 3, 3, 1),
                                     strides=(1, 2, 2, 1), padding="VALID",
                                     name="conv_1_max_pool")

    conv_2_kernel = tf.Variable(np.random.sample((5, 5, 96, 256)),
                                name="conv_2_kernel")
    conv_2 = tf.nn.conv2d(conv_1_max_pool, conv_2_kernel,
                          strides=(1, 1, 1, 1), padding="SAME", name="conv_2")
    conv_2_activation = tf.nn.relu(conv_2, name="conv_2_activation")
    conv_2_max_pool = tf.nn.max_pool(conv_2_activation, ksize=(1, 3, 3, 1),
                                     strides=(1, 2, 2, 1), padding="VALID",
                                     name="conv_2_max_pool")

    conv_3_kernel = tf.Variable(np.random.sample((3, 3, 256, 384)),
                                name="conv_3_kernel")
    conv_3 = tf.nn.conv2d(conv_2_max_pool, conv_3_kernel,
                          strides=(1, 1, 1, 1), padding="SAME", name="conv_3")
    conv_3_activation = tf.nn.relu(conv_3, name="conv_3_activation")

    conv_4_kernel = tf.Variable(np.random.sample((3, 3, 384, 384)),
                                name="conv_4_kernel")
    conv_4 = tf.nn.conv2d(conv_3_activation, conv_4_kernel,
                          strides=(1, 1, 1, 1), padding="SAME", name="conv_4")
    conv_4_activation = tf.nn.relu(conv_4, name="conv_4_activation")

    conv_5_kernel = tf.Variable(np.random.sample((3, 3, 384, 256)),
                                name="conv_5_kernel")
    conv_5 = tf.nn.conv2d(conv_4_activation, conv_5_kernel,
                          strides=(1, 1, 1, 1), padding="SAME", name="conv_5")
    conv_5_activation = tf.nn.relu(conv_5, name="conv_5_activation")
    conv_5_max_pool = tf.nn.max_pool(conv_5_activation, ksize=(1, 3, 3, 1),
                                     strides=(1, 2, 2, 1), padding="VALID",
                                     name="conv_5_max_pool")
    tf.nn.fla
def rand_unicirc():
    t = 2 * pi * random()
    u = random() + random()
    if u > 1:
        r = 2 - u
    else:
        r = u
    return array([r * cos(t), r * sin(t)])
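# Editor's added sketch (not one of the collected snippets): rand_unicirc above
# folds u = random() + random() back into [0, 1], which makes the radial
# density proportional to r and therefore uniform over the disc. The
# self-contained helper below, rand_unicirc_sqrt (a hypothetical name), shows
# the equivalent r = sqrt(u) formulation under the same assumption.
from math import pi, sqrt, cos, sin
from random import random
from numpy import array

def rand_unicirc_sqrt():
    t = 2 * pi * random()
    r = sqrt(random())  # same radial CDF as the fold-back trick above
    return array([r * cos(t), r * sin(t)])

print(rand_unicirc_sqrt())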
def update_state():
    newly_infected.a = False
    global has_infection

    # visit the nodes in random order
    vs = list(g.vertices())
    shuffle(vs)
    for v in vs:
        if random() < x:
            p[v] = 0.0
            newly_infected[v] = True
        elif has_infection[v] == True:
            ns = list(v.out_neighbours())
            if len(ns) > 0:
                for w in ns:
                    if random() < p[w]:  # chance of being infected
                        newly_infected[w] = True
                        p[w] = 0.0
        if (p[v] < 1.0):
            p[v] += recovery_rate
        state[v] = [p[v], p[v], p[v], 1.0]

    has_infection = cp.deepcopy(newly_infected)

    #with open("plot.txt", "a") as myfile:
    #    myfile.write(str(counter["S"])+","+str(counter["I"])+","+str(counter["R"])+"\n")
    #plot_values["S"].append(counter["S"])
    #plot_values["I"].append(counter["I"])
    #plot_values["R"].append(counter["R"])

    # Filter out the recovered vertices
    #g.set_vertex_filter(removed, inverted=True)

    # The following will force the re-drawing of the graph, and issue a
    # re-drawing of the GTK window.
    win.graph.regenerate_surface()
    win.graph.queue_draw()

    #ax1.plot(range(len(plot_values["S"])), plot_values["S"], color='b')
    #ax1.plot(range(len(plot_values["I"])), plot_values["I"], color='r')
    #ax1.plot(range(len(plot_values["R"])), plot_values["R"], color='y')
    #fig1.canvas.draw()

    # if doing an offscreen animation, dump frame to disk
    if offscreen:
        global count
        pixbuf = win.get_pixbuf()
        pixbuf.savev(r'./frames/sirs%06d.png' % count, 'png', [], [])
        if count > max_count:
            sys.exit(0)
        count += 1

    # We need to return True so that the main loop will call this function more
    # than once.
    return True
def rpn_target(all_anchors, inside_inds, gt_labels, gt_boxes):
    # keep inside anchors
    anchors = all_anchors[inside_inds, :]
    if Debug:
        print('anchors.shape', anchors.shape)

    labels = np.empty((len(inside_inds),), dtype=np.float32)
    labels.fill(-1)

    overlaps = bbox_overlaps(
        np.ascontiguousarray(anchors, dtype=np.float32),
        np.ascontiguousarray(gt_boxes, dtype=np.float32))

    # indices of most possible labels for each anchor
    argmax_overlaps = overlaps.argmax(axis=1)
    max_overlaps = overlaps[np.arange(len(inside_inds)), argmax_overlaps]
    gt_argmax_overlaps = overlaps.argmax(axis=0)
    gt_max_overlaps = overlaps[gt_argmax_overlaps, np.arange(overlaps.shape[1])]
    gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0]

    # label 0 for background, 1 for object and -1 for nothing
    labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
    # for each gt, the anchor with highest overlap
    labels[gt_argmax_overlaps] = 1
    # labels[max_overlaps > cfg.TRAIN.RPN_POSITIVE_OVERLAP] = 1

    # subsample positives (np.random.choice assumed for the random subset)
    num_pos = int(cfg.TRAIN.RPN_POS_FRACTION * cfg.TRAIN.RPN_BATCH_SIZES)
    pos_inds = np.where(labels == 1)[0]
    if len(pos_inds) > num_pos:
        disable_inds = np.random.choice(
            pos_inds, size=(len(pos_inds) - num_pos), replace=False)
        labels[disable_inds] = -1

    # subsample negatives
    num_neg = cfg.TRAIN.RPN_BATCH_SIZES - np.sum(labels == 1)
    neg_inds = np.where(labels == 0)[0]
    if len(neg_inds) > num_neg:
        disable_inds = np.random.choice(
            neg_inds, size=(len(neg_inds) - num_neg), replace=False)
        labels[disable_inds] = -1

    idx_label = np.where(labels != -1)[0]
    idx_pos = np.where(labels == 1)[0]

    inds = inside_inds[idx_label]
    labels = labels[idx_label]
    pos_inds = inside_inds[idx_pos]
    pos_anchors = anchors[idx_pos]
    pos_gt_boxes = (gt_boxes[argmax_overlaps])[idx_pos]
    targets = bbox(pos_gt_boxes, pos_anchors)
    return inds, pos_inds, labels, targets
def update_state():
    newly_infected.a = False
    removed.a = False

    # visit the nodes in random order
    vs = list(g.vertices())
    shuffle(vs)
    for v in vs:
        if state[v] == I:
            if random() < r:
                state[v] = R
        elif state[v] == S:
            if random() < x:
                state[v] = I
            else:
                ns = list(v.out_neighbours())
                if len(ns) > 0:
                    w = ns[randint(0, len(ns))]  # choose a random neighbour
                    if state[w] == I:
                        state[v] = I
                        newly_infected[v] = True
        elif random() < s:
            state[v] = S
        if state[v] == R:
            removed[v] = True
        if state[v] == S:
            if I in [state[w] for w in v.out_neighbours()]:
                vertex_sfcs[v] = Simg_fear
            else:
                vertex_sfcs[v] = Simg
        else:
            vertex_sfcs[v] = Iimg

    # Filter out the recovered vertices
    g.set_vertex_filter(removed, inverted=True)

    # The following will force the re-drawing of the graph, and issue a
    # re-drawing of the GTK window.
    win.graph.regenerate_surface(lazy=False)
    win.graph.queue_draw()

    # if doing an offscreen animation, dump frame to disk
    if offscreen:
        global count
        pixbuf = win.get_pixbuf()
        pixbuf.savev(r'./frames/zombies%06d.png' % count, 'png', [], [])
        if count > max_count:
            sys.exit(0)
        count += 1

    # We need to return True so that the main loop will call this function more
    # than once.
    return True
def update_state():
    newly_infected.a = False
    removed.a = False

    # visit the nodes in random order
    vs = list(g.vertices())
    shuffle(vs)
    for v in vs:
        if state[v] == I:
            if random() < r:
                state[v] = R
        elif state[v] == S:
            if random() < x:
                state[v] = I
            else:
                ns = list(v.out_neighbours())
                if len(ns) > 0:
                    w = ns[randint(0, len(ns))]  # choose a random neighbour
                    if state[w] == I:
                        state[v] = I
                        newly_infected[v] = True
        elif random() < s:
            state[v] = S
        if state[v] == R:
            removed[v] = True
        if state[v] == S:
            if I in [state[w] for w in v.out_neighbours()]:
                vertex_sfcs[v] = Simg_fear
            else:
                vertex_sfcs[v] = Simg
        else:
            vertex_sfcs[v] = Iimg

    # Filter out the recovered vertices
    g.set_vertex_filter(removed, inverted=True)

    # The following will force the re-drawing of the graph, and issue a
    # re-drawing of the GTK window.
    win.graph.regenerate_surface()
    win.graph.queue_draw()

    # if doing an offscreen animation, dump frame to disk
    if offscreen:
        global count
        pixbuf = win.get_pixbuf()
        pixbuf.savev(r'./frames/zombies%06d.png' % count, 'png', [], [])
        if count > max_count:
            sys.exit(0)
        count += 1

    # We need to return True so that the main loop will call this function more
    # than once.
    return True
def mutate(p):
    if random() < 0.5:
        return random(), random()
    else:
        dx = normal(0, 0.0001)
        dy = normal(0, 0.0001)
        x = p[0] + dx
        y = p[1] + dy
        x = x - 1 if x > 1 else x + 1 if x < 0 else x
        y = y - 1 if y > 1 else y + 1 if y < 0 else y
        return x, y
def RandSelectFocalPoint(self):
    objId = random.randint(0, self.GetObjectsCount() - 1)
    faceId = random.randint(0, self.GetObjectFacesCount(objId) - 1)
    pt1, pt2, pt3 = self.GetVertices(objId, faceId)
    # numpy.random is a module; numpy.random.random() draws a uniform sample
    c1 = numpy.random.random()
    c2 = numpy.random.random()
    c3 = numpy.random.random()
    c = c1 + c2 + c3
    pt = (c1 / c) * pt1 + (c2 / c) * pt2 + (c3 / c) * pt3
    return pt
def setdata(self, A):
    """Note: p, q <= min(n,m)"""
    self.data.Brand = 2*(random((A.shape[0], self.data.p))-0.5)
    self.data.Crand = 2*(random((A.shape[1], self.data.q))-0.5)
    self.data.D = zeros((self.data.q, self.data.p), float)
    if self.update:
        U, S, Vh = linalg.svd(A)
        self.data.B = U[:, -1*self.data.p:]
        self.data.C = transpose(Vh)[:, -1*self.data.q:]
    else:
        self.data.B = self.data.Brand
        self.data.C = self.data.Crand
def FB_simulate(self):
    seeds = list(range(1, 5))
    self.set_seed(seeds)
    count = {
        'S': len(seeds),
        'I': self.number_of_nodes() - len(seeds),
        'R': 0
    }
    print(count)
    print("=========================")
    t_max = 1000
    for time in range(t_max):
        for i in self.nodes():  # while count['S'] > 0:
            if self.node[i]['name'] != 'S':
                continue
            # active: Spreader
            for n in self.neighbors(i):
                # passive: Ignorant
                if self.node[n]['name'] != 'I':
                    continue
                if random() < alpha:
                    # passive: I --> S or R
                    if random() < p:
                        # passive: I --> S
                        self.node[n]['name'] = 'S'
                        count['I'] -= 1
                        count['S'] += 1
                        print(count)
                    else:
                        # passive: I --> R
                        self.node[n]['name'] = 'R'
                        count['I'] -= 1
                        count['R'] += 1
                        print(count)
                # passive: Spreader or Stifler
                elif random() < lamda:
                    # active: S --> R
                    self.node[i]['name'] = 'R'
                    count['S'] -= 1
                    count['R'] += 1
                    print(count)
        if count['S'] == 0:
            spread_time = time
    return (self, spread_time, count)
def subsample_imbalanced(X_enhancer, X_promoter, y, positive_subsample_frac):
    n = np.shape(y)[0]  # sample size (i.e., number of pairs)
    # indices that are positive and selected to be retained or negative
    # np.random.random(n) assumed; element-wise | instead of `or` so the mask
    # is evaluated per pair
    to_keep = (np.random.random(n) < positive_subsample_frac) | (y == 1)
    return X_enhancer[to_keep, :], X_promoter[to_keep, :], y[to_keep]
def buildArrays():
    a = arange(0, n)
    vertex = shuffle(cos(2 * pi * a / n), sin(2 * pi * a / n))
    vertex.shape = (n, 2)
    color = random(n * 3)
    color.shape = (n, 3)
    return vertex, color
def get_random_number():
    m = random()
    choice = [25, 75, 125, 175, 225, 275, 325, 375, 425, 525]
    probabilities = [
        0.24, 0.23, 0.18, 0.14, 0.11, 0.036, 0.02, 0.0103, 0.0103, 0.005155
    ]
    return float(choices(choice, probabilities)[0]) * m
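# Editor's added note (assumption, not from the original corpus): random.choices
# takes *relative* weights, so a weight list like the one above does not have to
# sum to exactly 1. Minimal self-contained sketch of the same weighted-pick-then-
# scale pattern; the function name is illustrative only.
from random import choices, random

def weighted_scaled_sample(values, weights):
    # draw one value according to its weight, then scale by a uniform factor
    return float(choices(values, weights)[0]) * random()

print(weighted_scaled_sample([25, 75, 125], [0.5, 0.3, 0.2]))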
def step(self):
    # log-probability and loglike for the stochs' current values:
    logp = sum([stoch.logp for stoch in self.stochs]) + self.indicator.logp
    loglike = self.loglike

    # Sample a candidate value for the value and indicator of the stoch.
    self.propose()

    # log-probability and loglike for the stochs' proposed values:
    logp_p = sum([stoch.logp for stoch in self.stochs]) + self.indicator.logp

    # Skip the rest if a bad value is proposed
    if logp_p == -Inf:
        for stoch in self.stochs:
            stoch.revert()
        return

    loglike_p = self.loglike

    # test:
    test_val = logp_p + loglike_p - logp - loglike
    test_val += self.inv_q(self.indicator)
    test_val += self.q(self.indicator, self._u)
    if self.Jacobian is not None:
        test_val += self.Jacobian(self.indicator, self._u, **self.stoch_dict)

    if log(random()) > test_val:
        for stoch in self.stochs:
            stoch.revert()
def allocate_data_to_agents(self, type='lin'):
    '''
    :param type: lin - linear space between attributes or rnd - randomly distributed
    '''
    attr = self.p_feat
    if type == 'lin':
        means = np.linspace(np.min(self.data_train[[attr]].values),
                            np.max(self.data_train[[attr]].values),
                            self.n_agents + 2)[1:-1]
    elif type == 'rnd':
        # np.random.uniform assumed: agent means drawn uniformly between the
        # attribute's min and max
        means = np.random.uniform(np.min(self.data_train[[attr]].values),
                                  np.max(self.data_train[[attr]].values),
                                  self.n_agents)
    agt_data_rows = {i: [] for i in range(self.n_agents)}
    for index, row in self.data_train.iterrows():
        Pr = to_probability(1 / (abs(row[attr] - means) + 0.000001))
        i = np.random.choice(range(self.n_agents), p=Pr)
        agt_data_rows[i].append(index)
    self.agents_data = \
        {i: self.data_train.iloc[agt_data_rows[i], :].reset_index(drop=True)
         for i in range(self.n_agents)}
    if _debug_:
        print('allocated data to agents')
    return self.agents_data
def generatePoint(self, i):
    """
    Generates a new point through mutation and crossover.
    """
    # Select 3 distinct indices different from i between 0 and np-1
    # (indices[i+1:] assumed, so i itself is excluded as the comment states)
    indices = arange(self.Np)
    indices = concatenate((indices[:i], indices[i+1:]), axis=0)
    indices = permutation(indices)
    a, b, c = indices[0], indices[1], indices[2]

    # Get the point (x)
    x = self.population[i]

    # Generate mutant (y)
    y = self.population[a] + self.w*(self.population[b] - self.population[c])

    # Place it inside the box
    self.bound(y)

    # Generate ndim random numbers from 0..1
    uvec = random(self.ndim)

    # Crossover x and y, use components of y with probability self.pc
    yind = where(uvec < self.pc)[0]
    z = x.copy()
    z[yind] = y[yind]

    return z
def generate_harmonic_oscillators(self, number_=None):
    if number_ is None:
        self.harmonic_oscillators = \
            self.x_range*np.random.random(len(self.harmonic_oscillators)) \
            - self.x_range/2.
    else:
        # np.random.random(number_) assumed (np.random itself is not callable)
        self.harmonic_oscillators = \
            self.x_range*np.random.random(number_) - self.x_range/2.
def _init_state_value(self, s_name, randomized=True):
    if not self._is_state_in_Q(s_name):
        self.Q[s_name], self.E[s_name] = {}, {}
        for action in range(self.action_space.n):
            default_v = random() / 10 if randomized is True else 0.0
            self.Q[s_name][action] = default_v
            self.E[s_name][action] = 0.0
def next_slice(self):
    """Get a random slice of a file, together with its start position and ID.
    Populates self.snd_slice, self.mel_slice, and self.mask"""
    # np.random.randint assumed for drawing batch_size integer sample positions;
    # b indexes the batch slot and vpos is the picked virtual position
    picks = np.random.randint(0, self.n_total_samples, self.batch_size)
    for b, vpos in enumerate(picks):
        file_i = util.greatest_lower_bound(self.voffset, vpos)
        last_in = self.n_snd_elem[file_i] - 1
        last_out = self.n_samples[file_i] - 1
        sam_i = vpos - self.voffset[file_i]
        mel_in_b, mel_in_e = rf.get_rfield(self.mel_in, self.dec_out,
                                           sam_i, sam_i, last_out)
        dec_in_b, dec_in_e = rf.get_rfield(self.dec_in, self.dec_out,
                                           sam_i, sam_i, last_out)
        out_b, out_e = rf.get_ifield(self.ae_wav_in, self.dec_out,
                                     snd_in_b, snd_in_e, last_in)
        snd_off = self.snd_offset[file_i]
        mel_off = self.mel_offset[file_i]
        self.snd_slice[b] = self.snd_data[snd_off + dec_in_b:
                                          snd_off + dec_in_e + 1]
        self.mel_slice[b] = self.mel_data[mel_off + mel_in_b:
                                          mel_off + mel_in_e + 1]
        self.mask[b].zero_()
        self.mask[b, sam_i - out_b] = 1
        assert self.mask.size()[1] == out_e - out_b
def setdata(self, A):
    """Note: p, q <= min(n,m)"""
    self.data.Brand = 2*(random((A.shape[0], self.data.p))-0.5)
    self.data.Crand = 2*(random((A.shape[1], self.data.q))-0.5)
    self.data.D = zeros((self.data.q, self.data.p), float)
    if self.update:
        U, S, Vh = linalg.svd(A)
        self.data.B = U[:, -1*self.data.p:]
        self.data.C = transpose(Vh)[:, -1*self.data.q:]
    else:
        # self.data.B = eye(self.data.Brand.shape)
        # self.data.C = eye(self.data.Crand.shape)
        # USE OF RANDOM
        self.data.B = self.data.Brand
        self.data.C = self.data.Crand
def isample_without_replacement(self, k):
    """
    Return a sample of size k, without replacement
    k <= n
    O(n)
    Use a heap to keep track of selection.
    """
    if k > len(self.weights):
        raise ValueError("Sample size should be <= %d" % len(self.weights))
    heap = []
    random = self.random.random_sample
    weights = random(len(self.weights)) ** (1.0/self.weights)
    for ix, weight in enumerate(weights):
        if ix < k:
            heapq.heappush(heap, (weight, ix))
        else:
            if heap[0][0] < weight:
                heapq.heapreplace(heap, (weight, ix))
    # now sort the heap -- this is to make things repeatable
    heap.sort()
    # return permuted indices
    return self.random.permutation([x[1] for x in heap])
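# Editor's added sketch (assumption, not from the original corpus):
# isample_without_replacement above follows the Efraimidis-Spirakis scheme,
# where each item i gets the key u_i**(1/w_i) and the k largest keys are kept.
# A vectorised NumPy equivalent for comparison; the function name and the use
# of numpy.random.default_rng are illustrative choices.
import numpy as np

def weighted_sample_without_replacement(weights, k, rng=None):
    rng = rng or np.random.default_rng()
    weights = np.asarray(weights, dtype=float)
    keys = rng.random(len(weights)) ** (1.0 / weights)
    return np.argsort(keys)[-k:]  # indices of the k largest keys

print(weighted_sample_without_replacement([1.0, 5.0, 1.0, 3.0], 2))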
def __init__(self, sizes):
    self.sizes = sizes
    self.n_layers = len(sizes)
    self.learningRate = 2  # Note: typically needs to be lower when using 'sigm' activation and non-normalized inputs.
    self.momentum = 0.5
    self.scaling_learningRate = 1  # Scaling factor for the learning rate (each epoch)
    self.weightPenaltyL2 = 0  # L2 regularization
    self.nonSparsityPenalty = 0  # Non sparsity penalty
    self.sparsityTarget = 0.05  # Sparsity target
    self.dropoutFraction = 0  # Dropout level
    self.activation_function = 'tanh_opt'  # Hidden-layer activation: 'sigm' (sigmoid) or 'tanh_opt' (optimal tanh).
    self.output = 'sigm'  # output unit: 'sigm' (=logistic), 'softmax' or 'linear'
    self.testing = False

    self.W = [None for _ in range(1, self.n_layers)]
    self.vW = [None for _ in range(1, self.n_layers)]
    self.p = [None for _ in range(1, self.n_layers)]
    self.n_outputs = self.sizes[-1]

    for i in range(1, self.n_layers):
        # weights and weight momentum; +1 in shape for bias
        # np.random.rand assumed for the uniform draw (np.random is not callable)
        self.W[i - 1] = ((np.random.rand(self.sizes[i], self.sizes[i - 1] + 1) - 0.5)
                         * 2 * 4 * sqrt(6 / (self.sizes[i] + self.sizes[i - 1])))
        self.vW[i - 1] = np.zeros_like(self.W[i - 1])
        # average activations
        self.p[i - 1] = np.zeros((1, self.sizes[i]))
def main():
    f = Frame()
    f.pack(side='top', expand=1)
    quit = Button(f, text='Quit', command=sys.exit)
    quit.pack(side='top')
    o = Opengl(width=400, height=400, double=1)
    a = arange(0, n)
    vertex = shuffle(cos(2 * pi * a / n), sin(2 * pi * a / n))
    vertex.shape = (n, 2)
    # vertex1 = shuffle(0.5*cos(2*pi*a/n), 0.5*sin(2*pi*a/n))
    # color=ones((n, 3), 'i')
    # color[0]=[1,0,0]
    # color[1]=[1,1,0]
    # color[1]=[1,0,0]
    color = random(n * 3)
    color.shape = (n, 3)
    glVertexPointerd(vertex)
    glColorPointerd(color)
    glEnableClientState(GL_VERTEX_ARRAY)
    glEnableClientState(GL_COLOR_ARRAY)
    o.redraw = redraw
    o.pack(side='top', expand=1, fill='both')
    o.mainloop()
def __init__(self, antenae1=None, antenae2=None):
    if antenae1 is None or antenae2 is None:
        self.theta1 = 2*pi*random()
        self.theta2 = 2*pi*random()
        self.l1 = randrange(Antennae._MIN_LENGTH, Antennae._MAX_LENGTH)
        self.l2 = randrange(Antennae._MIN_LENGTH, Antennae._MAX_LENGTH)
    else:
        # TODO: make a little random
        self.theta1 = (antenae1.theta1 + antenae2.theta1)/2
        self.theta2 = (antenae1.theta2 + antenae2.theta2)/2
        self.l1 = (antenae1.l1 + antenae2.l1)/2
        self.l2 = (antenae1.l2 + antenae2.l2)/2
        print('Warning: antennae inheritance not implemented')
def hamil_i(numsites, site, J, V, m):
    # Params
    #   numsites - positive integer for the number of sites in the spin chain
    #   site - non-negative integer representing the site operated on
    #   J, V, m - arbitrary constants that define system potentials
    # Returns a (qutip.Qobj) hamiltonian for fermions jumping from site to
    # site. operator_i means the combined Hilbert-space operator acting at
    # site i, with remaining sites untouched by the total operator.

    # build ith site operators
    sigplus_i = operator_i('sigma plus', numsites, site)
    sigminus_i = operator_i('sigma minus', numsites, site)
    number_i = operator_i('number', numsites, site)
    sigma_zi = operator_i('sigmaz', numsites, site)
    sigplus_ip1 = operator_i('sigma plus', numsites, site+1)
    sigminus_ip1 = operator_i('sigma minus', numsites, site+1)
    number_ip1 = operator_i('number', numsites, site+1)

    # create parts of the hamiltonian
    # i.e. H_i = J*(s+_i*s-_i+1 + s-_i*s+_i+1) + V*n_i*n_i+1 + n_i*h_i
    rand_dist = 2*m*(random() - 0.5)
    jump = J*(sigplus_i*sigminus_ip1 + sigminus_i*sigplus_ip1)
    interaction_potential = V*(number_i*number_ip1)
    site_potential = rand_dist*sigma_zi  # write this into operator_i

    # add pieces together
    H_i = jump + interaction_potential + site_potential
    return H_i
def evaluate(self):
    # load raw data
    X_data_raw, Y_gender, Y_smile = self.read_data()
    # np.random.normal(0, 1, shape) assumed for the latent noise input
    score = self.combined.evaluate(
        [X_data_raw, np.random.normal(0, 1, (2723, 100, 1))],
        [X_data_raw, Y_gender])
    print("score", score)
def leave_one_out(dataset, labels, dataset_name):
    # np.random.randint assumed: index of the image to hold out
    test_image_idx = np.random.randint(0, 19)
    test_indexes = None
    if dataset_name == 'utrecht':
        start = test_image_idx * constant.N_SLICE_UTRECHT
        end = start + constant.N_SLICE_UTRECHT
        test_indexes = np.arange(start, end)
    elif dataset_name == 'singapore':
        start = test_image_idx * constant.N_SLICE_SINGAPORE
        end = start + constant.N_SLICE_SINGAPORE
        test_indexes = np.arange(start, end)
    elif dataset_name == 'amsterdam':
        start = test_image_idx * constant.N_SLICE_AMSTERDAM
        end = start + constant.N_SLICE_AMSTERDAM
        test_indexes = np.arange(start, end)
    else:
        print('Dataset name not found for LOO cross validation.')
        return None
    all_indexes = np.arange(0, len(dataset) - 1)
    train_indexes = np.array(list(set(all_indexes).difference(set(test_indexes))))
    train_data, test_data = dataset[train_indexes], dataset[test_indexes]
    train_labels, test_labels = labels[train_indexes], labels[test_indexes]
    return train_data, test_data, train_labels, test_labels
def sim_mh():
    # MH
    x0 = random(), random()
    x = x0
    z = []
    M = 100000
    c = 0
    for i in range(0, M):
        accept, x = mh_step(x)
        if accept:
            c += 1
        z.append(x)
    print("MH Acceptance rate:", 100 * c / M, "%")
    plt.hexbin([x for x, y in z], [y for x, y in z])
def find_in_list(distances, sizes, exposure, within_radius):
    """
    Corresponds to the check_and_choose function. Given a unit, ask which
    exposure might accept it as a target.
    """
    for target_idx in within_radius:
        for exp in exposure:
            difference = abs(distances[target_idx] - exp.exposure_distance)
            if exp.target_idx is None:
                exp.target_idx = target_idx
                exp.target_distance = distances[target_idx]
                exp.difference = difference
                exp.cumulative_size += sizes[target_idx]
            elif abs(difference - exp.difference) < scenario.epsilon:
                # np.random.random() assumed for the acceptance draw
                chance = (np.random.random()
                          < scenario.size[target_idx] / exp.cumulative_size)
                allowed = (zoned[target_idx] is False
                           and quarantined[target_idx] is False)
                if allowed and chance:
                    exp.target_idx = target_idx
                    exp.target_distance = distances[target_idx]
                    exp.difference = difference
                    exp.cumulative_size += sizes[target_idx]
            elif difference < exp.difference:
                exp.target_idx = target_idx
                exp.target_distance = distances[target_idx]
                exp.difference = difference
                exp.cumulative_size += sizes[target_idx]
            else:
                pass
def layerModes():
    N = mypaintlib.TILE_SIZE
    dst = np.zeros((N, N, 4), 'uint16')  # rgbu
    r1 = list(range(0, 20))
    r2 = list(range((1 << 15)//2 - 10, (1 << 15)//2 + 10))
    r3 = list(range((1 << 15) - 19, (1 << 15) + 1))
    dst_values = r1 + r2 + r3

    src = np.zeros((N, N, 4), 'int64')
    # np.random.randint / np.random.random assumed (np.randint does not exist
    # and np.random is not callable); integer division keeps slice bounds ints
    alphas = np.hstack((
        np.arange(N//4),                               # low alpha
        (1 << 15)//2 - np.arange(N//4),                # 50% alpha
        (1 << 15) - np.arange(N//4),                   # high alpha
        np.random.randint((1 << 15) + 1, size=N//4),   # random alpha
    ))
    # plot(alphas); show()
    src[:, :, 3] = alphas.reshape(N, 1)  # alpha changes along y axis

    src[:, :, 0] = alphas  # red
    src[:, N*0//4:N*1//4, 0] = np.arange(N//4)  # dark colors
    src[:, N*1//4:N*2//4, 0] = alphas[N*1//4:N*2//4]//2 + np.arange(N//4) - N//2  # 50% lightness
    src[:, N*2//4:N*3//4, 0] = alphas[N*2//4:N*3//4] - np.arange(N//4)  # bright colors
    src[:, N*3//4:N*4//4, 0] = alphas[N*3//4:N*4//4] * np.random.random(N//4)  # random colors
    # clip away colors that are not possible due to low alpha
    src[:, :, 0] = np.minimum(src[:, :, 0], src[:, :, 3]).clip(0, 1 << 15)
    src = src.astype('uint16')

    # figure(1); imshow(src[:,:,3], interpolation='nearest'); colorbar()
    # figure(2); imshow(src[:,:,0], interpolation='nearest'); colorbar()
    # show()

    src[:, :, 1] = src[:, :, 0]  # green
    src[:, :, 2] = src[:, :, 0]  # blue

    for name in dir(mypaintlib):
        if not name.startswith('tile_composite_'):
            continue
        junk1, junk2, mode = name.split('_', 2)
        print('testing', name, 'for invalid output')
        f = getattr(mypaintlib, name)
        for dst_value in dst_values:
            for alpha in [1.0, 0.999, 0.99, 0.90, 0.51, 0.50, 0.49, 0.01, 0.001, 0.0]:
                dst[:] = dst_value
                dst_has_alpha = False
                src_opacity = alpha
                f(src, dst, dst_has_alpha, src_opacity)
                # imshow(dst[:,:,0], interpolation='nearest'); gray(); colorbar(); show()
                errors = dst > (1 << 15)
                assert not errors.any()
        print('passed')
def mh_step(x, t=0):
    y = mutate(x)
    f1 = f(y, t)
    f2 = f(x, t)
    a = min(1, 0 if f1 == 0 else 1 if f2 == 0 else f1 / f2)
    if random() < a:
        return True, y
    return False, x
def random_graph(self):
    self.clear_graph()
    for x in range(self.N):
        v = self.graph.add_vertex()
    for x in range(self.N):
        for y in range(x):
            if random() < self.p and x != y:
                self.graph.add_edge(self.graph.vertex(x), self.graph.vertex(y))
def countsToSamples(counts):
    values = []
    bin = -180
    for value in counts:
        for i in arange(value):
            values.append(bin + random()*step)
        bin += step
    return values
def solve(self):
    # np.random.random() assumed for the initial iterate
    new_X = np.random.random()
    loss_history = []
    loss_history.append(new_X)
    for i in range(num_iterations):
        x = new_X - 2 * self.t * np.dot(self.A.T, np.dot(self.A, new_X) - self.b)
        new_X = LinearInverseSolver.t_operator(self.eta * self.t, x)
        loss_history.append(new_X)
    return loss_history
def _resample(self):
    indices = []
    C = [0.0] + [sum(self.w[:i + 1]) for i in range(self.num_particles)]
    u0, j = random(), 0
    for u in [(u0 + i) / self.num_particles for i in range(self.num_particles)]:
        while u > C[j]:
            j += 1
        indices.append(j - 1)
    self.particles = self.particles[indices, :]
def resample(self):
    cumulative_sum = np.cumsum(self.weights)
    cumulative_sum[-1] = 1.  # avoid round-off error
    indexes = np.searchsorted(cumulative_sum, random(self.N))

    # resample according to indexes
    self.particles = self.particles[indexes]
    self.weights = self.weights[indexes]
    self.weights /= np.sum(self.weights)  # normalize
def run(filename=None, directory=None, revanConfigFile=None, seed=None):
    if filename is None and directory is None:
        print("*** No filename or directory provided ***")
        print("Please provide a filename, a list of filenames, or a directory name")
        return

    # Check to see if the user supplied a directory. If so, get the list of source files
    if directory is not None:
        sourcefiles = glob.glob(directory + '/*.source')

    # Check if the user supplied a single file vs a list of files
    if not isinstance(filename, list) and filename is not None:
        sourcefiles = [filename]

    # Get the revan config file if one was not provided
    if revanConfigFile is None:
        revanConfigFile = glob.glob(directory + '/*.cfg')[0]

    # Generate a list of seeds
    # numpy.random.random assumed; cast to int per seed rather than on the array
    seeds = (numpy.random.random(len(sourcefiles)) * 100).astype(int)

    # Loop through each of the source files and run them through cosima and
    # revan, and analyze their output with EventAnalysis
    for sourcefile, seed in zip(sourcefiles, seeds):
        # Generate the cosima command
        command_cosima = "cosima -s %s %s" % (seed, sourcefile)

        # Issue the cosima command
        print(command_cosima)
        output = os.system(command_cosima)

        # Generate the sim filename
        simfile = sourcefile.replace('.source', '.sim')

        # Generate the revan command
        command_revan = "revan -f %s -c %s" % (simfile, revanConfigFile)

        # Issue the revan command
        print(command_revan)
        output = os.system(command_revan)

        # Extract the number of triggered and simulated events
        EventAnalysis.getTriggerEfficiency(filename=simfile)

        # Generate the .tra filename
        trafile = simfile.replace('.sim', '.tra')

        # Analyze the results of the .tra file
        # EventAnalysis.performCompleteAnalysis(filename=trafile)

    return
def resample(weights):
    n = len(weights)
    indices = []
    C = [0.] + [sum(weights[:i+1]) for i in range(n)]
    u0, j = random(), 0
    for u in [(u0+i)/n for i in range(n)]:
        while u > C[j]:
            j += 1
        indices.append(j-1)
    return indices
def r_smpl(w):
    num = len(w)
    index = []
    count = [0.] + [sum(w[:i+1]) for i in range(num)]
    e0, k = random(), 0
    for e in [(e0+i)/num for i in range(num)]:
        while e > count[k]:
            k += 1
        index.append(k-1)
    return index
def resample(weights):
    n = len(weights)
    indices = []
    # accumulated sum, equivalent to discrete integration
    # (really bad complexity; a proper bootstrap would be better)
    P = [0.] + [sum(weights[:i+1]) for i in range(n)]
    u0, j = random(), 0
    for u in [(u0+i)/n for i in range(n)]:
        while u > P[j]:  # find index of first P[j] greater than u
            j += 1
        indices.append(j-1)
    return indices
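# Editor's added sketch (assumption, not from the original corpus): resample,
# r_smpl, and the particle-filter _resample variants above all perform
# systematic resampling over the cumulative weight sum, assuming the weights
# are normalised. A vectorised NumPy equivalent is shown for comparison; the
# function name and the use of numpy.random.default_rng are illustrative.
import numpy as np

def resample_systematic(weights, rng=None):
    rng = rng or np.random.default_rng()
    weights = np.asarray(weights, dtype=float)
    n = len(weights)
    # one uniform offset shared by n evenly spaced positions in [0, 1)
    positions = (rng.random() + np.arange(n)) / n
    return np.searchsorted(np.cumsum(weights), positions)

print(resample_systematic([0.1, 0.2, 0.3, 0.4]))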