def reproduce(self, parent1, parent2, index1, index2):
    """Produce two children by single-point crossover of the parents' genotypes."""
    # Trace which parents are mated and which child slots they fill.
    print('mating parent {0} + parent {1} -> children {2} and {3}'.format(
        parent1.index, parent2.index, index1, index2))

    # Pick one random cut point along the genotype.
    cut = random.randrange(0, len(parent1.genotype))
    next_gen = parent1.gen + 1  # generation counter taken from parent 1

    # Child 1 takes parent 1's head and parent 2's tail; child 2 is the mirror.
    first = sample.Sample(
        parent1.genotype[0:cut] + parent2.genotype[cut:],
        gen=next_gen,
        index=index1,
        parents=[parent1, parent2],
        fitness_func=self.model.fitness_func)
    second = sample.Sample(
        parent2.genotype[0:cut] + parent1.genotype[cut:],
        gen=next_gen,
        index=index2,
        parents=[parent1, parent2],
        fitness_func=self.model.fitness_func)
    return first, second
def parseFile(self, filePath, metaPath):
    """Parse a data file plus its metadata file into Samples and Groups.

    Returns (samples, groups) on success, or None when either file fails
    to load.
    """
    data = self.loadFile(filePath)
    meta = self.loadMeta(metaPath)
    if data is None or meta is None:
        return None

    metaDict, names = self.parseMeta(meta)
    samples = [sample.Sample(name) for name in names]

    for line in data:
        # Lines whose first field starts with '#' are comments.
        if line[0].strip()[0] == '#':
            continue
        orgName = line[-1]
        for pos, smpl in enumerate(samples):
            smpl.addOrg(orgName, float(line[pos + 1]))

    # Every sample belongs to the catch-all group; further groups are
    # derived from each "attribute: value" pair in the metadata.
    groups = {"All Samples": grouping.Group("All Samples", samples)}
    for smpl in samples:
        attrs = metaDict[smpl.getName()]
        for attrName, attrVal in attrs.iteritems():  # Python 2 dict API
            groupName = attrName + ": " + attrVal
            if groupName in groups:
                groups[groupName].addSamples([smpl])
            else:
                groups[groupName] = grouping.Group(groupName, [smpl])
    return samples, list(groups.values())
def take_samples(self): for i in range(6): print "======", i, "========" m = self.get_measurement() t = get_time() self.samples.append(sample.Sample(m, t)) time.sleep(delay)
def genDistribution(xMean=0, xSD=1, yMean=0, ySD=1, n=20, namePrefix=''):
    """Return n Samples whose 2-D features are drawn from independent Gaussians."""
    return [
        sample.Sample(namePrefix + str(idx),
                      [random.gauss(xMean, xSD), random.gauss(yMean, ySD)])
        for idx in range(n)
    ]
def onclick(event):
    """Classify a clicked point via k-NN majority vote, add it to the data and plot it."""
    # Wrap the click position in an unlabelled Sample and find its neighbours.
    clicked = sample.Sample('', [event.xdata, event.ydata], '')
    neighbours = knn(clicked, data, K)

    # Majority vote over the neighbours' labels.
    votes = {l: 0 for l in LABELS}
    for neighbour in neighbours:
        votes[neighbour.getLabel()] += 1
    clicked.setLabel(max(votes, key=votes.get))

    data.append(clicked)

    # Draw the newly labelled point and refresh the figure.
    style_idx = LABELS.index(clicked.getLabel())
    pylab.scatter([clicked.getFeatures()[0]],
                  [clicked.getFeatures()[1]],
                  label=clicked.getLabel(),
                  marker=MARKERS[style_idx],
                  color=COLORS[style_idx])
    pylab.draw()
    print(clicked)
    for neighbour in neighbours:
        print(neighbour)
def __iadd__(self, other):
    """Combine self with another Matter object, returning a new Sample.

    Note: this returns a new object rather than mutating self, so
    ``a += b`` rebinds ``a`` to the combined Sample.

    Raises:
        TypeError: if `other` is not a Matter instance.
    """
    # isinstance is the idiomatic (and equivalent) form of
    # issubclass(type(other), Matter).
    if not isinstance(other, Matter):
        raise TypeError('Sample can only add Matter objects')
    return sample.Sample(contents=[self, other])
def normalizeSampleManual(samples, critterSubset, scale):
    """Return copies of `samples` with each critter count divided by `scale`.

    Args:
        samples: iterable of Sample objects to normalize.
        critterSubset: critter/organism names to carry over.
        scale: positive divisor applied to every count.

    Returns:
        A new list of normalized Samples, or None if `scale` is not
        positive (the division would be meaningless).
    """
    # Bug fix: the guard previously tested the imported `sample` MODULE
    # (`if sample <= 0`), which never rejected a bad divisor; it clearly
    # meant the numeric `scale` parameter.
    if scale <= 0:
        return None
    retval = []
    for sampleInst in samples:
        newSample = sample.Sample(sampleInst.getName())
        retval.append(newSample)
        for critter in critterSubset:
            newSample.addOrg(critter, sampleInst[critter] / scale)
    return retval
def compute_mean(data):
    # Purpose: return a Sample whose features are the element-wise mean of
    # all Samples in `data`. This is an exercise skeleton: the summation
    # loop body is left for the student to implement; the final division
    # by the sample count is already provided.
    dim = data[0].dimensionality()
    mean = sample.Sample('mean', [0.0] * dim)
    for d in data:
        # IMPLEMENTATION
        # ---- start your code ---- #
        pass
        # ---- end of your code --- #
    # NOTE: relies on Sample supporting division by an int — accumulate
    # each `d` into `mean` above before this line can be meaningful.
    return mean / len(data)
def take_samples(self): print "== Creating Initial Pattern ==" for i in range(24): m = self.get_measurement() if m == -1: break print "[", i, "]", m t = get_time() self.samples.append(sample.Sample(m, t)) time.sleep(delay)
def make_data(n):
    """Build n noisy samples along the line y = x, each with a random label."""
    # Draw all labels first so the random-call order matches: n label
    # choices, then the Gaussian noise draws.
    labels = [random.choice(LABELS) for _ in range(n)]
    base = []
    for idx in range(n):
        coord = idx / (float(SCALE))
        base.append(sample.Sample(labels[idx], [coord, coord], labels[idx]))
    # Gaussian displacement with std proportional to the data range.
    noise = genDistribution(0, DEV * SCALE, 0, DEV * SCALE, n, '')
    return [clean + jitter for clean, jitter in zip(base, noise)]
def make_data(n, scale=1):
    """Return n samples on the line y = x (range controlled by `scale`),
    displaced by Gaussian noise (sd 0.3) on both axes."""
    diagonal = []
    for idx in range(n):
        coord = float(idx) / scale
        diagonal.append(sample.Sample('', [coord, coord], ''))
    jitter = util.genDistribution(xSD=0.3, ySD=0.3, n=n)
    return [point + offset for point, offset in zip(diagonal, jitter)]
def onclick(event):
    """Classify the clicked point with k-NN and plot it on the current figure."""
    clicked = sample.Sample('', [event.xdata, event.ydata], '')
    # knn.knn presumably labels `clicked` in place (its result is unused
    # and getLabel() is read below) — confirm against the knn module.
    knn.knn(clicked, data, K)
    data.append(clicked)
    style_idx = LABELS.index(clicked.getLabel())
    pylab.scatter([clicked.getFeatures()[0]],
                  [clicked.getFeatures()[1]],
                  label=clicked.getLabel(),
                  marker=MARKERS[style_idx],
                  color=COLORS[style_idx])
    pylab.draw()
def computeCentroid(self):
    """Return a Sample positioned at the mean of all samples in this cluster."""
    dim = self.samples[0].dimensionality()
    center = sample.Sample('centroid', [0.0] * dim)
    # Sum every member, then divide by the cluster size (Sample supports
    # += and /= element-wise).
    for member in self.samples:
        center += member
    center /= len(self.samples)
    return center
def normalizeSampleRatio(samples, critterSubset):
    """Normalize each sample by its own largest critter count (per-sample ratio).

    A sample whose counts are all zero is divided by 1 so the division is safe.
    """
    normalized = []
    for original in samples:
        divisor = 0
        for critter in critterSubset:
            divisor = max(divisor, original[critter])
        if divisor == 0:
            divisor = 1
        copy = sample.Sample(original.getName())
        normalized.append(copy)
        for critter in critterSubset:
            copy.addOrg(critter, original[critter] / divisor)
    return normalized
def compute_std(data, mean):
    # Purpose: return a Sample whose features are the element-wise standard
    # deviation of `data` about `mean`. Exercise skeleton: the loop body
    # (summing squared differences into `std`) is left for the student.
    dim = data[0].dimensionality()
    std = sample.Sample('std', [0.0] * dim)
    for d in data:
        # IMPLEMENTATION
        # ---- start your code ---- #
        # you only need to sum the square of the
        # difference between d and the mean
        pass
        # ---- end of your code --- #
    # we've done the square root and averaging for you
    return std.power(0.5) / (len(data)**0.5)
def build_phrases(self, sample_frame):
    """Expand every rephrasing of `sample_frame` into populated Samples and build each one."""
    # NOTE(review): `sample_map`, `key` and `entity_map` are not defined in
    # this method — presumably module-level globals. Confirm `key` is not a
    # leftover loop variable; appending it also mutates the shared
    # sample_map entry.
    phrases = sample_map[sample_frame]['rephrases']
    phrases.append(key)
    vals_per_question = 10  # populated variants generated per phrasing
    built = []
    for phrase in phrases:
        for _ in range(vals_per_question):
            smpl = sample.Sample(phrase, self.name)
            smpl.populate_entities(entity_map)
            built.append(smpl)
    for smpl in built:
        self.build_phrase(smpl)
def normalizeSampleAbsolute(samples, critterSubset):
    """Normalize every sample by the single largest critter count found
    across ALL samples, making values comparable between samples."""
    divisor = 0
    for original in samples:
        for org in critterSubset:
            divisor = max(divisor, original[org])
    if divisor == 0:
        divisor = 1  # all counts zero: avoid dividing by zero
    normalized = []
    for original in samples:
        copy = sample.Sample(original.getName())
        normalized.append(copy)
        for critter in critterSubset:
            copy.addOrg(critter, original[critter] / divisor)
    return normalized
def train(df):
    """Train the Q-network on `df` with experience replay, plotting the
    per-step loss, then export the trained model."""
    model = network_builder()
    replay = memo.ExperienceReplay(mem_len=mem_len, disc=gamma)
    sampler = samp.Sample(df, v1_max, reward_col)
    epochs = sampler.get_epochs()

    # Pre-sized frame that plotter() fills with the loss per global step.
    loss_df = pd.DataFrame({
        'step': range(1, v1_max * epochs + 1),
        'loss': range(1, v1_max * epochs + 1)
    })

    for epoch in range(epochs):
        loss = 0.
        next_state = sampler.get_init_sample(epoch)
        # Start at step 1 so the initial state is not consumed twice.
        for step in range(1, v1_max):
            state = next_state
            q = model.predict(state)
            action = np.argmax(q[0])
            reward = sampler.get_reward(epoch, step)
            next_state = sampler.get_sample(epoch, step)
            game_over = sampler.is_over(epoch)

            # Store ([s, a, r, s'], game_over) then train on a replay batch.
            replay.store([state, action, reward, next_state], game_over)
            inputs, targets = replay.get_batch(model, batch_size=batch_size)
            loss += model.train_on_batch(inputs, targets)
            print("before store: ", str(epoch * v1_max + step), loss)
            plotter(loss_df, epoch * v1_max + step, loss)

    plt.plot('loss', 'step', data=loss_df, marker='o')
    plt.ylabel('Loss over episodes of training')
    plt.show()
    model_export(model)
def run_prevalence_thread(manifest_queue, platform, paired_end, result, working_dir, out_dir, produce_prevalence): """ Single-threaded generator of pools of reads. Args: manifest_queue: The parallel queue onto which to push manifests. platform: One of "roche", "illumina" or "ion". paired_end: Are we simulating paired_end data? result: The sequencing error result from which to simulate prevs. working_dir: The folder in which to place temporary files. out_dir: The final output directory to place the file. prevalence: The prevalence at which to run (default is all) Returns: True on completion. """ print "Simulating reads from sequence sets." prevalences = DEFAULT_PREVALENCES if produce_prevalence is not None: prevalences = [produce_prevalence] # remove any prevalences that fall within error bars. prevalences_to_remove = [p for p in prevalences \ if prevalence.range_unusable(plat.platforms[platform], p)] if len(prevalences_to_remove) > 0: print "Removing prevalences ", prevalences_to_remove, \ "as they are ambiguous at the given error rate. " \ "Please rerun at a different prevalence." prevalences = [ p for p in prevalences if p not in prevalences_to_remove ] for required_prevalence in prevalences: hashed_filename = prevalence.produce_prevalence( required_prevalence, plat.platforms[platform], paired_end, result['susceptible_fq'], result['susceptible_sam'], result['resistant_fq'], result['resistant_sam'], result['sequence'], working_dir, out_dir) manifest_queue.put( sample.Sample(ntpath.basename(hashed_filename), result['sequence'], required_prevalence))
def create_samples(self):
    """Build one Sample per row of self.data.

    Every column except 'sample' and 'label' is treated as a gene value;
    the row index becomes the Sample id and the 'label' column its label.

    Returns:
        list of Sample objects, one per data row.
    """
    our_samples = []
    for row in range(len(self.data['sample'])):
        genes = [self.data[col][row]
                 for col in self.data.keys()
                 if col not in ('sample', 'label')]
        our_samples.append(s.Sample(row, genes, self.data['label'][row]))
    return our_samples
def run_model():
    """Replay a trained model over the dataset, sampling one action per
    step into a policy array, then export the policy."""
    d_f = dataset_import()
    sampler = samp.Sample(d_f, v1_max, reward_col)
    epochs = sampler.get_epochs()
    policy = np.zeros((epochs * v1_max, 1))
    model = model_import()

    for epoch in range(epochs):
        loss = 0.
        next_state = sampler.get_init_sample(epoch)
        game_over = sampler.is_over(epoch)
        # Step from 1 so the initial state is consumed exactly once.
        for step in range(1, v1_max):
            state = next_state
            q = model.predict(state)
            action = np.argmax(q[0])
            policy[epoch * v1_max + step] = pick_action(q[0])
            next_state = sampler.get_sample(epoch, step)
            game_over = sampler.is_over(epoch)

    policy_export(d_f, policy)
def main(args):
    """Drive the sample pipeline: load images, optionally crop, then save."""
    if args.verbose:
        print('Generating a new sample named {}'.format(args.samplename))
        print('Starting the script with options {}'.format(args))
    specimen = sample.Sample(sample_name=args.samplename)
    # NOTE(review): image_count reads args.filename while load_images gets
    # args.filenames — one of the two attribute names is probably wrong;
    # confirm against the argparse definition.
    specimen.image_count = len(args.filename)

    # Loading image paths
    if args.verbose:
        print('Loading the specified images.')
    specimen.load_images(args.filenames)

    # Cropping (square detection then crop)
    if args.crop:
        if args.verbose:
            print('Starting the cropping process.')
            print('Detecting the square(s).')
        specimen.detect_square()
        if args.verbose:
            print('Cropping the images.')
        specimen.crop()

    # Saving
    if args.crop:
        if args.verbose:
            print('Saving the cropped images')
        specimen.save_images()
def render_webpage(args={}):
    """ Configure and return a template for the Siegel modular forms pages.

    Dispatches on args['group'] and args['page'] ('forms', 'basic',
    'dimensions', 'specimen'); with no args, renders the navigation page.
    Python 2 only (backtick repr, tuple-parameter lambdas, cmp=).
    NOTE(review): the mutable default `args={}` is read-only here, but is
    still a shared object — confirm no caller mutates it.
    """
    info = dict(args)
    # info['learnmore'] = [ ('Siegel modular forms', 'http://en.wikipedia.org/wiki/Siegel_modular_form')]
    info['learnmore'] = []
    bread = [('Siegel modular forms', url_for('ModularForm_GSp4_Q_top_level'))]
    if len(args) == 0:
        return render_template("ModularForm_GSp4_Q_navigation.html", title='Siegel Modular Forms', bread=bread, **info)
    # possible keys for the URL
    group = args.get('group')
    character = args.get('character')
    weight = args.get('weight')
    level = args.get('level')
    form = args.get('form')
    page = args.get('page')
    weight_range = args.get('weight_range')
    # set info
    info['group'] = group
    info['form'] = form
    info['level'] = level
    # We check first the key 'group' since it is needed always
    # NOTE(review): tmp_parent_as_tex appears unused below.
    tmp_parent_as_tex = '%s'
    # NOTE(review): direct args['group'] raises KeyError when absent,
    # unlike the args.get('group') used above — confirm intended.
    if args['group']:
        # Map the group name to its TeX description and dimension formula.
        if 'Sp4Z' == args['group']:
            info['parent_as_tex'] = 'M_{k}\\big({\\rm Sp}(4,\\mathbb{Z})\\big)'
            # dimension = siegel_core._dimension_Sp4Z
            dimension = dimensions.dimension_Sp4Z
            info['generators'] = 'smf.Igusa_generators'
        elif 'Gamma0_2' == args['group']:
            info['parent_as_tex'] = 'M_{k}\\big(\\Gamma_0(2)\\big)'
            dimension = dimensions.dimension_Gamma0_2
        elif 'Gamma1_2' == args['group']:
            info['parent_as_tex'] = 'M_{k}\\big(\\Gamma_1(2)\\big)'
            dimension = dimensions.dimension_Gamma1_2
        elif 'Gamma_2' == args['group']:
            info['parent_as_tex'] = 'M_{k}\\big(\\Gamma(2)\\big)'
            dimension = dimensions.dimension_Gamma_2
        elif 'Sp4Z_2' == args['group']:
            info['parent_as_tex'] = 'M_{k,2}\\big({\\rm Sp}(4,\\mathbb{Z})\\big)'
            dimension = siegel_core._dimension_Sp4Z_2
        elif 'Sp6Z' == args['group']:
            info['parent_as_tex'] = 'M_k\\big({\\rm Sp}(6,\\mathbb{Z})\\big)'
            # dimension = siegel_core._dimension_Sp6Z
            dimension = dimensions.dimension_Sp6Z
        elif 'Sp8Z' == args['group']:
            info['parent_as_tex'] = 'M_k\\big({\\rm Sp}(8,\\mathbb{Z})\\big)'
            # dimension = siegel_core._dimension_Sp8Z
            dimension = dimensions.dimension_Sp8Z
        elif 'Gamma0_4_half' == group:
            info['parent_as_tex'] = 'M_{k-1/2}\\big(\\Gamma_0(4)\\big)'
            # dimension = siegel_core._dimension_Gamma0_4_half
            dimension = dimensions.dimension_Gamma0_4_half
        elif 'Kp' == args['group']:
            info['parent_as_tex'] = 'M_k\\big(K(p)\\big)'
            info['learnmore'] += [('Paramodular forms', 'http://math.lfc.edu/~yuen/paramodular/')]
            info['generators'] = 'smf.Kp_generators'
            dimension = siegel_core._dimension_Kp
        # NOTE(review): this 'Gamma0_2' branch is unreachable — the same
        # value is already matched earlier in the chain.
        elif 'Gamma0_2' == args['group']:
            info['parent_as_tex'] = 'M_k\\big(\\Gamma_0(2)\\big)'
            dimension = siegel_core._dimension_Gamma0_2
        elif 'Gamma0_3' == args['group']:
            info['parent_as_tex'] = 'M_k\\big(\\Gamma_0(3)\\big)'
            dimension = siegel_core._dimension_Gamma0_3
        elif 'Gamma0_3_psi_3' == args['group']:
            info['parent_as_tex'] = 'M_k\\big(\\Gamma_0(3,\\psi_3)\\big)'
            dimension = siegel_core._dimension_Gamma0_3_psi_3
        elif 'Gamma0_4' == args['group']:
            info['parent_as_tex'] = 'M_k\\big(\\Gamma_0(4)\\big)'
            dimension = siegel_core._dimension_Gamma0_4
        elif 'Gamma0_4_psi_4' == args['group']:
            info['parent_as_tex'] = 'M_k\\big(\\Gamma_0(4,\\psi_4)\\big)'
            dimension = siegel_core._dimension_Gamma0_4_psi_4
        else:
            info['error'] = 'Request for unavailable type of Siegel modular form'
            return render_template("None.html", **info)
        info['learnmore'] += [('The spaces \(' + info['parent_as_tex'] + '\)', url_for('ModularForm_GSp4_Q_top_level', group=group, page='basic'))]
        bread += [('\(' + info['parent_as_tex'] + '\)', url_for('ModularForm_GSp4_Q_top_level', group=group, page='forms'))]
    else:
        # some nonsense request came in, we answer by nonsense too
        return render_template("None.html")
    # We branch now according to the value of the key 'page'
    ##########################################################
    ## FORM COLLECTION REQUEST
    ##########################################################
    if page == 'forms':
        try:
            f = urllib.urlopen(DATA + group + '/available_eigenforms.p')
            go = pickle.load(f)
            f.close()
            forms_exist = True
        except (IOError, EOFError, KeyError):
            info['error'] = 'No data access'
            forms_exist = False
        if True == forms_exist:
            info['forms'] = [(k, [(form, go[k][form]) for form in go[k]]) for k in go]
        return render_template("ModularForm_GSp4_Q_forms.html", title='Siegel modular forms \(' + info['parent_as_tex'] + '\)', bread=bread, **info)
    if page == 'basic':
        bread += [('Basic information', url_for('ModularForm_GSp4_Q_top_level', group=group, page=page))]
        return render_template("ModularForm_GSp4_Q_basic.html", title='Siegel modular forms basic information', bread=bread, **info)
    ##########################################################
    ## DIMENSIONS REQUEST
    ##########################################################
    if page == 'dimensions':
        # We check whether the weight_range makes sense to us and, if so, dispatch it
        info['weight_range'] = weight_range
        try:
            assert info['weight_range'], 'Please enter a valid argument'
            min_wt, max_wt, sym_pow = input_parser.kj_parser(weight_range)
            min_wt = Integer(min_wt)
            if None == max_wt or max_wt < min_wt:
                max_wt = min_wt
            if None == sym_pow:
                sym_pow = 0
            # Cap the request size before computing any dimensions.
            assert min_wt < 1000000 and (max_wt - min_wt + 1) * max_wt < 10000 and sym_pow < 1000, '%d-%d,%d: Input too large: Please enter smaller range or numbers.' % (max_wt, min_wt, sym_pow)
        except Exception as e:
            info['error'] = str(e)
            return render_template("ModularForm_GSp4_Q_dimensions.html", title='Siegel modular forms dimensions \(' + info['parent_as_tex'] + '\)', bread=bread, **info)
        # A priori the request is reasonable, so we try to get the data for the answer
        try:
            info['new_method'] = None
            if 'Gamma_2' == group or 'Gamma0_2' == group or 'Gamma1_2' == group or 'Sp4Z' == group or 'Sp6Z' == group or 'Sp8Z' == group or 'Gamma0_4_half' == group:
                info['sym_pow'] = sym_pow
                info['table_headers'], info['dimensions'] = dimension(range(min_wt, max_wt + 1), sym_pow)
                ####### a hack ########
                info['new_method'] = 'new_method'
                bread += [('Dimensions', url_for('ModularForm_GSp4_Q_top_level', group=group, page=page, level=level, weight_range=weight_range))]
            elif 'Kp' == group:
                info['dimensions'] = [(k, dimension(k, tp=int(level))) for k in range(min_wt, max_wt + 1)]
                bread += [('Dimensions', url_for('ModularForm_GSp4_Q_top_level', group=group, page=page, level=level, weight_range=weight_range))]
            else:
                info['dimensions'] = [(k, dimension(k)) for k in range(min_wt, max_wt + 1)]
                bread += [('Dimensions', url_for('ModularForm_GSp4_Q_top_level', group=group, page=page, weight_range=weight_range))]
        except Exception as e:
            info['error'] = 'Functional error: %s' % (str(e))  #(sys.exc_info()[0])
            return render_template("ModularForm_GSp4_Q_dimensions.html", title='Siegel modular forms dimensions \(' + info['parent_as_tex'] + '\)', bread=bread, **info)
        # We provide some headers for the 'old' method and ask for rendering an answer
        if info['new_method']:
            # Deliberate no-op: the new method already set its own headers.
            info['table_headers'] = info['table_headers']
        # elif 'Sp8Z' == group:
        #     info['table_headers'] = ['Weight', 'Total', 'Ikeda lifts', 'Miyawaki lifts', 'Other']
        elif 'Sp6Z' == group:
            info['table_headers'] = ['Weight', 'Total', 'Miyawaki lifts I', 'Miyawaki lifts II', 'Other']
        elif group == 'Kp':
            info['table_headers'] = ["Weight", "Total", "Gritsenko Lifts", "Nonlifts", "Oldforms"]
        elif 'Sp4Z_2' == group or 'Gamma0_4_half' == group:
            info['table_headers'] = ['Weight', 'Total', 'Non cusp', 'Cusp']
        else:
            info['table_headers'] = ["Weight", "Total", "Eisenstein", "Klingen", "Maass", "Interesting"]
        return render_template("ModularForm_GSp4_Q_dimensions.html", title='Siegel modular forms dimensions \(' + info['parent_as_tex'] + '\)', bread=bread, **info)
    ##########################################################
    ## SPECIFIC FORM REQUEST
    ##########################################################
    if page == 'specimen':
        info['weight'] = weight
        ev_modulus = args.get('emod')
        fc_modulus = args.get('fcmod')
        erange = args.get('erange')
        fcrange = args.get('fcrange')
        # try to load data
        if 'Kp' == group or 'Sp4Z_2' == group or 'Sp4Z' == group:
            # fetch from mongodb
            try:
                smple = sample.Sample([group], weight + '_' + form)
                # f = (field zero, explicit formula, Fourier coefficients);
                # g = (field zero, eigenvalues)
                f = (smple.field()(0), smple.explicit_formula(), smple.Fourier_coefficients() if smple.Fourier_coefficients() else {})
                g = (smple.field()(0), smple.eigenvalues() if smple.eigenvalues() else {})
                file_name = weight + '_' + form + '.sobj'
                f_url = DATA + group + '/eigenforms/' + file_name
                file_name = weight + '_' + form + '-ev.sobj'
                g_url = DATA + group + '/eigenvalues/' + file_name
                loaded = True
            except Exception as e:
                info['error'] = 'Data not available: %s %s' % (str(e), weight + '_' + form)
                loaded = False
        else:
            try:
                file_name = weight + '_' + form + '.sobj'
                f_url = DATA + group + '/eigenforms/' + file_name
                # print 'fafaf %s'%f_url
                f = load(f_url)
                file_name = weight + '_' + form + '-ev.sobj'
                g_url = DATA + group + '/eigenvalues/' + file_name
                # print 'gagag %s'%g_url
                g = load(g_url)
                loaded = True
            except:
                info['error'] = 'Data not available'
                loaded = False
        if True == loaded:
            # define specific methods for computing discriminant and ordering of form
            if 'Sp8Z' != group and 'Sp6Z' != group:
                # with current data this is all degree 2 SMFs
                __disc = lambda (a, b, c): 4 * a * c - b**2
                __cmp = lambda (a, b, c), (A, B, C): cmp((4 * a * c - b**2, a, b, c), (4 * A * C - B**2, A, B, C))
            if 'Sp8Z' == group:
                # matrix index is given as [m11 m22 m33 m44 m12 m13 m23 m14 m24 m34]
                __mat = lambda (m11, m22, m33, m44, m12, m13, m23, m14, m24, m34): \
                    matrix(ZZ, 4, 4, [m11, m12, m13, m14, m12, m22, m23, m24, m13, m23, m33, m34, m14, m24, m34, m44])
                __disc = lambda i: __mat(i).det()
                __cmp = lambda f1, f2: cmp([__mat(f1).det()] + list(f1), [__mat(f2).det()] + list(f2))
            if 'Sp6Z' == group:
                # matrix index is given as [m11/2 m22/2 m33/2 m12 m13 m23]
                __mat = lambda (a, b, c, d, e, f): \
                    matrix(ZZ, 3, 3, [2 * a, d, e, d, 2 * b, f, e, f, 2 * c])
                __disc = lambda i: __mat(i).det()
                __cmp = lambda f1, f2: cmp([__mat(f1).det()] + list(f1), [__mat(f2).det()] + list(f2))
            # make the coefficients of the M_k(Sp(4,Z)) forms integral
            # if 'Sp4Z' == group:  # or 'Sp4Z_2' == group:
            #     d = lcm(map(lambda n: denominator(n), f[1].coefficients()))
            #     f = list(f)
            #     f[1] *= d
            #     for k in f[2]:
            #         f[2][k] *= d
            # replace generator with a to make things prettier
            if isinstance(f[0].parent(), Field):
                if f[0].parent() != QQ:
                    gen = str(f[0].parent().gen())
                    info['gen_coeff_field'] = teXify_pol(str(f[0].parent().gen()).replace(gen, 'a'))
                    info['poly_coeff_field'] = teXify_pol(str(f[0].parent().polynomial()).replace(gen, 'a'))
                    info['poly_in_gens'] = teXify_pol(str(f[1]).replace(gen, 'a'))
                else:
                    info['poly_in_gens'] = teXify_pol(str(f[1]))
            else:
                # coefficient field is not a sage field, so just assume its supposed to be rationals
                info['poly_in_gens'] = teXify_pol(str(f[1]))
            # isolate requested eigenvalue indices
            if erange == 'all':
                filt_evals = g[1]
                eval_index = filt_evals.keys()
                info['erangedesc'] = 'all available eigenvalues'
            else:
                if erange:
                    spliterange = erange.split('-')
                    if len(spliterange) > 1 and spliterange[0].isdigit() and spliterange[1].isdigit():
                        elow, ehigh = int(spliterange[0]), int(spliterange[1])
                        # filter out to have eigenvalues in [elow, ehigh]
                        filt_evals = {n: lam for n, lam in g[1].iteritems() if int(n) >= elow and int(n) <= ehigh}
                        eval_index = filt_evals.keys()
                        info['erangedesc'] = 'eigenvalues with $n$ in [' + ` elow ` + ', ' + ` ehigh ` + ']'
                    else:
                        # can't make sense of the range, return a default
                        info['erange'] = ''
                        filt_evals = g[1]
                        eval_index = filt_evals.keys()[0:20]
                        info['erangedesc'] = 'the first few eigenvalues'
            # prepare formatted eigenvalues
            ftd_evals = []
            try:
                if not ev_modulus:
                    m = 0
                else:
                    m = int(ev_modulus)
                info['ev_modulus'] = m
                K = g[0].parent().fraction_field()
                if m != 0:
                    # Reduce every requested eigenvalue modulo m (ideal
                    # reduction when the field is not QQ).
                    if QQ == K:
                        for i in eval_index:
                            rdcd_eval = Integer(g[1][i]) % m
                            ftd_evals.append((str(i), teXify_pol(str(rdcd_eval))))
                    else:
                        I = K.ideal(m)
                        for i in eval_index:
                            rdcd_eval = I.reduce(g[1][i])
                            ftd_evals.append((str(i), teXify_pol(str(rdcd_eval).replace(gen, 'a'))))
                    info['emoddesc'] = 'reduced modulo ' + ` m ` + '.'
                else:
                    for i in eval_index:
                        if QQ == K:
                            ftd_evals.append((str(i), teXify_pol(str(g[1][i]))))
                        else:
                            ftd_evals.append((str(i), teXify_pol(str(g[1][i]).replace(gen, 'a'))))
                    info['emoddesc'] = 'with no reduction.'
            except:
                pass
            if (fcrange == 'all'):
                filt_fcs = f[2]
                fc_index = filt_fcs.keys()
                fc_index.sort(cmp=__cmp)
                info['fcrangedesc'] = 'all available Fourier coefficients'
            else:
                if fcrange:
                    splitfcrange = fcrange.split('-')
                    if len(splitfcrange) > 1 and splitfcrange[0].isdigit() and splitfcrange[1].isdigit():
                        fclow, fchigh = int(splitfcrange[0]), int(splitfcrange[1])
                        filt_fcs = {n: fc for n, fc in f[2].iteritems() if __disc(n) >= fclow and __disc(n) <= fchigh}
                        fc_index = filt_fcs.keys()
                        fc_index.sort(cmp=__cmp)
                        info['fcrangedesc'] = 'Fourier coefficients with index such that $D$ is in [' + ` fclow ` + ', ' + ` fchigh ` + ']'
                    else:
                        # can't make sense of the range, return a default
                        info['fcrange'] = ''
                        filt_fcs = f[2]
                        fc_index = filt_fcs.keys()
                        fc_index.sort(cmp=__cmp)
                        fc_index = fc_index[0:20]
                        info['fcrangedesc'] = 'the first few Fourier coefficients'
            # prepare formatted fourier coefficients
            ftd_fcs = []
            try:
                if not fc_modulus:
                    m = 0
                else:
                    m = int(fc_modulus)
                info['fc_modulus'] = m
                K = g[0].parent().fraction_field()
                if m != 0:
                    # Sp4Z_2 coefficients are vector-valued (lists of
                    # (coefficient, monomial) pairs); others are scalars.
                    if 'Sp4Z_2' == group:
                        if QQ == K:
                            for i in fc_index:
                                ftd_fc = sum((v[0] % m) * v[1] for v in list(f[2][i]))
                                ftd_fcs.append((str(i), teXify_pol(str(ftd_fc)), str(__disc(i))))
                        else:
                            I = K.ideal(m)
                            for i in fc_index:
                                ftd_fc = sum(I.reduce(v[0]) * v[1] for v in list(f[2][i]))
                                ftd_fcs.append((str(i), teXify_pol(str(ftd_fc).replace(gen, 'a')), str(__disc(i))))
                    else:
                        if QQ == K:
                            for i in fc_index:
                                ftd_fc = Integer(f[2][i]) % m
                                ftd_fcs.append((str(i), teXify_pol(str(ftd_fc)), str(__disc(i))))
                        else:
                            I = K.ideal(m)
                            for i in fc_index:
                                ftd_fc = I.reduce(f[2][i])
                                ftd_fcs.append((str(i), teXify_pol(str(ftd_fc).replace(gen, 'a')), str(__disc(i))))
                    info['fcmoddesc'] = 'reduced modulo ' + ` m ` + '.'
                else:
                    for i in fc_index:
                        ftd_fc = f[2][i]
                        if QQ == K:
                            ftd_fcs.append((str(i), teXify_pol(str(ftd_fc)), str(__disc(i))))
                        else:
                            ftd_fcs.append((str(i), teXify_pol(str(ftd_fc).replace(gen, 'a')), str(__disc(i))))
                    info['fcmoddesc'] = 'with no reduction.'
            except:
                pass
            location = url_for('ModularForm_GSp4_Q_top_level', group=group, page=page, weight=weight, form=form)
            properties2 = [('Species', '$' + info['parent_as_tex'] + '$'), ('Weight', '%s' % weight)]
            # if implemented, add L-function to friends
            if 'Sp4Z' == group:
                numEmbeddings = f[0].parent().degree()
                friends = []
                if form.endswith('E'):
                    # form is a Siegel-Eisenstein series, nothing interesting
                    # to show here
                    pass
                elif form.endswith('Klingen'):
                    # form is a Klingen-Eisenstein series, so plausibly could
                    # link to the elliptic cusp form on the boundary it comes
                    # from
                    pass
                elif form.endswith('Maass'):
                    # link the the underlying elliptic modular form. This
                    # datum is not included with the modular form, so just
                    # link to the unique Galois orbit of full level and
                    # weight 2k-2 (this code assumes Maeda).
                    ellWeight = 2 * int(weight) - 2
                    friends.append(('Elliptic modular form', '/ModularForm/GL2/Q/holomorphic/1/' + str(ellWeight)))
                else:
                    # there are no other lifts to full level, so the L-function
                    # is primitive and therefore interesting
                    for embedding in range(0, numEmbeddings):
                        friends.append(('Spin L-function for ' + str(weight) + '_' + form + '.' + str(embedding), '/L/ModularForm/GSp/Q/Sp4Z/specimen/' + str(weight) + '/' + form + '/' + str(embedding)))
            else:
                friends = []
            #TODO implement remaining spin L-functions, standard L-functions,
            # and first Fourier-Jacobi coefficient downloads
            downloads = [('Fourier coefficients', f_url), ('Eigenvalues', g_url)]
            location = url_for('ModularForm_GSp4_Q_top_level', group=group, page=page, weight=weight, form=form)
            bread += [(weight + '_' + form, location)]
            info['ftd_evals'] = ftd_evals
            info['ftd_fcs'] = ftd_fcs
            info['location'] = location
            info['form_name'] = form
            return render_template("ModularForm_GSp4_Q_specimen.html", title='Siegel modular form ' + weight + '_' + form, bread=bread, properties2=properties2, friends=friends, downloads=downloads, **info)
    # if a nonexisting page was requested return the homepage of Siegel modular forms
    return render_webpage()
#!/usr/bin/env python
import numpy
import sample
import matplotlib.pyplot as plt
import IRT

# Build a jittered raytracer sample and visualise its per-pixel hit levels.
scene = sample.Sample()
scene.setRaytracer(IRT.Raytracer_Jittered)
# NOTE(review): numpy.long was removed in NumPy 1.24 — pin NumPy < 1.24 or
# switch to a concrete integer dtype if this script is revived.
screen = numpy.zeros((600, 800), dtype=numpy.long)
scene.hitLevel(screen)
plt.imshow(screen)
plt.colorbar()
plt.show()
training_data = sample.Sample() logger.warning('Using only first 100 mentions!') #training_data.generated = candidate_generation.generate_candidate(corpus.tokenized_mentions,dictionary.processed,dictionary.tokenized,dictionary.vectorized,config.getint('candidate','n')) training_data.generated = candidate_generation.generate_candidate(corpus.tokenized_mentions[:100],dictionary.processed,dictionary.tokenized,dictionary.vectorized,config.getint('candidate','n')) logger.info('Finished generating {0} candidates.'.format(len(training_data.generated))) ''' ''' #save candidates / load previously generated candidates import pickle with open(config['settings']['gencan_file'],'wb') as f: pickle.dump(training_data.generated,f) logger.info('Saving generated candidates...') ''' import pickle training_data = sample.Sample() training_data.generated = pickle.load( open(config['settings']['gencan_file_train'], 'rb')) #training_data.generated = pickle.load(open('gitig_generated_candidates_all.txt','rb')) logger.info('Loading generated candidates...') #formatting generated candidates logger.info('Formatting candidates...') sample.format_candidates(training_data, corpus_train, dictionary.vectorized) #format y logger.info('Checking candidates...') sample.check_candidates(training_data, corpus_train.mention_ids) # validation set
def test_struct_sample(self):
    """struct-packing a Sample as an unsigned long is expected to yield ''."""
    import sample as s
    packed = struct.pack('>L', s.Sample(10))
    self.assertEqual(packed, '')
""" max_label = util.LABELS[0] p.setLabel(max_label) # above forces a fixed label: remove them # replace knn_helper.knn(p, data, k) with your own logic print(p) knn_helper.knn(p, data, k) print(p) if __name__ == "__main__": random.seed(0) n = 100 K = 3 data = util.genDistribution(n=10) for d in data: d.setLabel(random.choice(util.LABELS)) print("before....") util.plot_data(data) new_pt = sample.Sample('', [0.2, 0.3], '') knn(new_pt, data, K) data.append(new_pt) print("\nafter....") util.plot_data(data)
2. read your data into an array of Sample class objects 3. apply k means to cluster the samples """ data = open("iris_data.txt", "r").readlines() allSamples = [] # IMPLEMENTATION: Read the data # ---- start your code ---- # pass """ fill the array allSamples to hold the samples, each sample takes two attributes of an iris instance """ for line in data: content = line.strip().split(",") d = sample.Sample('', [float(content[1]), float(content[3])]) allSamples.append(d) # ---- end of your code --- # verbose = False k = 3 print("before clustering") unclustered = [kmeans.Cluster(allSamples)] util.plot_cluster(unclustered) clusters = unclustered print("after clustering") # IMPLEMENTATION: apply k means to cluster the samples # ---- start your code ---- # pass clusters = kmeans.kmeans(allSamples, 3, verbose)
#!/usr/bin/env python
import sample

# Minimal smoke test: construct a Sample and invoke its demo method.
greeter = sample.Sample('Hello')
greeter.sampleMethod()
# Resolve input file paths and run the PGx QC Sample-level check for one
# patient ID. NOTE(review): `path`, `Drug_action_path`, `Low_coverage_path`,
# `Active_score_path` and `Output_geno_path` are defined earlier in the
# script (outside this excerpt).
Range_path = glob.glob('%s/NA17281_S[0-9][0-9].txt'%path)[0]
Gene_var_path = '/home/yifei.wan/AH_Project/PGx_QC/PGx_GV/PGxOne_v3_Gene_Variant_List.txt'
Code_drug_path = '%s/%s'%(path, 'sample_codes_drugs.txt')
#print Drug_action_path
#print Low_coverage_path
#print Range_path
#ID = 'A-1600158824'
#ID = 'A-1600161746B'
# Sample ID is taken from the command line.
ID = sys.argv[2]
amp_name = ['792984_CYP3A5-exon2-2-exon2-1']
gene_name = 'CYP3A5'
ICDs = ['F32.9', 'F41.9']
# Strip the decimal part of each ICD code (e.g. 'F32.9' -> 'F32').
sample_ICD = [re.sub('\..*', '', ICD) for ICD in ICDs]
with open(Code_drug_path, 'r') as CD, open(Drug_action_path, 'r') as DA, open(Low_coverage_path, 'r') as LC, open(Range_path, 'r') as RP, open(Active_score_path, 'r') as AS, open(Output_geno_path, 'r') as OG, open(Gene_var_path, 'r') as GK:
    Active_score = AS.readlines()
    Drug_action = DA.readlines()
    Low_coverage = LC.readlines()
    Range = RP.readlines()
    Output_geno = OG.readlines()
    Gene_KB = GK.readlines()
    Code_drug = CD.readlines()
    #check_point = aq.Gene(ID, gene_name, Output_geno, amp_name, Active_score, Drug_action, Low_coverage, Range, Gene_KB)
    #check_point = aq.Gene_Scored(ID, sample_ICD, gene_name, Output_geno, amp_name, Active_score, Drug_action, Low_coverage, Range, Gene_KB)
    # Sample-level QC check over the loaded files. NOTE(review): placement
    # inside the `with` block is assumed from the original layout — the
    # check only needs the already-read lists, so confirm the indentation.
    check_point = aq.Sample(ID, Output_geno, Code_drug, Active_score, Drug_action, Low_coverage, Range, Gene_KB)
    check_point.check()
#check_point = aq.Gene(ID, gene_name, OG, amp_name, AS, DA, LC, Range)