import numpy as np


def filled_hist(ax, edges, values, bottoms=None, orientation='v', **kwargs):
    print(orientation)
    if orientation not in set('hv'):
        raise ValueError("orientation must be in {{'h', 'v'}} "
                         "not {o}".format(o=orientation))
    kwargs.setdefault('step', 'post')
    edges = np.asarray(edges)
    values = np.asarray(values)
    if len(edges) - 1 != len(values):
        raise ValueError('Must provide one more bin edge than value not: '
                         'len(edges): {lb} len(values): {lv}'.format(
                             lb=len(edges), lv=len(values)))
    if bottoms is None:
        bottoms = np.zeros_like(values)
    if np.isscalar(bottoms):
        bottoms = np.ones_like(values) * bottoms
    # Repeat the last value so the step plot closes the final bin.
    values = np.r_[values, values[-1]]
    bottoms = np.r_[bottoms, bottoms[-1]]
    if orientation == 'h':
        return ax.fill_betweenx(edges, values, bottoms, **kwargs)
    elif orientation == 'v':
        return ax.fill_between(edges, values, bottoms, **kwargs)
    else:
        raise AssertionError('you should never be here')
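# A minimal usage sketch for filled_hist, illustrative only: the histogram
# data below is made up, and matplotlib/numpy are assumed available.
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
counts, bin_edges = np.histogram(rng.normal(size=1000), bins=20)
fig, ax = plt.subplots()
filled_hist(ax, bin_edges, counts, orientation='v', alpha=0.5)
plt.show()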
import logging

import numpy
import six
from six.moves import xrange

logger = logging.getLogger(__name__)


def init_dir_prior(self, prior, name):
    if prior is None:
        prior = 'symmetric'

    is_auto = False

    if isinstance(prior, six.string_types):
        if prior == 'symmetric':
            logger.info("using symmetric %s at %s", name, 1.0 / self.num_topics)
            init_prior = numpy.asanyarray(
                [1.0 / self.num_topics for i in xrange(self.num_topics)])
        elif prior == 'asymmetric':
            init_prior = numpy.asarray([
                1.0 / (i + numpy.sqrt(self.num_topics))
                for i in xrange(self.num_topics)
            ])
            init_prior /= init_prior.sum()
            logger.info("using asymmetric %s %s", name, list(init_prior))
        elif prior == 'auto':
            is_auto = True
            init_prior = numpy.asarray(
                [1.0 / self.num_topics for i in xrange(self.num_topics)])
            logger.info("using autotuned %s, starting with %s", name, list(init_prior))
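# A standalone sketch of the asymmetric prior computed above, assuming
# num_topics=4; purely illustrative, not part of any library API.
num_topics = 4
init_prior = numpy.asarray(
    [1.0 / (i + numpy.sqrt(num_topics)) for i in range(num_topics)])
init_prior /= init_prior.sum()
print(init_prior)  # decreasing weights that sum to 1.0, heaviest on topic 0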
import cv2
import numpy as np
from tqdm import tqdm

# findThreshold, step, generateColors, measure, makeVideo, sound, args and
# imgList are defined elsewhere in this project.


def main():
    global imgList
    img = cv2.imread("Image/" + str(args.f) + ".jpg")
    img = cv2.resize(img, (800, 500))
    imgList.append(img)
    height, width, _ = img.shape
    print("row wise color sorting")
    for row in tqdm(range(0, height)):
        color, color_n = [], []
        add = []
        for col in range(0, width):
            val = img[row][col].tolist()
            val = [i / 255.0 for i in val]  # normalise BGR channels to [0, 1]
            color.append(val)
        thresh = findThreshold(color, add)
        if np.all(np.asarray(color)):
            # No zero channel in this row: sort the whole row.
            color.sort(key=lambda bgr: step(bgr, 8))  # step sorting
            band, img = generateColors(color, img, row)
            measure(len(color), row, col, height, width)
        else:
            # Row contains zeros: keep only non-black pixels below threshold.
            for ind, i in enumerate(color):  # access every color
                if np.any(np.asarray(i)) and sum(i) < thresh:
                    color_n.append(i)
            color_n.sort(key=lambda bgr: step(bgr, 8))
            band, img = generateColors(color_n, img, row)
            measure(len(color_n), row, col, height, width)
        cv2.imwrite("Image_sort/" + str(args.f) + "/" + str(row + 1) + ".jpg",
                    img)  # save the per-row progress frame
    cv2.imwrite("Image_sort/" + str(args.f) + "/" + str(args.f) + ".jpg",
                img)  # create final sorted image
    print("\nForming the video from the pixel sorted image progress")
    makeVideo()
    sound.main(args.f)
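# A hedged sketch of the step(bgr, n) sort key used above, based on the
# common "step sorting" trick of bucketing colors by hue band, then
# luminosity, then value; the project's real helper may differ.
import colorsys
import math


def step(bgr, repetitions=8):
    b, g, r = bgr  # channels already normalised to [0, 1]
    lum = math.sqrt(0.241 * r + 0.691 * g + 0.068 * b)
    h, s, v = colorsys.rgb_to_hsv(r, g, b)
    h2 = int(h * repetitions)
    lum2 = int(lum * repetitions)
    v2 = int(v * repetitions)
    if h2 % 2 == 1:  # reverse alternate hue bands to reduce banding
        v2 = repetitions - v2
        lum2 = repetitions - lum2
    return (h2, lum2, v2)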
import numpy as np

# OpenCGmap and self.sim_db are provided elsewhere in this project.


def sim_db_from_cgmap(self, cgmap: str):
    meth_keys = {}
    cytosine_values = {}
    cytosine_positions = {}
    for line in OpenCGmap(cgmap):
        chrom, nucleotide, pos, context, methlevel = (
            line[0], line[1], int(line[2]) - 1, line[4], float(line[7]))
        context = 1 if context == 'CG' else 0
        if not self.collect_ch_sites and not context:
            continue
        nucleotide = 1 if nucleotide == 'G' else 0
        # nucleotide, methylation_level, context, methylated_reads, unmethylated_reads
        meth_profile = np.asarray([nucleotide, methlevel, context, 0, 0, 1])
        profile_id = f'{chrom}:{pos}'
        if chrom not in meth_keys:
            meth_keys[chrom] = {profile_id: 0}
            cytosine_values[chrom] = [meth_profile]
            cytosine_positions[chrom] = 1
        else:
            meth_keys[chrom][profile_id] = cytosine_positions[chrom]
            cytosine_values[chrom].append(meth_profile)
            cytosine_positions[chrom] += 1
    for contig, meth_key in meth_keys.items():
        self.sim_db.output_contig(meth_key, np.array(cytosine_values[contig]), contig)
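# Hedged illustration of the record fields consumed above; the column layout
# (line[4] = context, line[7] = methylation level) is inferred from the
# indexing and may differ from other CGmap flavours. Values are made up.
line = ['chr1', 'G', '1001', 'C', 'CG', '-', '-', '0.85']
pos = int(line[2]) - 1                 # 1-based CGmap position -> 0-based
context = 1 if line[4] == 'CG' else 0  # CpG site flag
methlevel = float(line[7])             # fraction of methylated reads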
import random

import numpy as np


def sample(self, batch_size):
    # Draw up to batch_size transitions from the replay buffer.
    return np.asarray(
        random.sample(self.buffer, min(len(self.buffer), batch_size)),
        dtype=np.float32)
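# Minimal usage sketch, assuming self.buffer holds fixed-length numeric
# transition tuples so the sampled batch stacks into one float32 array.
buffer = [(0.0, 1.0, 0.5, 0.0)] * 100  # made-up (state, action, reward, done) rows
batch = np.asarray(random.sample(buffer, min(len(buffer), 32)), dtype=np.float32)
print(batch.shape)  # (32, 4)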
import math

import cv2
import numpy as np

# xlist, ylist and image come from the landmark detection step above.

# Find both coordinates of centre of gravity
xmean = np.mean(xlist)
ymean = np.mean(ylist)

# Calculate distance centre <-> other points
xcentral = [(x - xmean) for x in xlist]
ycentral = [(y - ymean) for y in ylist]

landmarks_vectorised = []
for x, y, w, z in zip(xcentral, ycentral, xlist, ylist):
    landmarks_vectorised.append(w)
    landmarks_vectorised.append(z)
    meannp = np.asarray((ymean, xmean))
    coornp = np.asarray((z, w))
    dist = np.linalg.norm(coornp - meannp)
    landmarks_vectorised.append(dist)
    landmarks_vectorised.append((math.atan2(y, x) * 360) / (2 * math.pi))

# show the output image with the face detections + facial landmarks
cv2.imshow("Output", image)
cv2.waitKey(0)
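# Hedged sanity check of the features built above: each landmark contributes
# (raw x, raw y, distance to the centre of gravity, offset angle), i.e. four
# values per point. The coordinates below are made up.
xlist, ylist = [0.0, 2.0], [0.0, 2.0]
xmean, ymean = np.mean(xlist), np.mean(ylist)  # centre of gravity is (1, 1)
offset = np.asarray((ylist[0], xlist[0])) - np.asarray((ymean, xmean))
print(np.linalg.norm(offset))  # sqrt(2) for the toy points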