def do_padding(img, padding):
    if not padding:
        return img
    try:
        padding = float(padding) * 2.0
        if padding > .9:
            padding = .9
        if padding <= 0.0:
            return img
    except ValueError:
        # Unparseable padding: return the image unchanged (the original
        # bare `return` returned None here).
        return img
    iw, ih = img.size
    img.thumbnail(
        (
            int(round(float(img.size[0]) * (1.0 - padding))),
            int(round(float(img.size[1]) * (1.0 - padding))),
        ),
        pil.ANTIALIAS)
    img = do_fill(img, "ffffff00", iw, ih)
    return img
def _image(self, node):
    import urllib
    from reportlab.lib.utils import ImageReader
    u = urllib.urlopen(str(node.getAttribute("file")))
    s = StringIO.StringIO()
    s.write(u.read())
    s.seek(0)
    img = ImageReader(s)
    (sx, sy) = img.getSize()
    args = {}
    for tag in ("width", "height", "x", "y"):
        if node.hasAttribute(tag):
            args[tag] = utils.unit_get(node.getAttribute(tag))
    if ("width" in args) and ("height" not in args):
        args["height"] = sy * args["width"] / sx
    elif ("height" in args) and ("width" not in args):
        args["width"] = sx * args["height"] / sy
    elif ("width" in args) and ("height" in args):
        # Preserve the aspect ratio: shrink whichever requested dimension is
        # proportionally too large. The original compared the ratio against
        # the boolean (float(sx) > sy); the intended ratio is sx / sy.
        if (float(args["width"]) / args["height"]) > (float(sx) / sy):
            args["width"] = sx * args["height"] / sy
        else:
            args["height"] = sy * args["width"] / sx
    self.canvas.drawImage(img, **args)
def calc_pr(bigccmat, numnodes, pprloc=-99):
    """calc_pr calculates PageRank from the input transition matrix."""
    # Convert to a row-stochastic transition matrix.
    rowsum = bigccmat.sum(axis=1)
    for ii in range(numnodes):
        if rowsum[ii, 0] != 0:
            bigccmat[ii, :] = bigccmat[ii, :] / rowsum[ii, 0]
        else:
            # Case with no outgoing links: make the node a self-loop.
            bigccmat[ii, ii] = 1.0
    # Convert to sparse matrix format.
    sp_transmat_first = scisp.csr_matrix(bigccmat)
    oldprvec = np.matrix(np.ones((numnodes, 1))) / float(numnodes)
    convergevec = [1000]  # some large value
    if pprloc > 0:
        # Personalized PageRank: teleport only to the given node.
        onevec = np.matrix(np.zeros((numnodes, 1)))
        onevec[pprloc, 0] = 0.15
    else:
        onevec = (0.15 / float(numnodes)) * np.matrix(np.ones((numnodes, 1)))
    ii = 0
    while convergevec[-1] > 1e-5:
        newprvec = 0.85 * (sp_transmat_first.T * oldprvec)
        newprvec = newprvec + onevec
        newnorm = np.linalg.norm(newprvec, 1)
        convergevec.append(sum(abs(newprvec - oldprvec))[0, 0])
        oldprvec = newprvec
        ii = ii + 1
    print "Norm of PR vector:", newnorm
    print "Number of iterations for convergence:", ii
    convergevec.remove(1000)
    return (newprvec, convergevec)
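# --- Usage sketch (not from the original source): exercising calc_pr on a
# --- tiny 3-node graph. Assumes numpy as np and scipy.sparse as scisp, as in
# --- the function above, and the same Python 2 dialect.
adj = np.matrix([[0., 1., 1.],
                 [1., 0., 0.],
                 [0., 0., 0.]])  # node 2 has no outgoing links
prvec, conv = calc_pr(adj.copy(), 3)
print "PageRank vector:", prvec.T  # entries sum to ~1 after convergence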
def readXml10811(item):
    global X_10811, LIGHT_SMALL_BIG, DATA_BIG, DATA_SMALL
    tablelist = item.getElementsByTagName('table')
    table_1 = tablelist[0]
    datalist = table_1.getElementsByTagName('td')
    # Read the light-source position and the small/large image positions.
    data_x = (float(datalist[2].firstChild.nodeValue),
              float(datalist[3].firstChild.nodeValue),
              float(datalist[4].firstChild.nodeValue))
    LIGHT_SMALL_BIG = (float(datalist[2].firstChild.nodeValue),
                       float(datalist[3].firstChild.nodeValue),
                       float(datalist[4].firstChild.nodeValue))
    table_2 = tablelist[1]
    datalist = table_2.getElementsByTagName('td')
    # Read the large-image positions.
    data_b2 = (float(datalist[0].firstChild.nodeValue),
               float(datalist[1].firstChild.nodeValue),
               float(datalist[2].firstChild.nodeValue),
               float(datalist[3].firstChild.nodeValue))
    for b in data_b2:
        DATA_BIG.append(b)
    # Read the small-image positions.
    data_b1 = (float(datalist[4].firstChild.nodeValue),
               float(datalist[5].firstChild.nodeValue),
               float(datalist[6].firstChild.nodeValue),
               float(datalist[7].firstChild.nodeValue))
    for b in data_b1:
        DATA_SMALL.append(b)
    table_3 = tablelist[2]
    datalist = table_3.getElementsByTagName('td')
    # Read the fringe positions.
    data_X = []
    for i in range(len(datalist)):
        data_X.append(float(datalist[i].firstChild.nodeValue))
        X_10811.append(float(datalist[i].firstChild.nodeValue))
    return data_b1, data_b2, data_x, data_X
def __parse_positions(self, var):
    listPattern = re.compile(r"\[[^\[]+?\]")
    tagPattern = re.compile(r"<.+?>")
    betweenTagPattern = re.compile(r">(.+?)<")
    numberPattern = re.compile(r"-?\d+\.?\d*")
    stringPattern = re.compile(r'".*?[^\\]"')
    positions = []
    columns = ("pid", "date", "stock", "percentage", "shares", "notes")
    for text in listPattern.findall(var):
        data = stringPattern.findall(text[1:-1])
        stock = betweenTagPattern.findall(data[0])[0]
        if self.user == list_user:
            percentage = shares = "NULL"
            notes = tagPattern.sub(" ", data[-1][1:-1])
        else:
            comments = tagPattern.split(data[-1][1:-1])
            try:
                percentage = float(numberPattern.findall(comments[0])[0])
            except:
                percentage = 0
            try:
                shares = float(numberPattern.findall(comments[1])[0])
            except:
                shares = 0
            try:
                notes = comments[2]
            except:
                notes = ""
        positions.append(dict(zip(columns,
                                  (self.id, self.now, stock, percentage, shares,
                                   notes.encode("ascii", "ignore")))))
    return positions
def getCraftedGcode(self, fileName, repository, svgText):
    "Parse svgText and return the scaled svgText."
    svgReader = SVGReader()
    svgReader.parseSVG('', svgText)
    if svgReader.sliceDictionary == None:
        print('Warning, nothing will be done because the sliceDictionary could not be found getCraftedGcode in preface.')
        return ''
    xyPlaneScale = repository.xyPlaneScale.value
    zAxisScale = repository.zAxisScale.value
    decimalPlacesCarried = int(svgReader.sliceDictionary['decimalPlacesCarried'])
    layerHeight = zAxisScale * float(svgReader.sliceDictionary['layerHeight'])
    edgeWidth = float(svgReader.sliceDictionary['edgeWidth'])
    loopLayers = svgReader.loopLayers
    for loopLayer in loopLayers:
        setLoopLayerScale(loopLayer, xyPlaneScale, zAxisScale)
    cornerMaximum = Vector3(-912345678.0, -912345678.0, -912345678.0)
    cornerMinimum = Vector3(912345678.0, 912345678.0, 912345678.0)
    svg_writer.setSVGCarvingCorners(cornerMaximum, cornerMinimum, layerHeight, loopLayers)
    svgWriter = svg_writer.SVGWriter(
        True, cornerMaximum, cornerMinimum, decimalPlacesCarried, layerHeight, edgeWidth)
    commentElement = svg_writer.getCommentElement(svgReader.documentElement)
    procedureNameString = svgReader.sliceDictionary['procedureName'] + ',scale'
    return svgWriter.getReplacedSVGTemplate(fileName, loopLayers, procedureNameString, commentElement)
def main():
    args = args_parse()
    out1 = open("%s.TE.fa" % str(args.fasta).split(".")[0], "w")
    out2 = open("%s.CON.fa" % str(args.fasta).split(".")[0], "w")
    seqslen = get_len(args.fasta)
    seqs = get_seqs(args.fasta)
    TE = get_TE_ids(args.TE)
    pfam_ids = get_pfam(args.infile)
    TE_ids = []
    for line in open(args.infile, "r"):
        if "#" not in line and len(line.strip().split()) == 15:
            ids = line.strip().split()[0]
            start = int(line.strip().split()[1])
            end = int(line.strip().split()[2])
            hmm = line.strip().split()[5]
            if hmm.split(".")[0] in TE:
                # Fraction of the sequence covered by the TE domain hit.
                per = (float(end) - float(start) + 1.0) / float(seqslen.get(ids))
                if per >= 0.3:
                    TE_ids.append(ids)
                    out1.write(">%s\t%f\n%s\n" % (ids, per, seqs.get(ids)))
    set1 = set(TE_ids)
    set2 = set(seqs.keys())
    for retain_ids in set2 - set1:
        try:
            out2.write(">%s\t%s\n%s\n" % (retain_ids, pfam_ids[retain_ids], seqs.get(retain_ids)))
        except KeyError:
            out2.write(">%s\tNon-domains\n%s\n" % (retain_ids, seqs.get(retain_ids)))
def set_value(self, value):
    if self._parameter_to_map_to != None:
        if self._parameter_to_map_to.is_enabled:
            # Scale the 0-127 MIDI value onto the parameter's range.
            newval = (float(value) / 127) * (self._parameter_to_map_to.max -
                                             self._parameter_to_map_to.min) \
                     + self._parameter_to_map_to.min
            self._parameter_to_map_to.value = newval
    else:
        self.receive_value(int(value))
def createOrbPos(self, feraw):
    orbpos = feraw.get("orbital_position")
    if orbpos > 1800:
        return str(float(3600 - orbpos) / 10.0) + "\xc2\xb0 W"
    elif orbpos > 0:
        return str(float(orbpos) / 10.0) + "\xc2\xb0 E"
    return ""
def __init__(self):
    rospy.init_node('actuators_handler')
    rospy.loginfo(rospy.get_caller_id() + 'Initializing actuators_handler node')
    self.timelast = 0

    # Get all parameters from config (rosparam).
    name = 'engine'
    engine_output_pin = int(rospy.get_param('actuators/' + name + '/output_pin', 1))
    engine_board_pin = int(rospy.get_param('actuators/' + name + '/board_pin', 60))
    engine_period_us = int(1e6 / float(rospy.get_param('actuators/' + name + '/frequency', 60)))

    name = 'steering'
    steering_output_pin = int(rospy.get_param('actuators/' + name + '/output_pin', 1))
    steering_board_pin = int(rospy.get_param('actuators/' + name + '/board_pin', 62))
    steering_period_us = int(1e6 / float(rospy.get_param('actuators/' + name + '/frequency', 60)))

    # Initialize PWM (a 1500 us pulse width is the neutral position).
    self.dev1 = mraa.Pwm(engine_board_pin)
    self.dev1.period_us(engine_period_us)
    self.dev1.enable(True)
    self.dev1.pulsewidth_us(1500)

    self.dev2 = mraa.Pwm(steering_board_pin)
    self.dev2.period_us(steering_period_us)
    self.dev2.enable(True)
    self.dev2.pulsewidth_us(1500)
def __init__(self, nscans=nscans_d, nt_scan=nt_scan_d, nf=nf_d, nra=nra_d,
             ndec=ndec_d, scan_size=scan_size_d, map_size=map_size_d,
             thermal=0, correlated_noise=None, random_mean=0, random_slope=0,
             add_ground=False, freq_mode_noise=None, universal_over_f=None):
    # Store parameters.
    self.nscans = nscans
    self.nt_scan = nt_scan
    self.nf = nf
    self.nra = nra
    self.ndec = ndec
    self.scan_size = float(scan_size)
    self.map_size = float(map_size)
    self.add_ground = add_ground
    # Noise parameters.
    self.thermal = thermal
    self.correlated_noise = correlated_noise
    self.freq_mode_noise = freq_mode_noise
    self.random_mean = random_mean
    self.random_slope = random_slope
    self.universal_over_f = universal_over_f
    # Initialize a counter.
    self.data_set_number = 0
    self.nt = nt_scan * nscans
    self.dt = dt_d
def testcaserun_setstatus(request, testcaserun_id):
    testcaserun = TestCaseRun.objects.get(pk=testcaserun_id)
    testcaserun_status_form = forms.TestCaseRunStatus(request.POST, instance=testcaserun)
    if testcaserun_status_form.is_valid():
        testcaserun = testcaserun_status_form.save()
        log = history.History(request.user, testcaserun.parent)
        log.add_form(testcaserun_status_form, capture=["status"], prefix=True)
        log.save()

        # TODO: move this to a testrun method. Check also templatetags.
        passrate_ratio = []
        testrun = testcaserun.parent
        testcaseruns_count = testrun.testcases.count()
        statuses = TestCaseRunStatus.objects.filter(
            testcaserun__parent=testrun).annotate(count=Count('testcaserun'))
        for status in statuses:
            passrate_ratio.append({
                "ratio": float(status.count) / float(testcaseruns_count) * 100,
                "name": status.name,
                "color": status.color,
            })
        return success(message=testcaserun.status.name,
                       data=dict(id=testcaserun.pk,
                                 name=testcaserun.status.name,
                                 color=testcaserun.status.color,
                                 passrate=testcaserun.parent.passrate,
                                 passrate_ratio=passrate_ratio))
    else:
        return failed(message=testcaserun.status.name,
                      data=testcaserun_status_form.errors_list())
def get_temperature():
    res = subprocess.check_output('sensors', shell=True)
    a = res.decode('utf-8')
    diva = a.split('\n')
    # Note: both readings are parsed from the same output line (diva[2]),
    # so the average currently equals that single reading.
    cpu1 = diva[2].split(' ')[4][1:5]
    cpu2 = diva[2].split(' ')[4][1:5]
    return (float(cpu1) + float(cpu2)) / 2
def from_pmml(self, pmml):
    """Returns a model with the intercept and coefficients represented in the PMML file."""
    model = self()

    # Read the input PMML file with BeautifulSoup.
    with open(pmml, "r") as f:
        lm_soup = BeautifulSoup(f, "xml")

    if not lm_soup.RegressionTable:
        raise ValueError("RegressionTable not found in the input PMML file.")
    else:
        ##### DO I WANT TO PULL THIS OUT AS ITS OWN FUNCTION? #####
        # Pull the intercept out of the PMML file and assign it to the
        # model. If the intercept does not exist, default it to zero.
        intercept = 0
        if "intercept" in lm_soup.RegressionTable.attrs:
            intercept = lm_soup.RegressionTable['intercept']
        model.intercept_ = float(intercept)

        # Pull the coefficients out of the PMML file and assign them
        # to the model.
        if not lm_soup.find_all('NumericPredictor'):
            raise ValueError("NumericPredictor not found in the input PMML file.")
        else:
            coefs = []
            numeric_predictors = lm_soup.find_all('NumericPredictor')
            for i in numeric_predictors:
                i_coef = float(i['coefficient'])
                coefs.append(i_coef)
            model.coef_ = numpy.array(coefs)

    return model
def shoot(temp, lux):
    with picamera.PiCamera() as camera:
        camera.resolution = (1024, 768)
        camera.start_preview()
        time.sleep(3)
        # Fix camera values.
        camera.shutter_speed = camera.exposure_speed
        camera.exposure_mode = 'off'
        g = camera.awb_gains
        camera.awb_mode = 'off'
        camera.awb_gains = g
        # camera.iso = 100-200 daytime, 400-800 low light
        if float(lux) >= 250:
            iso = 100 + (float(lux) - 250) / (1000 - 250) * (200 - 100)
        else:
            iso = 400 - (float(lux) - 250) / 250 * (800 - 400)
        camera.iso = int(iso)  # set ISO value
        # Add date and time to the image.
        camera.annotate_text = strftime('%d-%m-%Y %H:%M:%S', localtime())
        # camera.annotate_text = temp
        camera.capture('image.jpg')
        camera.stop_preview()
def __init__(self, name, ambient=0.2, diffuse=0.2, specular=0.9, emission=0.1, shininess=2.0):
    self.name = str(name)
    self.ambient = float(ambient)
    self.diffuse = float(diffuse)
    self.specular = float(specular)
    self.emission = float(emission)
    self.shininess = float(shininess)
def weighted_mean(self, data_array, error_array, error_0):
    '''Inverse-variance weighted mean of an array.'''
    sz = len(data_array)

    # Calculate the numerator of the mean.
    dataNum = 0
    for i in range(sz):
        if error_array[i] == 0:
            error_array[i] = error_0
        tmpFactor = float(data_array[i]) / float(pow(error_array[i], 2))
        dataNum += tmpFactor

    # Calculate the denominator.
    dataDen = 0
    for i in range(sz):
        if error_array[i] == 0:
            error_array[i] = error_0
        tmpFactor = 1. / float(pow(error_array[i], 2))
        dataDen += tmpFactor

    if dataDen == 0:
        data_mean = np.nan
        mean_error = np.nan
    else:
        data_mean = float(dataNum) / float(dataDen)
        mean_error = math.sqrt(1 / dataDen)
    return [data_mean, mean_error]
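# --- Usage sketch (not from the original source): with errors [1.0, 2.0] the
# --- weights are 1 and 0.25, so for data [10.0, 20.0] the weighted mean is
# --- (10*1 + 20*0.25) / 1.25 = 12.0 and the error is sqrt(1/1.25) ~= 0.894.
# obj.weighted_mean([10.0, 20.0], [1.0, 2.0], error_0=1.0)
# -> [12.0, 0.8944...]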
def find_info(test):
    # Do the match.
    match = regex.match(test)
    if match is not None:
        return (match.group(1), float(match.group(2)), float(match.group(3)), float(match.group(4)))
    else:
        return ("", 0, 0, 0)
def _getyview(self):
    # Vertical dimension.
    clipperHeight = self._vclipper.winfo_height()
    frameHeight = self._vsframe.winfo_reqheight()
    if frameHeight <= clipperHeight:
        # The scrolled frame is smaller than the clipping window.
        self.startY = 0
        endScrollY = 1.0
        if self['vertflex'] in ('expand', 'elastic'):
            relheight = 1
        else:
            relheight = ''
    else:
        # The scrolled frame is larger than the clipping window.
        if self['vertflex'] in ('shrink', 'elastic'):
            self.startY = 0
            endScrollY = 1.0
            relheight = 1
        else:
            if self.startY + clipperHeight > frameHeight:
                self.startY = frameHeight - clipperHeight
                endScrollY = 1.0
            else:
                if self.startY < 0:
                    self.startY = 0
                endScrollY = (self.startY + clipperHeight) / float(frameHeight)
            relheight = ''
    # Position frame relative to clipper.
    self._vsframe.place(y=-self.startY, relheight=relheight)
    return (self.startY / float(frameHeight), endScrollY)
def get_user_details(self, username):
    contents = re.split('([+/])', username)
    assert len(contents) % 2 == 1
    user, contents2 = contents[0], contents[1:]

    desired_pseudoshare_target = None
    desired_share_target = None
    for symbol, parameter in zip(contents2[::2], contents2[1::2]):
        if symbol == '+':
            try:
                desired_pseudoshare_target = bitcoin_data.difficulty_to_target(float(parameter))
            except:
                if p2pool.DEBUG:
                    log.err()
        elif symbol == '/':
            try:
                desired_share_target = bitcoin_data.difficulty_to_target(float(parameter))
            except:
                if p2pool.DEBUG:
                    log.err()

    if random.uniform(0, 100) < self.worker_fee:
        pubkey_hash = self.my_pubkey_hash
    else:
        try:
            pubkey_hash = bitcoin_data.address_to_pubkey_hash(user, self.node.net.PARENT)
        except:  # XXX blah
            pubkey_hash = self.my_pubkey_hash

    return user, pubkey_hash, desired_share_target, desired_pseudoshare_target
def legIK(self, X, Y, Z, resolution):
    """Compute leg servo positions."""
    ans = [0, 0, 0, 0]  # (coxa, femur, tibia)
    try:
        # First, make this a 2DOF problem by solving the coxa.
        ans[0] = radToServo(atan2(X, Y), resolution)
        trueX = int(sqrt(sq(X) + sq(Y))) - self.L_COXA
        im = int(sqrt(sq(trueX) + sq(Z)))  # length of imaginary leg
        # Get femur angle above horizon (law of cosines).
        q1 = -atan2(Z, trueX)
        d1 = sq(self.L_FEMUR) - sq(self.L_TIBIA) + sq(im)
        d2 = 2 * self.L_FEMUR * im
        q2 = acos(d1 / float(d2))
        ans[1] = radToServo(q1 + q2, resolution)
        # And tibia angle from femur...
        d1 = sq(self.L_FEMUR) - sq(im) + sq(self.L_TIBIA)
        d2 = 2 * self.L_TIBIA * self.L_FEMUR
        ans[2] = radToServo(acos(d1 / float(d2)) - 1.57, resolution)
    except:
        if self.debug:
            print "LegIK FAILED"
        return [1024, 1024, 1024, 0]
    if self.debug:
        print "LegIK:", ans
    return ans
def _getxview(self):
    # Horizontal dimension.
    clipperWidth = self._hclipper.winfo_width()
    frameWidth = self._hsframe.winfo_reqwidth()
    if frameWidth <= clipperWidth:
        # The scrolled frame is smaller than the clipping window.
        self.startX = 0
        endScrollX = 1.0
        if self['horizflex'] in ('expand', 'elastic'):
            relwidth = 1
        else:
            relwidth = ''
    else:
        # The scrolled frame is larger than the clipping window.
        if self['horizflex'] in ('shrink', 'elastic'):
            self.startX = 0
            endScrollX = 1.0
            relwidth = 1
        else:
            if self.startX + clipperWidth > frameWidth:
                self.startX = frameWidth - clipperWidth
                endScrollX = 1.0
            else:
                if self.startX < 0:
                    self.startX = 0
                endScrollX = (self.startX + clipperWidth) / float(frameWidth)
            relwidth = ''
    # Position frame relative to clipper.
    self._hsframe.place(x=-self.startX, relwidth=relwidth)
    return (self.startX / float(frameWidth), endScrollX)
def _render_on_subplot(self, subplot):
    """
    Render this arrow in a subplot.  This is the key function that
    defines how this arrow graphics primitive is rendered in
    matplotlib's library.

    EXAMPLES:

    This function implicitly ends up rendering this arrow on a
    matplotlib subplot::

        sage: arrow(path=[[(0,1), (2,-1), (4,5)]])
    """
    options = self.options()
    width = float(options['width'])
    head = options.pop('head')
    if head == 0:
        style = '<|-'
    elif head == 1:
        style = '-|>'
    elif head == 2:
        style = '<|-|>'
    else:
        raise KeyError('head parameter must be one of 0 (start), 1 (end) or 2 (both).')
    arrowsize = float(options.get('arrowsize', 5))
    head_width = arrowsize
    head_length = arrowsize * 2.0
    color = to_mpl_color(options['rgbcolor'])
    from matplotlib.patches import FancyArrowPatch
    from matplotlib.path import Path
    bpath = Path(self.vertices, self.codes)
    p = FancyArrowPatch(path=bpath,
                        lw=width,
                        arrowstyle='%s,head_width=%s,head_length=%s' % (style, head_width, head_length),
                        fc=color, ec=color, linestyle=options['linestyle'])
    p.set_zorder(options['zorder'])
    p.set_label(options['legend_label'])
    subplot.add_patch(p)
    return p
def send_file_udp(s, filename):
    bytes_sent = 0
    sending_file = open(filename, 'rb')
    filesize = os.stat(filename).st_size
    while True:
        package, client_address = s.recvfrom(Constants.FILE_CHUNK_SIZE)
        unpacked_package = Utils.unpack_package(package)
        if unpacked_package['command'] == Constants.INIT_TRANSMIT:
            bytes_sent = int(unpacked_package['payload'])
            sending_file.seek(bytes_sent)
        data = sending_file.read(Constants.FILE_CHUNK_SIZE)
        if not data:
            s.sendto(Utils.pack_package(Constants.FIN, ''), client_address)
            sending_file.close()
            break
        else:
            s.sendto(Utils.pack_package(Constants.ACK, data), client_address)
            package, client_address = s.recvfrom(Constants.FILE_CHUNK_SIZE)
            unpacked_package = Utils.unpack_package(package)
            if unpacked_package['command'] == Constants.ACK:
                bytes_sent += len(data)
                percent = int(float(bytes_sent) * 100 / float(filesize))
                print "{0} / {1} Kb sent ({2}%)".format(Utils.to_kilobytes(bytes_sent),
                                                        Utils.to_kilobytes(filesize),
                                                        percent)
def send_file_multicast(s, filename):
    connections = {}
    filesize = os.stat(filename).st_size
    try:
        while True:
            readable, _, _ = select.select([s], [], [])
            for rd in readable:
                bytes_sent = 0
                package, client_address = s.recvfrom(Constants.FILE_CHUNK_SIZE)
                unpacked_package = Utils.unpack_package(package)
                if not connections.has_key(client_address) or connections[client_address] is None:
                    connections[client_address] = open(filename, 'rb')
                if unpacked_package['command'] == Constants.INIT_TRANSMIT:
                    bytes_sent = int(unpacked_package['payload'])
                    connections[client_address].seek(bytes_sent)
                data = connections[client_address].read(Constants.FILE_CHUNK_SIZE)
                if not data:
                    rd.sendto(Utils.pack_package(Constants.FIN, ''), client_address)
                    connections[client_address].close()
                    connections[client_address] = None
                else:
                    rd.sendto(Utils.pack_package(Constants.ACK, data), client_address)
                    bytes_sent += len(data)
                    percent = int(float(bytes_sent) * 100 / float(filesize))
                    print "{0} / {1} Kb sent to client {2}({3}%)".format(
                        Utils.to_kilobytes(bytes_sent), Utils.to_kilobytes(filesize),
                        client_address, percent)
                    sys.stdout.write('\033M')
    except socket.error, value:
        print value
def parse(self, attributes):
    def getValues(string):
        return numpy.array(list(map(float, string.split(" "))))

    self.color.ident = self.demangled()
    if "shininess" in attributes.keys():
        self.color.shininess = float(attributes["shininess"])
    if "transparency" in attributes.keys():
        self.color.transparency = float(attributes["transparency"])
    if "diffuseColor" in attributes.keys():
        self.color.diffuse = getValues(attributes["diffuseColor"])
    if "emissiveColor" in attributes.keys():
        self.color.emissive = getValues(attributes["emissiveColor"])
    if "specularColor" in attributes.keys():
        self.color.specular = getValues(attributes["specularColor"])
    if "ambientIntensity" in attributes.keys():
        self.color.ambient = self.color.diffuse * float(attributes["ambientIntensity"])

    debug("%sMaterial properties:" % (' ' * self.level))
    debug("%sShininess: %f" % (' ' * (self.level + 1), self.color.shininess))
    debug("%sTransparency: %f" % (' ' * (self.level + 1), self.color.transparency))
    debug("%sDiffuse Color: %f, %f, %f" % (' ' * (self.level + 1),
          self.color.diffuse[0], self.color.diffuse[1], self.color.diffuse[2]))
    debug("%sEmissive Color: %f, %f, %f" % (' ' * (self.level + 1),
          self.color.emissive[0], self.color.emissive[1], self.color.emissive[2]))
    debug("%sSpecular Color: %f, %f, %f" % (' ' * (self.level + 1),
          self.color.specular[0], self.color.specular[1], self.color.specular[2]))
    debug("%sAmbient Color: %f, %f, %f" % (' ' * (self.level + 1),
          self.color.ambient[0], self.color.ambient[1], self.color.ambient[2]))
def init():
    global theMesh, theLight, theCamera, theScreen, resolution
    initializeVAO()
    glEnable(GL_CULL_FACE)
    glEnable(GL_DEPTH_TEST)

    # Add our object
    # LIGHT
    theLight = N.array((-0.577, 0.577, 0.577, 0.0), dtype=N.float32)

    # OBJECT
    phongshader = makeShader("phongshader.vert", "phongshader.frag")
    verts, elements = readOBJ("suzanne.obj")
    suzanneVerts = getArrayBuffer(verts)
    suzanneElements = getElementBuffer(elements)
    suzanneNum = len(elements)
    theMesh = coloredMesh(N.array((1.0, 0.5, 1.0, 1.0), dtype=N.float32),
                          suzanneVerts,
                          suzanneElements,
                          suzanneNum,
                          phongshader)

    # CAMERA
    width, height = theScreen.get_size()
    aspectRatio = float(width) / float(height)
    near = 0.01
    far = 100.0
    lens = 4.0  # "longer" lenses mean more telephoto
    theCamera = Camera(lens, near, far, aspectRatio)
    theCamera.moveBack(6)
def extract_features(data):
    '''
    Perform feature extraction for variables that can be extracted the same
    way for both training and test data sets. The input "data" is the pandas
    dataframe for the training or test set.
    '''
    token_pattern = re.compile(r"(?u)\b\w+\b")
    data["query_tokens_in_title"] = 0.0
    data["query_tokens_in_description"] = 0.0
    data["percent_query_tokens_in_description"] = 0.0
    data["percent_query_tokens_in_title"] = 0.0
    for i, row in data.iterrows():
        query = set(x.lower() for x in token_pattern.findall(row["search_term"]))
        title = set(x.lower() for x in token_pattern.findall(row["product_title"]))
        description = set(x.lower() for x in token_pattern.findall(row["product_description"]))
        if len(title) > 0:
            data.set_value(i, "query_tokens_in_title",
                           float(len(query.intersection(title))) / float(len(title)))
            data.set_value(i, "percent_query_tokens_in_title",
                           float(len(query.intersection(title))) / float(len(query)))
        if len(description) > 0:
            data.set_value(i, "query_tokens_in_description",
                           float(len(query.intersection(description))) / float(len(description)))
            data.set_value(i, "percent_query_tokens_in_description",
                           float(len(query.intersection(description))) / float(len(query)))
        data.set_value(i, "query_length", len(query))
        data.set_value(i, "description_length", len(description))
        data.set_value(i, "title_length", len(title))
        two_grams_in_query = set(get_n_grams(row["search_term"], 2))
        two_grams_in_title = set(get_n_grams(row["product_title"], 2))
        two_grams_in_description = set(get_n_grams(row["product_description"], 2))
        data.set_value(i, "two_grams_in_q_and_t",
                       len(two_grams_in_query.intersection(two_grams_in_title)))
        data.set_value(i, "two_grams_in_q_and_d",
                       len(two_grams_in_query.intersection(two_grams_in_description)))
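# --- get_n_grams is referenced above but not defined in this excerpt. A
# --- minimal sketch of what it plausibly does (word-level n-grams); the
# --- real helper may tokenize differently.
def get_n_grams(text, n):
    """Return word-level n-grams of `text` as a list of tuples."""
    words = [w.lower() for w in re.findall(r"(?u)\b\w+\b", text)]
    return [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]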
def parse_rho(m1, m2, config):
    """Parse the rho data and convert it to a numpy array."""
    f = open(in_name(m1, m2, config), 'r')
    # Skip to the mixed rho correlator section.
    # This assumes it is the first RHO* entry. !!
    x = f.readline()
    while x:
        if re.match(r'correlator:\s+RHO', x):
            break
        x = f.readline()
    # Throw away the header.
    print x
    for i in range(5):
        print f.readline().strip()
    result = []
    for i in range(64):
        t, r, im = f.readline().strip().split('\t')
        result.append(complex(float(r), float(im)))
    f.close()
    return np.array(result)
def send_file_with_oob_tcp(s, filename):
    sending_file = open(filename, 'rb')
    filesize = os.stat(filename).st_size
    oob_sent = 0
    try:
        bytes_sent = int(s.recv(Constants.FILE_CHUNK_SIZE))
        print "Already sent {0} / {1}".format(bytes_sent, filesize)
    except:
        print 'Lost Connection'
        return 0
    sending_file.seek(int(bytes_sent), 0)
    while True:
        chunk = sending_file.read(Constants.FILE_CHUNK_SIZE)
        if not chunk:
            break
        try:
            s.settimeout(Constants.DEFAULT_TIMEOUT)
            s.send(chunk)
        except socket.error:
            print 'Transfer fail'
            return 0
        bytes_sent += Constants.FILE_CHUNK_SIZE
        percent = int(float(bytes_sent) * 100 / float(filesize))
        print "{0} / {1} Kb sent ({2}%)".format(Utils.to_kilobytes(bytes_sent),
                                                Utils.to_kilobytes(filesize),
                                                percent)
        sys.stdout.write('\033M')
        if (percent % 10 == 0) & (oob_sent != percent) & (percent < 91):
            oob_sent = percent
            sys.stdout.write('\033D')
            print '\033[37;1;41m Urgent flag sent at {0}% \033[0m'.format(percent)
            s.send(b'{}'.format(percent / 10), socket.MSG_OOB)
    sending_file.close()
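# --- Receiver-side sketch (not from the original source): how a peer could
# --- read the urgent byte sent above. TCP delivers a single byte of urgent
# --- data, readable with socket.MSG_OOB; `conn` is a hypothetical connected
# --- socket.
# try:
#     oob_byte = conn.recv(1, socket.MSG_OOB)  # e.g. '3' when 30% was marked
#     print "Progress mark from sender:", oob_byte
# except socket.error:
#     pass  # no urgent data pending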
def _on_ticker(self, msg):
    self.ticker.time = self._epoc_to_dt(msg['ts'] / 1000)
    data = msg.get('tick')
    self.ticker.last = float(data.get('close', self.ticker.last))
def removeSmallValues():
    # Iterate over a copy of the keys so entries can be popped safely.
    for key in list(datW.keys()):
        if float(datW[key]) < 0.0001:
            datW.pop(key)
            # print(str(datW[key]))
    return datW
def check_json_precision():
    """Make sure the json library being used does not lose precision
    converting BTC values."""
    n = Decimal("20000000.00000003")
    satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
    if satoshis != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
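# --- Context note (not from the original source): 20000000.00000003 needs 16
# --- significant decimal digits, at the edge of what a 64-bit float can hold,
# --- so a json library that rounds during encode/decode would change the
# --- integer satoshi count computed above and trigger the RuntimeError.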
def run_many(engine1_name, engine2_name, folder, times, timeout):
    os.mkdir(folder)
    white = engine1_name
    black = engine2_name
    for game_idx in range(times):
        run_game(white, black, folder, game_idx, timeout)
        # Swap colours between games.
        t = black
        black = white
        white = t


if __name__ == "__main__":
    assert len(sys.argv) == 7, ("needs 6 command line arguments: the output folder, "
                                "the names of the two engines, starttime, inctime, timeoutmax")
    outfold_name = sys.argv[1]
    eng1_name = sys.argv[2]
    eng2_name = sys.argv[3]
    starttime = sys.argv[4]
    inctime = sys.argv[5]
    timeout_max = float(sys.argv[6])
    run_many(eng1_name, eng2_name, outfold_name, 50, timeout_max)
def draw_pipes(pipes, cap=2, fit=1.0, **kwargs):
    """Draw pipes and optionally set individual name, color, and layer properties.

    Parameters
    ----------
    pipes : list of dict
        A list of pipe dictionaries.

    Other Parameters
    ----------------
    cap : {0, 1, 2}, optional
    fit : float, optional

    Returns
    -------
    list of GUID

    Notes
    -----
    A pipe dict has the following schema:

    .. code-block:: python

        Schema({
            'points': And(list, lambda x: all(len(y) == 3 for y in x)),
            'radius': And(Or(int, float), lambda x: x > 0.0),
            Optional('name', default=''): str,
            Optional('color', default=None): And(lambda x: len(x) == 3,
                                                 lambda x: all(0 <= y <= 255 for y in x)),
            Optional('layer', default=None): str,
        })

    """
    guids = []
    abs_tol = TOL
    ang_tol = sc.doc.ModelAngleToleranceRadians
    for p in pipes:
        points = p['points']
        radius = p['radius']
        name = p.get('name', '')
        color = p.get('color')
        layer = p.get('layer')
        params = [0.0, 1.0]
        cap = ToObject(PipeCapMode, cap)
        if type(radius) in (int, float):
            radius = [radius] * 2
        radius = [float(r) for r in radius]
        rail = Curve.CreateControlPointCurve([Point3d(*xyz) for xyz in points])
        breps = Brep.CreatePipe(rail, params, radius, 1, cap, fit, abs_tol, ang_tol)
        temp = [add_brep(brep) for brep in breps]
        for guid in temp:
            if not guid:
                continue
            obj = find_object(guid)
            if not obj:
                continue
            attr = obj.Attributes
            if color:
                attr.ObjectColor = FromArgb(*color)
                attr.ColorSource = ColorFromObject
            else:
                attr.ColorSource = ColorFromLayer
            if layer and find_layer_by_fullpath:
                index = find_layer_by_fullpath(layer, True)
                if index >= 0:
                    attr.LayerIndex = index
            attr.Name = name
            attr.WireDensity = -1
            obj.CommitChanges()
            guids.append(guid)
    return guids
def findRadius(frame, window_x, window_y, frame_no):
    """Find the radius of the detected ball in a frame window."""
    # Parameters used to find the radius of the ball.
    THRESHOLD_brightness = 75
    MAX_INTENSITY = 255
    MIN_INTENSITY = 0
    # START_RADIUS = 21.8  # 151
    # FINISH_RADIUS = 7.2  # 223
    # PITCH_DIST = 16

    # Apply the CLAHE algorithm to the frame to improve contrast.
    clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(2, 2))
    frame = clahe.apply(frame)
    blurredFrame = cv2.GaussianBlur(frame, (5, 5), 0)
    # cv2.imshow("Blurred Frame", blurredFrame)
    height, width = blurredFrame.shape[:2]
    frame_center_x = width / 2
    frame_center_y = height / 2

    # Find the average intensity of the entire frame.
    avg_color_frame = float(np.sum(blurredFrame)) / float(width * height)

    # Find the average intensity around the blurred frame center.
    avg_color_ball = 0.0
    for dx in range(1, 6):
        for dy in range(1, 6):
            avg_color_ball += blurredFrame[frame_center_y + dy][frame_center_x + dx]
    avg_color_ball /= 25.0

    # Check whether the point has to be rejected based on the params below.
    returnVal = True
    # If the ball is too bright, reject it.
    if avg_color_ball > 120.0:
        THRESHOLD_brightness = 100.0
        returnVal = False
    elif avg_color_ball > 100.0:
        THRESHOLD_brightness = 95.0
    elif avg_color_ball > 80.0:
        THRESHOLD_brightness = min(90.0, avg_color_ball)
    elif avg_color_ball < 65.0:
        THRESHOLD_brightness = 65.0
    else:
        THRESHOLD_brightness = 75.0

    if avg_color_frame - avg_color_ball < 20:
        # print "Difference in colors = {} is too less. Reject.".format(avg_color_frame - avg_color_ball)
        returnVal = False

    # Threshold the frame: pixels darker than the threshold become white,
    # brighter pixels become black (the dark ball ends up as a white blob).
    for i in range(len(blurredFrame)):
        for j in range(len(blurredFrame[0])):
            if blurredFrame[i][j] < THRESHOLD_brightness:
                blurredFrame[i][j] = MAX_INTENSITY
            else:
                blurredFrame[i][j] = MIN_INTENSITY
    # cv2.imshow("Tracked Ball", blurredFrame)

    _, contours, _ = cv2.findContours(blurredFrame, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(blurredFrame, contours, -1, (255, 0, 0), 1)
    # cv2.imshow("Contours", blurredFrame)

    centre_X = 0
    centre_Y = 0
    radius = 0
    min_diff = 100000
    # Find the contour closest to the centre.
    for contour in contours:
        (x, y), r = cv2.minEnclosingCircle(contour)
        diff = abs(x - 25) + abs(y - 25)
        if diff < min_diff:
            if r < 1:
                # print "Radius too low. Reject contour."
                continue
            # print "Found new min at: {},{} with r={}".format(x, y, r)
            centre_X = x
            centre_Y = y
            radius = r
            min_diff = diff
        elif diff == min_diff:
            if r > radius:
                centre_X = x
                centre_Y = y
                radius = r

    if min_diff > 20:
        # The selected contour is too far away from the centre; reject the point.
        returnVal = False
    if radius < 3.0:
        # The radius is too low; reject.
        returnVal = False
    if len(contours) == 0:
        return False

    circleIndex = 0
    for i, j in enumerate(contours):
        if len(j) > len(contours[circleIndex]):
            circleIndex = i
    # centre_X, centre_Y, radius = findAppropriateCircle(contours[circleIndex])
    (centre_X, centre_Y), radius = cv2.minEnclosingCircle(contours[circleIndex])
    cv2.circle(frame, (int(centre_X), int(centre_Y)), int(radius), (255, 0, 0), 2)

    if returnVal == False:
        rejected_radius.append((int(window_x + centre_X), int(window_y + centre_Y)))
        return False
    if DEBUG_VISUALIZE:
        cv2.imshow("Best Fit Circle", frame)
    Textlines.append((window_x + centre_X, window_y + centre_Y, radius, frame_no, 0))
    return True
xl = [x]  # leapfrog
vl = v + a(x) * dt / 2
xe = [x]  # euler
ve = v
xdif = [0]
for t in ts:
    # leapfrog
    vt = v
    xl.append(xl[-1] + v * dt)
    v = v + a(xl[-1]) * dt
    # euler
    xe.append(xe[-1] + ve * dt)
    ve = ve + a(x) * dt
    # difference between the two integrators
    xdif.append(xe[-1] - xl[-1])

pylab.plot(ts, xl[:-1], '-', label='x(t) - LeapFrog')
pylab.plot(ts, xe[:-1], '-', label='x(t) - Euler')
pylab.plot(ts, xdif[:-1], '-', label='x(t) - difference')
pylab.xlabel('t')
pylab.ylabel('x')
pylab.legend(loc=2)

# for x, y1, e1, y2, e2, ys3, es3 in zip(xs, ys1, es1, ys2, es2, ys3, es3):
#     print x, y1, e1, y2, e2, ys3, es3

pylab.show()


if __name__ == '__main__':
    import sys
    main(*[float(x) for x in sys.argv[1:]])  # (t0, tf, dt)
def RunSteps(api):
    # Figure out which repository to use.
    buildername = api.properties['buildername']
    if '1k' in buildername:
        ct_page_type = '10k'
        num_pages = 1000
    elif '10k' in buildername:
        ct_page_type = '10k'
        num_pages = 10000
    elif '100k' in buildername:
        ct_page_type = '100k'
        num_pages = 100000
    elif '1m' in buildername:
        ct_page_type = 'All'
        num_pages = 1000000
    else:
        raise Exception('Do not recognise the buildername %s.' % buildername)

    # Figure out which tool to use.
    if 'DM' in buildername:
        skia_tool = 'dm'
        build_target = 'dm'
    elif 'BENCH' in buildername:
        skia_tool = 'nanobench'
        build_target = 'nanobench'
    elif 'IMG_DECODE' in buildername:
        skia_tool = 'get_images_from_skps'
        build_target = 'tools'
    else:
        raise Exception('Do not recognise the buildername %s.' % buildername)

    api.core.setup()
    api.flavor.compile(build_target)

    # Required paths.
    infrabots_dir = api.vars.skia_dir.join('infra', 'bots')
    isolate_dir = infrabots_dir.join('ct')
    isolate_path = isolate_dir.join(CT_SKPS_ISOLATE)

    api.run.copy_build_products(api.flavor.out_dir, isolate_dir)
    api.skia_swarming.setup(infrabots_dir.join('tools', 'luci-go'), swarming_rev='')

    skps_chromium_build = api.properties.get(
        'skps_chromium_build', DEFAULT_SKPS_CHROMIUM_BUILD)

    # Set build properties to make finding SKPs convenient.
    webpage_rankings_link = (
        'https://storage.cloud.google.com/%s/csv/top-1m.csv' % api.ct.CT_GS_BUCKET)
    api.step.active_result.presentation.properties['Webpage rankings'] = (
        webpage_rankings_link)
    download_skps_link = (
        'https://pantheon.corp.google.com/storage/browser/%s/swarming/skps/%s/%s/' % (
            api.ct.CT_GS_BUCKET, ct_page_type, skps_chromium_build))
    api.step.active_result.presentation.properties['Download SKPs by rank'] = (
        download_skps_link)

    # Delete swarming_temp_dir to ensure it starts from a clean slate.
    api.run.rmtree(api.skia_swarming.swarming_temp_dir)

    num_per_slave = api.properties.get(
        'num_per_slave',
        min(TOOL_TO_DEFAULT_SKPS_PER_SLAVE[skia_tool], num_pages))
    ct_num_slaves = api.properties.get(
        'ct_num_slaves',
        int(math.ceil(float(num_pages) / num_per_slave)))

    # Try to figure out if the SKPs we are going to isolate already exist
    # locally by reading the SKPS_VERSION_FILE.
    download_skps = True
    expected_version_contents = {
        "chromium_build": skps_chromium_build,
        "page_type": ct_page_type,
        "num_slaves": ct_num_slaves,
    }
    skps_dir = api.vars.checkout_root.join('skps', buildername)
    version_file = skps_dir.join(SKPS_VERSION_FILE)
    if api.path.exists(version_file):  # pragma: nocover
        version_file_contents = api.file.read_text(
            "Read %s" % version_file, version_file,
            test_data=expected_version_contents)
        actual_version_contents = api.json.loads(version_file_contents)
        differences = (set(expected_version_contents.items()) ^
                       set(actual_version_contents.items()))
        download_skps = len(differences) != 0
        if download_skps:
            # Delete and recreate the skps dir.
            api.run.rmtree(skps_dir)
            api.file.ensure_directory(
                'makedirs %s' % api.path.basename(skps_dir), skps_dir)

    # If a blacklist file exists then specify SKPs to be blacklisted.
    blacklists_dir = api.vars.skia_dir.join('infra', 'bots', 'ct', 'blacklists')
    blacklist_file = blacklists_dir.join(
        '%s_%s_%s.json' % (skia_tool, ct_page_type, skps_chromium_build))
    blacklist_skps = []
    if api.path.exists(blacklist_file):  # pragma: nocover
        blacklist_file_contents = api.file.read_text(
            "Read %s" % blacklist_file, blacklist_file)
        blacklist_skps = api.json.loads(blacklist_file_contents)['blacklisted_skps']

    for slave_num in range(1, ct_num_slaves + 1):
        if download_skps:
            # Download SKPs.
            api.ct.download_swarming_skps(
                ct_page_type, slave_num, skps_chromium_build,
                skps_dir,
                start_range=((slave_num - 1) * num_per_slave) + 1,
                num_skps=num_per_slave)

        # Create this slave's isolated.gen.json file to use for batcharchiving.
        extra_variables = {
            'SLAVE_NUM': str(slave_num),
            'TOOL_NAME': skia_tool,
            'GIT_HASH': api.vars.got_revision,
            'CONFIGURATION': api.vars.configuration,
            'BUILDER': buildername,
        }
        api.skia_swarming.create_isolated_gen_json(
            isolate_path, isolate_dir, 'linux',
            'ct-%s-%s' % (skia_tool, slave_num), extra_variables,
            blacklist=blacklist_skps)

    if download_skps:
        # Since we had to download SKPs, create an updated version file.
        api.file.write_text("Create %s" % version_file, version_file,
                            api.json.dumps(expected_version_contents))

    # Batcharchive everything on the isolate server for efficiency.
    max_slaves_to_batcharchive = MAX_SLAVES_TO_BATCHARCHIVE
    if '1m' in buildername:
        # Break up the "isolate tests" step into batches with <100k files due
        # to https://github.com/luci/luci-go/issues/9
        max_slaves_to_batcharchive = 5
    tasks_to_swarm_hashes = []
    for slave_start_num in xrange(1, ct_num_slaves + 1, max_slaves_to_batcharchive):
        m = min(max_slaves_to_batcharchive, ct_num_slaves)
        batcharchive_output = api.skia_swarming.batcharchive(
            targets=['ct-' + skia_tool + '-%s' % num
                     for num in range(slave_start_num, slave_start_num + m)])
        tasks_to_swarm_hashes.extend(batcharchive_output)
    # Sort the list to go through tasks in order.
    tasks_to_swarm_hashes.sort()

    # Trigger all swarming tasks.
    dimensions = {'os': 'Ubuntu-14.04', 'cpu': 'x86-64', 'pool': 'Chrome'}
    if 'GPU' in buildername:
        dimensions['gpu'] = '10de:104a'
    tasks = api.skia_swarming.trigger_swarming_tasks(
        tasks_to_swarm_hashes, dimensions=dimensions, io_timeout=40 * 60)

    # Now collect all tasks.
    env = {'AWS_CREDENTIAL_FILE': None, 'BOTO_CONFIG': None}
    failed_tasks = []
    for task in tasks:
        try:
            api.skia_swarming.collect_swarming_task(task)
            if skia_tool == 'nanobench':
                output_dir = api.skia_swarming.tasks_output_dir.join(
                    task.title).join('0')
                utc = api.time.utcnow()
                gs_dest_dir = 'ct/%s/%d/%02d/%02d/%02d/' % (
                    ct_page_type, utc.year, utc.month, utc.day, utc.hour)
                for json_output in api.file.listdir(
                        'listdir output dir', output_dir,
                        test_data=['file 1', 'file 2']):
                    with api.context(env=env):
                        api.gsutil.upload(
                            name='upload json output',
                            source=json_output,
                            bucket='skia-perf',
                            dest=gs_dest_dir,
                            args=['-R'])
        except api.step.StepFailure as e:
            # Add SKP links for convenience.
            api.step.active_result.presentation.links['Webpage rankings'] = (
                webpage_rankings_link)
            api.step.active_result.presentation.links['Download SKPs by rank'] = (
                download_skps_link)
            failed_tasks.append(e)

    if failed_tasks:
        raise api.step.StepFailure(
            'Failed steps: %s' % ', '.join([f.name for f in failed_tasks]))
'''
https://stackoverflow.com/questions/13081178/whats-the-difference-on-docstrings-with-triple-single-quotes-and-triple-double
AcWing 125. Acrobatic Cows (Python 3)
Original problem link; difficulty: medium
Author: xanxus1111, 2020-07-12 19:57:52, 27 reads
'''
if __name__ == '__main__':
    n = int(input())
    q = []
    for i in range(n):
        w, s = map(int, input().split())
        # Greedy: sort cows by weight + strength, ascending.
        q.append([w + s, w, s])
    q.sort()
    res = -float('inf')
    sum = 0
    for i in range(n):
        # Risk of cow i = total weight above it minus its strength.
        res = max(res, sum - q[i][2])
        sum += q[i][1]
    print(res)
'''
Author: xanxus1111
Link: https://www.acwing.com/solution/content/16201/
Source: AcWing
The copyright belongs to the author. For commercial reproduction please
contact the author for authorization; for non-commercial reproduction
please credit the source.
'''
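# --- Worked example (recomputed by hand; the input matches the problem's
# --- commonly cited sample, which is an assumption here). Three cows with
# --- (weight, strength) = (10, 3), (2, 5), (3, 3), sorted by w + s:
# --- (3, 3), (2, 5), (10, 3). Risks are 0-3 = -3, 3-5 = -2, 5-3 = 2,
# --- so the minimal maximum risk printed is 2.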
def set_injector_size(self, cc):
    inj_hex = hex(int((450 / float(cc)) * 74))
    offset = config.offset_injector_compensation
    self.set_value(offset, inj_hex)
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
import numpy as np


def iris_label_transfor(s):
    if s == "Iris-setosa":
        return 0
    elif s == "Iris-versicolor":
        return 1
    elif s == "Iris-virginica":
        return 2


float_feature = lambda x: [float(i) for i in x]


def yuchuli_data(x):  # "yuchuli" = preprocess
    x = x.split(",")
    x[:feature_num] = [float(i) for i in x[0:feature_num]]
    x[class_index] = iris_label_transfor(x[class_index])  # for the iris data set
    # x[class_index] = int(x[class_index])  # for the pokerhand data set
    return x


# Softmax function.
ruanzuida = lambda x: np.exp(x) / sum((np.exp(x)))


def enry(x):
def _train(self):
    if not self.use_tqdm:
        from .utils import _pseudo_tqdm as inner_tqdm
    else:
        inner_tqdm = tqdm
    start = time.time()
    with inner_tqdm(total=self.n_steps, postfix='loss:{0:<6.5f}', leave=False,
                    dynamic_ncols=True) as pbar:
        self.pbar = pbar
        avg_loss = 0
        self.batch_per_epoch = self.data_iterator.num_batches
        for epoch in range(self.epoch, self.n_epochs + 1):
            self.epoch = epoch
            pbar.set_description_str(desc="Epoch {}/{}".format(epoch, self.n_epochs))
            # early stopping
            self.callback_manager.on_epoch_begin()
            for batch_x, batch_y in self.data_iterator:
                self.step += 1
                _move_dict_value_to_device(batch_x, batch_y, device=self._model_device)
                indices = self.data_iterator.get_batch_indices()
                # negative sampling; replace unknown; re-weight batch_y
                self.callback_manager.on_batch_begin(batch_x, batch_y, indices)
                prediction = self._data_forward(self.model, batch_x)

                # edit prediction
                self.callback_manager.on_loss_begin(batch_y, prediction)
                loss = self._compute_loss(prediction, batch_y).mean()
                loss = loss / self.update_every
                avg_loss += loss.item()

                # Is loss NaN or inf? requires_grad = False
                self.callback_manager.on_backward_begin(loss)
                self._grad_backward(loss)
                self.callback_manager.on_backward_end()

                self._update()
                self.callback_manager.on_step_end()

                if self.step % self.print_every == 0:
                    avg_loss = float(avg_loss) / self.print_every
                    if self.use_tqdm:
                        print_output = "loss:{:<6.5f}".format(avg_loss)
                        pbar.update(self.print_every)
                    else:
                        end = time.time()
                        diff = timedelta(seconds=round(end - start))
                        print_output = "[epoch: {:>3} step: {:>4}] train loss: {:>4.6} time: {}".format(
                            epoch, self.step, avg_loss, diff)
                    pbar.set_postfix_str(print_output)
                    avg_loss = 0
                self.callback_manager.on_batch_end()

                if ((self.validate_every > 0 and self.step % self.validate_every == 0) or
                        (self.validate_every < 0 and self.step % len(self.data_iterator) == 0)) \
                        and self.dev_data is not None:
                    eval_res = self._do_validation(epoch=epoch, step=self.step)
                    eval_str = "Evaluation on dev at Epoch {}/{}. Step:{}/{}: ".format(
                        epoch, self.n_epochs, self.step, self.n_steps)
                    # pbar.write(eval_str + '\n')
                    self.logger.info(eval_str)
                    self.logger.info(self.tester._format_eval_results(eval_res) + '\n')
            # ================= mini-batch end ==================== #
            # lr decay; early stopping
            self.callback_manager.on_epoch_end()
        # =============== epochs end =================== #
        pbar.close()
        self.pbar = None
def get_cpu_temparature():
    process = Popen(['vcgencmd', 'measure_temp'], stdout=PIPE)
    output, error = process.communicate()
    # Output looks like "temp=42.8'C"; extract the number between = and '.
    return float(output[output.index('=') + 1:output.rindex("'")])
        return out


rnn = RNN()
print(rnn)

optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)  # optimize all rnn parameters
loss_func = nn.CrossEntropyLoss()  # the target label is not one-hotted

# training and testing
for epoch in range(EPOCH):
    for step, (b_x, b_y) in enumerate(train_loader):  # gives batch data
        b_x = b_x.view(-1, 1, 14500)  # reshape x to (batch, time_step, input_size)
        output = rnn(b_x)  # rnn output
        loss = loss_func(output, b_y)  # cross entropy loss
        optimizer.zero_grad()  # clear gradients for this training step
        loss.backward()  # backpropagation, compute gradients
        optimizer.step()  # apply gradients
        if step % 50 == 0:
            test_output = rnn(test_x)  # (samples, time_step, input_size)
            pred_y = torch.max(test_output, 1)[1].data.numpy()
            accuracy = float((pred_y == test_y).astype(int).sum()) / float(test_y.size)
            print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.numpy(),
                  '| test accuracy: %.2f' % accuracy)

# print 10 predictions from test data
test_output = rnn(test_x[:10].view(-1, 28, 28))
pred_y = torch.max(test_output, 1)[1].data.numpy()
print(pred_y, 'prediction number')
print(test_y[:10], 'real number')
# Read distance and fuel, then print the average consumption in km/l.
X = int(input())
Y = float(input())
print('{:.3f} km/l'.format(X / Y))
def __init__(self, time_: int, bid_, ask_, seekInFile_):
    self.rateTime = int(time_)
    self.rateBid = float(bid_)
    self.rateAsk = float(ask_)
    self.seekInFile = int(seekInFile_)
rad = float(input("Enter the radius: "))
area = 3.14 * rad * rad
print("Area = " + str(area))
circumference = 2 * 3.14 * rad
print("Circumference = " + str(circumference))
def get_injector_size(self):
    offset = config.offset_injector_compensation
    inj_hex = self.get_value(offset)
    return int(450 / (float(long(inj_hex, 16)) / 74))
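# --- Round-trip sketch (not from the original source): set_injector_size
# --- above stores hex(int((450 / cc) * 74)); for cc = 450 that is
# --- hex(74) = '0x4a', and get_injector_size reads it back as
# --- int(450 / (74 / 74.0)) = 450. The int() truncation means injector
# --- sizes that do not divide evenly round-trip only approximately.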
for i in range(len(row)):
    key = row[i].replace(' ', "_")
    key = key.strip()
    if i == 8:
        key = 'latitude'
    if i == 9:
        key = 'longitude'
    if i == 11:
        continue
    val = x[i].strip()
    # Try int first, then float; fall back to the raw string. The original
    # chained two except clauses after a bare except, which is invalid syntax.
    try:
        val = int(val)
    except ValueError:
        try:
            val = float(val)
        except ValueError:
            pass
    finalObj[id][key.lower()] = val

print(finalObj['1013351'])

# for i in finalCsv:
#     x = i.split(',')
#     requests.put(url + x[0], data={before: x[2], after: x[3]})

# Pull each id, before image, and after image from df.
# For each data item, do a PUT request with the id as the param id.
def divide(request, a, b):
    if float(b) == 0:
        return HttpResponse('0 cannot be used as divisor')  # zero cannot be the divisor
    else:
        k = float(a) / float(b)
        return HttpResponse(str(k))
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', -1)

for filename in all_files:
    dfi = pd.read_csv(filename, index_col=None, header=0)
    dfi = dfi.sort_values('mse')
    selectedi = dfi.head(1)
    li.append(dfi)

df = pd.concat(li, axis=0, ignore_index=True)
if args.abc:
    # Keep only formulas that mention all three of _A, _B, and _C.
    dfa = df[df['formulas'].str.contains("_A")]
    dfb = dfa[dfa['formulas'].str.contains("_B")]
    df = dfb[dfb['formulas'].str.contains("_C")]
df = df.sort_values('mse')
selected = df.head(args.n)

# Print formula/RMSE pairs, skipping consecutive duplicates of the same RMSE.
previousvalue = float("inf")
for f in selected[["formulas", "mse"]].values:
    rmse = math.sqrt(f[1])
    if rmse != previousvalue:
        print(f[0], rmse)
    previousvalue = rmse
def subtract(Request, a, b):
    w = float(a) - float(b)
    return HttpResponse(str(w))
            distances.append(distance)
        distances = np.array(distances)
        # print distances
        t = np.argmin(distances)
        return t


k_means = K_means_classifier(10)

all_data = np.vstack((train_image, test_image))
all_data = pca(all_data, topNfeat=700)
train_image = all_data[:60000]
test_image = all_data[60000:]

k_means.train(train_image, train_label)
# print k_means.central_point

cnt = 0
for i in range(1, num_test):
    t = k_means.predict(test_image[i])
    if t == test_label[i]:
        cnt += 1
    if i % 1000 == 0:
        print i
        print float(cnt) / i
        log.write('test size = ' + str(i) + ' test accuracy: ')
        log.write(str(float(cnt) / i))
        log.write('\n')
print float(cnt) / i
def trunc(f, n):
    """Truncate/pad a float f to n decimal places without rounding."""
    slen = len('%.*f' % (n, f))
    return float(str(f)[:slen])
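# --- Usage sketch (not from the original source): '%.*f' % (2, 3.14159) is
# --- '3.14' (length 4), so str(3.14159)[:4] == '3.14' and the last kept
# --- digit is never rounded up.
assert trunc(3.14159, 2) == 3.14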
def multiply(Request, a, b):
    h = float(a) * float(b)
    return HttpResponse(str(h))
# LDA
trainData = addLabels(data, trainLabels)
ev = ldaTransform(trainData)
trainData = trainData[:, :-1]
trainData = project(trainData, ev)
testData = project(testData, ev)
trainData = addLabels(trainData, trainLabels)
testData = addLabels(testData, testLabels)

C0 = trainData[trainData[:, -1] == -1]
C1 = trainData[trainData[:, -1] == 1]
V0 = findVariance(C0[:, :-1])
V1 = findVariance(C1[:, :-1])
M0 = findMean(C0[:, :-1])
M1 = findMean(C1[:, :-1])
pr0 = float(C0.shape[0]) / float(trainData.shape[0])
pr1 = float(C1.shape[0]) / float(trainData.shape[0])
L = math.pow(10, -323)
MAX = -math.pow(10, 300)

# Testing phase
totalValues = testData.shape[0]
myPrediction = np.zeros([totalValues])
j = 0
for i in xrange(0, totalValues):
    sample = testData[i, :]
    sample = sample.tolist()
    ans0 = math.log(float(pr0))
    ans1 = math.log(float(pr1))
    count = 0
    g1 = gaussian(sample[j], M0, V0)
def add2(Request, a, b):
    c = float(a) + float(b)
    return HttpResponse(str(c))
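# --- Wiring sketch (not from the original source): how the four calculator
# --- views above (add2, subtract, multiply, divide) could be routed, assuming
# --- they live in a Django app's views.py. The URL names are illustrative,
# --- not from the source.
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('add/<a>/<b>/', views.add2),
#     path('subtract/<a>/<b>/', views.subtract),
#     path('multiply/<a>/<b>/', views.multiply),
#     path('divide/<a>/<b>/', views.divide),
# ]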
def get_phrg_production_rules(argmnts):
    args = argmnts

    t_start = time.time()
    df = tdf.Pandas_DataFrame_From_Edgelist(args['orig'])[0]
    if df.shape[1] == 4:
        G = nx.from_pandas_dataframe(df, 'src', 'trg', edge_attr=True)  # whole graph
    elif df.shape[1] == 3:
        G = nx.from_pandas_dataframe(df, 'src', 'trg', ['ts'])  # whole graph
    else:
        G = nx.from_pandas_dataframe(df, 'src', 'trg')
    G.name = graph_name(args['orig'][0])
    print "==> read in graph took: {} seconds".format(time.time() - t_start)

    G.remove_edges_from(G.selfloop_edges())
    giant_nodes = max(nx.connected_component_subgraphs(G), key=len)
    G = nx.subgraph(G, giant_nodes)

    num_nodes = G.number_of_nodes()
    phrg.graph_checks(G)

    if DBG: print
    if DBG: print "--------------------"
    if not DBG: print "-Tree Decomposition-"
    if DBG: print "--------------------"

    prod_rules = {}
    K = 2
    n = 300
    if num_nodes >= 500:
        print 'Grande'
        t_start = time.time()
        for Gprime in gs.rwr_sample(G, K, n):
            T = td.quickbb(Gprime)
            root = list(T)[0]
            T = td.make_rooted(T, root)
            T = phrg.binarize(T)
            root = list(T)[0]
            root, children = T
            # td.new_visit(T, G, prod_rules, TD)
            td.new_visit(T, G, prod_rules)
            Process(target=td.new_visit, args=(T, G, prod_rules,)).start()
    else:
        T = td.quickbb(G)
        root = list(T)[0]
        T = td.make_rooted(T, root)
        T = phrg.binarize(T)
        root = list(T)[0]
        root, children = T
        # td.new_visit(T, G, prod_rules, TD)
        td.new_visit(T, G, prod_rules)
        # print_treewidth(T)  # TODO: needs to be fixed
        # exit()

    if DBG: print
    if DBG: print "--------------------"
    if DBG: print "- Production Rules -"
    if DBG: print "--------------------"

    for k in prod_rules.iterkeys():
        if DBG: print k
        s = 0
        for d in prod_rules[k]:
            s += prod_rules[k][d]
        for d in prod_rules[k]:
            # Normalization step to create probabilities, not counts.
            prod_rules[k][d] = float(prod_rules[k][d]) / float(s)
            if DBG: print '\t -> ', d, prod_rules[k][d]

    rules = []
    id = 0
    for k, v in prod_rules.iteritems():
        sid = 0
        for x in prod_rules[k]:
            rhs = re.findall("[^()]+", x)
            rules.append(("r%d.%d" % (id, sid), "%s" % re.findall("[^()]+", k)[0], rhs, prod_rules[k][x]))
            if DBG: print ("r%d.%d" % (id, sid), "%s" % re.findall("[^()]+", k)[0], rhs, prod_rules[k][x])
            sid += 1
        id += 1

    df = pd.DataFrame(rules)
    # pp.pprint(df.values.tolist()); exit()
    df.to_csv('../ProdRules/{}.tsv.phrg.prs'.format(G.name), header=False, index=False, sep="\t")

    if os.path.exists('../ProdRules/{}.tsv.phrg.prs'.format(G.name)):
        print 'Saved', '../ProdRules/{}.tsv.phrg.prs'.format(G.name)
    else:
        print "Trouble saving"
    print "-----------"
    print [type(x) for x in rules[0]]
    '''
print(len(dis1))
dis1.sort()
print(dis1)
temp = []
counts = math.ceil(len(dis1) * 0.05)
print(counts)
for i in range(counts):
    temp.append(dis1[i])
print(temp)

# Compute the threshold as the mean of the closest 5% of distances.
threshold = np.mean(temp)
print(threshold)
for i in range(len(C)):
    dis.iloc[i, i] = float('inf')

# Merge centre points using the threshold.
S = []
i = 0
for i in range(len(C)):
    point = []
    index = []
    point.append(C[i])
    index.append(i)
    for j in range(len(C)):
        if i != j and dis.iloc[i, j] < threshold:
            point.append(C[j])
            index.append(j)
    if len(index) > 1: