def main(save_type, ignore, result_db, output, filter_email, ignore_file, projects):
    print('----------------config--------------')
    p(locals())
    print('(>.<)')
    print('------------------------------------')
    if ignore_file:
        ignore = list(
            filter(lambda x: x,
                   (l.strip().lower() for l in ignore_file.readlines()))) + list(ignore or [])
    if not projects:
        verify_func = lambda x: x.startswith(u'industry_')
    else:
        projects = list([x.lower() for x in projects])
        verify_func = lambda x: x.lower() in projects
    core(save_type, result_db, ignore, verify_func=verify_func,
         output=output, filter_email=filter_email)
def findLadders(self, beginWord, endWord, wordlist):
    """
    :type beginWord: str
    :type endWord: str
    :type wordlist: Set[str]
    :rtype: List[List[int]]
    """
    trace = {}
    local_path = defaultdict(list)
    wordlist.add(endWord)
    queue = deque([beginWord])
    count = 1
    reach_end = False
    while queue:
        word = queue.popleft()
        count -= 1
        for nw in self.word_gen(word, wordlist):
            local_path[nw].append(word)
            if nw == endWord:
                reach_end = True
            else:
                queue.append(nw)
        if count == 0:
            count = len(queue)
            trace.update(local_path)
            local_path = defaultdict(list)
            if reach_end:
                from pprint import pprint as p
                p(trace)
                #return self.build_trace(trace, beginWord, endWord)
                break
    return []
def mainFunction(arrayOfArguments):
    # p(arrayOfArguments)
    p("Searching for command '{}.py' (created by Creed)...".format(
        arrayOfArguments[1]))
    pathToRepos = myPyFunc.getPathUpFolderTree(pathToThisPythonFile, 'repos')

    def ifPythonFileToImport(fileObj):
        if fileObj.is_file() and fileObj.suffix == '.py':
            if fileObj.stem == arrayOfArguments[1]:
                return True
        return False

    pathToPythonFileForImport = myPyFunc.findFilePathBreadthFirst(
        pathToRepos,
        ifPythonFileToImport,
        pathsToExclude=[
            str(Path(pathToRepos, 'privateData', 'python', 'dataFromStocks')),
            str(Path(pathToRepos, '.history')),
            str(Path(pathToRepos, '.vscode')),
            str(Path(pathToRepos, 'reposFromOthers')),
            '.git', 'node_modules'
        ])
    # p(pathToPythonFileForImport)
    importedModuleSpec = importlib.util.spec_from_file_location(
        arrayOfArguments[1], pathToPythonFileForImport)
    importedModule = importlib.util.module_from_spec(importedModuleSpec)
    importedModuleSpec.loader.exec_module(importedModule)
    importedModule.mainFunction(arrayOfArguments[1:])
def validateAnswer(answer):
    if len(answer) == 0:
        p(1)
        return 'You must choose at least one topping.'
    else:
        p(2)
        return True
def fetchPrice():
    public = 'CmmByW3DwxkXEEJ9kUWjAbpoMH37A99ZbOAP5R54arTOWmMxXoyEjNtY62PYIXmy'
    private = 'js7tj01HsBhi6uO8jOpcj09eLrDdaL1TQmVVE29T67K38DJoCRYDJr7rs3JBZfwH'
    #interval = 60
    interval = 30
    start_time = time.time()
    time_range = 5000 * interval
    BTC = 'BTCUSDT'
    client = Client(public, private)
    #klines = client.get_historical_klines(BTC, Client.KLINE_INTERVAL_1HOUR, str(time_range)+" hour ago UTC")
    klines = client.get_historical_klines(BTC, Client.KLINE_INTERVAL_30MINUTE,
                                           str(time_range) + " min ago UTC")
    p(klines)
    timeLen = len(klines)
    f = open("price.csv", 'w')
    for i in range(timeLen):
        f.write(str(klines[i][0]) + ",")
        f.write(klines[i][4])
        f.write("\n")
    f.close()
    f = open("now_price.txt", 'w')
    f.write(klines[timeLen - 1][4])
    f.close()
    print("--- %s seconds ---" % (time.time() - start_time))
def test_json_file():
    with open('/tmp/wiki.json') as fd:
        cnt = 0
        for line in fd:
            cnt += 1
            if cnt > 100:
                break
            p(parse(line))
def GetCameraBrands():
    """docstring for GetCameraBrands"""
    url = 'http://www.flickr.com/cameras/brands/'
    listCameraBrands = []
    try:
        r = requests.get(url)
    except:
        print "Could not get url %s" % (url)
        return None
    else:
        soup = BeautifulSoup(''.join(r.text))
        # Parse these tags
        div = soup.find("div", id="brands")
        table = div.find("table", {"id": "all-brands"})
        rows = table.find_all("tr", recursive=False)
        print "Rows: %d" % (len(rows))
        for tr in rows:
            camera_tags = tr.find_all("td")
            for tag in camera_tags:
                tag = str(tag)
                m = re.search(".*href=\"(?P<Url>.*)\">(?P<Brand>.*)</a>\n</td>",
                              tag, re.MULTILINE)
                if m:
                    listCameraBrands.append(
                        CameraBrand(m.group('Brand'), m.group('Url')))
                    p(m.group('Brand'))
                # end if
            # end for
    # end try
    return listCameraBrands
def GetCameraModels(url):
    """docstring for GetCameraModels"""
    listCameraModels = []
    try:
        r = requests.get(url)
    except:
        print "Failed to get url %s" % (url)
    else:
        soup = BeautifulSoup("".join(r.text))
        div = soup.find("div", id="models")
        table = div.find("table", {"id": "all-cameras"})
        rows = table.findAll("tr", recursive=False)
        for tr in rows:
            camera_tags = tr.find_all("td")
            # Parse these tags
            for tag in camera_tags:
                tag = str(tag)
                m = re.match('.*\n.*href="(?P<Url>.*)">(?P<Model>.*)</a>', tag)
                if m:
                    listCameraModels.append(
                        CameraModel(m.group("Model"), m.group("Url")).__dict__)
                    p(m.group("Model"))
                # end if
            # end for
    # end try
    return listCameraModels
def view(interpreter):
    '''
    Pretty print the interpreter to stdout.
    '''
    from pprint import pprint as p
    p(interpreter)
    return interpreter
def sifuApp(appName):
    userName = current_user.name
    templateDir = "app/" + appName + "/"
    baseAppDir = "templates/" + templateDir
    print("Trying to access app: " + baseAppDir + " / " + str(userName))
    try:
        appSettings = utils.loadYamlFile(baseAppDir + "/settings.yaml")
        print("App Settings:")
        p(appSettings)
        canUseApp = core.isUserInList(userName, appSettings.get("allow", []), db)
        if not canUseApp:
            abort(404)
        # Are we trying to get a file?
        fileName = request.args.get("file")
        allVars = utils.requestToDict(request)
        if fileName:
            return send_from_directory(baseAppDir, fileName)
        isAdmin = db.isUserAdmin(userName)
        return render_template(templateDir + "index.html",
                               user=userName,
                               isAdmin=isAdmin,
                               allVars=allVars)
    except:
        abort(404)
        return "Go somewhere else!"
def test_json_file():
    with open('/tmp/wiki.json') as fd:
        cnt = 0
        for line in fd:
            cnt += 1
            if cnt > 100:
                break
            p(parse(line))
def main():
    json_string = '''
    {
        "key1": "val1",
        "boolkey": false,
        "boolkey2": true,
        "listkey": [1,2,3],
        "nest_listkey": [
            {"key1": "val1"},
            {"key2": "val2"}
        ],
        "nest_obj": {
            "key1": "val1",
            "key2": [4,5,6]
        },
        "null_key": null
    }
    '''
    #json_string = '[1.3242353453, "2", 3]'
    #json_string = '[1,2,"3"]'
    p(parse(json_string))
    return
    import time
    st = time.time()
    for i in range(10000):
        parse(json_string)
    print(time.time() - st)
    st = time.time()
    for i in range(10000):
        json.loads(json_string)
    print(time.time() - st)
def GetCameraModels(url):
    """docstring for GetCameraModels"""
    listCameraModels = []
    try:
        r = requests.get(url)
    except:
        print "Failed to get url %s" % (url)
    else:
        soup = BeautifulSoup(''.join(r.text))
        div = soup.find("div", id="models")
        table = div.find("table", {"id": "all-cameras"})
        rows = table.findAll("tr", recursive=False)
        for tr in rows:
            camera_tags = tr.find_all("td")
            # Parse these tags
            for tag in camera_tags:
                tag = str(tag)
                m = re.match(".*\n.*href=\"(?P<Url>.*)\">(?P<Model>.*)</a>", tag)
                if m:
                    listCameraModels.append(
                        CameraModel(m.group('Model'), m.group('Url')).__dict__)
                    p(m.group('Model'))
                # end if
            # end for
    # end try
    return listCameraModels
def performMouseActions():
    # pyautogui.displayMousePosition()
    # coordinatesOfTextMessage = [1056, 737]
    # coordinatesOfArchiveButton = [1056, 90]
    # pyautogui.mouseDown(x=coordinatesOfTextMessage[0], y=coordinatesOfTextMessage[1], button='left')
    # time.sleep(.75)
    # pyautogui.mouseUp(x=coordinatesOfTextMessage[0], y=coordinatesOfTextMessage[1], button='left')
    # pyautogui.click(coordinatesOfArchiveButton[0], coordinatesOfArchiveButton[1])
    # time.sleep(2)
    for iterationCount in range(500):
        startingDragPosition = [770, 740]
        endingDragPosition = [1200, 740]
        xDrag = endingDragPosition[0] - startingDragPosition[0]
        yDrag = endingDragPosition[1] - startingDragPosition[1]
        pyautogui.mouseDown(startingDragPosition[0], startingDragPosition[1])
        # time.sleep(.75)
        pyautogui.moveTo(endingDragPosition[0], endingDragPosition[1], duration=.2)
        pyautogui.mouseUp(endingDragPosition[0], endingDragPosition[1])
        # time.sleep(.1)
        pyautogui.moveTo(startingDragPosition[0], startingDragPosition[1])
        time.sleep(1)
        p(iterationCount)
def getArrayOfFileObjFromDir(dirToAdd, pathsToExclude):
    arrayOfFileObjInDir = []

    def fileObjHasPathToExclude(fileObj, pathsToExclude):
        for pathToExclude in pathsToExclude:
            if str(pathToExclude) in str(fileObj):
                return True
        return False

    try:
        dirToAddArray = os.listdir(dirToAdd)
    except:
        p('Can\'t add fileObj\'s from ' + str(dirToAdd))
        dirToAddArray = []
    dirToAddArray = [
        Path(dirToAdd, fileObjName) for fileObjName in dirToAddArray
    ]
    for fileObjInDirToAdd in dirToAddArray:
        if not fileObjHasPathToExclude(fileObjInDirToAdd, pathsToExclude):
            arrayOfFileObjInDir.append(fileObjInDirToAdd)
    return arrayOfFileObjInDir
def rowForMatchedArrayOnAmountDate(gpArrayCurrentRow):
    rowToReturn = gpArrayCurrentRow
    dateComparisonFunction = myPyFunc.getColumnComparisonFunction(
        gpDateStrColIdx, bankDteStrColIdx)
    rowIndicesThatMatch = myPyFunc.rowIndicesInSecondFromTestsOnFirst(
        [amountComparisonFunction, dateComparisonFunction], gpArrayCurrentRow,
        bankArray)
    if len(rowIndicesThatMatch) == 1:
        filterFieldsForMatchStatus = myPyFunc.getFilterByIndexFunction(
            [gpNewAmtColIdx, gpDateStrColIdx])
        rowToReturn.extend([
            myPyFunc.getMatchStatus(
                myPyFunc.filterArray(filterFieldsForMatchStatus, gpArrayFirstRow))
        ] + bankArray.pop(rowIndicesThatMatch[0]))
    elif len(rowIndicesThatMatch) > 1:
        p('More than one row matches on the first pass')
        for rowIndexThatMatches in rowIndicesThatMatch:
            p(bankArray[rowIndexThatMatches])
    return rowToReturn
def main():
    json_string = '''
    {
        "key1": "val1",
        "boolkey": false,
        "boolkey2": true,
        "listkey": [1,2,3],
        "nest_listkey": [
            {"key1": "val1"},
            {"key2": "val2"}
        ],
        "nest_obj": {
            "key1": "val1",
            "key2": [4,5,6]
        },
        "null_key": null
    }
    '''
    #json_string = '[1.3242353453, "2", 3]'
    #json_string = '[1,2,"3"]'
    p(parse(json_string))
    return
    import time
    st = time.time()
    for i in range(10000):
        parse(json_string)
    print(time.time() - st)
    st = time.time()
    for i in range(10000):
        json.loads(json_string)
    print(time.time() - st)
def main():
    boards = s.import_file()
    boards = [s.import_string(board) for board in boards]

    def coord_board():
        board[2][0] = 7
        board[0][1] = 8
        board[1][1] = 5
        board[2][1] = 3
        board[4][0] = 6
        board[5][0] = 1
        board[4][2] = 9
        board[7][0] = 8
        board[6][1] = 9
        board[7][1] = 1
        board[8][2] = 5
        coords = (1,3,4), (2,3,1), (2,4,3), (0,5,6), (2,5,2), \
                 (3,4,2), (4,4,5), (5,4,4), \
                 (6,3,8), (8,3,3), (7,4,6), (6,5,5), (7,5,4), \
                 (0,6,1), (1,7,8), (2,7,4), (1,8,2), \
                 (4,6,4), (3,8,6), (4,8,1), \
                 (6,7,6), (7,7,5), (8,7,1), (7,8,9)
        for coord in coords:
            s.set_sq(board, coord[:2], coord[2])

    for board in boards:
        poss = s.return_ledger(board)
        s.poss_grid(board, poss)
        # s.constraints(board, poss)
        s.constraints(board, poss)
        s.constraints(board, poss)
        s.constraints(board, poss)
        p(board)
def GetCameraBrands():
    """docstring for GetCameraBrands"""
    url = "http://www.flickr.com/cameras/brands/"
    listCameraBrands = []
    try:
        r = requests.get(url)
    except:
        print "Could not get url %s" % (url)
        return None
    else:
        soup = BeautifulSoup("".join(r.text))
        # Parse these tags
        div = soup.find("div", id="brands")
        table = div.find("table", {"id": "all-brands"})
        rows = table.find_all("tr", recursive=False)
        print "Rows: %d" % (len(rows))
        for tr in rows:
            camera_tags = tr.find_all("td")
            for tag in camera_tags:
                tag = str(tag)
                m = re.search('.*href="(?P<Url>.*)">(?P<Brand>.*)</a>\n</td>',
                              tag, re.MULTILINE)
                if m:
                    listCameraBrands.append(CameraBrand(m.group("Brand"), m.group("Url")))
                    p(m.group("Brand"))
                # end if
            # end for
    # end try
    return listCameraBrands
def predict_dict(self, images):
    """
    images: num_batch, img_size, img_size, 3
    Preprocessed to range [-1, 1]
    Runs the model with images.
    """
    plt.imshow(images.reshape(224, 224, 3))
    plt.show()
    plt.close()
    print("images.shape is {0}".format(images.shape))  # images.shape is (1, 224, 224, 3)
    feed_dict = {
        self.images_pl: images,
        # self.theta0_pl: self.mean_var,
    }
    fetch_dict = {
        # maybe just don't fetch this "joints" variable, and instead turn
        # absl.config.json_path into the old format HMR requests.
        'joints': self.all_kps[-1],
        'verts': self.all_verts[-1],
        'cams': self.all_cams[-1],
        'joints3d': self.all_Js[-1],
        'theta': self.final_thetas[-1],
    }
    print("fetch_dict is :")
    p(fetch_dict)
    '''
    with open('tf_sess.pkl', 'wb') as f:
        pkl.dump(self.sess, f)
    # I think what's happening is it's impossible to pickle a class inside of a
    # different module than the name of that class.
    '''
    summ_writer = tf.summary.FileWriter(os.path.join('summaries', 'first'), self.sess.graph)
    results = self.sess.run(fetch_dict, feed_dict)
    # This "(self.sess.run(fetch_dict, feed_dict))" must be called before we can get the betas out of smpl?
    # The real question is: CAN we even get the betas out? predict_dict() predicts the values of
    # joints, verts, cams, joints3d, and theta just fine, but can we relearn the betas from these values?
    """
    'joints': self.all_kps[-1],
    'verts': self.all_verts[-1],
    'cams': self.all_cams[-1],
    'joints3d': self.all_Js[-1],
    'theta': self.final_thetas[-1],
    """
    # Return joints in original image space.
    # (ie. we ran openpose on the 224x224 img, then resized them to the original image's size.)
    # -nxb, Fri Mar 29 11:21:27 EDT 2019
    joints = results['joints']
    joints = ((joints + 1) * 0.5) * self.img_size
    print("joints.shape:", joints.shape)  # (1,19,2)
    plt.scatter(joints[:, :, 0], joints[:, :, 1])
    plt.show()
    plt.close()
    results['joints'] = joints
    return results
def fix_strings(province, city):
    pattern = '\s'
    adj_prov = re.sub(pattern, '-', province)
    adj_city = re.sub(pattern, '-', city)
    url = "https://www.theweathernetwork.com/ca/weather/" + adj_prov.lower() + "/" + adj_city.lower()
    p(url)
    return url
def test_atingimento_ge(self):
    """
    Tests that 1 + 1 always equals 2.
    """
    qs = VwFIM.atingimento_ge(produto=u'PÓS PURO')
    p(qs.values_list('grupo_economico', 'gross', 'mes', 'dimensao_grupo'))
    self.failUnlessEqual(1 + 1, 2)
def printTimeSinceImport():
    currentTime = time.time()
    elapsedMinutesStr = str((currentTime - startTime) // 60).split('.')[0]
    elapsedSecondsStr = str(round((currentTime - startTime) % 60, 0)).split('.')[0]
    p('Split time: ' + elapsedMinutesStr + ' minutes and ' + elapsedSecondsStr + ' seconds.')
def wrapper(*args, **kwargs):
    now_time = str(time.strftime('%Y-%m-%d %X', time.localtime()))
    print('------------------------------------------------')
    print('%s %s called' % (now_time, func.__name__))
    print('Document:%s' % func.__doc__)
    print('%s returns:' % func.__name__)
    re = func(*args, **kwargs)
    p(re)
    return re
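# A minimal sketch of the enclosing decorator that `wrapper` above presumably belongs to.
# The outer function name `log_call`, the imports, and the sample `add` function are
# assumptions for illustration only; they are not part of the original snippet.
import time
from functools import wraps
from pprint import pprint as p


def log_call(func):
    """Hypothetical outer decorator: closes over `func` so the wrapper can call it."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        now_time = str(time.strftime('%Y-%m-%d %X', time.localtime()))
        print('%s %s called' % (now_time, func.__name__))
        result = func(*args, **kwargs)
        p(result)
        return result
    return wrapper


@log_call
def add(a, b):
    """Return the sum of a and b."""
    return a + b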
def main():
    args = sys.argv
    check_args(args, 1)
    account_id = args[1]
    msf = LibMtSalesForce()
    soql = "SELECT Id From Contact Where AccountId='%s'" % (account_id)
    response = msf.query(soql)
    p(response)
    return response
def main():
    args = sys.argv
    check_args(args, 1)
    msf = LibMtSalesForce()
    soql = "SELECT Id,Name,MstDisease__c,Other__c FROM AccountDisease__c WHERE AccountDisease2Account__c='%s'" % (
        args[1])
    response = msf.query(soql)
    p(response)
    return response
def main():
    args = sys.argv
    check_args(args, 1)
    msf = LibMtSalesForce()
    soql = "Select CreatedDate, AccountPoint2Account__c, Point_i__c, Comment_c__c FROM AccountPoint__c WHERE IsDeleted = FALSE AND AccountPoint2Account__c = '%s' AND CreatedDate >= 2018-04-01T00:00:00+09:00 order by CreatedDate" % (
        args[1])
    response = msf.query(soql)
    p(response)
    return response
def main():
    args = sys.argv
    check_args(args, 1)
    msf = LibMtSalesForce()
    soql = "SELECT Id,Name,MstCategory__c,Other__c,IsPrimary__c FROM AccountCategory__c WHERE AccountCategory2Address__c='%s'" % (
        args[1])
    response = msf.query(soql)
    p(response)
    return response
def main():
    args = sys.argv
    check_args(args, 1)
    msf = LibMtSalesForce()
    soql = "SELECT Id,AddressClass__c,EmpDepartment__c,MstWorkClassId__c,EmpName__c,EmpClass__c,Zip__c,Pref__c,City__c,Address__c,Tel__c,isShippingAddressOfMT__c,shisetsu_code__c,checkFlag__c FROM Address__c WHERE AccountId__c='%s'" % (
        args[1])
    response = msf.query(soql)
    p(response)
    return response
def insertion_sort(nums: List[int]) -> List[int]:
    """
    Insertion sort works like picking up cards when dealing in a card game: each new
    card is inserted into the already sorted hand.
    An optimization is to use binary_search to find the insertion index
    ("Shell sort" is a more efficient variant of insertion sort).
    Since binary search only reduces the number of comparisons, and shifting elements
    for the insert still costs O(n), the time complexity matches bubble sort:
    average O(n^2), best O(n), worst O(n^2); stable sort.
    Worst case: the input is in reverse order.
    FIXME: my binary-insertion variant is slightly buggy; the plain insertion sort
    without binary search works better.
    """
    def binary_search(nums: List[int], target: int) -> int:
        left: int = 0
        right: int = len(nums) - 1
        middle: int
        while left < right and right > 1:
            # If middle were simply (left + right) // 2, the test case ([1, 2, 3], 4)
            # would get stuck in an infinite loop (left, right = 1, 2)
            middle = (left + right) // 2
            if nums[middle] == target:
                print("nums[middle] == target")
                return middle
            elif nums[middle] > target:
                print("nums[middle] > target")
                right = min(middle, right - 1)
            else:
                print("nums[middle] < target")
                left = max(middle, left + 1)
            print(f"left, right = {left}, {right}")
        # A typical binary search returns -1 when the target is not found.
        # Here, mimicking Rust's binary search, an index where the target should be
        # inserted is returned whether or not the target is found.
        if nums[left] > target:
            return left
        else:
            return right

    length: int = len(nums)
    current_num: int
    for i in range(1, length):
        print()
        p('==' * 10)
        p((nums[:i], nums[i]))
        binary_search_index = binary_search(nums[:i], nums[i])
        p(f"binary_search_index, i = {binary_search_index}, {i}")
        if binary_search_index < i and nums[binary_search_index] > nums[i]:
            current_num = nums[i]
            # Shift every element larger than nums[i] one slot to the right
            for j in range(i, binary_search_index, -1):
                nums[j], nums[j - 1] = nums[j - 1], nums[j]
            nums[binary_search_index] = current_num
        # A List[int] does not need pretty printing
        p(nums)
        p('==' * 10)
        print()
    return nums
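# A quick usage sketch for insertion_sort above; the sample list is arbitrary, and the
# FIXME in the docstring means the binary-insertion variant may not handle every input.
if __name__ == "__main__":
    print(insertion_sort([5, 2, 4, 6, 1, 3]))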
def load_client(self):
    """set simple_salesforce instance"""
    if os.path.exists(self.client_instance_file) == False:
        p("[Log] authentication from salesforce")
        self.authenticate()
        self.load_client()
    else:
        with open(self.client_instance_file, 'rb') as f:
            p("[Log] authentication from file")
            self.client = pickle.load(f)
def main():
    sol = Solution()
    board = [
        ['X', 'X', 'X', 'X'],
        ['X', 'O', 'O', 'X'],
        ['X', 'X', 'O', 'X'],
        ['X', 'O', 'X', 'X'],
    ]
    sol.solve(board)
    from pprint import pprint as p
    p(board)
def addFileToAcrobatOCRList(fileObj):
    while (not myPyAutoGui.locateOnScreenLocal('addFilesButton', pathToThisPythonFile.parents[0])
           and not myPyAutoGui.locateOnScreenLocal('acrobatStartScreen', pathToThisPythonFile.parents[0])):
        p('Waiting for Acrobat to appear...')
    if myPyAutoGui.locateOnScreenLocal('addFilesButton', pathToThisPythonFile.parents[0]):
        myPyAutoGui.clickWhenLocalPNGAppears('addFilesButton', pathToThisPythonFile.parents[0])
    elif myPyAutoGui.locateOnScreenLocal('acrobatStartScreen', pathToThisPythonFile.parents[0]):
        pyautogui.press(['alt', 'f', 'w', 'down', 'down', 'enter'])
        myPyAutoGui.clickWhenLocalPNGAppears('addFilesButton', pathToThisPythonFile.parents[0])
    pyautogui.press('f')
    myPyAutoGui.getCoordinatesWhenLocalPNGAppears('filenameBoxReady', pathToThisPythonFile.parents[0])
    pyperclip.copy(str(fileObj))
    if platform.system() == 'Darwin':
        pyautogui.hotkey('command', 'v')
    else:
        pyautogui.hotkey('ctrl', 'v')
    # def type_unicode(word):
    #     for c in word:
    #         c = '%04x' % ord(c)
    #         pyautogui.keyDown('optionleft')
    #         pyautogui.typewrite(c)
    #         pyautogui.keyUp('optionleft')

    # import pyautogui as px
    # def type_unicode(word):
    #     for char in word:
    #         num = hex(ord(char))
    #         px.hotkey('ctrl', 'shift', 'u')
    #         for n in num:
    #             px.typewrite(n)
    #         px.typewrite('\n')
    pyautogui.press('enter')
    myPyAutoGui.waitUntilLocalPNGDisappears('addFilesDialogBox', pathToThisPythonFile.parents[0])
def writeXML(fileToCreateStr, root):
    try:
        os.remove(fileToCreateStr)
    except OSError:
        pass
    root.getroottree().write(fileToCreateStr,
                             pretty_print=True,
                             xml_declaration=True,
                             encoding='utf-8')
    p('File complete: ' + fileToCreateStr + ' with length: ' + str(len(root)))
def main():
    """
    For test-running SOQL queries. This is also the quickest way to check the data.
    """
    msf = LibMtSalesForce()
    # soql = "SELECT Name FROM CodeMaster__c WHERE Id='%s'" % (id)
    # soql = "SELECT Name, CodeKind__c FROM CodeMaster__c"
    soql = "SELECT id FROM AccountSymptom__c"
    response = msf.query(soql)
    p(response)
    return response
def main(inputs):
    '''this is basically a loop over all the pools to do the calculations'''
    print "\n"
    # this is the core loop: do the calc for every month in a given timespan (year) for every pool
    data_list = []
    for year in range(2000, 2030):
        for month in range(1, 13):
            for pool in pool_list:
                # call the calculations function to do the math
                data_list = calculations(year, pool, soil, month, data_list,
                                         inputs[0], inputs[1], inputs[2])
    # plot everything
    if not FIT:
        plot(data_list)
    pool_dict = {
        "bio": bio.get_pool_size(),
        "hum": hum.get_pool_size(),
        "dpm": dpm.get_pool_size(),
        "rpm": rpm.get_pool_size(),
        "co2": co2.get_pool_size(),
    }
    '''make two arrays to calculate the mean squared errors'''
    wanted_vals = np.array([init_pool_size["BIO"], init_pool_size["HUM"],
                            init_pool_size["DPM"], init_pool_size["RPM"],
                            abs(init_pool_size["CO2"])])
    wanted_vals = wanted_vals.reshape(1, -1)
    vals_got = np.array([pool_dict["bio"], pool_dict["hum"], pool_dict["dpm"],
                         pool_dict["rpm"], abs(pool_dict["co2"])])
    vals_got = vals_got.reshape(1, -1)
    if NORMALIZE:
        wanted_vals = normalize(wanted_vals)
        vals_got = normalize(vals_got)
    # wanted array vs returned array
    print wanted_vals
    print vals_got
    '''calculate mse'''
    # with axis=None the average is performed element-wise along the array, returning a single value
    mse = ((wanted_vals - vals_got) ** 2).mean(axis=None)
    print "OC input", inputs[0], "dpm_rpm ratio", inputs[1], "bio hum factor", inputs[2], "\n"
    p(pool_dict)
    print "mean squared error", mse
    '''uncomment this in case you want to minimize just a single value'''
    #return pool_dict["hum"]
    return mse
def parse_page(url, regex):
    resp = get_page(url)
    if resp.getcode() != 200:
        raise ParserException('cant load main page')
    text = str(resp.read())
    p('loaded page {}, size = {}'.format(url_decode(url), len(text)))
    regex1 = regex[0] if isinstance(regex, tuple) else regex
    pages = re.findall(regex1, text)
    res = (True, pages)
    if len(pages) == 0 and isinstance(regex, tuple) and len(regex) > 1:
        pages = re.findall(regex[1], text)
        res = (False, pages)
    return res
def get_jinja_fields_and_colors(json_file=colorgen_json):
    '''Gets all fields from a theme json file for the A swatch'''
    # Reading in theme json data
    with open(json_file) as data_file:
        jqmt_rip = json.load(data_file)
    p(jqmt_rip)  # Print out of json
    field_colors = {}  # Dictionary of unique colors and names
    for k, v in jqmt_rip['themes'][0]['global'].iteritems():
        field_colors["global_" + k] = v
    for k, v in jqmt_rip['themes'][0]['a'].iteritems():
        for k2, v2 in v.iteritems():
            field_colors['a_' + k + "_" + k2] = v2
    return field_colors
def main(lat, temp_mean, precip_mean, max_tsmd):
    '''get evapotranspiration'''
    # taken from Richard's pyeto, see doc in the same folder for more information
    pyeto_lat = pyeto.deg2rad(lat)  # converts the degrees to radians
    mean_month_list = [temp_mean[i] for i in temp_mean]
    monthly_mean_daylight = pyeto.monthly_mean_daylight_hours(pyeto_lat)
    eto = pyeto.thornthwaite(mean_month_list, monthly_mean_daylight)
    '''make a "month_number" : "evapo" dict'''
    eto_dict = {}
    for n, i in enumerate(eto):
        eto_dict[n + 1] = eto[n]
    print "\nWaterbalance with maximum deficiency of", max_tsmd
    acc_TSMD = {}
    acc_factor = 0
    budget = {}
    '''The following part is still a bit hacky. Two for loops: the first calculates
    the water budget for every month, then a second loop subtracts the value of the
    previous month from the current month.'''
    print "month \t rain \t eto \t\t TSMD"
    for month, rain, etom in zip(range(1, 13), precip_mean, eto_dict):
        TSMD = precip_mean[rain] - (eto_dict[etom] * 0.75)
        if TSMD <= max_tsmd:
            TSMD = max_tsmd
        print month, "\t", precip_mean[rain], "\t", eto_dict[etom], "\t", TSMD
        budget[month] = TSMD if TSMD < 0 else 0
    for month, rain, etom in zip(range(1, 13), precip_mean, eto_dict):
        TSMD = precip_mean[rain] - (eto_dict[etom] * 0.75)
        if month == 1:
            acc_TSMD[month] = TSMD + budget[12]
        else:
            acc_TSMD[month] = TSMD + budget[month - 1]
        if acc_TSMD[month] > 0:
            acc_TSMD[month] = 0
        if acc_TSMD[month] < max_tsmd:
            acc_TSMD[month] = max_tsmd
    print "\nWater budget"
    p(budget)
    print ""
    return acc_TSMD
def test():
    server = Server(bulk_limit=201, limit=100)
    #db = server['stackoverflow_user']
    db = server['stackoverflow_question']
    db = db.get_view('rank', 'answer_rank', True)
    old_stdout = sys.stdout
    for i in db:
        out = StringIO()
        sys.stdout = out
        p(i)
        sys.stdout = old_stdout
        print out.getvalue()
        print
        print
        LOG.debug(out.getvalue())
def _update_index():
    nons = []
    for key in doc:
        if not doc[key]:
            nons.append(key)
    for key in nons:
        del doc[key]
    for key in doc.values():
        if key in index:
            index[key].update(doc)
            for key2 in doc.values():
                index[key2] = index[key]
            p(index[key])
            return
    for key in doc.values():
        index[key] = doc
    docs.append(doc)
def _cloudlb_request(self, url, method, **kwargs):
    if not self.region_account_url:
        self.authenticate()
    #TODO: Look over
    # Perform the request once. If we get a 401 back then it
    # might be because the auth token expired, so try to
    # re-authenticate and try again. If it still fails, bail.
    kwargs.setdefault('headers', {})['X-Auth-Token'] = self.auth_token
    kwargs['headers']['User-Agent'] = cloudlb.consts.USER_AGENT
    if 'body' in kwargs:
        kwargs['headers']['Content-Type'] = 'application/json'
        kwargs['body'] = json.dumps(kwargs['body'])
    ext = ""
    fullurl = "%s%s%s" % (self.region_account_url, url, ext)
    #DEBUGGING:
    if 'PYTHON_CLOUDB_DEBUG' in os.environ:
        sys.stderr.write("URL: %s" % (fullurl))
        sys.stderr.write("ARGS: %s" % (str(kwargs)))
        sys.stderr.write("METHOD: %s" % (str(method)))
        if 'body' in kwargs:
            from pprint import pprint as p
            p("BODY: %s" % kwargs['body'], stream=sys.stderr)
    response, body = self.request(fullurl, method, **kwargs)
    if body:
        try:
            body = json.loads(body)
        except ValueError:
            pass
    if (response.status < 200) or (response.status > 299):
        raise cloudlb.errors.ResponseError(response.status, response.reason)
    return response, body
def main():
    c = int(raw_input())
    n = int(raw_input())
    agrmt = dd(set)
    for _ in xrange(n):
        a, b = map(lambda x: int(x) - 1, raw_input().split())
        agrmt[a].add(b)
        agrmt[b].add(a)
    left_dp = [[False for _ in xrange(c)] for _ in xrange(c)]
    right_dp = [[False for _ in xrange(c)] for _ in xrange(c)]
    p(agrmt)
    for i in xrange(c):
        for j in xrange(i, c):
            if i == j:
                left_dp[i][j] = True
                right_dp[i][j] = True
                continue
            left_dp[i][j] = (left_dp[i][j - 1] and j in agrmt[j - 1]) or \
                            (right_dp[j - 1])
def test_rapport(data, filename):
    result = data
    array_0 = []
    array_1 = []
    array_2 = []
    for (serie, size), time in result.items():
        x = float(size)
        y = time / func_theoric(float(size))
        if serie == 0:
            array_0.append((x, y))
        elif serie == 1:
            array_1.append((x, y))
        else:
            array_2.append((x, y))
    array_0.sort(key=lambda x: x[0])
    plt.plot([x for (x, y) in array_0], [y for (x, y) in array_0])
    plt.savefig(filename + "0-9_rapport.png")
    plt.cla()
    plt.clf()
    plt.close()
    array_1.sort(key=lambda x: x[0])
    p(array_1)
    plt.plot([x for (x, y) in array_1], [y for (x, y) in array_1])
    plt.savefig(filename + "10-19_rapport.png")
    plt.cla()
    plt.clf()
    plt.close()
    array_2.sort(key=lambda x: x[0])
    plt.plot([x for (x, y) in array_2], [y for (x, y) in array_2])
    plt.savefig(filename + "20-29_rapport.png")
    plt.cla()
    plt.clf()
    plt.close()
def build(self):
    p(self._config['build'])
frames.extend(s.feed(f.read(400)))
frames.extend(s.feed(f.read(800)))
frames.extend(s.feed(f.read(1200)))
frames.extend(s.feed(f.read(1600)))
frames.extend(s.feed(f.read()))
p(frames)
#raw = ''.join(frame.data for frame in frames)
#print s.last_frame
#open(argv[1]+'.frames', 'wb').write(raw)
'''
s = Segmenter(1000)
res = []
res = s.feed(f.read(400))
p(res)
res = s.feed(f.read(800))
p(res)
res = s.feed(f.read(1200))
p(res)
res = s.feed(f.read(2400))
p(res)
res = s.feed(f.read(4800))
p(res)
res = s.feed(f.read(4800))
p(res)
res = s.feed(f.read(4800))
p(res)
res = s.feed(f.read(4800))
p(res)
res = s.feed(f.read(4800))
except KeyError:
    print 'Unable to obtain $SB_PIPE from the environment.'
    exit(-1)

sys.path.append(os.path.join(sb_pipe, 'sb_pipe'))
sys.path.append(os.path.join(sb_pipe, 'sb_pipe', 'pipelines'))
sys.path.append(os.path.join(sb_pipe, 'sb_pipe', 'pipelines', 'create_project'))
sys.path.append(os.path.join(sb_pipe, 'sb_pipe', 'pipelines', 'double_param_scan'))
sys.path.append(os.path.join(sb_pipe, 'sb_pipe', 'pipelines', 'param_estim'))
sys.path.append(os.path.join(sb_pipe, 'sb_pipe', 'pipelines', 'sensitivity'))
sys.path.append(os.path.join(sb_pipe, 'sb_pipe', 'pipelines', 'simulate'))
sys.path.append(os.path.join(sb_pipe, 'sb_pipe', 'pipelines', 'single_param_scan'))
sys.path.append(os.path.join(sb_pipe, 'sb_pipe', 'utils', 'python'))

from pprint import pprint as p
p(sys.path)

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax'
]
        for tag in camera_tags:
            tag = str(tag)
            m = re.match('.*\n.*href="(?P<Url>.*)">(?P<Model>.*)</a>', tag)
            if m:
                listCameraModels.append(CameraModel(m.group("Model"), m.group("Url")).__dict__)
                p(m.group("Model"))
            # end if
        # end for
    # end try
    return listCameraModels


if __name__ == "__main__":
    listCameraBrands = []
    listCameraBrands = GetCameraBrands()
    print "Found %d brands\n" % len((listCameraBrands))
    fdata = file("cameradata.yml", "w")
    # We should have a list of camera brands (with class CameraBrand) at this point.
    cameraDict = {}
    for brand in listCameraBrands:
        print "\nProcessing %s %s" % (brand.name, brand.url)
        time.sleep(1)  # Don't hammer the servers
        cameraDict[brand.name] = {"url": brand.url,
                                  "cameras": GetCameraModels(brand.url)}
    # end for
    # end for
    # yaml.dump(cameraDict, fdata)
    p(cameraDict)
    yaml.safe_dump(cameraDict, fdata)
    sys.exit(main())
first_url = 'http://www.prokerala.com/kids/baby-names/boy/page-' + str(counter) + '.html'
second_url = 'http://www.prokerala.com/kids/baby-names/girl/page-' + str(counter) + '.html'
file1 = urllib.urlopen(first_url)
lines1 = file1.readlines()
file1.close()
file2 = urllib.urlopen(second_url)
lines2 = file2.readlines()
file2.close()
for line1 in lines1:
    m1 = re.search("(nameDetails\">)([A-Z].*[a-z])<", line1)
    if m1:
        guys_names.append(m1.group(2))
for line2 in lines2:
    m2 = re.search("(nameDetails\">)([A-Z].*[a-z])<", line2)
    if m2:
        lady_names.append(m2.group(2))
counter += 1

p(guys_names)
p(lady_names)

for x in range(len(guys_names)):
    List_name.Dolphins(random.choice(guys_names), dolphs, dolphs, random_sex)
for x in range(len(lady_names)):
    List_name.Dolphins(random.choice(lady_names), dolphs, dolphs, random_sex)
def list_tasks():
    with jira_with_app_context() as j:
        assessment_tasks = j.get_assessment_tasks()
        for t in assessment_tasks:
            p(t)
def tasks_by_id():
    with jira_with_app_context() as j:
        p(j.assessment_tasks_by_application_id())
def filter(self, index):
    rest = index % 4
    if rest == 0:
        return 'long_only'
    elif rest == 1:
        return 'short_only'
    elif rest == 2:
        return 'no_entry'
    elif rest == 3:
        return 'long_and_short'


if __name__ == '__main__':
    _stock = Stock(1301, 't', 100)
    entry = MyEntry()
    entry.stock = _stock
    p(entry.check_long_entry(0))
    p(entry.check_long_entry(1))
    print
    p(entry.check_short_entry(0))
    p(entry.check_short_entry(1))
    print
    my_exit = MyExit()
    my_exit.stock = _stock
    trade1 = entry.check_long(0)
    my_exit.check_exit(trade1, 1)
    p(trade1.entry_price)
    p(trade1.exit_price)
    print
    stop = MyStop()
def connect():
    with jira_with_app_context() as j:
        si = j.generic_jira.jira.server_info()
        p(si)
#!/usr/bin/env python
import scipy
import pylab
import csv
import sys

# WTH does this do? I try to import pprint and get an error.
# from pprint import pprint as p

# Reading a csv file... OK, but it's not yet up to OnlineTable standards...
with open("beer.tab") as tsv:
    for line in csv.reader(tsv, dialect="excel-tab"):
        print line[1]

print "Script running under Python:" + sys.version

# Supposedly anything in sys.path can be picked up as a module
# what is sys.path?
# print sys.path works fine
p(sys.path)  # prettier... a bit...

# OK, try out my module...
superlib.sayhi()
def syspath():
    return p(sys.path)
        'route_id': route_cnt
    })
    route_cnt += 1
urlhandle.close()
if route == 0:
    return results
if route > len(results):
    raise InvalidRouteNumber
for res in results:
    if res['route_id'] != route:
        continue
    url = "http://journeyplanner.tfl.gov.uk/user/%s" % (res['url'].replace('&amp;', '&'))
    req = urllib2.Request(url)
    req.add_header('Set-Cookie', cookie)
    r = urllib2.urlopen(req)
    return get_route(r)


if __name__ == '__main__':
    origin_zipcode = "W13 8PH"
    destination_zipcode = "UB 111ET"
    results = get_journeys(origin_zipcode, destination_zipcode)
    # html = open("/tmp/a.html", 'r')
    # results = get_route(html)
    from pprint import pprint as p
    p(results)
def main(config):
    with open(config) as tf:
        bot_config = toml.loads(tf.read())
    p(bot_config)
    mainLoop(bot_config)