def on_milist_activity(activity):
    # Handler for the media-list activity: step the selection through the
    # list with D-pad keys and start playback of the next entry.
    # Returns False when a "system maintenance" page blocks playback.
    if layout_content_find_text(driver, '系统维护中'):
        printf('播放视频失败,退出')
        driver.press_keycode(4)   # KEYCODE_BACK
        time.sleep(1)
        return False
    pre = el_find_by_select(driver)
    if pre == None:
        # favorites (收藏) — nothing selected yet, move down and enter
        printf("Not select found")
        driver.press_keycode(20)  # KEYCODE_DPAD_DOWN
        time.sleep(1)
        driver.press_keycode(66)  # KEYCODE_ENTER
    else:
        driver.press_keycode(22)  # KEYCODE_DPAD_RIGHT
        time.sleep(1)
        post = el_find_by_select(driver)
        if pre == post:
            # last item in this row — try moving down instead
            driver.press_keycode(20)
            time.sleep(1)
            pre = el_find_by_select(driver)
            if pre == post:
                # selection did not move at all: back out
                driver.press_keycode(4)
            else:
                driver.press_keycode(66)
                time.sleep(1)
def _demo_update_status():
    """Post a demo tweet through each configured API backend."""
    for api in APIs:
        client = Twitter(api)
        tweet = client.update_status('Test tweet posted to Twitter using %s' % api.title())
        printf('Posted at: %s' % tweet.created_at)
        printf('----' * 10)
def precision_year(data_list, reward_list, L=628):
    """Print per-year precision of the top-L ranking.

    For the first L entries of `data_list`, count how many award winners
    from `reward_list` fall in each year and divide each count by L.

    :param data_list: ranked items; item[0] is an id
    :param reward_list: (id, year) pairs of award winners
    :param L: size of the top slice to evaluate
    """
    # id -> award year for every winner (first occurrence wins)
    reward_dict = {}
    for item in reward_list:
        if int(item[0]) not in reward_dict:
            reward_dict[int(item[0])] = int(item[1])
    # NOTE: the original also built an `each_year` counter here, but its
    # membership test compared a str key against int keys (never matched,
    # resetting counts to 1) and the result was never used — removed.
    # ids of the first L ranked entries
    topL_set = set()
    for item in data_list[:L]:
        topL_set.add(int(item[0]))
    # year -> number of winners found inside the top-L slice
    year_to_num = {}
    for winner_id in reward_dict:   # avoid shadowing builtin id()
        if winner_id in topL_set:
            year = reward_dict[winner_id]
            year_to_num[year] = year_to_num.get(year, 0) + 1
    for year in year_to_num:
        year_to_num[year] = year_to_num[year] / L
    sort_year_to_num = sorted(year_to_num.items(), key=itemgetter(0))
    printf("all year's precision is:")
    printf(sort_year_to_num)
def jsonWrite(self, filepath: str = None, filename: str = None, data: dict = None) -> None:
    '''
    Write `data` to `<filepath><filename>.json`.

    :param1: filepath, output directory (concatenated directly with the
             filename, so it must end with a path separator)
    :param2: filename, the filename without extension (`.json` is appended)
    :param3: data, a dict serialized as JSON into the target file

    Writes directly to the file path; no specific return value. Must
    override this method when a customized result format is needed.
    '''
    import json
    # FIX: avoid a shared mutable default argument (was `data: dict = {}`).
    if data is None:
        data = {}
    self.__filepath__ = filepath
    self.__filename__ = filename
    if not self.__fileCheck__():
        self.__fileClean__()
        return
    fileLocation = self.__filepath__ + self.__filename__ + ".json"
    try:
        with open(fileLocation, "w") as wf:
            json.dump(data, wf)
        self.__fileClean__()
    except Exception as ex:
        self.__fileClean__()
        printf("*** Error: json data write failed! ***\n")
        # BUG FIX: Exception has no .message attribute in Python 3;
        # format the exception object itself.
        printf("*** Error Message ***\n %s\n" % ex)
        return None
def do_all_rename(self, rename_func):
    # Rename every entry under self.path using rename_func(old_name).
    # NOTE(review): rename_func's result is joined with self.path again
    # here; if rename_func already returns a joined path (as do_ep_rename
    # does), a relative self.path would be duplicated — confirm callers.
    for fname in os.listdir(self.path):
        fullname = os.path.join(self.path, fname)
        newname = rename_func(fname)
        newname = os.path.join(self.path, newname)
        printf(newname)
        os.rename(fullname, newname)
def antutu_scoretest():
    """Poll until the AnTuTu score-bench activity disappears, then succeed."""
    printf('antutu_scoretest')
    while True:
        finished = driver.wait_activity(
            'com.antutu.benchmark.activity.ScoreBenchActivity', 3) == False
        if finished:
            return True
def time_distribution_new(data_list, time_list, limit=0.01, slices=30, top=267):
    # Measure how evenly the top entries of data_list spread across years by
    # comparing the real std-dev of per-year counts against an idealized one.
    max_year = 2015
    min_year = 1995
    # NOTE: the `slices` parameter is immediately overridden by the year span.
    slices = max_year - min_year + 1
    slice_length = (len(time_list) // (slices))
    data_length = len(time_list)
    distribution_dict = {}
    count = 0
    for item in data_list:
        if count <= top:
            # item[3] presumably holds the year bucket — TODO confirm
            if item[3] in distribution_dict:
                distribution_dict[item[3]] += 1
            else:
                distribution_dict[item[3]] = 1
            count += 1
    distribution_list = list(distribution_dict.values())
    printf(distribution_dict)

    def rstd(data):
        # actual standard deviation
        return np.std(data)

    def estd(N, S, z):
        # ideal (expected) standard deviation
        return math.sqrt(z * N / S * (1 - 1 / S) * (1 - z) * N / (N - 1))

    #print(estd(data_length, slices, limit))
    #print(rstd(distribution_list))
    printf("balance: " + str((
        (rstd(distribution_list) / estd(data_length, slices, limit)) - 1)))
def content(self, response):
    """
    Detail page parser: build a PictureItem and follow pagination.
    :param response:
    :return:
    """
    item = PictureItem()
    item['name'] = response.css(".content h5::text").extract_first()
    item['img_url'] = response.css(".content-pic img::attr(src)").extract()
    item['headers'] = {'Referer': response.url, 'User-Agent': UA.ch}
    # derive the storage folder from the URL, trimming any "_N" page suffix
    store_path = response.url[21:-5]
    if store_path.find('_') > 0:
        store_path = store_path[:store_path.find('_')]
    item['store_path'] = store_path
    printf(item['store_path'])
    yield item  # extract pictures, store into folder
    # print(item['ImgUrl'])
    next_url = response.css(
        ".page-ch:last-child::attr(href)").extract_first()
    if next_url:
        # print 'next_url2: ' + next_url
        # next page
        # NOTE(review): this follow-request uses 'User-Agent': UA while the
        # item headers above use UA.ch — confirm the difference is intended.
        yield response.follow(next_url, callback=self.content, headers={
            'Referer': response.url,
            'User-Agent': UA
        })
def _demo_update_status():
    """Post one test tweet per configured API and report its timestamp."""
    for api in APIs:
        message = 'Test tweet posted to Twitter using %s' % api.title()
        posted = Twitter(api).update_status(message)
        printf('Posted at: %s' % posted.created_at)
        printf('----' * 10)
def getCSV(self, filepath: str = None, filename: str = None, separator: str = ",") -> list:
    '''
    Read file type: .csv

    :param1: filepath, directory containing the file (concatenated directly
             with the filename, so it must end with a path separator)
    :param2: filename, the filename without extension (`.csv` is appended)
    :param3: data separator, default is comma `,`

    Returns the raw list parsed from the csv file: one inner list per row,
    or None on read failure. Must override this method when a customized
    result format is needed.
    '''
    import csv
    self.__filepath__ = filepath
    self.__filename__ = filename
    if not self.__fileCheck__():
        self.__fileClean__()
        return
    fileLocation = self.__filepath__ + self.__filename__ + ".csv"
    with open(fileLocation, 'r') as csvfile:
        try:
            reader = csv.reader(csvfile, delimiter=separator)
            rows = [row for row in reader]
        except Exception as ex:
            self.__fileClean__()
            printf("*** Error: CSV data read failed! ***\n")
            # BUG FIX: Exception has no .message attribute in Python 3;
            # format the exception object itself.
            printf("*** Error Message ***\n %s\n" % ex)
            return None
    self.__fileClean__()
    return rows
def requestInit(self) -> None:
    # Lazily fetch self.url and build the BeautifulSoup tree; a repeated
    # call is ignored with a warning.
    if self.__request__ is None \
            or self.__soup__ is None:
        # NOTE(review): verify=False disables TLS certificate checking —
        # acceptable only for trusted/internal endpoints; confirm intent.
        self.__request__ = requests.get(self.url, verify=False)
        self.__soup__ = BeautifulSoup(self.__request__.text, "html.parser")
    else:
        printf('*** Warn: replicated request was dismissed! ***\n')
def make_cscope_files(self):
    # Normalize every recorded filename into srcroot-relative form, keep
    # only files that exist under srcroot or objroot, and write the sorted
    # result to srcroot/cscope.files.
    for fname in list(self.file_map.keys()):
        if '..' in fname:
            fname = normpath(fname)
        if fname[0] == '/':
            # absolute path: keep only if it lives inside srcroot,
            # stripped down to a srcroot-relative path
            if fname[0:len(self.srcroot)] == self.srcroot:
                fname = fname[len(self.srcroot) + 1:]
                self.final_map.setdefault(fname, True)
        else:
            self.final_map.setdefault(fname, True)
    for name in list(self.final_map.keys()):
        if not exists(join(self.srcroot, name)):
            if not exists(join(self.objroot, name)):
                #printf("%s not exists" % name)
                continue
            else:
                # generated file: reference it from objroot instead
                self.file_list.append(join(self.objroot, name))
        else:
            self.file_list.append(name)
    self.file_list.sort()
    self.save_list_to_file(join(self.srcroot, "cscope.files"), self.file_list)
    printf("%s is created in srcroot." % "cscope.files")
def dbDump(self):
    '''Display the expected output on screen: header row then every user.'''
    header = ''.join(map(cformat, FIELDS))
    printf('\n%s' % header)
    for user in self.ses.query(Users).all():
        printf(user)
    self.ses.commit()
def recall_year(data_list, reward_list, L=628):
    """Print per-year recall of the top-L ranking.

    Recall for a year = winners of that year found inside the top-L slice
    divided by the total number of winners of that year in reward_list.

    :param data_list: ranked items; item[0] is an id
    :param reward_list: (id, year) pairs of award winners
    :param L: size of the top slice to evaluate
    """
    reward_dict = {}  # id -> award year
    each_year = {}    # year -> total number of awards that year
    for item in reward_list:
        if int(item[0]) not in reward_dict:
            reward_dict[int(item[0])] = int(item[1])
            # BUG FIX: the original tested `item[1] in each_year.keys()` with
            # a str while the stored keys are ints — the test never matched,
            # so every year's count was reset to 1 and recall was divided by
            # the wrong denominator.
            year = int(item[1])
            each_year[year] = each_year.get(year, 0) + 1
    #print(reward_dict)
    topL_set = set()
    count = 1
    for item in data_list:
        if count <= L:
            topL_set.add(int(item[0]))
            count += 1
    year_to_num = {}  # year -> winners of that year found in the top-L
    for winner_id in reward_dict:  # avoid shadowing builtin id()
        if winner_id in topL_set:
            year = reward_dict[winner_id]
            year_to_num[year] = year_to_num.get(year, 0) + 1
    for year in year_to_num:
        # recall = winners of `year` in top-L / total winners of `year`
        year_to_num[year] = year_to_num[year] / each_year[year]
    sort_year_to_num = sorted(year_to_num.items(), key=itemgetter(0))
    printf("all year's recall is:")
    printf(sort_year_to_num)
def precision(data_list, reward_list, scope=200, slice=10):
    '''
    Compute precision at every `slice`-sized step of the ranking.
    :param data_list: (id, score) pairs to rank
    :param reward_list: ids of the rewarded items
    :param scope: how many top-ranked entries to evaluate
    :param slice: report precision every `slice` entries
    :return:
    '''
    winners = set()
    for x in reward_list:
        winners.add(int(x))
    # label each entry 1/0 by reward membership, keep its score
    labeled = [(1 if item[0] in winners else 0, item[1]) for item in data_list]
    labeled.sort(key=lambda pair: pair[1], reverse=True)
    hits = 0  # renamed from `sum`, which shadowed the builtin
    result_list = []
    for i in range(min(scope, len(labeled))):
        hits += labeled[i][0]
        if (i + 1) % slice == 0:
            result_list.append(hits * 1.0 / (i + 1))
    printf('precision: ' + str(result_list))
def getJSON(self, filepath: str = None, filename: str = None) -> dict:
    '''
    Read a json file into a python dict with the same data structure.

    :param1: filepath, input directory where the json file is (concatenated
             directly with the filename, so it must end with a separator)
    :param2: filename, the filename without extension (`.json` is appended)

    Returns a python dict mirroring the json structure, or None on failure.
    '''
    import json
    self.__filepath__ = filepath
    self.__filename__ = filename
    if not self.__fileCheck__():
        self.__fileClean__()
        return
    fileLocation = self.__filepath__ + self.__filename__ + ".json"
    try:
        with open(fileLocation, "r") as lf:
            data = json.load(lf)
        self.__fileClean__()
        return data
    except Exception as ex:
        self.__fileClean__()
        printf("*** Error: json data read failed! ***\n")
        # BUG FIX: Exception has no .message attribute in Python 3;
        # format the exception object itself.
        printf("*** Error Message ***\n %s\n" % ex)
        return None
def dbDump(self, newest5=False):
    '''Dump Users rows; with newest5=True only the five newest (highest userid).'''
    printf('\n%s' % ''.join(map(cformat, FIELDS)))
    # BUG FIX: the original left the newest5 query dangling
    # (`users = self.ses.query(Users).` — a syntax error) and then
    # unconditionally overwrote `users` with .all().
    if newest5 == True:
        users = self.ses.query(Users).order_by(Users.userid.desc())[:5]
    else:
        users = self.ses.query(Users).all()
    for user in users:
        printf(user)
    self.ses.commit()
def name(self) -> str:
    """Extract the table name from the page's <h2> heading.

    Returns None (after a warning) when the web request has not been
    initialized yet.
    """
    if not self.__checkInit__():
        # FIX: corrected typo in the error message ("reqeust" -> "request").
        printf('*** Error: Web request initialization missed! ***')
        return
    tbl_title = re.search(self.__patterns__["tbl_name"],
                          self.__soup__.find('h2').get_text())
    _name = tbl_title.group(1)
    return _name
def on_player_activity(activity):
    # Handler for the video player activity: count the playback, let the
    # video run, then back out of any detail pages before logging.
    global play_count
    play_count += 1
    turn_screen(driver)
    playing_video_wait_quit(driver, 100)
    while is_detail_activity():
        driver.press_keycode(4)  # KEYCODE_BACK
        time.sleep(1)
    printf("%s:%d" % (time.ctime(), play_count))
def antutu_resulttest():
    """Wait (up to 300 s) for the AnTuTu result screen, dump it, then go back."""
    printf('antutu_resulttest')
    arrived = driver.wait_activity(
        'com.antutu.benchmark.activity.TestResultActivity', 300)
    if not arrived:
        return False
    print_layout_text(driver)
    driver.press_keycode(4)
    return True
def dbDump(cur):
    '''Fetch and pretty-print every row of the adaya table.'''
    cur.execute('SELECT * FROM adaya')
    printf('\n%s' % ''.join(map(cformat, FIELDS)))
    for data in cur.fetchall():
        # print(data)
        row = [data[field] for field in FIELDS]
        printf(''.join(map(tformat, row)))
def get_elfdump(self):
    """Scan the current directory for a large (>511 MiB) ELF dump file.

    Sets self.elfdump / self.dumptype = 'elf' on the first match.
    """
    for fname in os.listdir(os.path.curdir):
        sizeMB = os.path.getsize(fname) >> 20  # bytes -> MiB
        if sizeMB > 511:
            # FIX: close the file(1) pipe deterministically — the original
            # leaked the os.popen handle.
            with os.popen('file ' + fname) as ftype:
                file_info = ftype.read()
            if 'ELF' in file_info:
                printf("ELF dump found :%s" % fname)
                self.elfdump = fname
                self.dumptype = 'elf'
                break
def make_cmd(self):
    """Assemble the crash64 command line for a raw or ELF ramdump."""
    if self.dumptype == 'raw':
        for ramname in self.rawdump:
            # each raw segment is loaded at the offset encoded in its name
            offset = ramname.split('-')[0].split('_')[-1]
            self.crash_cmd += ramname + '@' + offset + ','
        self.crash_cmd = 'crash64 ' + self.crash_cmd + ' vmlinux'
    elif self.dumptype == 'elf':
        self.crash_cmd = 'crash64 ' + self.elfdump + ' vmlinux'
    printf(self.crash_cmd)
def main():
    """Entry point: expect "branch1:cnum1,branch2:num2" on the command line."""
    if len(sys.argv) >= 2:
        wrap = wraper()
        wrap.get_commits()
        wrap.get_diff()
    else:
        printf("Usage: python %s branch1:cnum1,branch2:num2" % sys.argv[0])
        sys.exit("invalid params")
def _demo_user_timeline():
    """Print each backend's user timeline, one status per divider block."""
    for api in APIs:
        printf(api.upper())
        client = Twitter(api)
        for tweet in client.user_timeline():
            printf('----' * 10)
            printf('Status: %s' % tweet.text)
            printf('Posted at: %s' % tweet.created_at)
        printf('----' * 10)
def do_ep_rename(self, fname):
    """Build the new path "<digits>_<fname>" for an episode file.

    For .mp3 files only digits from the stem are used; otherwise digits
    from the whole filename.
    """
    if fname.split(".")[-1] == 'mp3':
        digits = filter(str.isdigit, fname.split(".")[0])
    else:
        digits = filter(str.isdigit, fname)
    # BUG FIX: in Python 3 filter() returns an iterator, so the original
    # `s + "_" + fname` raised TypeError; join the characters into a str.
    s = ''.join(digits)
    printf(s)
    newname = os.path.join(self.path, s + "_" + fname)
    return newname
def main():
    # printf is distutils.log.warn, used as a simple message printer.
    from distutils.log import warn as printf
    dbtype = 'sqlite'  # 'mysql' 'sqlite'
    printf('*** Connect to %r database ***' % DBName)
    try:
        orm = SQLAlchemyTest(DSNs[dbtype])
        orm.insert()
        orm.update()
    except RuntimeError:
        # SQLAlchemyTest raises RuntimeError for unsupported backends
        printf('\nError: %r not supported, exit' % dbtype)
def get_rawdump_list(self):
    """Collect ap_sdram*.lst raw ramdump segments from the current directory."""
    for fname in os.listdir(os.path.curdir):
        sizeMB = os.path.getsize(fname) >> 20
        if fname[:8] == 'ap_sdram' and fname[-4:] == '.lst':
            if sizeMB < 512:
                # undersized segments are usually truncated dumps
                printf("ramdump file %s is probaly broken" % fname)
            self.rawdump.append(fname)
            self.dumptype = 'raw'
    for ramname in self.rawdump:
        printf(ramname)
def dbDump(self, newest5=False):
    """Print all Users rows, or only the five newest when newest5 is set."""
    printf("\n%s" % ''.join(map(cformat, FIELDS)))
    if newest5:
        # I don't see any need of offset here.
        users = self.ses.query(Users).order_by(Users.userid.desc())[:5]
    else:
        users = self.ses.query(Users).all()
    for user in users:
        printf(user)
    self.ses.commit()
def process_request(self, request, spider): """ 这里可以进行设置 headers 和切换 Proxy 等处理 """ # proxy = '127.0.0.1:8888' # request.meta['proxy'] = "http://" + proxy # if request.meta and 'headers' in request.meta: # request.headers = Headers(request.meta['headers']) printf('【REQUEST】: %s, %s', request.url, request.headers) return None
def dbDump(self, newest5=False, pyslice=False):
    '''Dump Users rows; newest5 limits output to the five highest userids,
    pyslice chooses Python slicing over a SQL LIMIT clause.'''
    printf('\n%s' % ''.join(map(cformat, FIELDS)))
    if not newest5:
        users = self.ses.query(Users).all()
    elif pyslice:
        # slice in Python after fetching everything
        users = self.ses.query(Users).order_by(Users.userid.desc()).all()[:5]
    else:
        # let the database apply the limit
        users = self.ses.query(Users).order_by(Users.userid.desc()).limit(5)
    for user in users:
        printf(user)
    self.ses.commit()
def dbDump(self, newest5=False):
    '''Dump Users rows; newest5 restricts output to the five newest userids.'''
    printf('\n%s' % ' '.join(map(cformat, FIELDS)))
    if newest5:
        #users = self.ses.query(Users).order_by(Users.userid.desc()).limit(5).all()
        #users = self.ses.query(Users).order_by(Users.userid.desc()).limit(5).offset(0).all()
        users = self.ses.query(Users).order_by(Users.userid.desc())[0:5]
    else:
        users = self.ses.query(Users).all()
    for user in users:
        printf(user)
    self.ses.commit()
def dump_to_files(self):
    """Write the sorted list of opened C/asm/header sources to cscope.files."""
    # FIX: removed `flist`, an unused sorted copy of the key list (dead code).
    source_list = [name for name in self.opened_files.keys()
                   if name[-2:] in [".c", ".S", ".h"]]
    source_list.sort()
    self.save_list_to_file(join(self.srcroot, "cscope.files"), source_list)
    printf("cscope.files generated in srcroot")
def dump_to_files(self):
    """Collect opened .c/.S/.h files and save them, sorted, to cscope.files."""
    source_list = []
    # FIX: dropped the `flist` copy the original built and sorted but
    # never used (dead code).
    for name in self.opened_files.keys():
        if name[-2:] in ['.c', '.S', '.h']:
            source_list.append(name)
    source_list.sort()
    self.save_list_to_file(join(self.srcroot, "cscope.files"), source_list)
    printf("cscope.files generated in srcroot")
def antutu_start():
    # Launch the AnTuTu benchmark app and wait for its main activity.
    # Returns True when the main activity shows up, False on timeout,
    # and None (implicitly) when the app is not installed.
    printf('antutu_start')
    if driver.is_app_installed('com.antutu.ABenchMark'):
        driver.start_activity('com.antutu.ABenchMark', '.ABenchMarkStart')
        if driver.wait_activity('com.antutu.benchmark.activity.MainActivity', 10):
            return True
        else:
            return False
    else:
        #TODO:install aututu app
        printf('Not install aututu App')
def recall(data_list, reward_list, L=628):
    # Overall recall: fraction of rewarded ids found in the top-L entries
    # of data_list.
    count = 1
    total_num = 0
    reward_set = set(reward_list)
    # NOTE(review): unlike precision(), ids are not cast to int here — this
    # assumes data_list ids and reward_list entries share the same type.
    for item in data_list:
        if count <= L:
            if item[0] in reward_set:
                total_num += 1
            count += 1
        else:
            break
    recall = total_num / len(reward_set)
    printf('recall: ' + str(recall))
def select_by_text(driver, text):
    """Click the UI element whose text matches exactly; True on success."""
    try:
        selector = 'new UiSelector().text("%s")' % text
        printf(selector)
        element = driver.find_element_by_android_uiautomator(selector)
        if not element:
            return False
        element.click()
        time.sleep(1)
        return True
    except NoSuchElementException:
        return False
def main():
    # Locate a ramdump (raw segments preferred, ELF as fallback), build the
    # crash64 command line and open the dump.
    w = crash_wrapper()
    w.check_vmlinux()
    w.get_rawdump_list()
    if w.dumptype == '':
        w.get_elfdump()
    if w.dumptype == '':
        printf("No dump found!!!!\n")
        sys.exit()
    w.make_cmd()
    w.open_dump()
def __init__(self, dsn):
    """Connect to `dsn`, creating the database on first use.

    Raises RuntimeError when the database driver cannot be imported.
    """
    try:
        printf(dsn)
        cxn = connectionForURI(dsn)
        print(cxn)
    except ImportError:
        # BUG FIX: the original `return RuntimeError` silently returned the
        # exception class from __init__ instead of raising it.
        raise RuntimeError
    try:
        # probe the connection; on failure, create the database first
        cxn.releaseConnection(cxn.getConnection())
    except dberrors.OperationalError:
        # BUG FIX: the exception name was misspelled ("OperaionalError"),
        # which would raise AttributeError at exception-match time.
        cxn = connectionForURI(dirname(dsn))
        # NOTE(review): "CREATE TABLE %s" with a database name looks like it
        # should be "CREATE DATABASE %s" — confirm against the backend.
        cxn.query("CREATE TABLE %s" % DBNAME)
        cxn = connectionForURI(dsn)
    self.cxn = sqlhub.processConnection = cxn
def _demo_ver_creds():
    # Verify credentials on each backend and show the account's latest status.
    for api in APIs:
        t = Twitter(api)
        res = t.verify_credentials()
        status = ResultsWrapper(res.status)
        printf('@%s' % res.screen_name)
        printf('Status: %s' % status.text)
        printf('Posted at: %s' % status.created_at)
        printf('----' * 10)
def check_dstroot(self):
    # Validate/prepare the destination tree. Without a dstroot we only
    # generate cscope.files; otherwise (re)create the directory, asking
    # permission before removing an existing one.
    if self.dstroot == None:
        self.cscope_files_only = True
    else:
        if exists(self.dstroot):
            printf("%s exited!" % self.dstroot)
            # raw_input() only exists on Python 2
            if sys.version[0] < "3":
                rm = raw_input("Enter Y if you agree to remove:")
            else:
                rm = input("Enter Y if you agree to remove:")
            if rm in ["y", "Y"]:
                shutil.rmtree(self.dstroot)
            else:
                printf("Exit because not agree to remove " + self.dstroot)
                sys.exit()
        os.makedirs(self.dstroot, mode=0o777)
def procinfo(str):
    # Print a labelled snapshot of process identity info plus the last few
    # /proc entries. `str` (shadows the builtin) is the label prefix.
    if "sched_getcpu" not in show_available_c_functions():
        cpu_idx = -1  # sched_getcpu not available on this platform
    else:
        cpu_idx = sched_getcpu()
    pid = os.getpid()
    ppid = os.getppid()
    uid = os.getuid()
    gid = os.getgid()
    euid = os.geteuid()
    egid = os.getegid()
    hostname = gethostname()
    procs = os.listdir("/proc")
    printf("""%s: cpu: %d pid: %d ppid: %d uid %d gid %d euid %d egid %d hostname: %s procs: %s""" % (str, cpu_idx, pid, ppid, uid, gid, euid, egid, hostname, ", ".join(procs[-4:])))
def load_and_ajust_lines(self): f = open(self.fname) ## load lines and remove blank lines and page number lines for l in f.readlines(): #printf("len %d, %s" % (len(l),l)) if len(l) > 4: self.lines.append(l) ## align title printf("total line %d " % len(self.lines)) i = 0; while i < len(self.lines) - 1: #printf("i:%d :%s" % (i, self.lines[i])) if " - " in self.lines[i]: if "(" not in self.lines[i]: subject = self.lines[i][:-2] + self.lines[i+1] self.full.append("\n### ") self.full.append(subject) i += 1 else: self.full.append("\n### ") self.full.append(self.lines[i]) i += 1 elif ':' in self.lines[i]: if ':' in self.lines[i+1]: self.full.append(self.lines[i]) i += 1 else: j = i+1 sentence = self.lines[i] while ':' not in self.lines[j]: if " - " in self.lines[j]: break sentence = sentence[:-2] + self.lines[j] j += 1 i += 1 self.full.append(sentence) else: i += 1 f.close()
def save_to_each_file(self):
    # Split self.full at "###" markers and write each section to its own
    # text file under self.eppath, named "<last4>_<title>.txt".
    i = 0
    while i < len(self.full):
        if i < len(self.full) and "###" in self.full[i]:
            # the line after the marker is the section title
            line = self.full[i+1]
            line = line.strip()
            title = line[:-1].replace(' ', '_')
            title = title.replace('(', '')
            title = title.replace('__', '')
            # prefix with the last 4 chars (episode number) for sorting
            title = title[-4:] + '_' + title + '.txt'
            fname = os.path.join(self.eppath, title)
            printf("Create: %s" % fname)
            f = open(fname, 'w')
            j = i + 2
            while j < len(self.full) and "###" not in self.full[j]:
                f.write("%d\t %s" % (j,self.full[j]))
                j += 1
            else:
                # while-else: runs when the section ends; close the file
                f.close()
            i = j
        else:
            i += 1
def extract_opened_files(p):
    """Parse the strace log into p.opened_files: successful opens only,
    with '..' paths normalized and leading './' stripped."""
    try:
        # FIX: close the log file deterministically (original leaked it).
        with open(p.strace_log, "r") as f:
            for line in f.readlines():
                if " -1 " not in line:  # skip failed syscalls
                    name = extract_fname(line, p.srcroot)
                    p.file_map.setdefault(name, True)
        for name in p.file_map.keys():
            if name.find("..") != -1:
                p.opened_files.setdefault(normpath(name), True)
            else:
                p.opened_files.setdefault(name, True)
        # BUG FIX: the original deleted and inserted entries while iterating
        # the live keys() view, which raises RuntimeError on Python 3;
        # iterate over a snapshot instead.
        for name in list(p.opened_files.keys()):
            if name[:2] == "./":
                del p.opened_files[name]
                p.opened_files.setdefault(name[2:], True)
    except IOError as e:
        printf(e)
        sys.exit()
def usage():
    """Print command-line help for kernel_pruner and terminate the process."""
    help_info = [
        "Usage:",
        " steps::",
        " 1, setup cross compiling environmet for kernel",
        " 2, source make_tags.sh your_proj_defconfig",
        " to generate a clean kernel tree:",
        " 3, python somewhere/kernel_pruner.py -f strace_log.txt -s origpath/kernel -d dstpath/k",
        "",
        " Options:",
        " -f strace_log -- output file of strace",
        " -s srcdir -- original kernel path,",
        " -d dstdir -- pruned kernel path,",
        " -h -- help info,",
        " -l -- create symbol link for all files,",
        "",
        " README.txt for more info",
    ]
    for line in help_info:
        printf(line)
    sys.exit()
def main():
    # Parse CLI options, validate inputs, then extract the opened-file list
    # and (optionally) build the pruned source tree.
    p = wraper()
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hf:s:d:l")
        for opt, arg in opts:
            if opt == "-h":
                usage()
            elif opt == "-f":
                p.strace_log = arg
            elif opt == "-s":
                p.srcroot = abspath(arg)
            elif opt == "-d":
                p.dstroot = abspath(arg)
            elif opt == "-l":
                p.link = True  # symlink files instead of copying
            else:
                printf("Ignore invalid opt:%s\n" % opt)
        printf("srcroot: %s" % p.srcroot)
        printf("dstroot: %s" % p.dstroot)
        printf("strace file: %s" % p.strace_log)
    except getopt.GetoptError:
        usage()
    p.check_dstroot()
    if p.strace_log == None:
        usage()
    extract_opened_files(p)
    p.dump_to_files()
    if not p.cscope_files_only:
        build_clean_tree(p)
def get_commits(self):
    # For each of the two branches, capture the last cnum one-line commits,
    # parse "<graph> <hash> <subject>" lines into plist/phash, and save
    # the lists/hashes to debug files.
    for i in [0, 1]:
        cmd = "git log %s --graph --pretty=format:'%%h %%s' --abbrev-commit -%s > %s" % (
            self.bname[i],
            self.cnum[i],
            self.fname[i],
        )
        printf("cmd: %s" % cmd)
        msg = os.popen(cmd)
        printf(msg)
        f = open(self.fname[i], "r")
        printf("================== %s =================" % self.fname[i])
        for line in f.readlines():
            # split "* <hash> <subject>": field 1 is the id, 2 the subject
            cid = line.split(" ", 2)[1]  # commit id
            csbj = line.split(" ", 2)[2]  # commit subject
            self.plist[i].append("%s %s" % (cid, csbj))
            self.phash[i].setdefault(csbj, cid)
    self.save_list_to_file(self.bname[0] + "_list_0.txt", self.plist[0])
    self.save_list_to_file(self.bname[1] + "_list_1.txt", self.plist[1])
    self.save_list_to_file(self.bname[0] + "_hash_0.txt", list(self.phash[0].keys()))
    self.save_list_to_file(self.bname[1] + "_hash_1.txt", list(self.phash[1].keys()))
def main(dbo):
    # Full demo cycle against a DB adapter object: insert, move users
    # between groups, delete a random group, then drop the table.
    printf('\n*** Inserting names into table')
    dbo.insert()
    dbo.dbDump()
    printf('\n*** Randomly moving folks')
    fr, to, num = dbo.update()
    printf('\t(%d users moved) from (%d) to (%d)' % (num, fr, to))
    dbo.dbDump()
    printf('\n*** Randomly choosing group')
    rm, num = dbo.delete()
    printf('\t(group #%d; %d users removed)' % (rm, num))
    dbo.dbDump()
    printf('\n*** Dropping users table')
    dbo.drop()
    printf('\n*** Close cxns')
def _demo_search():
    # Search each backend (unauthenticated) for 'twython3k' and print hits.
    for api in APIs:
        printf(api.upper())
        t = Twitter(api, auth=False)
        tweets = t.search('twython3k')
        for tweet in tweets:
            printf('----' * 10)
            printf('@%s' % tweet.from_user)
            printf('Status: %s' % tweet.text)
            printf('Posted at: %s' % tweet.created_at)
        printf('----' * 10)
#!/usr/bin/env python

import os
from distutils.log import warn as printf
import re

# Show who is logged on, splitting each `who` output line on runs of
# whitespace (two or more spaces, or a tab).
with os.popen('who', 'r') as f:
    for eachLine in f:
        # FIX: raw string for the regex — '\s' in a plain string literal is
        # an invalid escape (DeprecationWarning, future SyntaxError).
        printf(re.split(r'\s\s+|\t', eachLine.rstrip()))
from distutils.log import warn as printf
try:
    from procszoo.c_functions import *
except ImportError:
    # fall back to the repo checkout one directory up
    # NOTE(review): os/sys are used here before any visible import — they
    # are presumably re-exported by procszoo; confirm.
    this_file_absdir = os.path.dirname(os.path.abspath(__file__))
    procszoo_mod_dir = os.path.abspath("%s/.." % this_file_absdir)
    sys.path.append(procszoo_mod_dir)
    from procszoo.c_functions import *

# Exercise to_unicode/to_bytes on a text string, a byte string and a
# control character under both Python 3 and Python 2 string models.
if sys.version_info >= (3, 0):
    unicode_str = "Hello"
    bytes_str = b"Hello"
    char = chr(0x006)
    for s in unicode_str, bytes_str, char:
        printf(type(to_unicode(s)))
    for s in unicode_str, bytes_str, char:
        printf(type(to_bytes(s)))
else:
    unicode_str = "Hello".decode('utf-8')
    bytes_str = "Hello"
    char = chr(0x006)
    for s in unicode_str, bytes_str, char:
        printf(type(to_unicode(s)))
    for s in unicode_str, bytes_str, char:
        printf(type(to_bytes(s)))
def main():
    # Interactive SQLAlchemy demo: pick a backend, then run the full
    # create / insert / update / delete / drop cycle with dumps in between.
    printf('*** Connect to %r database' % DBNAME)
    db = setup()
    if db not in DSNs:
        printf('\nERROR: %r not supported, exit' % db)
        return
    try:
        orm = SQLAlchemyTest(DSNs[db])
    except RuntimeError:
        printf('\nERROR: %r not supported, exit' % db)
        return
    printf('\n*** Create users table (drop old one if appl.)')
    orm.drop(checkfirst=True)
    orm.create()
    printf('\n*** Insert names into table')
    orm.insert()
    orm.dbDump()
    printf('\n*** Move users to a random group')
    fr, to, num = orm.update()
    printf('\t(%d users moved) from (%d) to (%d)' % (num, fr, to))
    orm.dbDump()
    printf('\n*** Randomly delete group')
    rm, num = orm.delete()
    printf('\t(group #%d; %d users removed)' % (rm, num))
    orm.dbDump()
    printf('\n*** Drop users table')
    orm.drop()
    printf('\n*** Close cxns')
    orm.finish()
def dbDump(self):
    '''Print every row of the users table via the raw table connection.'''
    printf('\n%s' % ''.join(map(cformat, FIELDS)))
    result = self.users.select().execute()
    for user in result.fetchall():
        fields = (user.login, user.userid, user.projid)
        printf(''.join(map(tformat, fields)))
def dbDump(self):
    '''Show all Users rows on screen, one per line, after a header row.'''
    printf('\n%s' % ''.join(map(cformat, FIELDS)))
    all_users = self.ses.query(Users).all()
    for row in all_users:
        printf(row)
    self.ses.commit()
def call_iquery_train_ticket(location_from_entity, location_to_entity, time_entity):
    # Shell out to the `iquery` CLI for a train-ticket lookup. Note that
    # subprocess.call returns the exit status (which is what gets printed);
    # iquery itself writes the results to stdout.
    printf(subprocess.call(["iquery", location_from_entity, location_to_entity, time_entity]))
def main():
    """Entry point: treat all CLI arguments as one query string."""
    args = sys.argv[1:]
    query = " ".join(args)
    printf(query)
    printf("*" * 8)  # separator
    pattern_train_ticket(query)
#!/usr/bin/env python

import csv
from distutils.log import warn as printf

# (chapter, title, modules) records to round-trip through a CSV file.
DATA = (
    (9, 'Web Clients and Servers', 'base64, urllib'),
    (10, 'Web Programming: CGI & WSGI', 'cgi, time, wsgiref'),
    (13, 'Web Services', 'urllib, twython'),
)

printf('*** WRITING CSV DATA')
# FIX: newline='' is required by the csv module on Python 3 to avoid
# doubled row separators on platforms with \r\n line endings; `with`
# guarantees the files are closed.
with open('bookdata.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    for record in DATA:
        writer.writerow(record)

printf('*** REVIEW OF SAVED DATA')
with open('bookdata.csv', 'r', newline='') as f:
    reader = csv.reader(f)
    for chap, title, modpkgs in reader:
        printf('Chapter %s: %r (featuring %s)' % (
            chap, title, modpkgs))