def saveQuit(master):
    """Persist the current resource selections to the resume file, then quit.

    Writes the four global file paths as one semicolon-separated line to
    ./Resources/resumeFile.txt and hands off to _quit().

    master: kept for interface compatibility; not used here.
    """
    global themeFile
    global themeFile2
    global excelFile
    global vidFile
    newRes = ";".join((themeFile, themeFile2, excelFile, vidFile))
    # 'with' guarantees the data is flushed and the handle closed BEFORE
    # _quit() tears the application down (the original leaked the handle).
    with open("./Resources/resumeFile.txt", 'w') as resume_file:
        resume_file.write(newRes)
    _quit()
def projSave(input, pop):
    """Save the current resource selections as a named project file.

    Parameters:
        input: project name; the file is written to ./Projects/<input>.txt.
               (Shadows the builtin 'input' — name kept for caller compat.)
        pop:   popup widget to destroy once the project has been saved.
    """
    global themeFile
    global themeFile2
    global excelFile
    global vidFile
    filename = "./Projects/" + input + ".txt"
    # 'with' ensures the project file is closed even if the write fails
    # (the original never closed it).
    with open(filename, "w") as proj_file:
        proj_file.write(";".join((themeFile, themeFile2, excelFile, vidFile)))
    pop.destroy()
def get_file_content():
    # Fetch the medical-record text for every file id in G_file_no_dict and
    # append it to <G_path_prefix>result.txt, transcoding each record from
    # cp936 to utf-8. Python 2 code: print statements, str.decode/encode.
    if len(G_file_no_dict) == 0:
        print convert("索引号列表为空")  # logs: "index-number list is empty"
    conn = get_connection()
    if conn != 0:  # get_connection() apparently returns 0 on failure
        cursor = conn.cursor()
        # encode the path to cp936 so the OS-level open() accepts it
        # (presumably a Chinese-locale Windows host — TODO confirm)
        result_file_name = (G_path_prefix + r"result.txt").decode('utf-8').encode('cp936')
        file = open( result_file_name, "a+" )
        for file_uniq_no in G_file_no_dict.keys():
            sql = "select d.mr_content from jhmr_file_content_TEXT d where d.file_unique_id = '%s'" % file_uniq_no
            #print sql
            try:
                cursor.execute(sql)
                content_list = get_content_from_lob(cursor)
            except cx_Oracle.DatabaseError:
                print convert("获取病历内容失败")  # logs: "failed to fetch record content"
                # NOTE(review): returning here leaves 'file' and 'conn' open
                return 0
            for item in content_list:
                # echo the human-readable name, then write name + content,
                # each terminated with CRLF
                print G_file_no_dict[file_uniq_no]
                file.write(G_file_no_dict[file_uniq_no])
                file.write("\r\n")
                # LOB content arrives cp936-encoded; store it as utf-8
                item = item.decode('cp936').encode('utf-8')
                file.write(item)
                file.write("\r\n")
        close_connection(conn)
        file.close()
def newTheme(input, pop3):
    """Write the current theme build to ./Themes/<input>.txt.

    Entries whose button reports getUsed() == 0 are first removed from the
    global themeBuild; each remaining entry is written as one CSV line
    'key,item0,item1,item2'. The pop3 popup is destroyed when done.

    Parameters:
        input: theme name (shadows the builtin 'input' — kept for callers).
        pop3:  popup widget to close after saving.
    """
    global themeBuild
    global buttonDict
    # prune entries whose button is not in use (iterating buttonDict while
    # deleting from themeBuild is safe — they are distinct dicts)
    for (key, but) in buttonDict.items():
        if but.getUsed() == 0:
            del themeBuild[key]
    filename = "./Themes/" + input + ".txt"
    # 'with' guarantees the theme file is closed (the original leaked it)
    with open(filename, "w") as theme_file:
        for (key, items) in themeBuild.items():
            theme_file.write(",".join((key, items[0], items[1], items[2])) + "\n")
    pop3.destroy()
def export_ramp_color_library(library, library_rules):
    # Export every color ramp of the library to .rmf files (one folder per
    # rule) inside a fresh temp directory, zip the tree in memory, and
    # return it as a downloadable Django HttpResponse.
    # Python 2 code (StringIO module).
    dir_path = tempfile.mkdtemp(suffix='', prefix='tmp-library-')
    for library_rule in library_rules:
        try:
            resource_path = dir_path + "/" + library_rule + "/"
            os.makedirs(resource_path)
            i = 0
            # one file per ramp, suffixed with a running index to avoid
            # name collisions between ramps of the same name
            for color_ramp in library_rules[library_rule]:
                file = open(
                    resource_path + "/color_ramp-" + color_ramp.name + "-" + str(i) + ".rmf", 'w+')
                file.write(
                    build_library_color_ramp(color_ramp.name, color_ramp.definition))
                file.close()
                i = i + 1
        except Exception as e:
            # NOTE(review): bare re-raise — adds nothing over letting the
            # exception propagate, but kept as-is
            raise e
    # build the zip entirely in memory
    buffer = StringIO.StringIO()
    z = zipfile.ZipFile(buffer, "w")
    relroot = dir_path
    # mirror the temp tree into the archive, preserving relative paths
    for root, dirs, files in os.walk(dir_path):
        rel_path = os.path.relpath(root, relroot)
        if rel_path != ".":
            # add the (sub)directory entry itself
            z.write(root, os.path.relpath(root, relroot))
        for file in files:
            filename = os.path.join(root, file)
            if os.path.isfile(filename):
                arcname = os.path.join(os.path.relpath(root, relroot), file)
                z.write(filename, arcname)
    z.close()
    buffer.seek(0)
    response = HttpResponse(content_type='application/zip; charset=utf-8')
    response[
        'Content-Disposition'] = 'attachment; filename=' + library.name + '.zip'
    response.write(buffer.read())
    # clean the temp tree before returning the response
    utils.__delete_temporaries(dir_path)
    return response
def precomputeFromNLTK():
    """ precompute with nltk's corpus as wordbase """
    # Python 2 code (xrange, integer '/'). Builds nltkHashMap:
    #   sorted-letter signature -> set of words that are its anagrams,
    # then dumps the whole map to nltkAnagramsFile.
    language = set()
    print(len(words.words()))
    for word in words.words():
        word = word.lower()
        # signature: the word's letters in ascending order
        sortW = "".join(char for char in sorted(word))
        # keep only entries whose signature starts with a plain letter
        if sortW[0] >= "a" and sortW[0] <= "z":
            # stored as "word:signature"
            word = word + ":" + sortW
            language.add(word)
    print("Loaded %d words from NLTK wordnet" % (len(language)))
    # bucket entries by original word length so anagram candidates are only
    # compared against words of equal length; an entry "word:signature" has
    # length 2*len(word)+1, so len(entry)/2 floors to len(word) in Python 2.
    # NOTE(review): assumes no word longer than 24 letters — TODO confirm,
    # longer words would raise IndexError here.
    buckets = [set() for x in xrange(25)]
    for word in language:
        buckets[len(word) / 2].add(word)
    count = 0
    for word in language:
        if count % 1000 == 0:
            print("Done for %d words" % count)
        count += 1
        sortedW = word.split(":")[1]
        # compute each signature's anagram set only once
        if sortedW not in nltkHashMap:
            nltkHashMap[sortedW] = set()
            for word2 in buckets[len(sortedW)]:
                sortedW2 = word2.split(":")[1]
                if sortedW == sortedW2:
                    nltkHashMap[sortedW].add(word2.split(":")[0])
    # dump the map, one "signature:set-repr" line per entry
    file = open(nltkAnagramsFile, "w")
    file.truncate()
    count = 0
    for anagrams, listOfAnagrams in nltkHashMap.items():
        if count % 1000 == 0:
            print("%d anagram lists written" % count)
            file.flush()
        count += 1
        file.write("%s:%s\n" % (anagrams, listOfAnagrams))
    file.close()
    print("Precomputation with NLTK done")
def append_to_file(path, data):
    """Append *data* plus a trailing newline to the file at *path*.

    The file is opened in append mode, so it is created if missing and
    existing content is preserved.
    """
    with open(path, 'a') as out:
        out.write(data + '\n')
# NOTE(review): this chunk begins INSIDE the log-scanning loop — the
# enclosing 'for line in ...' and the open 'file' handle are defined
# above this excerpt.
    # log lines start with a YYYY-MM-DD date field
    date = line.split(" ")[0].strip()
    if (date == yesterday_date.strip()):
        yesterday_emails.append(line)
    if (date == today_date.strip()):
        # reached today's entries — yesterday's block is complete
        break
file.close()
# write yesterday emails to a file
yesterday_emails_path = tmp_path + '_yesterday_mails'
file = open(yesterday_emails_path, 'w')
for line in yesterday_emails:
    file.write(line + "\n")
file.close()
# now it's time to find spammers
possible_spammers_file_path = tmp_path + '_spammers'
# count per-directory ('cwd') occurrences in yesterday's mail dump and
# sort by frequency, highest first (count is the first field of each line)
cmd = "awk \'$3 ~ /^cwd/{print $3}\' /tmp/_yesterday_mails | sort | uniq -c | sed \"s|^ *||g\"| sort -nr "
spammers = os.popen(cmd).readlines()
filtered_spammers = list()
for spammer in spammers:
    # only home directories are candidate spammers
    if (spammer.find("/home") != -1):
        num = spammer.strip().split(" ")[0]
        # flag directories that sent more than the detection threshold
        if int(num) > spam_detection_point:
            filtered_spammers.append(spammer.strip())
def write(self, str_):
    """Write *str_* through the underlying file, marking this object dirty.

    Overrides the Python 2 builtin ``file.write``: sets ``is_modified``
    before delegating so callers can detect unsaved changes.
    """
    self.is_modified = True
    return file.write(self, str_)
tmp_path = r"/tmp/"
# minimum number of yesterday's mails from one source before it is flagged
spam_detection_point = 50
# shell out to date(1) for the two dates; the results carry a trailing
# newline, hence the .strip() at every comparison below
yesterday_date = os.popen("date -d \'-1 day\' \'+%Y-%m-%d\'").read()
today_date = os.popen("date \'+%Y-%m-%d\'").read()
yesterday_emails = list()
#gather emails of yesterday
# iterate the log lazily instead of readlines() (the log can be large) and
# let 'with' close the handle even if parsing raises
with open(log_path, 'r') as log_file:
    for line in log_file:
        # log lines start with a YYYY-MM-DD date field
        date = line.split(" ")[0].strip()
        if (date == yesterday_date.strip()):
            yesterday_emails.append(line)
        if (date == today_date.strip()):
            # log is chronological: once today's entries start, stop
            break
# write yesterday emails to a file
yesterday_emails_path = tmp_path + '_yesterday_mails'
with open(yesterday_emails_path, 'w') as out_file:
    for line in yesterday_emails:
        # NOTE(review): 'line' keeps its own trailing newline, so this
        # writes a blank separator line after each entry — matches the
        # original behavior; confirm it is intended
        out_file.write(line + "\n")
from __builtin__ import file  # Python 2 builtin; kept for compatibility

# Append a ',user' label column to every row of the dataset and write the
# result next to the input as ..._u.csv, echoing progress to stdout.
dataset_path = '/home/nicholas/Documenti/Dataset_FraudDetection/12cyclic_sameCF.csv'
output_path = '/home/nicholas/Documenti/Dataset_FraudDetection/12cyclic_sameCF_u.csv'

# 'with' closes both files even if reading or writing fails
# (the original leaked them on any error before the explicit close calls)
with open(dataset_path, 'r') as fdataset:
    # NOTE(review): split('\n') yields a trailing '' element for a
    # newline-terminated file, which also gets ',user' appended — matches
    # the original behavior; confirm it is intended
    dataset = fdataset.read().split('\n')

i = 0
with open(output_path, 'w') as out:
    for row in dataset:
        i += 1
        print (row + ',user')
        out.write(row + ',user\n')
# report how many rows were processed
print (i)