def main(mp_rage_images_folder, pet_images_folder, segmented_regions_folder, output_folder):
    mp_rage_images = FileUtils.get_files_name_in(mp_rage_images_folder)
    for mp_rage_image in mp_rage_images:
        try:
            patient_id = FileUtils.extract_patient_id_from(mp_rage_image)
            pet_image = FileUtils.get_file_of_patient_with_id_from_folder(
                patient_id, pet_images_folder)
            subject_folder = os.path.join(output_folder, patient_id)
            # Register the MP-RAGE to the PET space.
            CoRegistrationProcessFactory.create_co_registration_process_with(
                mp_rage_image, pet_image, subject_folder).run()
            segmented_regions_image = FileUtils.get_file_of_patient_with_id_from_folder(
                patient_id, segmented_regions_folder)
            transformation_matrix_path = \
                CoRegistrationProcessFactory.get_transformation_matrix_file_name_from(
                    subject_folder, mp_rage_image)
            # Register the mask to the subject space using the transformation matrix.
            CoRegistrationProcessFactory \
                .create_co_registration_process_with(
                    segmented_regions_image, pet_image, subject_folder,
                    transformation_matrix_path).run()
        except IndexError:
            print("Unable to read file for patient with id {}".format(patient_id))
def sync(self, gitcmd=None, logger=None, quickcopy=False):
    if not quickcopy:
        ret = self._sync(gitcmd=gitcmd, logger=logger)
    else:
        # Quick copy: wipe the destination and copy everything over.
        ret = 0
        self._timestamp = _timestamp(self.src)
        FileUtils.rmtree(self.dest, ignore_list=self.sccsp.get_patterns())
        FileUtils.copy_files(self.src, self.dest)
        if gitcmd:
            ret = gitcmd.add('--all', self.dest)
    return ret
def __init__(self, host, apikey, spid):
    super(BasicCrawler, self).__init__()
    self.host = host
    self.apikey = apikey
    self.spid = spid
    # Be careful with the table names.
    self.dbSaver = NovelSaver(BasicCrawler.getDefaultDBConf(),
                              'ien_book', 'ien_chapter')
    self.lock = thread.allocate_lock()
    # Create the book image directory if it doesn't exist.
    cwd = os.getcwd()
    self.img_dir = cwd + os.sep + "images" + os.sep + TimeUtils.getDay()
    self.server_img_dir = 'uploads/images/' + TimeUtils.getDay()
    FileUtils.mkdir_p(self.img_dir)
def sync(self, logger=None, quickcopy=False, symlinks=True, scmtool=None):
    if not quickcopy:
        ret = self._sync(logger=logger, symlinks=symlinks, scmtool=scmtool)
    else:
        # Quick copy: wipe the destination and copy everything over.
        self._timestamp = FileUtils.last_modified(self.src)
        FileUtils.rmtree(self.dest, ignore_list=self.sccsp.get_patterns(),
                         scmtool=scmtool)
        ret = FileUtils.copy_files(self.src, self.dest, symlinks=symlinks,
                                   scmtool=scmtool)
    return ret
class PyChorus(object):
    def __init__(self, path=None, output_path=None, debug=False):
        self.file = FileUtils(path)
        self.output_path = output_path
        self.debug = debug
        rate, data = self.file.get_file_data()
        self.song = Song(samples=data, sample_rate=rate, debug=self.debug)
        print "song is {}".format(str(self.song))

    def find_chorus(self):
        return self.song.find_chorus()

    def write_chorus(self):
        """Write the calculated chorus to self.output_path.

        :return: True if the chorus was able to be written, False otherwise
        """
        chorus_start, chorus_stop = self.find_chorus()
        print "calculated chorus was {start} to {stop}".format(
            start=chorus_start, stop=chorus_stop)
        if chorus_start and chorus_stop:
            if self.debug:
                print "chorus timing was {start} to {stop}".format(
                    start=chorus_start.index, stop=chorus_stop.index)
            return True
        else:
            return False
def chrBackup():
    '''Copy the 'Bookmarks' file, renamed with the date appended, to the
    directory specified in the config.
    '''
    fileUtils = FileUtils()
    # Get the Chrome path of the Bookmarks file.
    chr_fullPath = chr_path.getChromeJSON(chr_path.getChromePath())
    chr_file = os.path.split(chr_fullPath)[1]
    # Append the date.
    fileWithDate = DA.date_append_filename(chr_file)
    # Return the path of the copied file; an error is raised if it already exists.
    return fileUtils.copy_to_chrJsonBackupsDir(chr_fullPath, fileWithDate)
def test_get_file_name_available(self):
    file_name = FileUtils.get_available_file_name(
        FileUtilsTests.FILE_AVAILABLE.get("name"),
        FileUtilsTests.FILE_AVAILABLE.get("extension"))
    self.assertEqual(
        file_name,
        f"{FileUtilsTests.FILE_AVAILABLE.get('name')}.{FileUtilsTests.FILE_AVAILABLE.get('extension')}"
    )
def test_get_file_name_exists(self):
    file_name = FileUtils.get_available_file_name(
        FileUtilsTests.FILE_EXISTS.get("name"),
        FileUtilsTests.FILE_EXISTS.get("extension"))
    self.assertEqual(
        file_name,
        f"{FileUtilsTests.FILE_EXISTS.get('name')} (1).{FileUtilsTests.FILE_EXISTS.get('extension')}"
    )
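# The two tests above pin down the naming contract of
# FileUtils.get_available_file_name: a free name comes back as "name.ext",
# a taken one as "name (1).ext". A minimal sketch of an implementation that
# satisfies that contract, assuming the availability check is a plain
# os.path.exists probe (the real FileUtils may differ):
import os

def get_available_file_name_sketch(name, extension):
    """Return 'name.ext', or 'name (N).ext' for the first free N."""
    candidate = "{}.{}".format(name, extension)
    counter = 1
    while os.path.exists(candidate):
        candidate = "{} ({}).{}".format(name, counter, extension)
        counter += 1
    return candidate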
def __init__(self):
    self.global_count = {}
    self.document_length = {}
    self.document_count = {}
    self.word_counts = {}
    self.files = FileUtils().get_files_from_path(sys.argv[1])
    self.stop_words = map(
        lambda x: unicode(x),
        codecs.open('stop_words.txt', 'r', 'utf-8').read().split())
def main(subjects_folder_path, masks_folder_path, template_path, output_folder):
    subjects = FileUtils.get_files_name_in(subjects_folder_path)
    masks = FileUtils.get_files_name_in(masks_folder_path)
    for subject in subjects:
        patient_id = FileUtils.extract_patient_id_from(subject)
        subject_folder = os.path.join(output_folder, patient_id)
        # Register the template to the subject space.
        CoRegistrationProcessFactory.create_co_registration_process_with(
            template_path, subject, subject_folder).run()
        for mask in masks:
            transformation_matrix_path = \
                CoRegistrationProcessFactory.get_transformation_matrix_file_name_from(
                    subject_folder, template_path)
            # Register the mask to the subject space using the transformation matrix.
            CoRegistrationProcessFactory \
                .create_co_registration_process_with(
                    mask, subject, subject_folder,
                    transformation_matrix_path).run()
def __init__(self):
    self.files = FileUtils().get_files_from_path(sys.argv[1])
    self.file_count = len(self.files)
    # Standard Okapi BM25 constants.
    self.k1 = 1.2
    self.b = 0.75
    self.max_score = 0
    self.min_score = 100
    self.avg_doc_length = 0
    self.idf = {}
    self.word_count_calculator = WordCount()
    self.bm_25_scores = {}
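# The constructor above carries the standard Okapi BM25 constants (k1=1.2,
# b=0.75) plus idf and avg_doc_length fields. For reference, a minimal sketch
# of the per-term BM25 score those fields feed into; this helper is an
# illustration, not the class's actual API:
def bm25_term_score(tf, doc_length, avg_doc_length, idf, k1=1.2, b=0.75):
    """Okapi BM25 contribution of a single query term to one document."""
    length_norm = k1 * (1 - b + b * doc_length / avg_doc_length)
    return idf * tf * (k1 + 1) / (tf + length_norm)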
def post(self):
    response = {"summary": "Training data created!"}
    data = self.request.body
    logging.info("---------------------------")
    logging.info(data)
    FileUtils().create_file(data=data)
    logging.info("---------------------------")
    # response = Prediction().get_abstract_data(json_data=data)
    self.response.headers['Content-Type'] = 'application/json'
    self.response.out.write(json.dumps(response))
def post(self):
    logging.info("---------------------------")
    logging.info("Class ProcessDataSummary: post")
    # Validate the request.
    redirect_url = authorise_user(self.request.uri)
    if redirect_url:
        logging.info("Redirecting")
        self.redirect(redirect_url)
        return
    # Process the data.
    data = self.request.body
    logging.info(">>>>> {}".format(data))
    file_name = FileUtils().create_file(data=data)
    ProcessData().process_data(data=data)
    logging.info("---------------------------")
    self.response.headers['Content-Type'] = 'application/json'
    self.response.out.write(json.dumps({"file_name": file_name}))
def load_and_process_samples(self, pathFilesF, pathFilesM, pathSave, setId):
    fileUtils = FileUtils(pathSave)
    if not self.loadCSV:
        samplesInClasses = fileUtils.load_object(
            "samples_in_classes_" + str(setId), "dict")
        samplesInClassesMA = fileUtils.load_object(
            "samples_in_classes_MA_" + str(setId), "dict")
    else:
        samplesInClasses = self._read_csv(pathFilesF, pathFilesM)
        fileUtils.save_object(samplesInClasses,
                              "samples_in_classes_" + str(setId), "dict")
        # Transform into meta-attributes.
        samplesInClassesMA = self._extract_meta_attributes(samplesInClasses)
        fileUtils.save_object(samplesInClassesMA,
                              "samples_in_classes_MA_" + str(setId), "dict")
    return samplesInClasses, samplesInClassesMA
def download_book_image(self, url):
    # Debug override: uncomment to force a fixed test image.
    # url = 'http://www.tcss88.com/imagedata/2017/09/25/59c8c5a10d5b0015591035.jpg'
    if url:
        # Generate the new file name.
        t = str(time.time())
        file_name = FileHash.md5val(t) + ".jpg"
        print("generate image file: %s" % file_name)
        file_path = self.img_dir + os.sep + file_name
        print("file path is: %s" % file_path)
        urllib.urlretrieve(url, file_path)
        # Now save it to the db.
        size = FileUtils.getFileSize(file_path)
        md5v = FileHash.GetFileMd5(file_path)
        sha1v = FileHash.GetFileSha1(file_path)
        server_file_path = self.server_img_dir + os.sep + file_name
        return self.dbSaver.saveImage(file_name, server_file_path, size,
                                      md5v, sha1v)
    else:
        return 0
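# FileHash.GetFileMd5 / GetFileSha1 are not shown here. A plausible sketch,
# assuming they stream the file through hashlib in fixed-size chunks to bound
# memory use (function name and chunk size are guesses, not the real helper):
import hashlib

def get_file_md5_sketch(file_path, chunk_size=8192):
    """Hex MD5 digest of a file, read in fixed-size chunks."""
    digest = hashlib.md5()
    with open(file_path, 'rb') as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()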
def ExampleGen(data_path, num_epochs=None):
    """Generates tf.Examples from JSON data files.

    Each JSON file holds an "article" and a "summary" field; both are packed
    into a tf.Example proto containing the tokenized article text and summary.

    Args:
        data_path: path to the data files (currently unused; the file list
            comes from FileUtils().get_files()).
        num_epochs: number of times to go through the data. None means infinite.

    Yields:
        Deserialized tf.Example.
    """
    epoch = 0
    while True:
        if num_epochs is not None and epoch >= num_epochs:
            break
        filelist = FileUtils().get_files()
        assert filelist, 'Empty filelist.'
        # random.shuffle(filelist)
        for f in filelist:
            with open(f) as json_file:
                tf_example = example_pb2.Example()
                json_text = json.load(json_file)
                article = str(json_text["article"])
                summary = str(json_text["summary"])
                tf_example.features.feature['article'].bytes_list.value.extend(
                    [article])
                tf_example.features.feature['abstract'].bytes_list.value.extend(
                    [summary])
                yield tf_example
        epoch += 1
def tagging(self, documents, savePath, language):
    nDocs = len(documents)
    documentsProcessed = []
    unigram_tagger = []
    from file_utils import FileUtils
    file_utils = FileUtils(savePath)
    try:
        # Reuse a previously trained tagger if one was saved.
        unigram_tagger = file_utils.load_object('tagger_' + language, 'tagger')
    except Exception:
        # Otherwise train a unigram tagger on a lower-cased corpus.
        if language == "pt":
            train_set = mac_morpho.tagged_sents()
        elif language == "en":
            train_set = brown.tagged_sents(tagset='universal')
        nSents = len(train_set)
        train_set_lower = []
        for iSent in range(0, nSents):
            nWords = len(train_set[iSent])
            words = []
            for iWord in range(0, nWords):
                words.append(
                    (self.text_lower_one([train_set[iSent][iWord][0]])[0],
                     train_set[iSent][iWord][1]))
            train_set_lower.append(words)
        unigram_tagger = nltk.UnigramTagger(train_set_lower)
        file_utils.save_object(unigram_tagger, 'tagger_' + language, 'tagger')
    for iDoc in range(0, nDocs):
        documentsProcessed.append(unigram_tagger.tag(documents[iDoc]))
    return documentsProcessed
def pickle_load(file_path):
    with open(file_path, "rb") as f:
        return pickle.load(FileUtils(f))
def pickle_dump(obj, file_path):
    with open(file_path, "wb") as f:
        return pickle.dump(obj, FileUtils(f), protocol=pickle.HIGHEST_PROTOCOL)
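# Both helpers wrap the raw file object in FileUtils before handing it to
# pickle, so FileUtils evidently acts as a file-like proxy here. A round-trip
# usage sketch under that assumption (the file name is illustrative):
state = {"epoch": 3, "scores": [0.91, 0.94]}
pickle_dump(state, "checkpoint.pkl")
assert pickle_load("checkpoint.pkl") == state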
import pytest
import shutil
import os
import sys

from .chr_backup import chrBackup

cwd = os.getcwd()

# This import only works if you're in this directory.
sys.path.insert(0, '../utils')
from file_utils import FileUtils

fUtil = FileUtils()


def test_copy_to_chromeJSON():
    path = chrBackup()  # creates the file in the default dir
    assert isinstance(path, str)
    assert len(path) > 0
        logging.warning(str(descriptor))
        return True
    else:
        return False


if __name__ == "__main__":
    # Basic configuration.
    BASE_FOLDER = '/home/rafael/Temp/rev-saude/'
    FILE_DESCRIPTORS = BASE_FOLDER + 'data/descriptors_no_edat.tsv'
    FILE_INVALID_DESCRIPTORS = BASE_FOLDER + 'data/invalid_descriptors.tsv'
    FILE_LOG = BASE_FOLDER + 'data/removed_descriptors.tsv'

    # Read the descriptors.
    descriptors = FileUtils.get_models_from_path_csv(FILE_DESCRIPTORS,
                                                     sep='\t',
                                                     model=Descriptor)

    # Read the invalid descriptors.
    invalid_descriptors = FileUtils.get_models_from_path_csv(FILE_INVALID_DESCRIPTORS,
                                                             sep='\t',
                                                             model=InvalidDescriptor)

    # Add an invalid descriptor with an empty name to the invalid-descriptor list.
    invalid_descriptors.append(Descriptor(**{'names': ''}))

    # Drop invalid descriptors from the descriptor list.
    descriptors = [d for d in descriptors
                   if not DescriptorManager.filter_descriptor(d, invalid_descriptors, FILE_LOG)]

    # Group articles by year.
    year2articles = {}
    for d in descriptors:
        if d.year not in year2articles:
            year2articles[d.year] = {d.id_medline: [d]}
def _sync(self, gitcmd=None, logger=None):  # pylint: disable=R0915
    changes = 0

    def debug(msg):
        if logger:
            logger.debug(msg)

    slen = len(self.src) + 1
    dlen = len(self.dest) + 1
    # Remove files.
    for root, dirs, files in os.walk(self.src):
        for name in files:
            oldf = os.path.join(root, name)
            if self.sccsp.match(oldf[slen:]):
                continue
            elif self.pattern.match(oldf[slen:]):
                debug('filter out %s' % oldf)
                continue
            newf = oldf.replace(self.src, self.dest)
            if not os.path.lexists(newf):
                debug('remove file %s' % oldf)
                changes += 1
                if gitcmd:
                    gitcmd.rm(oldf)
                else:
                    os.unlink(oldf)
        for dname in dirs:
            oldd = os.path.join(root, dname)
            if self.sccsp.match_dir(oldd[slen:]):
                continue
            elif self.pattern.match_dir(oldd[slen:]):
                debug('filter out %s' % oldd)
                continue
            newd = oldd.replace(self.src, self.dest)
            if not os.path.lexists(newd):
                debug('remove directory %s' % oldd)
                changes += 1
                if gitcmd:
                    gitcmd.rm('-r', oldd)
                else:
                    shutil.rmtree(oldd)
    for root, dirs, files in os.walk(self.dest):
        for dname in dirs:
            newd = os.path.join(root, dname)
            oldd = newd.replace(self.dest, self.src)
            if self.pattern.match(newd[dlen:]):
                debug('filter out %s' % oldd)
            elif not os.path.lexists(os.path.dirname(oldd)):
                debug('ignored %s without dir' % oldd)
            elif not os.path.lexists(oldd):
                debug('makedir %s' % oldd)
                os.makedirs(oldd)
            elif not os.path.isdir(oldd):
                debug('type changed %s' % oldd)
                os.unlink(oldd)
                os.makedirs(oldd)
            else:
                debug('no change %s' % oldd)
        for name in files:
            newf = os.path.join(root, name)
            timest = os.lstat(newf)
            if timest.st_mtime > self._timestamp:
                self._timestamp = timest.st_mtime
            oldf = newf.replace(self.dest, self.src)
            if self.pattern.match(newf[dlen:]):
                debug('filter out %s' % oldf)
            elif not os.path.lexists(os.path.dirname(oldf)):
                debug('ignored %s without dir' % oldf)
            elif os.path.islink(newf):
                if not self._equal_link(oldf, newf):
                    debug('copy the link file %s' % oldf)
                    FileUtils.copy_file(newf, oldf)
                    if gitcmd:
                        gitcmd.add(oldf)
                    changes += 1
            elif not os.path.lexists(oldf):
                debug('add file %s' % newf)
                dirn = os.path.dirname(oldf)
                if not os.path.lexists(dirn):
                    os.makedirs(dirn)
                FileUtils.copy_file(newf, oldf)
                if gitcmd:
                    gitcmd.add(oldf)
                changes += 1
            else:
                if os.path.islink(oldf):
                    debug('link file %s' % newf)
                    FileUtils.copy_file(newf, oldf)
                    if gitcmd:
                        gitcmd.add(oldf)
                    changes += 1
                elif not filecmp.cmp(newf, oldf):
                    debug('change file %s' % newf)
                    FileUtils.copy_file(newf, oldf)
                    if gitcmd:
                        gitcmd.add(oldf)
                    changes += 1
                else:
                    debug('no change %s' % newf)
    return changes
def __init__(self, text):
    import time
    start = time.clock()

    # ------------------------------------------------------------------
    # CONTROL PARAMETER DEFINITIONS
    # ------------------------------------------------------------------
    tp = TextProcessing()
    # NOTE: maximum word length considered for the word-length frequency.
    self.nMaxLengthFreq = 16
    savePath = "/home/ahirton/Python/gender_classification/outputfiles/"
    #savePath = "/home/rpasti/workspace/gender_classification/outputfiles/"
    tagged = tp.tagging([tp.tokenize([text])[0]], savePath, "en")[0]
    fileUtils = FileUtils(savePath)
    text = re.sub("http", "", text)
    self.raw = text
    self.PARAGRAPHS = []
    self.SENTENCES = []
    self.WORDS = []
    delimiters = '\n', '. \n', '! \n', '?\n', '.\n', '!\n', '?\n', '... \n'
    regexPattern = '|'.join(map(re.escape, delimiters))
    for paragraph in re.split(regexPattern, self.raw):
        p = []
        for sentence in tp.tokenize_sentence([paragraph])[0]:
            words = tp.tokenize([sentence])[0]
            #words = tp.remove_punctuation([words])[0]
            self.WORDS.extend(words)
            self.SENTENCES.append(sentence)
            p.append(words)
        self.PARAGRAPHS.append(p)
    self.C = len(text)
    self.LOWER = MetaAttributes._count_char(text, "^[a-z_-]*$")
    self.UPPER = MetaAttributes._count_char(text, "^[A-Z_-]*$")
    self.NUMBERS = MetaAttributes._count_char(text, "^[\d]*$")
    self.WHITE = MetaAttributes._count_char(text, "^[ ]*$")
    self.TAB = MetaAttributes._count_char(text, "^[\t]*$")
    self.N = len(self.WORDS)
    self.SIZES = []
    self.FREQ = {}
    for w in self.WORDS:
        self.SIZES.append(len(w))
    self.FREQ = dict(nltk.FreqDist(self.WORDS))
    self.V = dict(nltk.FreqDist(self.FREQ.values()))
    self.VRICH = self.N - len(self.V)
    self.HXLEGO = []
    self.HXDISLEGO = []
    for w, t in self.FREQ.items():
        if t == 1:
            self.HXLEGO.append(w)
        elif t == 2:
            self.HXDISLEGO.append(w)
    self.TAGGED = tagged
    self.S = len(self.SENTENCES)
    self.pwdictionary = semantic_dictionaries.extended_positive()
    self.nwdictionary = semantic_dictionaries.extended_negative()
    self.neutralwdictionary = semantic_dictionaries.extended_neutral_words()
    self.LIWCdict = fileUtils.load_object("liwc", "dict")
def add_hparams(sequence_dir, sequence_name, run_name, args_dict, metrics_dict, global_step):
    # Create the sequence file and a separate file for each run.
    if not os.path.exists(sequence_dir):
        os.makedirs(sequence_dir)
    sequence_path = os.path.join(sequence_dir, f'{sequence_name}.csv')
    run_path = os.path.join(sequence_dir, f'{run_name}.csv')
    if not os.path.exists(sequence_path):
        open(file=sequence_path, mode='a').close()
    if not os.path.exists(run_path):
        open(file=run_path, mode='a').close()

    # Keep only scalar-valued entries in the args and metrics dicts.
    args_dict = copy.copy(args_dict)
    metrics_dict = copy.copy(metrics_dict)
    for each_dict in [args_dict, metrics_dict]:
        for key in list(each_dict.keys()):
            if not isinstance(each_dict[key], (float, int, str,
                                               np.float, np.int, np.float32)):
                del each_dict[key]

    # Add all params to the sequence and run files.
    for path_csv in [sequence_path, run_path]:
        with open(path_csv, 'r+') as outfile:
            FileUtils.lock_file(outfile)
            lines_all = outfile.readlines()
            lines_all = [it.replace('\n', '').split(',')
                         for it in lines_all if ',' in it]
            if len(lines_all) == 0:
                headers = ['step'] + list(args_dict.keys()) + list(metrics_dict.keys())
                headers = [str(it).replace(',', '_') for it in headers]
                lines_all.append(headers)
            values = [global_step] + list(args_dict.values()) + list(metrics_dict.values())
            values = [str(it).replace(',', '_') for it in values]
            if path_csv == run_path:
                lines_all.append(values)
            else:
                # Sequence file: replace the row whose args match, else append.
                existing_line_idx = -1
                args_values = list(args_dict.values())
                args_values = [str(it).replace(',', '_') for it in args_values]
                for idx_line, line in enumerate(lines_all):
                    if len(line) > 1:
                        is_match = True
                        for idx_arg in range(len(args_values)):
                            if line[idx_arg + 1] != args_values[idx_arg]:
                                is_match = False
                                break
                        if is_match:
                            existing_line_idx = idx_line
                            break
                if existing_line_idx >= 0:
                    lines_all[existing_line_idx] = values
                else:
                    lines_all.append(values)
            outfile.truncate(0)
            outfile.seek(0)
            outfile.flush()
            rows = [','.join(it) for it in lines_all]
            rows = [it for it in rows
                    if len(it.replace('\n', '').strip()) > 0]
            outfile.write('\n'.join(rows).strip())
            outfile.flush()
            os.fsync(outfile)
            FileUtils.unlock_file(outfile)
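# FileUtils.lock_file / unlock_file guard the read-modify-write of the CSVs
# against concurrent runs. Their implementation is not shown; a common POSIX
# approach, sketched here purely as an assumption, is an advisory fcntl lock:
import fcntl

def lock_file_sketch(f):
    """Block until an exclusive advisory lock is held on the open file f."""
    fcntl.flock(f.fileno(), fcntl.LOCK_EX)

def unlock_file_sketch(f):
    fcntl.flock(f.fileno(), fcntl.LOCK_UN)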
args = parser.parse_args()

if __name__ == '__main__':
    if args.range is None:
        print("Please pass the '-r' option to bound the values used in the "
              "exercises (natural numbers, proper fractions and their denominators)")
    if args.exercise is None:
        args.exercise = os.path.join(os.getcwd(), 'Exercises.txt')
    if args.answer is None:
        args.answer = os.path.join(os.getcwd(), 'Answer.txt')
    if args.grade is None:
        args.grade = os.path.join(os.getcwd(), 'Grade.txt')
    print("Welcome to quiz mode... (type 'exit' to quit)")
    t = Tree()
    u_answer = list()  # the user's answers
    # Randomly generate the expressions.
    formula, s_answer = t.generate_formula(args.range, args.number, args.minus)
    # Save the exercise and answer files.
    FileUtils.write_file(formula, s_answer, args.exercise, args.answer)
    for i in range(args.number):
        print(formula[i], end='')
        answer = input()  # read the user's answer
        if answer == 'exit':
            print('Exited successfully!')
            sys.exit()
        u_answer.append(answer)
    # Grade the answers.
    correct, wrong = CalculatorUtils.grading(u_answer, s_answer)
    print("Results:")
    print(correct)
    print(wrong)
    # Save the grade file.
    FileUtils.write_grade_file(args.grade, correct, wrong)
def getContent(self):
    return FileUtils.getFileContent(self.file)
import os
import csv
import re
from getpass import getpass

from email_utils import Email, EmailConnection
from file_utils import FileUtils
from datetime import datetime

f_utils = FileUtils()

# ------ Global configuration variables ------
SERVER_SMTP = "smtp.gmail.com"
SERVER_PORT = 587  # SMTP port
#FROM = "*****@*****.**"  # Default Reflets address
FROM = ""
# --------------------------------------------

print('================================================')
print('          Welcome to ImageMailer! V1.1          ')
print()
print('     Written by Skander for the photo club      ')
print('                R E F L E T S                   ')
print()
print('        Contact: skander.kc AT gmail.com        ')
print('================================================')
print()
print("°°° Connecting to the Gmail mail server °°°")

# Connect to the Gmail SMTP server.
password = getpass(" - Enter the password for " + FROM + ": ")
print("Connecting to the mail server...")
import re
import shutil
import sys

# Local modules:
from chr_path import getChromeJSON
from markdown_formatter import markdownFormatMap

# This import only works if you're in this directory.
sys.path.insert(0, '../utils')
import date_append as DA
from file_utils import FileUtils
from get_config import get_json_config

config = get_json_config()
fileUtils = FileUtils()


class MarkdownCreator:
    def __init__(self):
        self.md_output = ''

    def deleter(self, f_str):
        '''Deletes the '"type": "url",' entries in f_str.'''
        return f_str\
            .replace('"type": "url",', '')

    def replacer(self, f_str):
        '''Replaces html <a> tags.'''
def _sync(self, logger=None, symlinks=False, scmtool=None):  # pylint: disable=R0915
    changes = 0

    def debug(msg):
        if logger:
            logger.debug(msg)

    def unlink(filename):
        if scmtool:
            scmtool.rm('-rf', filename)
        elif os.path.islink(filename):
            os.unlink(filename)
        elif os.path.isdir(filename):
            shutil.rmtree(filename)
        else:
            os.unlink(filename)

    slen = len(self.src) + 1
    dlen = len(self.dest) + 1
    # Remove files.
    for root, dirs, files in os.walk(self.src):
        for name in files:
            oldf = os.path.join(root, name)
            if self.sccsp.match(oldf[slen:]):
                continue
            elif not self.pattern.match(oldf[slen:]):
                debug('ignore %s with file pattern' % oldf)
                continue
            newf = oldf.replace(self.src, self.dest)
            if not os.path.lexists(newf):
                debug('remove %s' % oldf)
                changes += 1
                unlink(oldf)
        if self.pattern.has_dir_rule():
            for dname in dirs:
                oldd = os.path.join(root, dname)
                if self.sccsp.match_dir(oldd[slen:]):
                    continue
                elif not self.pattern.match_dir(oldd[slen:]):
                    debug('ignore %s with dir pattern' % oldd)
                    continue
                newd = oldd.replace(self.src, self.dest)
                if not os.path.lexists(newd):
                    debug('remove %s' % oldd)
                    changes += 1
                    unlink(oldd)
    for root, dirs, files in os.walk(self.dest):
        for dname in dirs:
            newd = os.path.join(root, dname)
            oldd = newd.replace(self.dest, self.src)
            if self.sccsp.match_dir(newd[dlen:]):
                continue
            elif not self.pattern.match_dir(newd[dlen:]):
                debug('ignore %s with file pattern' % oldd)
            elif os.path.islink(newd):
                if not self._equal_link(oldd, newd):
                    debug('mkdir %s' % newd)
                    FileUtils.copy_file(newd, oldd, symlinks=symlinks)
                    changes += 1
            elif os.path.exists(oldd) and not os.path.isdir(oldd):
                debug('type changed %s' % oldd)
                unlink(oldd)
        for name in files:
            newf = os.path.join(root, name)
            timest = FileUtils.last_modified(newf)
            if timest > self._timestamp:
                self._timestamp = timest
            oldf = newf.replace(self.dest, self.src)
            if self.sccsp.match(newf[dlen:]):
                continue
            elif not self.pattern.match(newf[dlen:]):
                debug('ignore %s with file pattern' % oldf)
            elif os.path.islink(newf):
                if not self._equal_link(oldf, newf):
                    debug('copy %s' % newf)
                    FileUtils.copy_file(newf, oldf, symlinks=symlinks,
                                        scmtool=scmtool)
                    changes += 1
            elif not os.path.lexists(oldf):
                debug('add %s' % newf)
                dirn = os.path.dirname(oldf)
                if not os.path.lexists(dirn):
                    os.makedirs(dirn)
                FileUtils.copy_file(newf, oldf, symlinks=symlinks,
                                    scmtool=scmtool)
                changes += 1
            else:
                if os.path.islink(oldf):
                    debug('link %s' % newf)
                    FileUtils.copy_file(newf, oldf, symlinks=symlinks,
                                        scmtool=scmtool)
                    changes += 1
                elif not filecmp.cmp(newf, oldf):
                    debug('change %s' % newf)
                    FileUtils.copy_file(newf, oldf, symlinks=symlinks,
                                        scmtool=scmtool)
                    changes += 1
                else:
                    debug('nochange %s' % oldf)
    return changes
            except DuplicateError:
                # The exercise is a duplicate; drop this expression.
                continue
            else:
                num += 1
        return self.formula, self.answer

    @staticmethod
    def duplicate_check(target_expr, result_expr):
        """Check whether a newly generated expression duplicates an existing one."""
        for expr in result_expr:
            if expr == target_expr:
                return True
            # '+' and '*' are commutative: also compare against the
            # operand-swapped form (built as a copy so target_expr is
            # not mutated between iterations).
            if target_expr[0] == '+' or target_expr[0] == '*':
                swapped = list(target_expr)
                swapped[1], swapped[2] = swapped[2], swapped[1]
                if expr == swapped:
                    return True
        return False


if __name__ == '__main__':
    t = Tree()
    e_file = os.path.join(os.getcwd(), 'Exercises.txt')
    a_file = os.path.join(os.getcwd(), 'Answer.txt')
    g_file = os.path.join(os.getcwd(), 'Grade.txt')
    formula_list, ans_list = t.generate_formula(10, 100, False)
    # Save the exercise and answer files.
    FileUtils.write_file(formula_list, ans_list, e_file, a_file)
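# A quick worked check of the commutativity rule in duplicate_check above,
# assuming expressions are stored as [op, left, right] lists as the index
# arithmetic implies:
seen = [['+', 3, 5]]
assert Tree.duplicate_check(['+', 5, 3], seen)      # '+' is commutative: duplicate
assert not Tree.duplicate_check(['-', 5, 3], seen)  # '-' is order-sensitive: new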