def error(self, p):
    """Parser error hook.

    If *p* is a Token, report a syntax error pointing at the offending
    source line; otherwise the parser ran off the end of the input and a
    hint about a missing terminator is printed.

    :param p: the offending Token, or a non-Token (e.g. ``None``) at EOF.
    :return: always ``None`` (the parser continues after reporting).
    """
    # FIX: use isinstance() instead of `type(p) == Token` — idiomatic and
    # also accepts Token subclasses.
    if isinstance(p, Token):
        line_number = p.line_number
        # line_number is 1-based; split the raw program to recover the line text
        line = self.raw_prog.split("\n")[line_number - 1]
        error_message(p, line, "Parser", message="Syntax error")
    else:
        print("reached end of file. Are you missing a semi colon ?")
    return None
def make_string(self, char: tuple) -> tuple:
    """Scan a string literal, keyword or identifier starting at *char*.

    *char* is a ``(character, line, position)`` tuple as produced by
    ``self.advance()``.  Consumes characters until the token ends and
    returns ``(next_char, Token)`` — the look-ahead tuple plus the built
    token — or ``(False, None)`` after reporting an unterminated string.
    On several exit paths ``self.cont`` is set to ``False`` so that
    ``tokenize`` re-uses the already-consumed look-ahead character.
    """
    temp: str = "" + char[0]
    start = char[2]
    end = int(char[2])
    line = char[1]
    next_char = None
    scan = True
    # a leading double quote means we are scanning a string literal
    is_string = True if char[0] == '"' else False
    while scan:
        next_char = self.advance()
        # end of input
        if next_char[0] == None:
            break
        # a structural token character ends an identifier/keyword;
        # hand the character back to the main loop via self.cont
        if next_char[0] in list(
                self.tokens.values())[3:] and not is_string:
            self.cont = False
            break
        if next_char[0] == " ":
            # whitespace terminates identifiers but is kept inside strings
            if not is_string:
                break
        # closing quote: finish after appending it below
        if next_char[0] == '"' and is_string:
            scan = False
        if next_char[0] not in string.ascii_letters + string.digits + "_":
            if not is_string:
                # error_message(Token("IDENTIFIER", temp+char[0], char[1], start, end+1), self.current_line, "Tokenizer",
                #               message=f"Character '{next_char[0]}' not allowed in identifier")
                # return False, None
                self.cont = False
                break
        end = next_char[2]
        temp += next_char[0]
    # a string literal must have been closed by a double quote
    if is_string and not temp.endswith('"'):
        error_message(
            Token("STRING", temp + char[0], char[1], start, end + 1),
            self.current_line, "Tokenizer",
            message=f"Character '\"' Expected at the end of the string")
        return False, None
    # classify the lexeme: keyword, string literal, or plain identifier
    if temp in list(self.tokens.values())[2:]:
        return next_char, Token(self.get_key(temp), temp, line, start, end)
    elif temp.startswith('"') and temp.endswith('"'):
        return next_char, Token("STRING", temp, line, start, end)
    else:
        return next_char, Token("IDENTIFIER", temp, line, start, end)
def tokenize(self, program: str) -> Tuple[bool, List[Token]]:
    """Tokenize *program* into a list of Tokens.

    Returns ``(True, tokens)`` on success or ``(False, None)`` when a
    sub-scanner reported an error.  ``self.cont`` implements a one-char
    look-ahead handshake: a sub-scanner that consumed one character too
    many sets it to ``False`` so this loop re-uses the current ``char``
    instead of advancing.
    """
    self.raw_program = program
    self.new_session(program)
    char = None
    while True:
        if self.cont:
            char = self.advance()
        else:
            # a sub-scanner already consumed the next char; reuse it once
            self.cont = True
        # end of input
        if char[0] == None:
            break
        # skip inter-token whitespace
        if char[0] == " ":
            continue
        if char[0] in self.tokens["NUMBER"]:
            char, tok = self.make_number(char)
            # sub-scanner signalled an error with (False, None)
            if char == False:
                return char, tok
            self.tokenized.append(tok)
        elif char[0] in self.tokens["STRING"] + '"':
            char, tok = self.make_string(char)
            if char == False:
                return char, tok
            self.tokenized.append(tok)
        else:
            # single-character tokens: linear search of the token table
            found = False
            last_token = None
            for token, value in self.tokens.items():
                if value == char[0]:
                    self.tokenized.append(Token(token, *char, char[2]))
                    found = True
                    break
                last_token = token
            if not found:
                error_message(Token(last_token, *char, char[2]),
                              self.current_line, unit="Tokenizer",
                              message=f"Unknown token '{char[0]}'")
                return False, None
    return True, self.tokenized
def make_number(self, char) -> tuple:
    """Scan an integer or float literal starting at *char*.

    *char* is a ``(character, line, position)`` tuple.  Returns
    ``(next_char, Token)`` where the token type is ``"FLOAT"`` when
    exactly one ``.`` was seen and ``"INT"`` otherwise, or
    ``(False, None)`` after reporting a literal with multiple dots.
    (Annotation fixed: the original said ``-> int`` but the method
    returns a tuple on every path.)
    """
    dot_count = 0
    num = char[0]
    start = char[2]
    end = int(char[2])
    line = char[1]
    # a literal may start directly with '.'
    if num[0] == ".":
        dot_count += 1
    while True:
        next_char = self.advance()
        # end of input
        if next_char[0] == None:
            break
        if next_char[0] in string.digits + ".":
            num += next_char[0]
            end = next_char[2]
            if next_char[0] == ".":
                dot_count += 1
        else:
            # non-numeric char ends the literal; hand it back to tokenize()
            self.cont = False
            break
        # reject a second dot as soon as it is consumed
        if next_char[0] == ".":
            if dot_count > 1:
                error_message(Token("NUMBER", num + char[0], char[1],
                                    start, end + 2),
                              self.current_line, "Tokenizer",
                              message=f"float has more than one '.' ")
                return False, None
    if dot_count == 1:
        return next_char, Token("FLOAT", float(num), line, start, end)
    else:
        return next_char, Token("INT", int(num), line, start, end)
def exec_err(self, token, message):
    """Report an executor error located via the parser production *token*.

    Searches ``token._slice`` for the last Token it contains, widens that
    token to cover its whole source line, and hands it to
    ``error_message``.  Falls back to printing *message* when the slice
    holds no Token.
    """
    # NOTE(review): these five prints look like leftover debug output of
    # sly/parser internals — consider removing once stable.
    print(token._slice)
    print(token._namemap)
    print(token._stack)
    print(token.lineno)
    print(token.index)
    tok = None
    for x in token._slice:
        if isinstance(x, Token):
            # keep the *last* Token found in the slice
            tok = x
    if tok:
        # FIX: `print(tok.type)` used to run before this guard and raised
        # AttributeError when the slice held no Token, making the else
        # branch below unreachable.
        print(tok.type)
        line_number = tok.line_number
        print(self.prog.split("\n"), line_number)
        line = self.prog.split("\n")[line_number - 1]
        # widen the token so the whole line is underlined in the report
        tok.start = 0
        tok.end = len(line)
        error_message(tok, line, "Executor", message=message)
    else:
        print(message)
import utils from netCDF4 import Dataset import numpy as np from datetime import datetime #modules initialisation merra2_module.initialise() wrf_module.initialise() merra2wrf_mapper.initialise() #----------------------------- #Sanity checks: #check species availability in wrf and in merra files for var in merra2wrf_mapper.get_merra_vars(): if var not in merra2_module.merra_vars: utils.error_message("Could not find variable " + var + " in MERRA2 file. Exiting...") for var in merra2wrf_mapper.get_wrf_vars(): if var not in wrf_module.wrf_vars: utils.error_message("Could not find variable " + var + " in WRF input file. Exiting...") #check that spatial dimensions are covered if ((min(wrf_module.wrf_bnd_lons) < min(merra2_module.mera_lon)) | (max(wrf_module.wrf_bnd_lons) > max(merra2_module.mera_lon)) | (min(wrf_module.wrf_bnd_lats) < min(merra2_module.mera_lat)) | (max(wrf_module.wrf_bnd_lats) > max(merra2_module.mera_lat))): utils.error_message( "WRF area is not fully covered by MERRA2 area. Exiting...") time_intersection = wrf_module.wrf_times.viewkeys(
def main():
    """Move video files from the source directory into the destination
    directory, embedding the recording date (from EXIF metadata) in the
    file name when available.

    Files without EXIF data go to a separate subdirectory.  Content
    duplicates (matched by hash) are deleted from the source; name
    clashes with differing content get a timestamp suffix.  Finishes by
    persisting the hash set and printing a summary report.
    """
    # start_dir = get_dir("forrás")
    # dest_dir = get_dir("cél")
    start_dir = "./teszt_video"
    dest_dir = convert_relative_path_to_absolute("./videos")
    dest_dir_no_exif = create_no_exif_data_dir(dest_dir)

    # hashes of files already present in the destination (persisted per day)
    file_hash_in_dest_dir = get_hash_of_files_from_file(daily_config_filename,
                                                        dest_dir)

    file_counter_for_same_name = 0
    file_counter_dest_dir_no_exif = 0
    file_counter_dest_dir = 0
    file_count_deleted = 0

    for root, dirs, files in os.walk(start_dir, topdown=True):
        for name in files:
            original_name = name
            original_name_with_path = os.path.join(root, name)
            filename, filename_ext = os.path.splitext(name)
            filename = clean_file_name(filename)
            if filename_ext.lower() in FILE_EXTENSIONS:
                video_date_converted = get_video_exif_info(
                    original_name_with_path)
                if video_date_converted:
                    filename = set_exif_info_in_filename(
                        video_date_converted, filename)
                    target_dir = dest_dir
                else:
                    target_dir = dest_dir_no_exif
                    error_message("\nAz EXIF információ nem elérhető... "
                                  + original_name)
                new_name = filename + filename_ext

                file_hash = get_file_hash(original_name_with_path)
                if file_hash in file_hash_in_dest_dir:
                    # identical content already in the destination:
                    # delete the source copy
                    # FIX: restored the missing space between the two
                    # halves of the message ("fájl" + "a cél..." printed
                    # as "fájla cél...")
                    warning_message(
                        "Már létezik ugyanilyen tartalmú fájl "
                        + "a célkönyvtárban, ezért törlöm a forráskönyvtárban...")
                    file_count_deleted += 1
                    os.remove(original_name_with_path)
                else:
                    # move the source file into the destination directory
                    try:
                        if os.path.isfile(os.path.join(target_dir, new_name)):
                            # same name but different content: add a
                            # timestamp to avoid overwriting
                            warning_message("Már van ilyen nevű fájl és a kettő tartalma nem egyezik meg...")
                            filename = set_timestamp_in_filename(filename)
                            new_name = filename + filename_ext
                            file_counter_for_same_name += 1
                        message = " ".join(["Áthelyezés: ",
                                            original_name_with_path,
                                            os.path.join(target_dir, new_name)])
                        info_message(message)
                        if target_dir == dest_dir:
                            file_counter_dest_dir += 1
                        else:
                            file_counter_dest_dir_no_exif += 1
                        file_hash_in_dest_dir.add(file_hash)
                        os.rename(original_name_with_path,
                                  os.path.join(target_dir, new_name))
                    except OSError:
                        # FIX: was a bare `except:` that swallowed every
                        # exception; the guarded calls raise OSError
                        print("Hiba lépett fel a következő fájl esetén: ",
                              os.path.join(target_dir, new_name))
                        continue

    if file_hash_in_dest_dir:
        save_file_hash_to_file(daily_config_filename, file_hash_in_dest_dir)
    summary_report(file_counter_dest_dir, file_counter_dest_dir_no_exif,
                   file_counter_for_same_name, file_count_deleted)
def main():
    """Move jpg files from the source directory into the destination
    directory; for files carrying EXIF data the creation date is written
    into the file name.

    On a name clash where the two files differ in content, the name gets
    a timestamp suffix so nothing is overwritten.  File names are also
    normalized: Hungarian accented characters are replaced with their
    ASCII equivalents, and spaces, dots and other special characters are
    removed.  Content duplicates (matched by hash) are deleted from the
    source.  Finishes by persisting the hash set and printing a summary
    report.
    """
    start_dir = get_dir("forrás")
    dest_dir = get_dir("cél")
    dest_dir_no_exif = create_no_exif_data_dir(dest_dir)

    # hashes of files already present in the destination (persisted per day)
    file_hash_in_dest_dir = get_hash_of_files_from_file(
        daily_config_filename, dest_dir)

    file_counter_for_same_name = 0
    file_counter_dest_dir_no_exif = 0
    file_counter_dest_dir = 0
    file_count_deleted = 0

    for root, dirs, files in os.walk(start_dir, topdown=True):
        for name in files:
            original_name = name
            original_name_with_path = os.path.join(root, name)
            filename, filename_ext = os.path.splitext(name)
            filename = clean_file_name(filename)
            # extract EXIF information from jpg files
            if filename_ext.lower() in FILETYPES:
                image_date_converted = get_image_exif_info(
                    original_name_with_path)
                if image_date_converted:
                    filename = set_exif_info_in_filename(
                        image_date_converted, filename)
                    target_dir = dest_dir
                else:
                    target_dir = dest_dir_no_exif
                    error_message("\nAz EXIF információ nem elérhető... "
                                  + original_name)
                new_name = filename + filename_ext
                print(new_name)

                file_hash = get_file_hash(original_name_with_path)
                if file_hash in file_hash_in_dest_dir:
                    # identical content already in the destination:
                    # delete the source copy
                    # FIX: the message was a backslash-continued string
                    # literal, which embedded the continuation line's
                    # indentation into the printed text; rebuilt as one
                    # clean literal
                    warning_message(
                        "Már létezik ugyanilyen tartalmú fájl "
                        "a célkönyvtárban, ezért törlöm a forráskönyvtárban...")
                    file_count_deleted += 1
                    os.remove(original_name_with_path)
                else:
                    # move the source file into the destination directory
                    try:
                        if os.path.isfile(os.path.join(target_dir, new_name)):
                            # same name but different content: add a
                            # timestamp to avoid overwriting
                            warning_message(
                                "Már van ilyen nevű fájl és a kettő tartalma nem egyezik meg..."
                            )
                            filename = set_timestamp_in_filename(filename)
                            new_name = filename + filename_ext
                            file_counter_for_same_name += 1
                        message = " ".join([
                            "Áthelyezés: ", original_name_with_path,
                            os.path.join(target_dir, new_name)
                        ])
                        info_message(message)
                        if target_dir == dest_dir:
                            file_counter_dest_dir += 1
                        else:
                            file_counter_dest_dir_no_exif += 1
                        file_hash_in_dest_dir.add(file_hash)
                        os.rename(original_name_with_path,
                                  os.path.join(target_dir, new_name))
                    except OSError:
                        # FIX: was a bare `except:` that swallowed every
                        # exception; the guarded calls raise OSError
                        print("Hiba lépett fel a következő fájl esetén: ",
                              os.path.join(target_dir, new_name))
                        continue

    if file_hash_in_dest_dir:
        save_file_hash_to_file(daily_config_filename, file_hash_in_dest_dir)
    summary_report(file_counter_dest_dir, file_counter_dest_dir_no_exif,
                   file_counter_for_same_name, file_count_deleted)