def main():
    """Aggregate per-name sizes parsed from the input file and print a
    human-readable summary table.

    In strict mode any unparsable line aborts the run; otherwise it is
    logged as a warning and skipped.
    """
    options, filename = parse_cmdline()
    setup_logging(options.debug)

    total = {}
    with open(filename) as f:
        for line in f:
            line = utils.sanitise(line)
            if not line:
                continue
            try:
                logger.debug("Parsing %s", line)
                name, size_bytes = parse(line)
                # Accumulate per name; dict.get avoids the original's
                # separate membership test + second lookup.
                total[name] = total.get(name, 0) + size_bytes
            except ValueError as e:
                if options.strict:
                    logger.critical("%s Aborting", e)
                    sys.exit(-1)
                else:
                    # logger.warn is a deprecated alias for warning().
                    logger.warning("%s", e)

    for name, size in total.items():
        val, unit = utils.humanise(size)
        print("{:8} {:10} {}".format(name, val, unit))
def parse(filename):
    """Parse a machine-description file into a list of Server objects.

    Each machine section starts with a "hostname" line; a "Partitions"
    key introduces a sub-section of partition lines terminated by a
    blank line. A partition size of "rest" means "whatever capacity is
    left over".

    Fixes vs. original: the file handle is closed via a context manager,
    and the trailing Server is only emitted when at least one machine
    was parsed (the original raised KeyError on an empty file).
    """
    servers = []
    hostname = ""
    mdict = {}

    def _flush():
        # Materialise the machine collected so far into a Server.
        servers.append(Server(hostname,
                              mdict['HDD Capacity'],
                              mdict['HDD Tech'],
                              mdict['Mem'],
                              mdict['Partitions'],
                              mdict['commission date']))

    with open(filename) as f:
        for line in f:
            line = utils.sanitise(line)
            if not line:
                continue
            k, v = parse_line(line)
            if k == "hostname":
                # A new machine starts here: store the previous one (if
                # any) and reset the per-machine dictionary.
                if hostname:
                    _flush()
                    mdict = {}
                hostname = v
            elif k != "Partitions":
                # Plain key/value pairs go straight into mdict.
                mdict[k] = v
            else:
                # Partition sub-section: consume lines from the same file
                # iterator until an empty line ends it.
                part_dict = {}
                for part in f:
                    part = part.strip()
                    if not part:
                        break
                    partition, size = parse_partition(part)
                    if size == "rest":
                        # "rest" = total capacity minus space already
                        # allocated to other partitions.
                        size = mdict['HDD Capacity'] - sum(part_dict.values())
                    part_dict[partition] = size
                mdict['Partitions'] = part_dict

    # Emit the final machine, guarding against empty input.
    if hostname:
        _flush()
    return servers
def add_module(arg):
    """Add a new module to the current project from its type's templates.

    Reads ./project.json for the project type and name, the global
    jarvis config for workspace/author, prompts for a module name and a
    comment, then instantiates every "module*" template file into the
    project folder with placeholders substituted.

    Fix vs. original: all file handles are managed with ``with`` so they
    are closed even if reading/writing raises.
    """
    with open(os.getcwd() + "/project.json") as fptr:
        pj = json.load(fptr)
    tp = str(pj["type"])
    name = str(pj["name"])

    with open(os.getenv("HOME") + "/Documents/jarvis/config.json") as fptr:
        cfg = json.load(fptr)
    workspace = str(cfg["workspace"])
    author = str(cfg["author"])

    project_folder = os.getenv("HOME") + "/" + workspace + "/" + tp + "/" + name
    type_folder = os.getenv("HOME") + "/Documents/jarvis/templates/" + tp

    module_name = str(raw_input(utils.get_color("blue") + "Enter Module Name : " + utils.reset_color()))
    module_name = module_name.replace(" ", "_")

    files = os.listdir(type_folder)
    existing = os.listdir(os.getcwd())

    # Refuse to clobber: bail out if any generated filename already exists.
    for fname in files:
        if fname.startswith("module"):
            fname = fname.replace("module", module_name)
            if fname in existing:
                print(utils.get_color("red") + "Cannot add " + module_name + " : file " + fname + " already exists" + utils.reset_color())
                return

    cmt = str(raw_input(utils.get_color("blue") + "Comments : " + utils.reset_color()))

    for fname in files:
        if not fname.startswith("module"):
            continue
        with open(type_folder + "/" + fname) as fptr:
            content = fptr.read()
        fname = fname.replace("module", module_name)
        print(utils.get_color("blue") + "Creating " + fname + utils.reset_color())
        with open(project_folder + "/" + fname, "w") as fptr:
            fptr.write(utils.sanitise(content, {"name": name, "comments": cmt, "author": author, "module": module_name}))
    print(utils.get_color("green") + "Module Added" + utils.reset_color())
def is_simple(abbreviation, longform):
    """Return True if *abbreviation* is exactly the initials of *longform*.

    The abbreviation is first cleaned by sanitise(); it is then compared
    against the concatenated, lower-cased first letters of the long
    form's words.

    Ex: "RADIUS" vs "remote authentication dial in user service" -> True
    """
    abbreviation = sanitise(abbreviation, "abbv")
    # str.join replaces the original's quadratic "temp += i" loop.
    initials = "".join(word[0].lower() for word in longform.split())
    # The original additionally compared lengths, which is redundant
    # once the strings compare equal.
    return initials == abbreviation
def is_complex(abbreviation, longform):
    """Return True if *abbreviation* can be assembled from pieces of *longform*.

    Both inputs are sanitised, then:
      * abbv_split() yields the abbreviation's contiguous substrings
        (Ex: "TexMex" -> 'e', 'ex', 'exm', ..., 'xmex'),
      * lng_form_split() yields every word prefix of the long form
        (Ex: "Texas Mexico" -> 'T', 'Te', ..., 'Mexico').
    The intersection of the two is searched for a combination — and,
    failing that, a permutation — whose concatenation equals the
    abbreviation.

    Fix vs. original: return immediately on a match. The original's
    ``break`` only left the innermost loop, so the (exponential) search
    kept running after the answer was already known.
    """
    abbreviation = sanitise(abbreviation, "abbv")
    longform = sanitise(longform, "longform")

    # Candidate fragments common to both decompositions.
    candidates = list(set(abbv_split(abbreviation)).intersection(lng_form_split(longform)))

    # First pass: order-preserving subsets.
    for size in range(len(candidates) + 1):
        for subset in itertools.combinations(candidates, size):
            if "".join(subset) == abbreviation:
                return True
    # Second pass: every ordering of every subset.
    for size in range(len(candidates) + 1):
        for subset in itertools.permutations(candidates, size):
            if "".join(subset) == abbreviation:
                return True
    return False
def create_project(arg):
    """Interactively create a new project from a template type.

    Prompts (with readline tab-completion) for a project type, a name
    and a comment, copies the chosen template tree into the workspace
    with placeholders substituted, then opens a terminal and the
    configured editor on the project's top-level text files.

    Fixes vs. original:
      * json file handles are closed via context managers,
      * directories are created with os.makedirs() instead of shelling
        out to "mkdir" (no shell interpolation of user input; parents
        are created as needed),
      * the editor command referenced ``self.is_text`` inside a
        module-level function — a guaranteed NameError; it now calls a
        module-level ``is_text``.
        NOTE(review): assumes such a helper exists in this module —
        confirm against the full file.
    """
    import readline

    templates_root = os.getenv("HOME") + "/Documents/jarvis/templates"
    types = os.listdir(templates_root)
    print(utils.get_color("cyan") + "Select a project type : ")
    for tp in types:
        print("\t" + tp)
    print(utils.reset_color())

    def complete_type(text, state):
        # readline completer: the state-th type matching the prefix.
        for tp in types:
            if tp.startswith(text):
                if not state:
                    return tp
                state -= 1

    readline.parse_and_bind("tab: complete")
    readline.set_completer(complete_type)
    tp = str(raw_input(utils.get_color("blue") + "Enter Type : " + utils.reset_color()))
    while tp not in types:
        readline.parse_and_bind("tab: complete")
        readline.set_completer(complete_type)
        tp = str(raw_input(utils.get_color("blue") + "Project Type not recognized. \nEnter type : " + utils.reset_color()))

    name = str(raw_input(utils.get_color("blue") + "Enter Name : " + utils.reset_color()))
    name = name.replace("(", "_").replace(")", "_")
    cmt = str(raw_input(utils.get_color("blue") + "Enter Comments : " + utils.reset_color()))
    name = name.replace(" ", "_")

    with open(os.getenv("HOME") + "/Documents/jarvis/config.json") as f:
        j = json.load(f)

    project_folder = os.getenv("HOME") + "/" + str(j["workspace"]) + "/" + tp + "/" + name
    if os.path.exists(project_folder):
        print(utils.get_color("red") + "Project already exists" + utils.reset_color())
        return

    type_folder = templates_root + "/" + tp
    os.makedirs(project_folder)
    files = os.listdir(type_folder)

    def span_dir(rel):
        # Recreate template sub-directory *rel* under the project and
        # collect the files it contains (recursively) into `files`.
        os.makedirs(os.path.join(project_folder, rel))
        for entry in os.listdir(os.path.join(type_folder, rel)):
            if entry.startswith("."):
                continue
            full = os.path.join(os.path.join(type_folder, rel), entry)
            if os.path.isdir(full):
                span_dir(os.path.join(rel, entry))
            elif os.path.isfile(full):
                files.append(os.path.join(rel, entry))

    # NOTE: span_dir appends to `files` while this loop iterates it, so
    # nested entries are themselves visited — intentional in the original.
    for fname in files:
        if os.path.isdir(os.path.join(type_folder, fname)):
            span_dir(fname)

    mainfiles = []
    for fname in files:
        path = os.path.join(project_folder, fname)
        if not os.path.isfile(os.path.join(type_folder, fname)):
            continue
        # Files directly under the project root get opened in the editor.
        if path.count("/") == project_folder.count("/") + 1:
            mainfiles.append(path)
        with open(os.path.join(type_folder, fname)) as fptr:
            content = fptr.read()
        print(utils.get_color("blue") + "Creating " + fname + utils.reset_color())
        with open(path, "w") as fptr:
            fptr.write(utils.sanitise(content, {"name": name, "comments": cmt, "author": str(j["author"]), "workspace": str(j["workspace"])}))

    os.chdir(project_folder)
    os.system("gnome-terminal -e jarvis &")

    with open(os.path.join(project_folder, "project.json")) as f:
        pj = json.load(f)
    # Per-project editor overrides the global config.
    editor = str(pj["editor"]) if "editor" in pj else str(j["editor"])
    cmd = editor + " " + " ".join(fn for fn in mainfiles if is_text(fn)) + " &"
    os.system(cmd)
def main(scan_dir, output_dir):
    """Scan *scan_dir* for audio files and move them into a structured
    library under *output_dir* ("Artist/Artist – Album (date) [codec]/…").

    Files missing the required album-artist/album/title tags are skipped
    with a warning. Non-audio files sitting next to audio files travel
    along as "extras" (covers, logs, ...). Empty directories left behind
    in *scan_dir* are removed at the end.
    """
    data = {}
    visited_folders = set()  # parent dirs already scanned for extra files

    items = list(Path(scan_dir).rglob("*"))
    if not items:
        print(f"No files/folders in: {scan_dir}")
        exit(0)

    for item in items:
        if item.is_dir() or not is_audio_file(item):
            continue

        mutagen_file = load_mutagen(item)
        filename = item.name
        album, album_artist, date, codec, title, tracknum = retrieve_metadata(mutagen_file, filename)

        # The three core tags are mandatory ("is None" replaces "== None").
        if album_artist is None:
            print(f"No \"album artist\" tag in file \"{item}\"")
            continue
        if album is None:
            print(f"No \"album\" tag in file \"{item}\"")
            continue
        if title is None:
            print(f"No \"title\" tag in file \"{item}\"")
            continue

        # Force to str so the values are hashable dict keys, then warn on
        # (and strip) stray surrounding whitespace.
        album_artist, album, title = str(album_artist), str(album), str(title)
        if album_artist != album_artist.strip():
            album_artist = album_artist.strip()
            print(f"Album artist tag for {item} has whitespace")
        if album != album.strip():
            album = album.strip()
            print(f"Album tag for {item} has whitespace")
        if title != title.strip():
            title = title.strip()
            print(f"Title tag for {item} has whitespace")

        if album_artist not in data:
            data[album_artist] = {}
        if album not in data[album_artist]:
            data[album_artist][album] = {"date": set(), "codecs": set(), "songs": [], "extras": []}

        # Sanitised on-disk name, optionally prefixed by the track number.
        if tracknum is not None:
            audio_filename = sanitise(f"{tracknum} {title}{item.suffix}")
        else:
            audio_filename = sanitise(f"{title}{item.suffix}")

        data[album_artist][album]["codecs"].add(codec)
        if date != "":
            data[album_artist][album]["date"].add(date)
        data[album_artist][album]["songs"].append([item.as_posix(), audio_filename])

        # Anything non-audio in the same directory counts as an "extra";
        # each directory is only inspected once.
        if item.parent.as_posix() not in visited_folders:
            visited_folders.add(item.parent.as_posix())
            for extra in item.parent.iterdir():
                if not extra.is_dir() and not is_audio_file(extra):
                    data[album_artist][album]["extras"].append([extra.parent.as_posix(), sanitise(extra.name)])

    # Move everything into "<output>/<artist>/<artist – album (date) [codec]>/".
    for artist in data:
        for album in data[artist]:
            codec = "-".join(sorted(data[artist][album]["codecs"]))
            date = "-".join(sorted(data[artist][album]["date"]))
            album_dirname = sanitise(f"{artist} – {album}{' (' + date + ') ' if date else ' '}[{codec}]")
            folder_path = f"{Path(output_dir).as_posix()}/{sanitise(artist)}/{album_dirname}"

            # NOTE(review): the destination paths in the original read a
            # literal "(unknown)", discarding the per-file name the loop
            # unpacks — almost certainly a corrupted "{filename}"
            # placeholder. Reconstructed as the sanitised filename.
            for src, filename in data[artist][album]["songs"]:
                dst = f"{folder_path}/{filename}"
                if src != dst:
                    move_file(src, dst, output_dir)
            for folder, filename in data[artist][album]["extras"]:
                src = f"{folder}/{filename}"
                dst = f"{folder_path}/{filename}"
                if src != dst:
                    move_file(src, dst, output_dir)

    # Clean up directories emptied by the moves.
    delete_empty(scan_dir)