def __init__(self, database: str, *paths: Path, **kwargs: Mapping[str, int]) -> None:
    """
    :param database: database holding the audio collection.
    :param paths: paths scanned for audio files.
    :param kwargs: optional "genres", "languages" and "providers" mappings.
    """
    collection = []  # type: Any
    self._database = database  # type: str
    self._genres = kwargs.get("genres", {"Rock": 9})  # type: Mapping[str, int]
    self._languages = kwargs.get("languages", {"English": 1})  # type: Mapping[str, int]
    self._providers = kwargs.get("providers", {})  # type: Mapping[str, int]

    # Group files by suffix, then keep only the APE, FLAC and WavPack groups.
    for extension, group in filter(
            contains_(".ape", ".flac", ".wv")(lower_(itemgetter(0))),
            groupby(self._get_files(*paths), key=attrgetter("suffix"))):
        for file in group:
            collection.append(iter(self.MAPPING[extension](file)))

    # Keep only the tags declared in self.TAGS.
    collection = [list(filter(itemgetter_(0)(partial(contains, self.TAGS)), item)) for item in collection]
    self._collection = list(starmap(self.__update, collection))  # type: Any
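# The snippet above relies on the project's functional helpers (itemgetter_,
# lower_, contains_). A minimal sketch of how they plausibly behave, inferred
# from the call sites; the names match the source, the bodies are assumptions.
from operator import itemgetter


def itemgetter_(index=0):
    def outer(func):
        def inner(item):
            # Apply func to a single element of the sequence.
            return func(itemgetter(index)(item))
        return inner
    return outer


def lower_(func):
    def inner(arg):
        # Lowercase whatever func extracts.
        return func(arg).lower()
    return inner


def contains_(*elements):
    def outer(func):
        def inner(arg):
            # True when the extracted value belongs to elements.
            return func(arg) in elements
        return inner
    return outer


# With these sketches, the filter above keeps only the groupby groups whose
# lowercased suffix is ".ape", ".flac" or ".wv".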
def format_(*iterables: List[str]) -> Iterator[Tuple[str, int]]:
    """
    :param iterables: audio files records; the file name and its last change UTC date sit at indexes 2 and 3.
    :return: (file, timestamp) couples, one per file.
    """
    # 1. Compress collection. Keep only file and last change UTC date.
    files = [tuple(compress(file, [0, 0, 1, 1, 0])) for file in iterables]  # type: Any

    # 2. Filter collection. Keep only FLAC files.
    files = filter(itemgetter_(0)(match_(arguments.pattern.search)), files)

    # 3. Convert last change UTC date into timestamp.
    files = map_(1)(get_timestamp)(*files)

    # 4. Remove duplicate files. Keep only the most recent last change UTC date for every file.
    #    groupby only groups adjacent duplicates, so sort by file name first.
    files = {key: list(group) for key, group in groupby(sorted(files, key=itemgetter(0)), key=itemgetter(0))}
    for key, values in files.items():
        if len(values) > 1:
            files[key] = [(key, max(map(itemgetter(1), values)))]

    # 5. Yield collection content.
    for _, container in files.items():
        for file, ts in container:
            yield file, ts
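# map_ above is another project helper. A minimal sketch consistent with both
# call sites in this repo (map_(1)(get_timestamp)(*files) here and
# map_(1)(localize_)(*collection) below); the body is an assumption.
from typing import Any, Callable, Iterator, Tuple


def map_(index: int):
    def outer(func: Callable[[Any], Any]):
        def inner(*items) -> Iterator[Tuple[Any, ...]]:
            for item in items:
                # Rebuild each tuple with the indexed element transformed.
                yield item[:index] + (func(item[index]),) + item[index + 1:]
        return inner
    return outer


# Hypothetical usage (sample data):
# list(map_(1)(str)(("a.flac", 100), ("b.flac", 200)))
# -> [("a.flac", "100"), ("b.flac", "200")]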
@classmethod
def fromfile(cls, fil: str, enc: str = UTF16):
    with open(fil, encoding=enc) as stream:
        tags = csv.reader(stream, dialect=CustomDialect())  # type: Any
        # Drop the "encoder+" pseudo-tag.
        tags = filterfalse(decorators.itemgetter_(0)(partial(eq_string_, "encoder+")), tags)
        # Normalize every key: keep the first group captured by cls.REGEX, lowercase it, then strip trailing characters.
        tags = [(decorators.rstrip_(decorators.lower_(callables.group_(1)(cls.REGEX.match)))(key), value) for key, value in tags]
    return cls(**dict(tags))
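# CustomDialect is not shown here. A plausible sketch, assuming the same
# "key=value" layout as the inline reader parameters used elsewhere in this
# repo (delimiter "=", no quoting, CRLF line endings); the real dialect may differ.
import csv


class CustomDialect(csv.Dialect):
    delimiter = "="
    quotechar = '"'
    doublequote = False
    skipinitialspace = False
    lineterminator = "\r\n"
    quoting = csv.QUOTE_NONE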
def __call__(self, parsobj, namespace, values, option_string=None):
    """
    :param parsobj: parser requesting the action.
    :param namespace: namespace that will receive the attribute.
    :param values: command-line arguments matched by the action.
    :param option_string: option string used to invoke the action, if any.
    :return:
    """
    # Every script attached to the workspace is selected by default.
    # Scripts that don't belong to the workspace are dropped whenever the program receives an explicit scripts list.
    targets = filter(itemgetter_(1)(partial(eq, namespace.workspace)), namespace.config.items())  # type: Any
    if values:
        targets = filter(itemgetter_(0)(partial(contains, values)), targets)
    targets = [compress(target, [1, 0]) for target in targets]
    setattr(namespace, self.dest, list(chain.from_iterable(targets)))
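# Minimal, self-contained illustration of the compress-based column selection
# performed above: the mask [1, 0] keeps the target name and drops the
# workspace (the sample data below is made up).
from itertools import chain, compress

pairs = [("47", "workspace.music"), ("48", "workspace.music")]
targets = list(chain.from_iterable(compress(pair, [1, 0]) for pair in pairs))
assert targets == ["47", "48"]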
def __init__(self, database: str, *paths: Path, **kwargs: Mapping[str, int]) -> None:
    """
    :param database: forwarded to the parent class.
    :param paths: forwarded to the parent class.
    :param kwargs: forwarded to the parent class.
    """
    super(DefaultTrack, self).__init__(database, *paths, **kwargs)
    tracks = starmap(self._update, self._collection)  # type: Any

    # Keep only records with the expected number of fields, then drop records
    # whose field at index 11 is None.
    _ok, self._ko = partitioner(tracks, predicate=eq_(self.LENGTH)(len))  # type: Any, Any
    _, _ok = partitioner(_ok, predicate=none_(itemgetter_(11)))

    # Chained stable sorts: order tracks by field 2, then field 9, then field 20.
    tracks = sorted(set(_ok), key=itemgetter(20))
    tracks = sorted(tracks, key=itemgetter(9))
    tracks = sorted(tracks, key=itemgetter(2))
    self._collection = [self.TRACKS(*track) for track in tracks]
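# partitioner is a project helper. A minimal sketch consistent with the first
# call above, where the matching items come first; whether the real helper
# returns lists or iterators is an assumption.
from typing import Any, Callable, Iterable, List, Tuple


def partitioner(iterable: Iterable[Any], *, predicate: Callable[[Any], bool]) -> Tuple[List[Any], List[Any]]:
    matching, non_matching = [], []  # type: List[Any], List[Any]
    for item in iterable:
        # Route every item according to the predicate.
        (matching if predicate(item) else non_matching).append(item)
    return matching, non_matching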
arguments = parser.parse_args()

# ================
# Local functions.
# ================
rjustify = partial(rjustify_index, width=4)

# =======================
# Templating environment.
# =======================
template = TemplatingEnvironment(_MYPARENT / "Templates")
template.set_environment(filters={"rjustify": rjustify})

# ============
# Main script.
# ============
stream = csv.reader(arguments.collection, CustomDialect())
collection = [(Path(image),
               format_date(LOCAL.localize(parse(datetime.replace(":", "-", 2))), template=TEMPLATE2),
               int(parse(datetime.replace(":", "-", 2)).timestamp()))
              for image, datetime in stream]  # type: Any
collection = sorted(collection, key=itemgetter(2))
collection = sorted(collection, key=itemgetter_(0)(attrgetter("parent")))
collection = enumerate(collection, start=1)
collection = [((index,), tuple(compress(file, [1, 1, 0]))) for index, file in collection]
collection = [tuple(chain.from_iterable(item)) for item in collection]
print(template.get_template("T01").render(collection=collection))
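# rjustify_index is not shown here. A plausible sketch given the partial
# binding above (width=4) and the filter name; the real filter may differ.
def rjustify_index(index, *, width=4):
    # Right-justify an index for aligned template output, e.g. "  12".
    return str(index).rjust(width)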
# Parse arguments.
arguments = vars(tags_grabber.parse_args())

# Get audio tags processing profile.
with open(_MYPARENT.parent / "Resources" / "profiles.yml", encoding=UTF8) as stream:
    tags_config = yaml.load(stream, Loader=yaml.FullLoader)[arguments.get("tags_processing", "default")]

# Configure logging.
if tags_config.get("debug", False):
    with open(_MYPARENT.parents[1] / "Resources" / "logging.yml", encoding=UTF8) as stream:
        log_config = yaml.load(stream, Loader=yaml.FullLoader)
    for item in LOGGERS:
        with suppress(KeyError):
            log_config["loggers"][item]["level"] = "DEBUG"
    logging.config.dictConfig(log_config)
logger = logging.getLogger("MyPythonProject.AudioCD.Grabber.{0}".format(splitext(basename(abspath(__file__)))[0]))
logger.debug(mainscript(__file__))

# Process tags from input file.
value, _ = upsert_audiotags(arguments["profile"],
                            arguments["source"],
                            arguments["sequence"],
                            *arguments.get("decorators", ()),
                            genres=CustomAudioGenres(),
                            languages=CustomAudioLanguages(),
                            **dict(filterfalse(itemgetter_()(partial(contains, ["debug", "console"])), tags_config.items())))
sys.exit(value)
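# Minimal illustration of the keyword filtering applied to tags_config above:
# the "debug" and "console" entries are stripped before the mapping is
# unpacked into upsert_audiotags (the sample configuration is made up).
from itertools import filterfalse

config = {"debug": True, "console": False, "genres": "custom"}
kwargs = dict(filterfalse(lambda item: item[0] in ["debug", "console"], config.items()))
assert kwargs == {"genres": "custom"}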
# Get target destination path.
destination = root.xpath("medium")[0].get("path").replace("//", "/")

# Get target filters. XML-escaped ">" entities are restored.
regexes = [regex.get("rgpattern").replace("&gt;", ">") for regex in root.xpath("filter_group/regex_filter")]

# Append target to collection.
collection.append((workspace, uid, name, source, destination, regexes))

# Sort targets collection: by workspace, then name, then numeric uid.
collection = sorted(collection, key=itemgetter_(1)(int))
collection = sorted(collection, key=itemgetter(2))
collection = sorted(collection, key=itemgetter(0))

# Dump targets configuration into a JSON file.
json.dump([dict([("workspace", workspace), ("description", name), ("target", uid)]) for workspace, uid, name, _, _, _ in collection],
          json_file,
          ensure_ascii=False,
          indent=4)

# Dump targets configuration into a TXT file.
main_content = []  # type: List[Tuple[str, str, str, List[List[Tuple[str, ...]]]]]
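# Python's sort is stable, so chaining sorts from the least significant key to
# the most significant one yields a multi-key sort: the cascade above orders
# targets by workspace, then by name, then by numeric uid. A single-pass
# equivalent (sketch):
# collection = sorted(collection, key=lambda target: (target[0], target[2], int(target[1])))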
_ME = Path(os.path.abspath(__file__))
_MYNAME = _ME.stem
_MYPARENT = _ME.parent

# ===========
# Main logic.
# ===========
REGEX = re.compile(r"^(\d(\d)?)(/.+)?$")
encoder, tracknumber = "", "0"  # type: str, str
with open(sys.argv[1], encoding="UTF_16") as stream:
    collection = list(csv.reader(stream, delimiter="=", quoting=csv.QUOTE_NONE, lineterminator="\r\n", doublequote=False))  # type: Any
if collection:
    collection = filterfalse(itemgetter_(0)(partial(eq_string_, "encoder+")), collection)
    collection = dict(collection)
    # Default to an empty string so REGEX.match never receives None.
    track = collection.get("Track", collection.get("track", ""))  # type: str
    match = REGEX.match(track)
    if match:
        tracknumber = match.group(1)
    encoder = collection.get("Encoder", collection.get("encoder", ""))  # type: str
    if encoder:
        encoder = encoder.split()[0]
print(tracknumber)
print(encoder)
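# Quick illustration of the track-number pattern above (sample values made up;
# _REGEX duplicates REGEX so the block stands alone):
import re

_REGEX = re.compile(r"^(\d(\d)?)(/.+)?$")
assert _REGEX.match("7/12").group(1) == "7"  # "track/total" layout
assert _REGEX.match("12").group(1) == "12"   # bare track number
assert _REGEX.match("abc") is None           # non-compliant values leave "0"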
status, codes = 100, []  # type: int, List[int]

# ===============
# Main algorithm.
# ===============
arguments = GetConfig.get_fromjsonfile(_THATFILE.parent / "backup.json")
parser.parse_args(namespace=arguments)

# -----------------------
# 1. Log input arguments.
# -----------------------

# 1.a. Targets available in JSON reference file.
logger.debug("Configured targets.")
for target, workspace in sorted(sorted(arguments.config.items(), key=itemgetter_()(int)), key=operator.itemgetter(1)):
    logger.debug("\t%s: %s.".expandtabs(TABS), target, workspace)

# 1.b. Targets given by parser.
logger.debug("Processed targets.")
if arguments.targets:
    for target in sorted(arguments.targets, key=int):
        logger.debug("\t%s.".expandtabs(TABS), target)
else:
    logger.debug("\tNo coherent target has been given: backup can't be processed!".expandtabs(TABS))

# ---------------------
# 2. Process arguments.
artistsort = data.text

if all([any([artist, artistsort]), album, albumsort, disc, genre, utc_played]):
    # artistsort takes precedence over artist when both are available.
    albumid = None
    if artist:
        albumid = f"{artist[0]}.{artist}"
    if artistsort:
        albumid = f"{artistsort[0]}.{artistsort}"
    if albumid:
        collection.append((f"{albumid}.{albumsort[:-3]}.{disc}", utc_played, genre, albumsort[:-3], album))

# 2. Sort collection by ascending artist, then by descending played date.
collection = sorted(sorted(collection, key=itemgetter(1), reverse=True), key=itemgetter(0))

# 3. Remove albums played prior to the last run date.
collection = filter(itemgetter_(1)(partial(le, pytz.timezone("UTC").localize(utc_run))), collection)

# 4. Remove albums without a compliant genre.
collection = filter(itemgetter_(2)(partial(contains, GENRES)), collection)

# 5. Remove albums without a compliant albumsort.
collection = filter(itemgetter_(3)(valid_albumsort_), collection)

# 6. Remove albums absent from the local audio database.
collection = filter(itemgetter_(0)(partial(contains, list(get_disc_(arguments.db)))), collection)

# 7. Map UTC datetime to local datetime.
#    update_playeddisccount assumes the datetime is local and therefore converts it back to UTC.
collection = map_(1)(localize_)(*collection)

# 8. Remove both genre and albumsort from the collection.
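# Minimal illustration of the UTC threshold used in step 3 (dates made up):
# utc_run is assumed naive, localized to UTC, then compared with the played
# dates through operator.le.
from datetime import datetime
from operator import le

import pytz

utc_run = datetime(2021, 6, 1, 12, 0, 0)
threshold = pytz.timezone("UTC").localize(utc_run)
played = pytz.timezone("UTC").localize(datetime(2021, 6, 2))
assert le(threshold, played)  # the album was played after the last run: kept.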
# ===================
# Jinja2 environment.
# ===================
environment = TemplatingEnvironment(_MYPARENT.parent / "Templates")

# ============
# Main script.
# ============
collection: Any = sorted(shared.get_targets(Path(os.path.expandvars("%_BACKUP%")) / "workspace.music"),
                         key=itemgetter(0))  # (target1, 1234), (target2, 5678), (target3, 9012), (target4, 3456), ...

# -----
collection1: Any = filter(itemgetter_(0)(match_(re.compile(r"\bspringsteen\b", re.IGNORECASE).search)),
                          collection)  # (target1, 1234), (target2, 5678), ...
collection1 = list(shared.format_collection(*sorted(collection1, key=itemgetter(0))))  # (target1, 1, 1234), (target2, 2, 5678), ...

# -----
collection2: Any = filter(itemgetter_(0)(match_(re.compile(r"\bpearl jam\b", re.IGNORECASE).search)),
                          collection)  # (target3, 9012), (target4, 3456), ...
collection2 = list(shared.format_collection(*sorted(collection2, key=itemgetter(0)),
                                            start=len(collection1) + 1))  # (target3, 3, 9012), (target4, 4, 3456), ...

# -----
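# shared.format_collection is not shown here. A plausible sketch inferred from
# the inline comments above ((target1, 1234) -> (target1, 1, 1234), with
# collection2 numbered after collection1); the real helper may differ.
def format_collection(*targets, start=1):
    for index, (target, size) in enumerate(targets, start=start):
        # Insert a running rank between the target name and its size.
        yield target, index, size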