Example #1
from pathlib import PurePath

def replace_suffix():
    '''
    Only the last suffix can be replaced (pathlib treats the last dotted part
    as *the* suffix)
    - A suffix can be added to a path that has none
    '''
    relative = PurePath('austinchang/tutorials/python/language/python_37/popular_modules/pathlib_/purepath/methods.r2d2.py')
    print(relative.with_suffix('.js')) # austinchang/tutorials/python/language/python_37/popular_modules/pathlib_/purepath/methods.r2d2.js
    no_suffix = PurePath('/foobar/')
    print(no_suffix.with_suffix('.dope')) # /foobar.dope
    removed_suffix = PurePath('help.js')
    print(removed_suffix.with_suffix('')) # help
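A side note on the example above: with_suffix() also validates its argument. A minimal sketch of the two rejection rules, using only pathlib:

from pathlib import PurePath

print(PurePath('notes.txt').with_suffix('.md'))  # notes.md
try:
    PurePath('notes.txt').with_suffix('md')  # suffix must start with a dot
except ValueError as err:
    print(err)  # Invalid suffix 'md'
try:
    PurePath('notes.txt').with_suffix('.')  # a bare dot is also rejected
except ValueError as err:
    print(err)  # Invalid suffix '.'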
Example #2
    def __init__(self, fullDirectoryFilename):
        path = PurePath(fullDirectoryFilename)
        self.full = path.as_posix()
        self.directory = path.parent.as_posix()
        self.filename = path.name
        self.simplename = path.with_suffix("").name  # with_suffix() already returns a PurePath
        self.extension = path.suffix.split('.', 1)[-1]  # suffix without the leading dot
Example #3
File: _ann.py Project: bioShaun/omSnpScore
def make_gene_bed(gene_bed, genes, target_bed):
    gene_bed_df = pd.read_csv(gene_bed,
                              sep='\t',
                              header=None,
                              names=['chrom', 'start', 'end'],
                              index_col=3)
    tmp_target_bed = PurePath(target_bed).with_suffix('.tmp.bed')
    sorted_tmp_target_bed = tmp_target_bed.with_suffix('.sorted.bed')
    gene_index = pd.Index(genes)
    intersect_genes = gene_index.intersection(gene_bed_df.index)
    missed_genes = gene_index.difference(gene_bed_df.index)
    if intersect_genes.empty:
        logger.error('None of the input genes are in the database.')
        sys.exit(1)
    if not missed_genes.empty:
        missed_genes = missed_genes.astype('str')
        logger.warning('Input genes {} not found.'.format(
            ','.join(missed_genes)))
    logger.info('Making target region bed from input genes...')
    target_bed_df = gene_bed_df.loc[intersect_genes]
    target_bed_df.to_csv(tmp_target_bed, sep='\t', index=False, header=False)
    sort_cmd = f'sort -k1,1 -k2,2n {tmp_target_bed} > {sorted_tmp_target_bed}'
    delegator.run(sort_cmd)
    merge_region_cmd = f'bedtools merge -i {sorted_tmp_target_bed} > {target_bed}'
    delegator.run(merge_region_cmd)
Example #4
File: fs.py Project: zkl-tech/meson
    def replace_suffix(self, state: 'ModuleState', args: typing.Sequence[str],
                       kwargs: dict) -> ModuleReturnValue:
        if len(args) != 2:
            raise MesonException('method takes exactly two arguments.')
        original = PurePath(args[0])
        new = original.with_suffix(args[1])
        return ModuleReturnValue(str(new), [])
Example #5
    def _render_path(self, site_path: Path, relative_path: PurePath,
                     output_dir: Path) -> None:
        """Render a Jinja2 template and write it to the same relative path
        in the output directory.

        Notes
        -----
        The output path will be the same as the ``site_path``, but without
        the original ``.jinja`` extension.
        """
        # Relative path of the output (remove the .jinja extension)
        relative_output_path = relative_path.with_suffix("").with_suffix(
            "".join(site_path.suffixes[:-1]))
        # Remove the .jinja extension while also locating the rendered output
        # in the build directory.
        output_path = output_dir.joinpath(relative_output_path)
        if not output_path.parent.exists():
            output_path.parent.mkdir(parents=True, exist_ok=True)

        template_name = f"${self.name}/{relative_path!s}"
        self.logger.debug("Rendering templated file: %s", relative_output_path)
        jinja_template = self.jinja_env.get_template(template_name)
        context = self.create_jinja_context(
            path=PurePosixPath(relative_output_path),
            template_name=template_name,
        )
        content = jinja_template.render(**context)

        output_path.write_text(content)
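The chained with_suffix() calls above drop only the trailing .jinja while keeping an inner extension. A minimal sketch of that behaviour with a hypothetical template named metrics.yaml.jinja (the name is illustrative, not from the project):

from pathlib import PurePath

site_path = PurePath('metrics.yaml.jinja')
print(site_path.suffixes)  # ['.yaml', '.jinja']
inner = ''.join(site_path.suffixes[:-1])  # '.yaml'
print(site_path.with_suffix('').with_suffix(inner))  # metrics.yaml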
Example #6
    def get_ld_section(self):
        replace_ext = self.options.get("ld_o_replace_extension", True)
        sect_name = self.ld_name_override if self.ld_name_override else self.get_ld_section_name()
        vram_or_rom = self.rom_start if self.vram_addr == 0 else self.vram_addr
        subalign_str = "" if self.subalign == default_subalign else f"SUBALIGN({self.subalign})"

        s = (
            f"SPLAT_BEGIN_SEG({sect_name}, 0x{self.rom_start:X}, 0x{vram_or_rom:X}, {subalign_str})\n"
        )

        i = 0
        for subdir, path, obj_type, start in self.get_ld_files():
            # Hack for non-0x10 alignment
            if start % 0x10 != 0 and i != 0:
                tmp_sect_name = path.replace(".", "_")
                tmp_sect_name = tmp_sect_name.replace("/", "_")
                tmp_vram = start - self.rom_start + self.vram_addr
                s += (
                    "}\n"
                    f"SPLAT_BEGIN_SEG({tmp_sect_name}, 0x{start:X}, 0x{tmp_vram:X}, {subalign_str})\n"
                )

            path = PurePath(subdir) / PurePath(path)
            path = path.with_suffix(".o" if replace_ext else path.suffix + ".o")

            s += f"    BUILD_DIR/{path}({obj_type});\n"
            i += 1

        s += (
            f"SPLAT_END_SEG({sect_name}, 0x{self.rom_end:X})\n"
        )

        return s
Example #7
def make_version_iri_from_iri(iri, epoch):
    head, tail = iri.split('/', 1)  # e.g. 'http:' and '/host/path/name.owl'
    pp = PurePath(tail)
    vp = (pp.with_suffix('') / 'version' / str(epoch) / pp.stem).with_suffix(pp.suffix)
    viri = head + '/' + str(vp)  # restore the slash consumed by split()
    return rdflib.URIRef(viri)
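Assuming the slash restoration above, a hypothetical input/output pair, sketched without rdflib:

from pathlib import PurePath

iri, epoch = 'http://purl.example.org/obo/foo.owl', 2021  # illustrative IRI, not from the project
head, tail = iri.split('/', 1)
pp = PurePath(tail)
vp = (pp.with_suffix('') / 'version' / str(epoch) / pp.stem).with_suffix(pp.suffix)
print(head + '/' + str(vp))  # http://purl.example.org/obo/foo/version/2021/foo.owl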
Example #8
def transfer_synthetic_dataset(src_dir, tar_dir):
    for root, dirs, files in os.walk(src_dir):
        for filename in files:
            src = PurePath(path_append(root, filename))
            if src.suffix != ".csv":  # pragma: no cover
                continue
            tar = path_append(tar_dir, src.with_suffix(".json").name)
            synthetic2json(src, tar)
Example #9
        def push_file(name: PurePath, data: pandas.DataFrame):
            db_file = csv_file_schema.load({
                'name': name.with_suffix('.csv').name,
                'table': data.to_csv(index=False),
                'meta': meta
            })

            db_files.append(db_file)
            db.session.add(db_file)
Example #10
def publish_azw3_document(doc):
    input_file = PurePath(doc.filename)

    output_file = input_file.with_suffix('.azw3')
    result = subprocess.run(
        [calibre_ebook_convert,
         str(input_file),
         str(output_file)])
    return output_file
Example #11
    def replace_suffix(self, state: 'ModuleState',
                       args: T.Tuple['FileOrString',
                                     str], kwargs: T.Dict[str, T.Any]) -> str:
        if isinstance(args[0], File):
            FeatureNew('fs.replace_suffix_file',
                       '0.59.0').use(state.subproject)
        original = PurePath(str(args[0]))
        new = original.with_suffix(args[1])
        return str(new)
Example #12
    def get_ld_section(self):
        replace_ext = options.get("ld_o_replace_extension", True)
        sect_name = self.ld_name_override if self.ld_name_override else self.get_ld_section_name()
        vram_or_rom = self.rom_start if self.vram_start == 0 else self.vram_start
        subalign_str = f"SUBALIGN({self.subalign})"

        s = (
            f"SPLAT_BEGIN_SEG({sect_name}, 0x{self.rom_start:X}, 0x{vram_or_rom:X}, {subalign_str})\n"
        )

        i = 0
        do_next = False
        for subdir, path, obj_type, start in self.get_ld_files():
            # Manual linker segment creation
            if obj_type == "linker":
                s += (
                    "}\n"
                    f"SPLAT_BEGIN_SEG({path}, 0x{start:X}, 0x{self.rom_to_ram(start):X}, {subalign_str})\n"
                )

            # Create new sections for non-0x10 alignment (hack)
            if (start % 0x10 != 0 and i != 0) or do_next:
                tmp_sect_name = path.replace(".", "_")
                tmp_sect_name = tmp_sect_name.replace("/", "_")
                s += (
                    "}\n"
                    f"SPLAT_BEGIN_SEG({tmp_sect_name}, 0x{start:X}, 0x{self.rom_to_ram(start):X}, {subalign_str})\n"
                )
                do_next = False

            if start % 0x10 != 0 and i != 0:
                do_next = True

            path_cname = re.sub(r"[^0-9a-zA-Z_]", "_", path)
            s += f"    {path_cname} = .;\n"

            if subdir == options.get("assets_dir"):
                path = PurePath(path)
            else:
                path = PurePath(subdir) / PurePath(path)

            # Remove leading ..s
            while path.parts[0] == "..":
                path = path.relative_to("..")

            path = path.with_suffix(".o" if replace_ext else path.suffix + ".o")

            if obj_type != "linker":
                s += f"    BUILD_DIR/{path}({obj_type});\n"
            i += 1

        s += (
            f"SPLAT_END_SEG({sect_name}, 0x{self.rom_end:X})\n"
        )

        return s
Example #13
async def yaml_read(
        filepath: pathlib.PurePath,
        type_: const.ConfigPathType = const.ConfigPathType.CONFIG
) -> typing.Any:
    """
    Read a YAML file and convert it into a Config object.

    Args:
        filepath (pathlib.PurePath): The relative path to the YAML file being
            read. Do not add ``.yaml`` or ``.yaml.gz`` to the end of the
            filename; it will be added automatically.
        type_ (const.ConfigPathType): Which location to read from;
            ``const.ConfigPathType.CONFIG`` reads from the built-in config
            location, while any other value reads from the corresponding
            user-editable location. Defaults to
            ``const.ConfigPathType.CONFIG``.

    Returns:
        Any: The Config object compatible form of the data from the YAML
        file. Typically, this will be a :py:class:`DictConfig`,
        :py:class:`tuple`, or :py:class:`frozenset`.

    Note:
        Why is gzip used? Because it is much faster compression/decompression
        than bzip2, xz, or even zip. Further, it actually compresses small
        datafiles (like what playlist has) better than the alternatives.

        Gzip is so firmly embraced in Linux that most standard tools either
        natively read/write gzip-compressed files (like vi), or have an
        alternative gzip-handling equivalent (like zcat for cat).

        Smaller files result in less file I/O, which improves the overall
        performance of checks, as filesystems are substantially slower than
        RAM -- it takes less time and fewer resources to read a small
        compressed file and decompress it into memory than to leave the
        files uncompressed and read them straight from the filesystem.

    References:
        :py:func:`playlist.core.config.yaml_read`,
        :py:func:`parse_element`,
        :py:class:`DictConfig`,
        :py:class:`tuple`,
        :py:class:`frozenset`

    """
    filepath = filepath.with_suffix(const.FileExt.YAML.value)
    subpath = filepath.parent
    filename = filepath.name

    if type_ == const.ConfigPathType.CONFIG:
        data = await _yaml_read_config(subpath, filename)

    else:
        data = await _yaml_read_editable(type_, subpath, filename)

    return _config.parse_element(data)
Example #14
def add_doc_target_ext(target: str, docpath: PurePath, project_root: Path) -> Path:
    """Given the target file of a doc role, add the appropriate extension and return full file path"""
    # Add .txt to end of doc role target path
    target_path = PurePath(target)
    # Adding the current suffix first takes into account dotted targets
    new_suffix = target_path.suffix + ".txt"
    target_path = target_path.with_suffix(new_suffix)

    fileid, resolved_target_path = reroot_path(target_path, docpath, project_root)
    return resolved_target_path
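Why append to the existing suffix instead of calling with_suffix('.txt') directly? A direct call would swallow a dotted segment in the target name. A minimal sketch with a hypothetical target name 'guide.v2':

from pathlib import PurePath

target_path = PurePath('guide.v2')
print(target_path.with_suffix('.txt'))                       # guide.txt -- '.v2' is lost
print(target_path.with_suffix(target_path.suffix + '.txt'))  # guide.v2.txt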
Example #15
    def _build_and_verify_path(self,
                               filename,
                               alt_filename=None,
                               allow_compressed=False):
        """
        Added to Sample as class_method:
            if the matching filename for idat file is not in the same folder.
            check if exists:
            then look recursively for that filename and update the data_dir for that Sample.
            return the complete filepath.

        alt_filename:
            because public data sets on GEO have samplesheets with a different pattern, if the primary file pattern
            fails to match, it will try the alt_filename pattern before returning a FileNotFoundError.

        replaces _build_path
        """
        same_dir_path = PurePath(self.data_dir, str(self.sentrix_id), filename)
        if Path(same_dir_path).is_file():
            # this idat file is in the same folder, no more searching needed.
            return same_dir_path

        if allow_compressed:
            # with_suffix('.gz') would *replace* '.idat'; append to the existing suffix instead
            gz_path = same_dir_path.with_suffix(same_dir_path.suffix + '.gz')
            if Path(gz_path).is_file():
                return gz_path

        # otherwise, do a recursive search for this file and return the first path found.
        file_matches = list(Path(self.data_dir).rglob(filename))
        if (not file_matches) and allow_compressed:
            file_matches = list(Path(self.data_dir).rglob(filename + '.gz'))
        if not file_matches:
            if alt_filename is not None and alt_filename != filename:
                # Note: both patterns will be identical if GSM_ID missing from sample sheet.
                alt_file_matches = list(
                    Path(self.data_dir).rglob(alt_filename))
                if (not alt_file_matches) and allow_compressed:
                    alt_file_matches = list(
                        Path(self.data_dir).rglob(alt_filename + '.gz'))
                if len(alt_file_matches) > 1:
                    LOGGER.warning(
                        f'Multiple ({len(alt_file_matches)}) files matched {alt_filename} -- saved path to first one: {alt_file_matches[0]}'
                    )
                if len(alt_file_matches) > 0:
                    return alt_file_matches[0]
            raise FileNotFoundError(
                f'No files in {self.data_dir} (or sub-folders) match this sample id: {filename} OR {alt_filename}'
            )
        elif len(file_matches) > 1:
            LOGGER.warning(
                f'Multiple ({len(file_matches)}) files matched {filename} -- saved path to first one: {file_matches[0]}'
            )
        return file_matches[0]
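The compressed-file check above hinges on the same replace-versus-append distinction; a minimal sketch with a hypothetical idat filename:

from pathlib import PurePath

p = PurePath('sample_Grn.idat')
print(p.with_suffix('.gz'))             # sample_Grn.gz -- '.idat' is replaced
print(p.with_suffix(p.suffix + '.gz'))  # sample_Grn.idat.gz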
Example #16
def load_graph_from_npz(name, root, print_shape=True):
    import scipy.sparse as sp
    import networkx as nx
    from pathlib import PurePath
    path = PurePath(root, name)
    if path.suffix != '.npz':  # 'is not' compares identity, not equality
        path = path.with_suffix('.npz')
    part_adj = sp.load_npz(path)
    graph = nx.from_scipy_sparse_matrix(part_adj)
    if print_shape:
        print(f'shape: {part_adj.shape}')
    return part_adj, graph
Example #17
def create_ipynb_from_py(py_path) -> str:
    """Create an .ipynb notebook file from a Jupytext .py file

    :param py_path: path to a Jupytext-generated .py file
    :type py_path: path-like
    :return: path to a newly created .ipynb file
    :rtype: path-like
    """
    py_path = PurePath(py_path)  # if not already
    converted_nb = jupytext.read(py_path)
    ipynb_path = py_path.with_suffix(IPYNB_SUFFIX)
    jupytext.write(converted_nb, ipynb_path)
    return str(ipynb_path)
Example #18
    def get_ld_section(self):
        replace_ext = self.options.get("ld_o_replace_extension", True)
        sect_name = self.ld_name_override if self.ld_name_override else self.get_ld_section_name()
        vram_or_rom = self.rom_start if self.vram_addr == 0 else self.vram_addr

        s = (
            f"/* 0x{self.vram_addr:08X} {self.rom_start:X}-{self.rom_end:X} (len {self.rom_length:X}) */\n"
            "#ifdef SHIFT\n"
            f"{sect_name}_ROM_START = __romPos;\n"
            "#else\n"
            f"{sect_name}_ROM_START = 0x{self.rom_start:X};\n"
            "#endif\n"
            f"{sect_name}_VRAM = ADDR(.{sect_name});\n"
            f".{sect_name} 0x{vram_or_rom:X} : AT({sect_name}_ROM_START) {{\n"
        )

        i = 0
        for subdir, path, obj_type, start in self.get_ld_files():
            # Hack for non-0x10 alignment
            if start % 0x10 != 0 and i != 0:
                tmp_sect_name = path.replace(".", "_")
                tmp_sect_name = tmp_sect_name.replace("/", "_")
                tmp_vram = start - self.rom_start + self.vram_addr
                s += (
                    "}\n"
                    "#ifdef SHIFT\n"
                    f"{tmp_sect_name}_ROM_START = __romPos;\n"
                    "#else\n"
                    f"{tmp_sect_name}_ROM_START = 0x{start:X};\n"
                    "#endif\n"
                    f".{tmp_sect_name} 0x{tmp_vram:X} : AT({tmp_sect_name}_ROM_START) {{\n"
                )
                
            path = PurePath(subdir) / PurePath(path)
            path = path.with_suffix(".o" if replace_ext else path.suffix + ".o")

            s += f"    BUILD_DIR/{path}({obj_type});\n"
            i += 1

        s += (
            "}\n"
            "#ifdef SHIFT\n"
            f"{sect_name}_ROM_END = __romPos + SIZEOF(.{sect_name});\n"
            f"__romPos += SIZEOF(.{sect_name});\n"
            "#else\n"
            f"{sect_name}_ROM_END = 0x{self.rom_end:X};\n"
            f"__romPos += 0x{self.rom_length:X};\n"
            "#endif\n"
        )

        return s
Example #19
def merge_contig_fa(genome_fa, contig_list, n_sep=100,
                    merge_name='chrUn'):
    '''
    Merge contigs into a single super contig in the genome file.
    Each contig is separated by a run of N bases in the super contig.
    Outputs a new genome file with the super contig and
    a table with each contig's offset within the super contig.
    '''
    contig_df = pd.read_table(contig_list, index_col=0, header=None)
    contig_df.index = [str(each) for each in contig_df.index]
    n_sep_str = 'N' * n_sep
    contig_seq_list = []
    contig_offset_dict = {}
    offset = 0
    all_seq_list = []
    for seq_record in SeqIO.parse(genome_fa, "fasta"):
        if seq_record.id in contig_df.index:
            contig_seq_list.append(str(seq_record.seq))
            contig_offset_dict.setdefault(
                'contig_id', []).append(seq_record.id)
            contig_offset_dict.setdefault(
                'offset', []).append(offset)
            offset += len(seq_record.seq) + n_sep
        else:
            all_seq_list.append(seq_record)
    merged_contig_seq = Seq(n_sep_str.join(contig_seq_list))
    merged_contig_seq_rd = SeqRecord(id=merge_name,
                                     seq=merged_contig_seq,
                                     description='')
    all_seq_list.append(merged_contig_seq_rd)
    genome_fa = PurePath(genome_fa)
    genome_merge_ctg_fa = genome_fa.with_suffix('.merge_ctg.fa')
    SeqIO.write(all_seq_list, genome_merge_ctg_fa, "fasta")
    ctg_offset_file = genome_fa.with_suffix('.ctg.offset.txt')
    ctg_offset_df = DataFrame(contig_offset_dict)
    ctg_offset_df.to_csv(ctg_offset_file, sep='\t', index=False,
                         columns=['contig_id', 'offset'])
    return genome_merge_ctg_fa, ctg_offset_file
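A toy illustration of the offset bookkeeping above, with hypothetical contigs of length 5 and 7 and n_sep=4 (the merged sequence is 'AAAAA' + 'NNNN' + 'BBBBBBB', so the second contig starts at 5 + 4 = 9):

contigs = {'ctgA': 'A' * 5, 'ctgB': 'B' * 7}  # hypothetical stand-ins for real contig sequences
n_sep = 4
offset = 0
offsets = {}
for name, seq in contigs.items():
    offsets[name] = offset
    offset += len(seq) + n_sep
print(offsets)  # {'ctgA': 0, 'ctgB': 9}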
Example #20
def load_label_from_npy(name, root, mode='raw', print_label=False):
    import numpy as np
    from pathlib import PurePath
    path = PurePath(root, name)
    if path.suffix != '.npy':  # 'is not' compares identity, not equality
        path = path.with_suffix('.npy')
    graph_label = np.load(path)

    if print_label:
        print_label_table(graph_label)
    if graph_label.ndim == 2:  # one-hot labels: collapse to class indices
        graph_label = np.argmax(graph_label, axis=1)
    node_pos = get_node_pos(graph_label, mode)
    return graph_label, node_pos
Example #21
def main(argv):
    in_filename = None
    out_dir = None
    out_filename = None
    sampler = 'bilinear_no_mipmap|clamp'
    font = 'droid_sans_24.json'

    arg_it = iter(argv[1:])
    for arg in arg_it:
        if arg[0] == '-':
            if arg == '-d':
                out_dir = PurePath(next(arg_it))
            elif arg == '-f':
                font = next(arg_it)
            elif arg == '-s':
                sampler = next(arg_it)
            else:
                print("Unknown option {}.".format(arg), file=stderr)
                usage()
        elif in_filename is None:
            in_filename = PurePath(arg)
        elif out_filename is None:
            out_filename = PurePath(arg)
        else:
            usage()

    if in_filename is None:
        usage()

    if out_filename is None:
        out_filename = in_filename.with_suffix('.ldl')
        if out_dir:
            out_filename = out_dir / out_filename

    if out_dir is None:
        out_dir = out_filename.parent

    loader = Loader(in_filename)
    tilemap = loader.load_map(in_filename)

    with open(str(out_filename), 'w') as out_file:
        out = LdlWriter(out_file)

        sampler = Sampler(sampler)

        converter = TiledMapConverter(tilemap, out_filename, out_dir, sampler,
                                      font, loader)
        tile_map_as_dict = converter.convert()
        out.write(tile_map_as_dict)
Example #22
def feature_bed(gtf_file, features=['cds', 'exon']):
    gtf_df = gtfparse.read_gtf(gtf_file)
    gtf_df.loc[:, 'feature_lower'] = gtf_df.feature.map(str.lower)
    gtf_df.loc[:, 'bed_start'] = gtf_df.start - 1
    gtf_file = PurePath(gtf_file)
    for feature in features:
        feature_gtf_df = gtf_df[gtf_df.feature_lower == feature]
        feature_bed_file = gtf_file.with_suffix('.{}.bed'.format(feature))
        feature_gtf_df.to_csv(str(feature_bed_file),
                              index=False,
                              columns=['seqname', 'bed_start', 'end'],
                              sep='\t',
                              header=False)
        sort_bed_file = gtf_file.with_suffix('.{}.sort.bed'.format(feature))
        sort_bed = 'sort -k1,1 -k2,2n {bed}'.format(bed=feature_bed_file)
        sort_bed_response = envoy.run(sort_bed)
        with open(str(sort_bed_file), 'w') as bed_inf:
            bed_inf.write(sort_bed_response.std_out)
        merge_bed_file = gtf_file.with_suffix('.{}.merged.bed'.format(feature))
        merge_bed = 'bedtools merge -i {sorted_bed}'.format(
            sorted_bed=sort_bed_file)
        merge_bed_response = envoy.run(merge_bed)
        with open(str(merge_bed_file), 'w') as m_bed_inf:
            m_bed_inf.write(merge_bed_response.std_out)
Example #23
def main(path_to_cache_definitions: Path):
    """Main function for extracting OSRS model ID numbers that map to names.

    :param path_to_cache_definitions: File location of compressed cache definition files.
    """
    all_models = {}

    # Loop the three cache dump files (items, npcs, objects)
    for cache_file in osrs_cache_constants.CACHE_DUMP_FILES:
        # Set the path to the compressed JSON files
        compressed_json_file = Path(path_to_cache_definitions / cache_file)

        # Set the current cache dump type
        cache_type = PurePath(cache_file)
        cache_type = str(cache_type.with_suffix(""))

        # Load and decompress the compressed definition file
        definitions = osrs_cache_data.CacheDefinitionFiles(
            compressed_json_file)
        definitions.decompress_cache_file()

        # Loop all entries in the decompressed and loaded definition file
        for id_number in definitions:
            # Fetch the decompressed JSON data
            json_data = definitions[id_number]

            # Name check (it is of no use if it is empty/null, so exclude)
            if json_data["name"] in SKIP_EMPTY_NAMES:
                continue

            # Process cache definition based on type (item, npc, object)
            # Items: have a single integer model ID
            # NPCs: have a list of integer model IDs
            # Objects: have a list of integer model IDs
            if cache_type == "items":
                extracted_models = extract_model_ids_int(json_data)
            elif cache_type == "npcs":
                extracted_models = extract_model_ids_list(json_data)
            elif cache_type == "objects":
                extracted_models = extract_model_ids_list(json_data)

            # Add extracted models to all_models dictionary
            all_models.update(extracted_models)

    # Save all extracted models ID numbers to JSON file
    out_fi = Path(config.DOCS_PATH / "models-summary.json")
    with open(out_fi, "w") as f:
        json.dump(all_models, f, indent=4)
Example #24
def main(config_path):
    # Load config
    with open(config_path) as f:
        config = yaml.safe_load(f.read())

    options = config.get("options")
    replace_ext = options.get("ld_o_replace_extension", True)

    # Initialize segments
    all_segments = initialize_segments(options, config_path, config["segments"])

    for segment in all_segments:
        for subdir, path, obj_type, start in segment.get_ld_files():
            path = PurePath(subdir) / PurePath(path)
            path = path.with_suffix(".o" if replace_ext else path.suffix + ".o")

            print(path)
Example #25
class SqliteDatabase(Database):
    db_ext = '.db'

    def __init__(self, name, db_path, config_path=None):
        super().__init__(name, db_path, config_path)
        self.cursor = None
        self.connection = None
        self.complete_path = PurePath(path.join(self.path, self.name))
        if not self.name.endswith(SqliteDatabase.db_ext):
            self.complete_path = self.complete_path.with_suffix(
                SqliteDatabase.db_ext)

    def initialise(self):
        self.load()  # Connecting to non-existent db file creates a new db file
        self.cursor.execute("""CREATE TABLE tasks (
        id TEXT PRIMARY KEY,
        name TEXT,
        description TEXT,
        due DATETIME
        );""")

        self.save()
        self.close()
        self.dump_config()
        self.commit_config()

    def load(self):
        self.connection = sqlite3.connect(self.complete_path)
        self.cursor = self.connection.cursor()

    def save(self):
        self.connection.commit()

    def close(self):
        self.connection.close()
        self.connection = None
        self.cursor = None

    def dump_config(self):
        dump_config(self.to_dict(), self.config_path)

    def to_dict(self):
        return {"db_name": self.name, "db_path": self.path}

    def commit_config(self):
        pass
Example #26
    def download(self,
                 file: YaDiskFile,
                 local_destination: Path,
                 ov: bool = False) -> None:
        """
        Download a file from the remote to local_destination.
        """
        p = PurePath(file.id)
        if local_destination is None:
            dl_path = PurePath(p.name)
        else:
            dl_path = PurePath(local_destination, p.name)
        if file.type == "dir":
            dl_path = dl_path.with_suffix(".zip")
        dl_path = Path(dl_path)
        print(YadiskDLMessage(dl_path, file.type, file.id, ov).str_value())
        download_link = self._storage.get_download_link(file.id)
        dl_path.write_bytes(self._storage.download(download_link))
Example #27
def relative_uri(
    current: PurePath,
    target: PurePath,
    is_mapping: Optional[bool] = False,
    suffix: str = '',
) -> PurePath:
    result = PurePath(relpath(target, current))

    if target.name == 'index.html':
        target = target.parent

    if current == target:
        return PurePath('')

    if is_mapping:
        result = result / 'index'

    result = result.with_suffix(suffix)
    return result
Example #28
def publish_pdf_document(doc, page_format='a5'):
    input_file = PurePath(doc.filename)
    output_file = input_file.with_suffix(
        '.{page_format}.pdf'.format(page_format=page_format))

    if page_format == 'a5' or page_format == 'b6':
        parameters = [
            "--paper-size", page_format, "--pdf-page-margin-bottom", "36",
            "--pdf-page-margin-left", "24", "--pdf-page-margin-right", "24",
            "--pdf-page-margin-top", "24", "--pdf-page-numbers"
        ]
    else:
        raise Exception('Unsupported page format: {page_format}'.format(
            page_format=page_format))

    result = subprocess.run(
        [calibre_ebook_convert,
         str(input_file),
         str(output_file)] + parameters)

    return output_file
Example #29
def rnaseq_matrix_plot(matrix_file):
    matrix_file = PurePath(matrix_file)
    matrix_prefix = matrix_file.with_suffix('')
    sample_name = matrix_prefix.stem
    # plot reads distribution in genome region
    pie_plot_prefix = matrix_prefix.with_suffix('.genome_region')
    labels, values = genomic_origin_stats(matrix_file)
    pie_plot(labels, values, pie_plot_prefix, sample_name)
    # plot reads cov
    cov_df = pd.read_csv(matrix_file, sep='\t', skiprows=list(range(10)))
    cov_df.columns = ['position', 'coverage']
    ax = sns.lineplot(x="position",
                      y="coverage",
                      markers=True,
                      dashes=False,
                      data=cov_df)
    ax.set_title(sample_name)
    fig = ax.get_figure()
    outprefix = matrix_prefix.with_suffix('.gene_coverage')
    fig.savefig(f'{outprefix}.png')
    fig.savefig(f'{outprefix}.pdf')
Example #30
def main(path_to_cache_definitions: Path):
    """Main function for extracting OSRS model ID numbers.

    :param path_to_cache_definitions: File system location of compressed cache definition files.
    """
    models_dict = {}

    # Loop the three cache dump files (items, npcs, objects)
    for cache_file in osrs_cache_constants.CACHE_DUMP_FILES:
        # Set the path to the compressed JSON files
        compressed_json_file = Path(path_to_cache_definitions / cache_file)

        # Set the current cache dump type
        cache_type = PurePath(cache_file)
        cache_type = str(cache_type.with_suffix(""))

        # Load and decompress the compressed definition file
        definitions = osrs_cache_data.CacheDefinitionFiles(
            compressed_json_file)
        definitions.decompress_cache_file()

        # Loop all entries in the decompressed and loaded definition file
        for id_number in definitions:
            # Extract model ID numbers
            model_list = extract_model_ids(definitions[id_number], cache_type)

            # Loop the extracted model IDs
            if model_list:
                for model in model_list:
                    # Generate a unique key (e.g., items_10_2361, an item with ID of 10 and model ID of 2361)
                    key = f"{model['type']}_{model['type_id']}_{model['model_id']}"
                    # Add to the dict for outputting
                    models_dict[key] = model

    # Save all extracted models ID numbers to JSON file
    out_fi = Path(config.DOCS_PATH / "models-summary.json")
    with open(out_fi, "w") as f:
        json.dump(models_dict, f, indent=4)
Example #31
    def __call__(self, instance, filename):
        path = PurePath(
            instance._meta.app_label,
            instance._meta.model_name,
            str(uuid.uuid4())
        )
        return str(path.with_suffix(PurePath(filename).suffix))
Example #32
    def open(self, audio):
        self.pause()
        path = PurePath(audio)
        if path.suffix == ".wav":
            self._audio = wave.open(audio, "rb")
        else:
            self._audio = None
            raise ValueError("Unsupported format " + path.suffix)
        self._length = self._audio.getnframes() / self._audio.getframerate() * 1000  # duration in ms; getnframes() already counts whole frames across channels
        self._device.setchannels(self._audio.getnchannels())
        self._device.setrate(self._audio.getframerate())

        # 8bit is unsigned in wav files
        if self._audio.getsampwidth() == 1:
            self._device.setformat(alsaaudio.PCM_FORMAT_U8)
        # Otherwise we assume signed data, little endian
        elif self._audio.getsampwidth() == 2:
            self._device.setformat(alsaaudio.PCM_FORMAT_S16_LE)
        elif self._audio.getsampwidth() == 3:
            self._device.setformat(alsaaudio.PCM_FORMAT_S24_LE)
        elif self._audio.getsampwidth() == 4:
            self._device.setformat(alsaaudio.PCM_FORMAT_S32_LE)
        else:
            raise ValueError('Unsupported format')

        self._device.setperiodsize(self._CHUNK)

        try:
            with open(str(path.with_suffix(".srt"))) as subs:
                self._subs = SubRip.parse(subs.read())
        except Exception:
            self._subs = None
        self._emit_sub_changed_new_thread()