Example #1
1
def clean_template(template, removals=None, cfmt=cfmt):
    if removals is None:
        removals = ['temp', 'dx', 'dy']
    # this will essentially replace any nonexistent keys (field names) with ''
    d = cfmt.format(template, defaultdict(str))

    for r in removals:
        d = d.replace('[{}=]'.format(r), '')
    z = re.sub(r"__+", "_", d)
    z = z.replace('_.', '.')
    e = z.replace('[', '')
    e = e.replace(']', '')
    e = e.replace('(', '')
    e = e.replace(')', '')
    f = Path(e).as_posix()
    f = f.replace('/_', '/')
    return f
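
The external cfmt is not shown; as a sketch of the technique it relies on, a plain string.Formatter fed a defaultdict(str) blanks out any missing field names (the formatter below is an assumption, not the snippet's actual cfmt, which uses bracketed fields):

from collections import defaultdict
from string import Formatter

fmt = Formatter()  # hypothetical stand-in for the external cfmt
print(fmt.vformat("run_{temp}_{dx}_{keep}", (), defaultdict(str, keep="v1")))
# -> "run___v1": missing fields become '', then clean_template collapses the underscores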
Example #2
def update(generated_folder, destination_folder, global_conf, local_conf):
    """Update data from generated to final folder"""
    wrapper_files_or_dirs = merge_options(global_conf, local_conf, "wrapper_filesOrDirs") or []
    delete_files_or_dirs = merge_options(global_conf, local_conf, "delete_filesOrDirs") or []
    generated_relative_base_directory = local_conf.get('generated_relative_base_directory') or \
        global_conf.get('generated_relative_base_directory')

    client_generated_path = Path(generated_folder)
    if generated_relative_base_directory:
        client_generated_path = next(client_generated_path.glob(generated_relative_base_directory))

    for wrapper_file_or_dir in wrapper_files_or_dirs:
        for file_path in Path(destination_folder).glob(wrapper_file_or_dir):
            relative_file_path = file_path.relative_to(destination_folder)
            file_path_dest = client_generated_path.joinpath(str(relative_file_path))
            file_path.replace(file_path_dest)

    for delete_file_or_dir in delete_files_or_dirs:
        for file_path in client_generated_path.glob(delete_file_or_dir):
            if file_path.is_file():
                file_path.unlink()
            else:
                shutil.rmtree(str(file_path))

    shutil.rmtree(destination_folder)
    client_generated_path.replace(destination_folder)
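
Path.replace can rename a whole directory tree atomically, but it cannot overwrite a non-empty one, which is why the rmtree above must run first. A runnable sketch (all paths hypothetical):

import shutil, tempfile
from pathlib import Path

base = Path(tempfile.mkdtemp())
src, dst = base / "generated", base / "final"
src.mkdir()
dst.mkdir()
(dst / "stale.txt").write_text("old")

shutil.rmtree(dst)  # without this, src.replace(dst) raises OSError (directory not empty)
src.replace(dst)    # atomic rename of the generated tree into place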
Example #3
0
def validate_io_streams(input_file: pathlib.Path, output_file: pathlib.Path) -> bool:
    """
    Ensure I/O paths are valid and clean for the program
    :param input_file: Input file (JSON)
    :param output_file: Output file (SQLite)
    :return: True when it is safe to continue, otherwise False
    """
    if not input_file.is_file():
        LOGGER.fatal("Input file {} does not exist.".format(input_file))
        return False

    output_file.parent.mkdir(exist_ok=True)
    if output_file.is_file():
        LOGGER.warning("Output file {} exists already, moving it.".format(output_file))
        output_file.replace(output_file.parent.joinpath(output_file.name + ".old"))

    return True
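
A hypothetical call site for the validator above (file names are illustrative, and LOGGER is assumed to be configured by the surrounding module):

from pathlib import Path

if not validate_io_streams(Path("cards.json"), Path("db/cards.sqlite")):
    raise SystemExit(1)  # the input JSON does not exist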
Example #4
0
    def _extract_to_folder(db_archive_path, db_folder):
        with tarfile.open(db_archive_path) as tar_archive:
            for member in tar_archive.getmembers():
                # Skip the dirs to extract only file objects
                if not member.isreg():
                    continue

                tar_archive.extract(member, db_folder)
                # The files are extracted to a subfolder (with a date in the name).
                # We want to move these into the main folder above this.
                targetname = Path(member.name).name
                if targetname != member.name:
                    curr_file = Path(db_folder).joinpath(member.name)
                    extr_folder = Path(db_folder).joinpath(member.name).parent
                    curr_file.replace(Path(db_folder).joinpath(targetname))
                    # if the folder is empty, remove it
                    if not list(extr_folder.glob("*")):
                        extr_folder.rmdir()
Example #5
0
def _transform_mobile_v1_2_to_pprf_2_0(rec_dir: str):
    _generate_pprf_2_0_info_file(rec_dir)

    # rename info.csv file to info.mobile.csv
    info_csv = Path(rec_dir) / "info.csv"
    new_path = info_csv.with_name("info.mobile.csv")
    info_csv.replace(new_path)

    recording = PupilRecording(rec_dir)

    # patch world.intrinsics
    # NOTE: could still be worldless at this point
    update_utils._try_patch_world_instrinsics_file(
        rec_dir,
        recording.files().mobile().world().videos())

    _rename_mobile_files(recording)
    _rewrite_timestamps(recording)
Example #6
0
def _fmt(path):
    text = Path(path).read_text().strip()
    # Remove repositories tag, as it's a detail that's always the same
    text = re.sub(r" +<repositories>.*?</repositories>.*?\n  <o",
                  "  <o",
                  text,
                  flags=re.S | re.M)
    text = text.replace("\n\n", "\n")
    return text
Example #7
0
    def move_file(self, source, destination, overwrite=False):
        """Move file from source path to destination path,
        optionally overwriting the destination.

        :param source:      source file path for moving
        :param destination: path to move to
        :param overwrite:   replace destination file if it already exists
        """
        src = Path(source)
        dst = Path(destination)

        if not src.is_file():
            raise FileNotFoundError(f"Source {src} is not a file")
        if dst.exists() and not overwrite:
            raise FileExistsError(f"Destination {dst} already exists")

        src.replace(dst)
        self.logger.info("Moved file: %s -> %s", src, dst)
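
The explicit exists() check is what implements overwrite=False: Path.replace itself overwrites an existing destination silently. A small demonstration (file names hypothetical):

from pathlib import Path

a, b = Path("a.txt"), Path("b.txt")
a.write_text("one")
b.write_text("two")
a.replace(b)          # no error even though b exists
print(b.read_text())  # -> "one"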
Example #8
0
    def postdownloading_callback(self, input_kwargs, *args):
        r = args[-1]
        url = args[-2]
        if not r:
            print("Error: No response\n")
            return
        song_id = get_song_id(url)
        if r.error:
            print(f"Error: {r.error}", file=sys.stderr)
            if any(err in r.error for err in errors):
                print(f"Error: Beatmap not found: {url}\n", file=sys.stderr)
            if any(rerr in r.error for rerr in retry_errors):
                print(f"{url}\nAdded retry after queue end\n")
                if input_kwargs and "proxies" in input_kwargs:
                    proxy = re.search(r"//([^/]*)/",
                                      input_kwargs["proxies"]["http"]).group(1)
                    if proxy in self._proxy.proxies:
                        self._proxy.proxies.remove(proxy)
                    if len(self._proxy.proxies) < 2:
                        print("No valid proxies, exiting\n")
                        del self._proxy
                return self.retry_download(url)
            return

        if r.url == 'https://osu.ppy.sh/p/error':
            print("Error: Osu site internal error", file=sys.stderr)
            return Path(r.out_file).unlink()

        try:
            old_filename = Path(r.out_file).resolve(strict=True)
            name = r.info._headers[6][1].split('"')[1::2][0]
            name = re.sub(r'[^\w_.)( -]', '', name)
            name = old_filename.parent.joinpath(name)
            old_filename.replace(name)
        except Exception as e:
            print(f"Error: Failed to rename beatmap: {url}\n{e}",
                  file=sys.stderr)
        else:
            if self.auto_start:
                os.startfile(name)
            print(f"Successfully downloaded: {name.stem}")
            del old_filename, name
        del r, song_id
Example #9
0
def before_all(context):
    """Environment preparation before other cli tests are run.
    Installs kedro by running pip in the top level directory.
    """

    def call(cmd, verbose=False):
        res = run(cmd, env=context.env)
        if res.returncode or verbose:
            print(">", " ".join(cmd))
            print(res.stdout)
            print(res.stderr)
        assert res.returncode == 0

    # make a venv
    if "E2E_VENV" in os.environ:
        context.venv_dir = Path(os.environ["E2E_VENV"])
    else:
        context.venv_dir = Path(create_new_venv())

    # note the locations of some useful stuff
    # this is because exe resolution in subprocess doesn't respect a passed env
    if os.name == "posix":
        bin_dir = context.venv_dir / "bin"
        path_sep = ":"
    else:
        bin_dir = context.venv_dir / "Scripts"
        path_sep = ";"
    context.pip = str(bin_dir / "pip")
    context.python = str(bin_dir / "python")
    context.kedro = str(bin_dir / "kedro")

    # clone the environment, remove any condas and venvs and insert our venv
    context.env = os.environ.copy()
    path = context.env["PATH"].split(path_sep)
    path = [p for p in path if not (Path(p).parent / "pyvenv.cfg").is_file()]
    path = [p for p in path if not (Path(p).parent / "conda-meta").is_dir()]
    path = [str(bin_dir)] + path
    context.env["PATH"] = path_sep.join(path)

    # install this plugin by resolving the requirements using pip-compile
    # from pip-tools due to this bug in pip: https://github.com/pypa/pip/issues/988
    call([context.python, "-m", "pip", "install", "-U", "pip", "pip-tools"])
    pip_compile = str(bin_dir / "pip-compile")
    with tempfile.TemporaryDirectory() as tmpdirname:
        reqs = Path("requirements.txt").read_text()
        reqs = reqs.replace("kedro", "git+https://github.com/quantumblacklabs/kedro")
        compiled_reqs = Path(tmpdirname) / "requirements.txt"
        compiled_reqs.write_text(reqs)
        call([pip_compile, str(compiled_reqs)])
        call([context.pip, "install", "-r", str(compiled_reqs)])

    for wheel_path in glob.glob("dist/*.whl"):
        os.remove(wheel_path)
    call([context.python, "setup.py", "clean", "--all", "bdist_wheel"])

    call([context.pip, "install", "-U"] + glob.glob("dist/*.whl"))
Example #10
0
def compile_tex(tex_file, tex_compiler, output_format):
    """Compiles a tex_file into a .dvi or a .xdv or a .pdf

    Parameters
    ----------
    tex_file : :class:`str`
        File name of TeX file to be typeset.
    tex_compiler : :class:`str`
        String containing the compiler to be used, e.g. ``pdflatex`` or ``lualatex``
    output_format : :class:`str`
        String containing the output format generated by the compiler, e.g. ``.dvi`` or ``.pdf``

    Returns
    -------
    :class:`str`
        Path to generated output file in desired format (DVI, XDV or PDF).
    """
    result = tex_file.replace(".tex", output_format)
    result = Path(result).as_posix()
    tex_file = Path(tex_file).as_posix()
    tex_dir = Path(config.get_dir("tex_dir")).as_posix()
    if not os.path.exists(result):
        command = tex_compilation_command(tex_compiler, output_format,
                                          tex_file, tex_dir)
        exit_code = os.system(command)
        if exit_code != 0:
            log_file = tex_file.replace(".tex", ".log")
            if not Path(log_file).exists():
                raise RuntimeError(
                    f"{tex_compiler} failed but did not produce a log file. "
                    "Check your LaTeX installation.")
            with open(log_file, "r") as f:
                log = f.readlines()
                log_error_pos = [
                    ind for (ind, line) in enumerate(log)
                    if line.startswith("!")
                ]
                if log_error_pos:
                    logger.error(
                        f"LaTeX compilation error! {tex_compiler} reports:")
                    for lineno in log_error_pos:
                        # search for a line starting with "l." in the next
                        # few lines past the error; otherwise just print some lines.
                        printed_lines = 1
                        for _ in range(10):
                            if log[lineno + printed_lines].startswith("l."):
                                break
                            printed_lines += 1

                        for line in log[lineno:lineno + printed_lines + 1]:
                            logger.error(line)

            raise ValueError(f"{tex_compiler} error converting to"
                             f" {output_format[1:]}. See log output above or"
                             f" the log file: {log_file}")
    return result
Example #11
0
    def _make_pt_vrt(self, layer_name, out_dir):
        """
        Make a vrt file for station point ratios in summary CSV for a 
        specific layer or field name. Save to out_dir. Used for gdal_grid 
        interpolation commands of scatter point data.
        """
        if not Path(out_dir).is_dir():
            os.makedirs(out_dir)
        # old method
        #summary_file = Path(self.summary_csv_path).name
        
        point_data = Path(self.summary_csv_path).name.replace(
                '.csv', '_tmp.csv')

        # make tmp point data csv for given layer, drop missing values
        df = pd.read_csv(self.summary_csv_path)
        df = df[['STATION_LAT', 'STATION_LON', layer_name]]
        df = df[df[layer_name] != -999]
        tmp_out_path = str(Path(self.summary_csv_path).parent / point_data)
        df.to_csv(tmp_out_path, index=False)

        # if out_dir adjust summary CSV path by prepending parent dirs 
        tmp = copy(Path(out_dir))
        n_parent_dirs = 0
        while len(Path(tmp).parents) > 0:    
            if tmp.name == self.summary_csv_path.parent.name:
                break
            tmp = Path(tmp).parent
            n_parent_dirs+=1
        
        path_to_data = str(
            Path(
                '..{}'.format(os.sep)*n_parent_dirs
            ).joinpath(point_data)
        )
        
        out_file = '{}.vrt'.format(layer_name) # keep it simple just layer name

        # VRT format for reading CSV point data
        root = ET.Element('OGRVRTDataSource')
        OGRVRTLayer = ET.SubElement(root, 'OGRVRTLayer', 
                                    name=point_data.replace('.csv', ''))
        # set all fields, SRS WGS84, point geom
        ET.SubElement(OGRVRTLayer, 'SrcDataSource').text = path_to_data
        ET.SubElement(OGRVRTLayer, 'LayerSRS').text = 'epsg:4326'
        ET.SubElement(OGRVRTLayer, 'GeometryType').text = 'wkbPoint'
        ET.SubElement(OGRVRTLayer, 'GeometryField', encoding='PointFromColumns',
                     x='STATION_LON', y='STATION_LAT', z=layer_name)
        
        tree = ET.ElementTree(root)
        # indent xml, save to out_dir
        out_xml_str = _prettify(root)
        
        out_path = os.path.join(out_dir, out_file)
        with open(out_path, 'w') as outf:
            outf.write(out_xml_str)
Example #12
0
    def TempDirectory(self, iTempDirectory=None, oError=None):
        if iTempDirectory is None:
            return Path(self.mTempDirectory)

        previous_value = Path(self.mTempDirectory)

        oError['id'] = 0
        oError['message'] = ''

        if not iTempDirectory:
            if sys.platform.startswith('cygwin'):
                # Not a good idea to create/remove/create/remove/... many files on a USB key
                current_path = Path('.').resolve()
                current_path = str(current_path)
                # r'...' = raw string
                # >>> "\1"
                # '\x01'
                # >>> r"\1"
                # '\\1'
                # >>>
                current_path = re.sub(
                    r'/cygdrive/([a-z])', r'\1:',
                    current_path).upper()  # '/cygdrive/c' -> 'C:'
                current_path = current_path.replace('/', '\\')
                self.mTempDirectory = current_path + '\\tmp-stocks'
                # oError['id'] = 110
                # oError['message'] = 'custom tmp folder must be set for cygwin'
                # return previous_value
            elif sys.platform.startswith('linux'):
                self.mTempDirectory = tempfile.gettempdir() + '/tmp-stocks'
            else:
                oError['id'] = 100
                oError['message'] = 'platform is not managed'
                return previous_value
        else:
            # if os.path.exists( os.path.normpath( iTempDirectory ) ) and os.listdir( iTempDirectory ):
            # oError['id'] = 200
            # oError['message'] = 'custom tmp folder already exists and not empty'
            # return previous_value

            parent_dir = os.path.dirname(os.path.normpath(iTempDirectory))
            if not os.path.exists(parent_dir):
                oError['id'] = 210
                oError[
                    'message'] = 'parent of custom tmp folder doesn\'t exist'
                return previous_value

            if sys.platform.startswith('cygwin'):
                iTempDirectory = re.sub(
                    r'/cygdrive/([a-z])', r'\1:',
                    iTempDirectory)  # '/cygdrive/c' -> 'c:'
                iTempDirectory = iTempDirectory.replace('/', '\\')

            self.mTempDirectory = iTempDirectory

        return previous_value
Example #13
0
def main(args):
    """Main script"""
    pdb_parser = PDBParser()

    pdb_name = Path(args.pdb).stem
    # deal with FoldX repaired PDBs
    if pdb_name.endswith('_Repair'):
        pdb_name = pdb_name.replace('_Repair', '')

    structure = pdb_parser.get_structure(pdb_name, args.pdb)

    sections = import_sections(args.yaml, pdb_name)

    variants = []
    if sections is not None:
        for section in sections:
            filter_region = 'region' in section
            for residue in structure[0][section['chain']]:
                if not residue.id[0] == ' ':
                    continue  # Filter HETATMs

                position = int(residue.id[1])
                amino_acid = seq1(residue.get_resname())

                if amino_acid not in AA_ALPHABET:
                    # Filter non-standard AAs, required when processing
                    # foldx repaired PDBs as they turn HETATMs to regular ATOMs
                    # for regular proteins
                    continue

                if (filter_region and (position > section['region'][1]
                                       or position < section['region'][0])):
                    continue

                variants.extend([
                    f"{amino_acid}{section['chain']}{position}{x}"
                    for x in AA_ALPHABET if x != amino_acid
                ])
    else:
        for chain in structure[0]:
            for residue in chain:
                if not residue.id[0] == ' ':
                    continue  # Filter HETATMs

                position = int(residue.id[1])
                amino_acid = seq1(residue.get_resname())

                if amino_acid not in AA_ALPHABET:
                    continue

                variants.extend([
                    f"{amino_acid}{chain.id}{position}{x}" for x in AA_ALPHABET
                    if not x == amino_acid
                ])

    print(*variants, sep=';\n', end=';\n', file=sys.stdout)
Example #14
0
def _transform_invisible_v1_0_to_pprf_2_1(rec_dir: str):
    _generate_pprf_2_1_info_file(rec_dir)

    # rename info.json file to info.invisible.json
    info_json = Path(rec_dir) / "info.json"
    new_path = info_json.with_name("info.invisible.json")
    info_json.replace(new_path)

    recording = PupilRecording(rec_dir)

    # patch world.intrinsics
    # NOTE: could still be worldless at this point
    update_utils._try_patch_world_instrinsics_file(
        rec_dir,
        recording.files().pi().world().videos())

    _rename_pi_files(recording)
    _rewrite_timestamps(recording)
    _convert_gaze(recording)
Example #15
0
    def Sync(self):
        Notify("Verification Started")
        diff = self.Diff()
        if diff:
            for info in diff:
                path = Path(info["filename"])
                url = info["raw_url"]
                url = url.replace("%20", " ")  #weird git space bug in url
                if path.name == "dawn.exe":
                    continue

                parent = path.parent
                parent.mkdir(parents=True, exist_ok=True)

                status = info["status"]
                try:
                    if status == "added" or status == "modified":
                        temp = Path(wget.download(url))
                        temp.replace(path)
                    elif status == "renamed":
                        previous = Path(info["previous_filename"])
                        if previous.is_file():
                            previous.rename(path)
                    elif status == "removed":
                        if path.is_file():
                            path.unlink()
                        if parent.is_dir():
                            empty = len(list(os.scandir(parent))) == 0
                            if empty:
                                parent.rmdir()

                except Exception as e:
                    print("ERROR:", status, path)
                    Notify(traceback.format_exc())
                    sys.exit()

                print(status, path)

        self.Config.Decode()
        self.Config["commit"] = self.New
        self.Config["last"] = str(datetime.datetime.now())
        Notify("Verification Complete")
        return
Example #16
0
def main(argc, argv):
    logs = glob("*-*-*.log")

    for f in logs:
        # Strip IPs
        with open(f, "r+") as fp:
            out = ""
            for l in fp:
                out += re.sub(r"(Quits|Joins|Parts): (\S+?) \(.*?@.*?\)", r"\1: \2", l)
            fp.seek(0)
            fp.truncate()
            fp.write(out)

        old_path = Path(f)
        new_path = Path("/".join(old_path.stem.split("-")) + ".log")
        print(f"Moving {old_path} to {new_path}")

        new_path.parent.mkdir(parents=True, exist_ok=True)
        old_path.replace(new_path)
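
What the IP-stripping substitution does to one line, for illustration (the log line is made up):

import re

line = "Quits: alice (user@203.0.113.7) bye"
print(re.sub(r"(Quits|Joins|Parts): (\S+?) \(.*?@.*?\)", r"\1: \2", line))
# -> "Quits: alice bye"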
Example #17
0
def test_postmortem():
    if sys.version_info < (3, 8):
        return  # postmortem example uses walrus operator
    with cd(tests_dir):
        expected = Path('postmortem.json').read_text()
        user_home_dir = str(to_path('~'))
        expected = expected.replace('~', user_home_dir)

        pm = Run('./postmortem', modes='sOEW')
        assert pm.stdout.strip() == expected.strip()
Example #18
0
    def get_text(self, encoding) -> str:
        """Gets file text based on file type."""
        text = ""
        if self.path.endswith(".txt"):
            text = Path(self.path).read_text(encoding=encoding)
        elif self.path.endswith(".docx"):
            text = docx2txt.process(self.path)
        else:
            raise Exception("File type must be .txt or .docx")
        return text.replace("\t", "")
Example #19
0
def write_file(
    serializer: ser_interface.SerializationDialect,
    collection: MagicCollection,
    path: Path,
) -> None:
    """Write print counts to a file, backing up existing target files."""
    if not path.exists():
        print(f"Writing collection to file: {path}")
        serializer.write(path, collection)
    else:
        backup_path = get_backup_path(path)
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir) / path.name
            print("Writing to temporary file.")
            serializer.write(temp_path, collection)
            print(f"Backing up existing file to: {backup_path}")
            path.replace(backup_path)
            print(f"Writing collection: {path}")
            temp_path.replace(path)
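
The same write-to-temp-then-replace idiom, reduced to a minimal helper; as a sketch it writes the temp file beside the target (an assumption that keeps the final rename on one filesystem, unlike the TemporaryDirectory above):

import tempfile
from pathlib import Path

def atomic_write_text(path: Path, text: str) -> None:
    with tempfile.NamedTemporaryFile("w", dir=path.parent, delete=False) as f:
        f.write(text)
        tmp = Path(f.name)
    tmp.replace(path)  # readers see the old file or the new one, never a partial write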
Example #20
0
def build_script():
    to_encode = \
        list(Path('molecule').glob('*.py')) + \
        list(Path('configs').glob('*.json')) + \
        [Path('setup.py')]
    file_data = {str(path): encode_file(path) for path in to_encode}
    template = Path('script_template.py').read_text('utf8')
    Path('build/script.py').write_text(
        template.replace('{file_data}', str(file_data)),
        encoding='utf8')
Example #21
def seq_read_fasta(filename):
    # read the text
    content = Path(filename).read_text()
    # find the end of the header line (the first line break)
    space = content.find("\n")
    # the sequence starts on the second line
    content = content[space + 1:]
    # remove the line breaks to get one continuous sequence
    content = content.replace("\n", "")
    return content
Example #22
0
def main():
    template = Path('service_worker.tpl.js').read_text(encoding='utf-8')
    cacheURLs = {str(p).replace('\\', '/') for p in Path('.').glob('**/*.*') if allow(p)}
    path = Path('.')
    for p in path.glob('**/*.html'):
        html = p.read_text(encoding='utf-8')
        cacheURLs |= set(linkiter(html))

    template = template.replace('$cacheURLs', json.dumps(sorted(cacheURLs), indent=4))
    Path('service_worker.js').write_text(template, encoding='utf-8')
Example #23
0
def move_file():
    new_file = Path("xxx.txt")
    print(f"The new file is {new_file}")
    new_file.touch()
    newest_file = "data" / new_file
    if newest_file.exists():
        print(f"Warning! This file '{newest_file}' already exists.")
    else:
        newest_file = new_file.replace("data/xxx.txt")
        print(f"File moved from '{new_file}' to '{newest_file}'.")
Example #24
0
def build_script(project_path,script_pyfile):
    # to_encode = list(Path(project_path).glob('*.py')) + [Path('setup.py')]
    to_encode = list(Path(project_path).glob('*.py'))
    to_encode1 = [Path('setup.py')]
    # NOTE: 46 strips what appears to be a machine-specific absolute-path prefix
    file_data = {str(path)[46:]: encode_file(path) for path in to_encode}
    file_data.update({str(to_encode1[0]): encode_file(to_encode1[0])})
    template = Path(script_pyfile).read_text('utf8')
    Path('build/script.py').write_text(
        template.replace('{file_data}', str(file_data)),
        encoding='utf8')
Example #25
0
def resolve_new_types():
    data = [
        ("user_types", "source", "Source"),
        ("assess_costs", "taxon", "TaxonName"),
        ("preprocess_source", "label_name", "LabelName"),
        ("derived_labels_db", "query", "Query"),
        ("filter_programs", "operation", "Operation"),
        ("map_taxonomy", "label_pattern", "LabelPattern"),
    ]
    result = {}
    for (filename, trigger, type_name) in data:
        source = Path(f"docs/{filename}.html").read_text()
        match = regex.search(
            fr"{trigger}(?:</span> )?: (<function NewType\.<locals>\.new_type at 0x\w+?>)",
            source)
        result[type_name] = match[1]
        print(f"{match[1]} -> {type_name}")
    for path in Path("docs/").rglob("*.html"):
        source = path.read_text()
        initial_length = len(source)
        for (type_name, bad_string) in result.items():
            source = source.replace(bad_string, type_name)
        if len(source) < initial_length:
            path.write_text(source)
    for path in Path("docs/").rglob("*.html"):
        if path.name == "index.html":
            continue
        source = path.read_text()
        source = regex.sub(
            r'<a title="paroxython\.user_types\.(\w+?)" href="user_types\.html#paroxython\.user_types\.\1">\1</a>',
            r"\1",
            source,
        )
        source = source.replace("paroxython.user_types.", "")
        source = source.replace("typing_extensions.", "")
        source = source.replace("_regex.Pattern object", "regex")
        source = source.replace(PATH, ".")
        path.write_text(source)
    path = Path("docs/index.html")
    source = path.read_text()
    source = regex.sub(r"(?m)^.+paroxython\.user_types.+\n", "", source)
    path.write_text(source)
    Path("docs/user_types.html").unlink()
Example #26
0
def _download_resource_file(
    file_url, short_name, directory, verbose=False, sha256=None
):
    """
    Download resource file from PROJ url
    """
    if verbose:
        print(f"Downloading: {file_url}")
    tmp_path = Path(directory, f"{short_name}.part")
    try:
        urlretrieve(file_url, tmp_path)
        if sha256 is not None and sha256 != _sha256sum(tmp_path):
            raise RuntimeError(f"SHA256 mismatch: {short_name}")
        tmp_path.replace(Path(directory, short_name))
    finally:
        try:
            os.remove(tmp_path)
        except FileNotFoundError:
            pass
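
The same sentinel-suffix pattern in miniature, so an interrupted download can never leave a partial file under the real name (URL and names hypothetical):

from pathlib import Path
from urllib.request import urlretrieve

def fetch(url: str, dest: Path) -> None:
    part = dest.with_name(dest.name + ".part")
    urlretrieve(url, part)  # download under the sentinel name first
    part.replace(dest)      # publish only once the file is complete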
Example #27
0
def get_article_destination_filepath(article_source_filepath):
    base_filename, _ = Path(article_source_filepath).name.split('.', 1)
    article_output_filename = '{}.{}'.format(
        base_filename.replace(' ', ''),
        'html',
    )
    return os.path.join(
        os.path.dirname(article_source_filepath),
        article_output_filename,
    )
Example #28
0
File: program.py Project: rec/gitz
class _Program:
    ALLOW_NO_RUN = True

    def __init__(self):
        self.code = -1
        self.executable = Path(sys.argv[0]).name
        self.argv = sys.argv[1:]
        if '--help' in self.argv:
            self.argv[self.argv.index('--help')] = '-h'
        self.called = collections.Counter()

    def start(self, context=None):
        if context is None:
            context = vars(sys.modules['__main__'])
        self.args, self.log = parser.parse(self, **context)
        no_run = self.ALLOW_NO_RUN and self.args.no_run
        runner.RUN.start(self.log, no_run)

        exe = self.executable.replace('-', '_')
        main = context.get(exe) or context.get('main')
        if not main:
            self.exit('No method named', exe, 'or main in', self.executable)

        try:
            main()

        except Exception as e:
            for line in getattr(e, '_runner_output', []):
                self.log.error(*line)

            self.log.verbose(traceback.format_exc(), file=sys.stderr)
            self.exit('%s: %s' % (e.__class__.__name__, e))

    def exit(self, *messages):
        if messages:
            self.error(*messages)
        sys.exit(self.code)

    def error(self, *messages):
        self._error(messages, 'error')

    def error_if(self, errors, message, singular='branch', plural='branches'):
        if errors:
            s = singular if len(errors) == 1 else plural
            self._error([message, s, ', '.join(errors)], 'error')
            return True

    def message(self, *messages):
        self.log.message(*messages)

    def _error(self, messages, category):
        caption = self.executable + ':'
        self.called[category] += 1
        caption = category.upper() + ': ' + caption
        self.log.error(caption, *messages, file=sys.stderr)
Example #29
0
    def backup(self, out_dir: Union[Path, str], progress: Signal = None):
        """
        Create a full untouched (but decrypted) ISO backup of a DVD with all
        metadata intact.

        Parameters:
            out_dir: Directory to store the backup.
            progress: Signal to emit progress updates to.

        Raises:
            SlipstreamNoKeysObtained if no CSS keys were obtained when needed.
            SlipstreamReadError on unexpected read errors.
        """
        self.log.info("Starting DVD backup for %s" % self.device)

        fn = Path(out_dir) / ("%s.ISO.!ss" % self.cdlib.pvd.volume_identifier.replace(b"\x00", b"").strip().decode())
        first_lba = 0  # lba values are 0-indexed
        current_lba = first_lba
        last_lba = self.cdlib.pvd.space_size - 1
        disc_size = self.cdlib.pvd.log_block_size * self.cdlib.pvd.space_size

        self.log.debug(
            f"Reading sectors {first_lba:,} to {last_lba:,} with sector size {self.cdlib.pvd.log_block_size:,} B.\n"
            f"Length: {last_lba + 1:,} sectors, {disc_size:,} bytes.\n"
            f'Saving to "{fn.with_suffix("")}"...'
        )

        if self.dvdcss.is_scrambled():
            self.log.debug("DVD is scrambled. Checking if all CSS keys can be cracked. This might take a while.")
            self.vob_lba_offsets = self.get_vob_lbas(crack_keys=True)
            if not self.vob_lba_offsets:
                raise SlipstreamNoKeysObtained("No CSS title keys were returned, unable to decrypt.")
        else:
            self.log.debug("DVD isn't scrambled. CSS title key cracking skipped.")

        f = fn.open("wb")
        t = tqdm(total=last_lba + 1, unit="sectors")

        while current_lba <= last_lba:
            data = self.read(current_lba, min(self.dvdcss.BLOCK_BUFFER, last_lba - current_lba + 1))
            f.write(data)
            read_sectors = len(data) // self.cdlib.pvd.log_block_size
            current_lba += read_sectors
            if progress:
                progress.emit((current_lba / last_lba) * 100)
            t.update(read_sectors)

        f.close()
        t.close()

        fn = fn.replace(fn.with_suffix(""))
        self.log.info(
            "Finished DVD Backup!\n"
            f"Read a total of {current_lba:,} sectors ({os.path.getsize(fn):,}) bytes.\n"
        )
Example #30
def main():
    """Make a jazz noise here"""
    args = get_args()
    input_arg = args.input
    output_arg = args.output
    lookup_arg = args.lookup
    lookup_list = []

    with open(lookup_arg, mode='r', encoding='utf-8-sig') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            # sample_list.append(row['experiment'])
            # print(row['label'],row['id'])
            entry = {}
            entry['Sample ID'] = row['Sample ID']
            entry['run'] = row['run']
            lookup_list.append(entry)

    # lookup mapping between PMO sample and run ids

    # Open JSON file
    infile = open(input_arg)
    # returns JSON object as a dictionary
    data = json.load(infile)

    # open outfile
    in_path = Path(input_arg).stem
    out_string = output_arg + in_path + '.tsv'
    record_id = in_path.replace("_", ":")

    # --------------------------------------------------
    # https://datagy.io/python-flatten-list-of-lists/
    # flat_list = list()
    def flatten_list(list_of_lists):
        for item in list_of_lists:
            if type(item) == list:
                flatten_list(item)
            else:
                flat_list.append(item)
        return flat_list

    # Write out tsv file with numeric attributes and quantities
    with open(out_string, 'w', newline='', encoding='utf-8') as csv_file:
        writer = csv.writer(csv_file, delimiter='\t')
        for d in data:
            for l in lookup_list:
                if d['sampleAccn'] == l['Sample ID']:
                    sample_run = l['run']
                    flat_list = list()
                    values = flatten_list(d['values'])
                    value = list(filter(None, values))[0]
                    row = [sample_run, record_id, value]
                    writer.writerow(row)
    # Close infile
    infile.close()
Example #31
0
def fast_reshard(
    inputs: List[Path],
    output: Path,
    tmp: Path = None,
    free_original: bool = False,
    rm_original: bool = False,
) -> Path:
    """Same as reshard but don't re-compress the output.

    This will lead to a bigger output file, especially if the shards are very small.
    """
    if tmp is None:
        tmp = _get_tmp(output)
    with open(tmp, "wb") as o:
        subprocess.run(["cat"] + [str(f) for f in inputs], stdout=o)

    tmp.replace(output)
    indexes_files = [get_index(i) for i in inputs]
    existing_indexes = sum(i.exists() for i in indexes_files)
    assert (existing_indexes == len(indexes_files)
            or existing_indexes == 0), "some indexes don't exist."
    if existing_indexes > 0:
        indexes = [np.load(idx) for idx in indexes_files]
        for i in range(len(indexes) - 1):
            indexes[i + 1] += indexes[i][-1]
        with open(str(output) + ".index", "wb") as o:
            np.save(o, np.concatenate(indexes))

    if not (free_original or rm_original):
        return output

    for _input in inputs:
        if rm_original:
            _input.unlink()
        elif free_original:
            # Overwrite the previous file.
            # This frees up disk space and allows doit to properly track the success.
            _input.write_text(f"Resharded into {output}")
        if get_index(_input).is_file():
            get_index(_input).unlink()

    return output
Example #32
0
def template_rewrite(template_path: Path, context: Dict[str, str]) -> None:
    with template_path.open("r") as fin:
        temp_path = Path(str(template_path) + ".temp")
        with temp_path.open("w") as fout:
            matching = False
            for line in fin:
                if matching:
                    if END_MARKER.search(line):
                        matching = False
                else:
                    m = START_MARKER.search(line)
                    if m:
                        matching = True
                        key = m.group(1)
                        fout.write(line)
                        fout.write(context[key])
                        continue
                if not matching:
                    fout.write(line)
        temp_path.replace(template_path)
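
START_MARKER and END_MARKER are module-level regexes that are not shown; one plausible shape for them, stated purely as an assumption to make the control flow readable:

import re

# Hypothetical markers: the start captures the context key that gets looked up,
# the end closes the generated region that template_rewrite skips over.
START_MARKER = re.compile(r"# BEGIN GENERATED: (\w+)")
END_MARKER = re.compile(r"# END GENERATED")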
Example #33
0
    async def _verify_exit_code(self, jobid):
        cmd = ("sacct", "-n", "-X", "-j", jobid, "-o", "JobID,State,ExitCode")
        _, stdout, _ = await read_and_display_async(*cmd, hide_display=True)
        if not stdout:
            raise RuntimeError("Job information not found")
        m = self._sacct_re.search(stdout)
        if int(m.group("exit_code")) != 0 or m.group("status") != "COMPLETED":
            if m.group("status") in ["RUNNING", "PENDING"]:
                return False
            # TODO: potential for requeuing
            # parsing the error message
            error_line = Path(self.error).read_text().split("\n")[-2]
            if "Exception" in error_line:
                error_message = error_line.replace("Exception: ", "")
            elif "Error" in error_line:
                error_message = error_line.replace("Error: ", "")
            else:
                error_message = "Job failed (unknown reason - TODO)"
            raise Exception(error_message)
        return True
Example #34
0
def clean_template(template, removals=None, cfmt=cfmt):
    if removals is None:
        removals = ["temp", "dx", "dy"]
    # this will essentially replace any nonexistent keys (field names) with ''
    d = cfmt.format(template, defaultdict(str))

    for r in removals:
        d = d.replace("[{}_]".format(r), "")
    z = re.sub(r"_+", "_", d)
    z = z.replace("_.", ".")
    z = re.sub(r"[\[\]\(\)\']", "", z)
    f = Path(z).as_posix()
    f = f.replace("/_", "/")
    return f
Example #35
0
def copy_viewer_html_scala(source, destination):
    debug('cp %s %s (modifying as we go)' % (source, destination))

    data = Path(source).read_bytes()

    # Nix DOCTYPE and comment
    data = re.sub(rb'^.*?-->', '', data)

    # Add Scala method signature
    data = b'@this(assets: AssetsFinder)\n@()' + data

    # Add <base>
    data = data.replace(b'<head>', b'<head><base href="/assets/pdfjs/web/x">')

    # Set up PDFJS paths
    data = data.replace(b'<script', b'<script>window.PDFJS = { workerSrc: "@assets.path("pdfjs/build/pdf.worker.js")" };</script><script', 1)
    data = data.replace(b'"../build/generic/build/pdf.js"', b'"@assets.path("pdfjs/build/pdf.js")"', 1)
    data = data.replace(b'"viewer.js"', b'"@assets.path("pdfjs/web/viewer.js")"', 1)

    # Add onload
    data = data.replace(b'</body>', b"""<script src="@assets.path("javascript-bundles/PdfViewer-show.js")"></script></body>""")

    Path(destination).write_bytes(data)
Example #36
0
def normalize_url(url):
    """Normalize ``url`` for underlying NT/Unix operating systems.

    The input ``url`` may look like the following:

        - C:\\Directory\\zzz.svg
        - file://C:\\Directory\\zzz.svg
        - zzz.svg

    The output ``url`` on NT systems would look like below:

        - file:///C:/Directory/zzz.svg

    """
    if url and os.name == 'nt' and not url.startswith('data:'):
        # Match input ``url`` like the following:
        #   - C:\\Directory\\zzz.svg
        #   - Blah.svg
        if not url.startswith('file:') and os.path.isabs(url):
            url = os.path.abspath(url)
            if '#' in url:
                url, part = url.rsplit('#', 1)
            else:
                part = None
            url = Path(url).resolve().as_uri()
            if part is not None:
                url = url + '#' + part

        # Match input ``url`` like the following:
        #   - file://C:\\Directory\\zzz.svg
        elif re.match(
                '^file://[a-z]:', url,
                re.IGNORECASE | re.MULTILINE | re.DOTALL):
            url = url.replace('//', '///')
            url = url.replace('\\', '/')

    return url
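
Worked examples of the normalization on an NT system, matching the docstring above (the second case exercises the file:// branch):

# normalize_url(r"C:\Directory\zzz.svg")         -> "file:///C:/Directory/zzz.svg"
# normalize_url("file://C:\\Directory\\zzz.svg") -> "file:///C:/Directory/zzz.svg"
# normalize_url("zzz.svg")                       -> "zzz.svg"  (relative paths pass through)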
Example #37
def copy_css(plugin_dirname, overview_dirname):
    plugin_path = 'assets/stylesheets/view.scss'
    overview_path = 'app/assets/stylesheets/overview-plugin-metadata-app.less'

    debug('cp %s/%s %s/%s (converting from scss to less)' % (plugin_dirname, plugin_path, overview_dirname, overview_path))

    data = Path(plugin_dirname + '/' + plugin_path).read_bytes()

    # Change variable references to start with '@', as in Less
    data = data.replace(b'$', b'@')

    # Wrap the whole thing
    data = (b'// DO NOT EDIT. This was auto-generated by auto/refresh-metadata-editor.py.\n\n'
            + data
            + b'\n\n// reminder: DO NOT EDIT'
           )

    Path(overview_dirname + '/' + overview_path).write_bytes(data)
Example #38
0
    def get_cover_page(self, size=800):
        """ Get or create a jpg version of the pdf frontpage """
        if self.pdf and not self.cover_page:
            filename = Path(self.pdf.name).with_suffix('.jpg').name
            try:
                cover_image = pdf_to_image(self.pdf.file, page=1, size=size)
            except Exception as e:
                logger.exception('Failed to create cover')
                msg = 'ERROR:\n{}\nnot found on disk'.format(self.pdf.name)
                cover_image = error_image(msg, (size * 70 / 100), size)
                filename = filename.replace('.jpg', '_not_found.jpg')

            blob = BytesIO()
            cover_image.save(blob)
            self.cover_page.save(
                filename, ContentFile(blob.getvalue()), save=True
            )
        return self.cover_page
Example #39
0
def html_fixes(my_file):
    '''
    Applies the various updates and changes I want done to the raw Gigatrees
    HTML.

    Assumes 'my_file' is in the CONTENT_FOLDER.
    Assumes 'my_file' is a string.
    Assumes 'my_file' is the full path
    '''
    with codecs.open(str(my_file), 'r', 'utf-8') as html_doc:
        my_html = html_doc.read()


    # change page title
    try:
        soup = BeautifulSoup(my_html, "lxml")
        title_tag = soup.html.head.title
    except AttributeError:
        soup = BeautifulSoup(my_html, "html5lib")
        title_tag = soup.html.head.title
    for tag in soup(id="gt-page-title", limit=1):
        title_tag.string.replace_with(tag.string.strip())
        tag.decompose()

    # dump all the meta tags in the head section
    for tag in soup("meta"):
        tag.decompose()

    ## fix links that point to php pages
    #for tag in soup("a", href=True):
    #    tag['href'] = tag['href'].replace('.php', '.html')

    ## remove wrapper lists (ul/li) to tables
    #for tag in soup("ul"):
    #   tag2 = tag.findParent('ul')
    #       if tag2:
    #           tag2.replace_with(tag2.contents)
    #           # replace 'li' tags with 'p'
    #           for tag3 in tag2("li"):
    #              tag3.name = 'p'

    # Remove links to CDN stuff I serve locally
    js_served_locally = (
        'jquery.min.js',
        'jquery-ui.min.js',  # old, so not re-added
        'bootstrap.min.js',
        'globalize.min.js',  # old, so not re-added
        'dx.chartjs.js',  # old, so not re-added
        'gigatrees-charts.js',
        'c3.min.js',
        'd3.min.js',
        'bootstrap-tooltip-handler.js',
        'jquery.mousewheel.min.js',  # used by FancyBox
        'jquery.fancybox.pack.js',
        'jquery.fancybox-buttons.js',
        'jquery.fancybox-media.js',
        'jquery.fancybox-thumbs.js',
        'fancybox-handler.js',
        'html5shiv.min.js',
        'respond.min.js',
    )
    js_served_from_page = (
        "var myImage='../assets/mapicon_u.png';",
        '$(document).ready(function () {\ndocument.getElementById("gt-version")"',
    )
    for tag in soup.find_all("script"):
        try:
            link = tag["src"]
            if link.endswith(js_served_locally):
                tag.decompose()
        except KeyError:
            # script tag without a src attribute
            pass
        
        try:
            if str(tag.contents[0]).lstrip().startswith(js_served_from_page):
                tag.decompose()
        except IndexError:
            pass

    # fix pdf paths?

    # built-in navbar
    try:
        soup.html.body.nav.decompose()
    except AttributeError:
        # doesn't exist
        pass
    # style section for gt-about-modal
    try:
        soup.html.body.style.decompose()
    except AttributeError:
        # doesn't exist
        pass

    # other stuff
    for tag in soup(id="gt-version", limit=1):
        tag.decompose()
    for tag in soup(id="site-title"):
        tag.decompose()
    for tag in soup(id="gt-about-modal", limit=1):
        tag.decompose()
    for tag in soup(id="gt-qscore", limit=1):
        tag.decompose()

    # replace spaces in references to sources with non-breaking spaces
    for tag in soup.find_all(class_="gsref"):
        for my_contents in list(tag.contents):
            try:
                # str.replace returns a new string; swap the result back into the tree
                fixed = my_contents.replace("[ ", "[&nbsp;").replace("[\n", "[&nbsp;\n").replace(" ]", "&nbsp;]")
                my_contents.replace_with(fixed)
            except (TypeError, AttributeError):
                pass

    # as of Gigatrees 4.4.0, the "age" column is always empty, so dump it
    #for tag in soup.find_all(class_="gage")

    # manually define the slug for the page (i.e. where the output file will be)
    my_slug = Path(my_file).relative_to(CONTENT_FOLDER)
    my_slug = str(my_slug)
    my_slug = my_slug[:-5] if my_slug.endswith('.html') else my_slug
    # swap direction of slashes
    my_slug = my_slug.replace('\\', '/')
    # drop initial 'I' from individuals and 'S' from sources
    ## Links throughout the output are hardcoded. So we could replace these with "{filename}" links, but I don't feel like doing that right now
    # my_slug = ('profiles\\' + my_slug[10:]) if my_slug.startswith('profiles\\I') else my_slug
    # my_slug = ('sources\\' + my_slug[9:]) if my_slug.startswith('sources\\S') else my_slug
    new_tag_3 = soup.new_tag("meta", content=my_slug)
    new_tag_3.attrs['name'] = 'slug'
    soup.html.head.append(new_tag_3)

    # Override page title
    if my_slug == "names/index":
        title_tag.string.replace_with("Surnames")

    # Add meta tags, used for the breadcrumbs in the link.
    # These are used by the Pelican engine and template engine.
    # They need to be in the <head> section, in the form:
    #
    # <head>
    #    <!-- other stuff... -->
    #    <meta name="at" content="Locations" />
    #    <meta name="at_link" content="places.html" />  <!-- this is added to SITEURL -->
    # </head>
    new_tags = False
    if my_slug.startswith("names/") and my_slug != "names/index":
        new_tag_1 = soup.new_tag("meta", content="Surnames")
        new_tag_2 = soup.new_tag("meta", content="names/index.html")
        new_tags = True
    elif my_slug.startswith("places/") and my_slug != "places/index":
        new_tag_1 = soup.new_tag("meta", content="Locations")
        new_tag_2 = soup.new_tag("meta", content="places/index.html")
        new_tags = True
    elif my_slug.startswith("sources/") and my_slug != "sources/index":
        new_tag_1 = soup.new_tag("meta", content="Sources")
        new_tag_2 = soup.new_tag("meta", content="sources/index.html")
        new_tags = True
        # not working??
    elif my_slug.startswith("timelines/") and my_slug != "timelines/index":
        new_tag_1 = soup.new_tag("meta", content="Timelines")
        new_tag_2 = soup.new_tag("meta", content="timelines/index.html")
        new_tags = True

    if new_tags:
        new_tag_1.attrs['name'] = 'at'
        new_tag_2.attrs['name'] = 'at_link'
        soup.html.head.append(new_tag_1)
        soup.html.head.append(new_tag_2)

    # prep images for image_process pelican plugin
    for tag in soup.find_all("img", class_="gt-image-photo"):
        if 'image-process-gt-photo' not in tag.attrs["class"]:
            tag.attrs["class"].append("image-process-gt-photo")

    # style map button
    for tag in soup.find_all("a", class_="btn"):
        if 'btn-info' not in tag.attrs["class"]:
            tag.attrs["class"].append("btn-info")

    # unwrap "Last Modified" from being in a list to being a paragraph
    for tag in soup.find_all("li", id="gt-modified"):
        tag.name = 'p'
        tag.parent.unwrap()  # remove outer <ul>

    # remove extra line breaks in main flow of document
    for tag in soup.find_all("br"):
        if tag.parent == soup.html.body:
            tag.decompose()

    # write fixed version of file to disk
    with codecs.open(str(my_file), 'w', 'utf-8') as html_doc:
        html_doc.write(soup.prettify(formatter="html"))
Example #40
0
         bc_sources = file_paths
     # Execute build
     build_cmake_project(build_path, target=bc, cmake_path=cmake_path, path_additions=path_additions)
     # Copy library to build directory
     output_file_path = os.path.join(bc_path, ol)
     output_file_path = os.path.join(output_file_path, binary_name)
     os.makedirs(os.path.dirname(output_file_path), exist_ok=True)
     shutil.copy(binary_path, output_file_path)
 # Extract headers
 headers_path = os.path.join(bc_path, "headers")
 for bc_source in bc_sources:
     # Filter path so it matches syntax of lib path
     bc_source = Path(bc_source).as_posix()
     # Drive letter capitalization is inconsistent, make it lower case so it matches all the time
     if bc_source.startswith("C:"):
         bc_source = bc_source.replace("C:", "c:", 1)
     if bc_source.endswith(".h"):
         # Clean path
         if compare_lib_path in bc_source:
             bc_source = bc_source.replace(compare_lib_path, "")
             # Remove dangling slash
             bc_source = bc_source[1:]
         # Copy to output
         source_file = os.path.join(lib_path, bc_source)
         output_file = os.path.join(headers_path, bc_source)
         os.makedirs(os.path.dirname(output_file), exist_ok=True)
         try:
             shutil.copy(source_file, output_file)
         except shutil.SameFileError:
             print("Error copying {} to {}".format(bc_source, output_file))
             raise
Example #41
0
    def setup_output(self):
        # Establish the path to the "output" directory.
        p = Path(str(Path(self.map_file).parents[0]) + "/output")

        # Check to see if the "output" directory actually exists.
        if not p.exists():
            # It does not exist so create it.
            p.mkdir()

        directory_path = str(Path(self.map_file).parents[0]) + "/output/" + str(Path(self.map_file).name) + \
            str(datetime.datetime.utcnow())

        # Create the directory for this run.
        p = Path(directory_path)

        if not p.exists():
            # It does not exist so create it.
            p.mkdir()

        self.output_path = directory_path

        # Create the general output file, "debug_info.output", inside the time-stamped run directory.
        file_string = directory_path + "/debug_info.output"
        p = Path(file_string)

        if p.exists():
            p.replace(file_string)
        else:
            p.touch()

        self.output_file = p.open(mode='w')

        # Do the same with the planner paths output.
        file_string = directory_path + "/paths.gnuplot"
        p = Path(file_string)

        if p.exists():
            p.replace(file_string)
        else:
            p.touch()

        self.planner_paths_file = p.open(mode='w+')

        # Do the same with the robot path output.
        file_string = directory_path + "/robot_path.gnuplot"
        p = Path(file_string)

        if p.exists():
            p.replace(file_string)
        else:
            p.touch()

        self.robot_path_file = p.open(mode='w+')

        # Do the same with the occupancy grid output.
        file_string = directory_path + "/occupancy.gnuplot"
        p = Path(file_string)

        if p.exists():
            p.replace(file_string)
        else:
            p.touch()

        self.occupancy_file = p.open(mode='w+')
Example #42
def update_generic(generated_folder, destination_folder):
    """Update data from generated to final folder.
       Generic version which just copies the files"""
    client_generated_path = Path(generated_folder)
    shutil.rmtree(destination_folder)
    client_generated_path.replace(destination_folder)
Example #43
0
class Dotfile(object):
    """A configuration file managed within a repository.

    :param name:   name of the symlink in the home directory (~/.vimrc)
    :param target: where the symlink should point to (~/Dotfiles/vimrc)
    """
    RELATIVE_SYMLINKS = True

    def __init__(self, name, target):
        # if not name.is_file() and not name.is_symlink():
        #     raise NotFound(name)
        self.name = Path(name)
        self.target = Path(target)

    def __str__(self):
        return str(self.name)

    def __repr__(self):
        return '<Dotfile %r>' % self.name

    def _ensure_dirs(self, debug):
        """Ensure the directories for both name and target are in place.

        This is needed for the 'add' and 'link' operations where the
        directory structure is expected to exist.
        """
        def ensure(dir, debug):
            if not dir.is_dir():
                if debug:
                    echo('MKDIR  %s' % dir)
                else:
                    dir.mkdir(parents=True)

        ensure(self.name.parent, debug)
        ensure(self.target.parent, debug)

    def _prune_dirs(self, debug):
        # TODO
        if debug:
            echo('PRUNE  <TODO>')

    def _link(self, debug, home):
        """Create a symlink from name to target, no error checking."""
        source = self.name
        target = self.target

        if self.name.is_symlink():
            source = self.target
            target = self.name.resolve()
        elif self.RELATIVE_SYMLINKS:
            target = os.path.relpath(target, source.parent)

        if debug:
            echo('LINK   %s -> %s' % (source, target))
        else:
            source.symlink_to(target)

    def _unlink(self, debug):
        """Remove a symlink in the home directory, no error checking."""
        if debug:
            echo('UNLINK %s' % self.name)
        else:
            self.name.unlink()

    def short_name(self, home):
        """A shorter, more readable name given a home directory."""
        return self.name.relative_to(home)

    def _is_present(self):
        """Is this dotfile present in the repository?"""
        return self.name.is_symlink() and (self.name.resolve() == self.target)

    def _same_contents(self):
        return (md5(self.name.read_bytes()).hexdigest() == \
                md5(self.target.read_bytes()).hexdigest())

    @property
    def state(self):
        """The current state of this dotfile."""
        if self.target.is_symlink():
            return 'external'

        if not self.name.exists():
            # no $HOME file or symlink
            return 'missing'

        if self.name.is_symlink():
            # name exists, is a link, but isn't a link to the target
            if not self.name.samefile(self.target):
                return 'conflict'
            return 'link'

        if not self._same_contents():
            # name exists, is a file, but differs from the target
            return 'conflict'

        return 'copy'

    def add(self, copy=False, debug=False, home=Path.home()):
        """Move a dotfile to its target and create a link.

        The link is either a symlink or a copy.
        """
        if copy:
            raise NotImplementedError()
        if self._is_present():
            raise IsSymlink(self.name)
        if self.target.exists():
            raise TargetExists(self.name)
        self._ensure_dirs(debug)
        if not self.name.is_symlink():
            if debug:
                echo('MOVE   %s -> %s' % (self.name, self.target))
            else:
                self.name.replace(self.target)
        self._link(debug, home)

    def remove(self, copy=UNUSED, debug=False):
        """Remove a dotfile and move target to its original location."""
        if not self.name.is_symlink():
            raise NotASymlink(self.name)
        if not self.target.is_file():
            raise TargetMissing(self.name)
        self._unlink(debug)
        if debug:
            echo('MOVE   %s -> %s' % (self.target, self.name))
        else:
            self.target.replace(self.name)

    def enable(self, copy=False, debug=False, home=Path.home()):
        """Create a symlink or copy from name to target."""
        if copy:
            raise NotImplementedError()
        if self.name.exists():
            raise Exists(self.name)
        if not self.target.exists():
            raise TargetMissing(self.name)
        self._ensure_dirs(debug)
        self._link(debug, home)

    def disable(self, copy=UNUSED, debug=False):
        """Remove a dotfile from name to target."""
        if not self.name.is_symlink():
            raise NotASymlink(self.name)
        if self.name.exists():
            if not self.target.exists():
                raise TargetMissing(self.name)
            if not self.name.samefile(self.target):
                raise RuntimeError
        self._unlink(debug)
        self._prune_dirs(debug)
Example #44
def rotate_logs(log_file):
    # Shift every log one slot upward: log.8 -> log.9, ..., log.1 -> log.2, log -> log.1
    for log_num in range(8, -1, -1):
        lf = Path(log_file + '.' + str(log_num)) if log_num else Path(log_file)
        if lf.exists():
            lf.replace(log_file + '.' + str(log_num + 1))
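
A quick check of the rotation order (file names hypothetical): iterating from the highest slot down ensures each destination slot is free before its source is moved in.

from pathlib import Path

Path("app.log").write_text("current")
Path("app.log.1").write_text("older")
rotate_logs("app.log")
print(Path("app.log.1").read_text())  # -> "current"
print(Path("app.log.2").read_text())  # -> "older"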
Example #45
0
    def do_upgrade_requirements(self, arg, timeout=(3*60)):
        """
        1. Convert via 'pip-compile' *.in requirements files to *.txt
        2. Append 'piprot' informations to *.txt requirements.

        Direct start with:
            $ pylucid_admin upgrade_requirements
        """
        requirement_filepath = self.path_helper.req_filepath # .../pylucid/requirements/developer_installation.txt

        assert requirement_filepath.is_file(), "File not found: '%s'" % requirement_filepath

        requirements_path = requirement_filepath.parent

        count = 0
        for requirement_in in requirements_path.glob("*.in"):
            count += 1
            requirement_in = Path(requirement_in).name

            if requirement_in.startswith("basic_"):
                continue

            requirement_out = requirement_in.replace(".in", ".txt")

            self.stdout.write("_"*79 + "\n")

            # We run pip-compile in ./requirements/ and add only the filenames as arguments
            # So pip-compile add no path to comments ;)

            return_code = VerboseSubprocess(
                "pip-compile", "--verbose", "--upgrade", "-o", requirement_out, requirement_in,
                cwd=str(requirements_path),
                timeout=timeout
            ).verbose_call(check=True)

            if not requirement_in.startswith("test_"):
                req_out = Path(requirements_path, requirement_out)
                with req_out.open("r") as f:
                    requirement_out_content = f.read()

                for version_prefix in VERSION_PREFIXES:
                    if version_prefix not in requirement_out_content:
                        raise RuntimeError("ERROR: %r not found!" % version_prefix)

            #
            # Skip piprot until https://github.com/sesh/piprot/issues/73 fixed
            #
            #
            # self.stdout.write("_"*79 + "\n")
            # output = [
            #     "\n#\n# list of out of date packages made with piprot:\n#\n"
            # ]
            # sp=VerboseSubprocess("piprot", "--outdated", requirement_out, cwd=str(requirements_path))
            # for line in sp.iter_output():
            #     print(line, end="", flush=True)
            #     output.append("# %s" % line)
            #
            # self.stdout.write("\nUpdate file %r\n" % requirement_out)
            # filepath = Path(requirements_path, requirement_out).resolve()
            # assert filepath.is_file(), "File not exists: %r" % filepath
            # with open(filepath, "a") as f:
            #     f.writelines(output)

        if count == 0:
            print("ERROR: no *.in files found in: '%s'" % requirements_path)
        else:
            print("processed %i *.in files" % count)
Example #46
0
    def do_upgrade_requirements(self, arg):
        """
        Upgrade requirements files with pip-compile and piprot.

        1. Convert via 'pip-compile' *.in requirements files to *.txt
        2. Append 'piprot' informations to *.txt requirements.
        """
        if self.package_name == "bootstrap_env":
            print("ERROR: command not allowed for 'bootstrap_env' !\n")
            print(
                "Because Bootstrap-env should be used as a tool in other projects"
                " and the projects himself should pin requirements ;) "
            )
            return

        requirements_path = self.path_helper.req_filepath

        print("compile *.in files in %s" % requirements_path)
        requirement_in_files = tuple(requirements_path.glob("*.in"))
        if not requirement_in_files:
            print("ERROR: No *.in files found!")
        else:
            print("%i *.in files found" % len(requirement_in_files))


        for requirement_in in requirement_in_files:
            requirement_in = Path(requirement_in).name

            if requirement_in.startswith("basic_"):
                continue

            requirement_out = requirement_in.replace(".in", ".txt")

            self.stdout.write("_"*79 + "\n")

            # We run pip-compile in ./requirements/ and add only the filenames as arguments
            # So pip-compile add no path to comments ;)

            return_code = VerboseSubprocess(
                "pip-compile", "--verbose", "--upgrade", "-o", requirement_out, requirement_in,
                cwd=requirements_path
            ).verbose_call(check=True)

            if not requirement_in.startswith("test_"):
                req_out = Path(requirements_path, requirement_out)
                with req_out.open("r") as f:
                    requirement_out_content = f.read()

            self.stdout.write("_"*79 + "\n")
            output = [
                "\n#\n# list of out of date packages made with piprot:\n#\n"
            ]

            s = VerboseSubprocess("piprot", "--outdated", requirement_out, cwd=requirements_path)
            for line in s.iter_output(check=True):
                print(line, flush=True)
                output.append("# %s" % line)

            self.stdout.write("\nUpdate file %r\n" % requirement_out)
            filepath = Path(requirements_path, requirement_out).resolve()
            assert filepath.is_file(), "File not exists: %r" % filepath
            with open(filepath, "a") as f:
                f.writelines(output)
Example #47
def update_node(generated_folder, destination_folder):
    """Update data from generated to final folder, Python version"""
    client_generated_path = Path(generated_folder)
    shutil.rmtree(destination_folder)
    client_generated_path.replace(destination_folder)
Example #48
0
with open(CODE_JS, 'r') as f:
	for line in f:
		if 0 == st:
			if line[:-1] == IN_MARK:
				st = 1
			codeHead += line
		elif 1 == st:
			if line[:-1] == IN_MARK:
				codeTail += line
				st = 2
		elif 2 == st:
			codeTail += line

# back up the CODE_JS file by renaming it to .bak
code_js.replace(CODE_JS + '.bak')

with open(CODE_JS, 'w') as c:
	print(codeHead, file=c)

	with open(DATA_IN, 'r') as f:
		print('var Lengths = {', file=c)

		for line in f:
			if len(line) == 0 or '#' == line[0]:
				continue

			fields = line.split('\t')

			print(country(fields[2][:2], fields[1][:-1], fields[0]), file=c)