Example #1
def rex_stacks(rex_dir, chisq, internal, thesis, png, n_test):
    """Generate plots from TRExFitter result."""
    import tdub.rex
    import tdub.config
    outdir = PosixPath(rex_dir) / "matplotlib"
    outdir.mkdir(exist_ok=True)
    tdub.config.init_meta_table()
    tdub.config.init_meta_logy()
    if thesis:
        tdub.config.IS_THESIS = True
    tdub.rex.plot_all_regions(
        rex_dir,
        outdir,
        stage="pre",
        show_chisq=chisq,
        n_test=n_test,
        internal=internal,
        thesis=thesis,
        save_png=png,
    )
    tdub.rex.plot_all_regions(
        rex_dir,
        outdir,
        stage="post",
        show_chisq=chisq,
        n_test=n_test,
        internal=internal,
        thesis=thesis,
        save_png=png,
    )
    return 0
Example #2
def write_gds(
    component: Component,
    gdspath: Optional[PosixPath] = None,
    gdsdir: PosixPath = tmp,
    unit: float = 1e-6,
    precision: float = 1e-9,
    auto_rename: bool = False,
) -> PosixPath:
    """Write component to GDS and returs gdspath

    Args:
        component: gdsfactory Component.
        gdspath: GDS file path to write to.
        unit: unit size for objects in the library.
        precision: for the dimensions of the objects in the library (m).
        auto_rename: If True, fixes any duplicate cell names.

    Returns:
        gdspath
    """

    gdsdir = pathlib.Path(gdsdir)
    gdspath = gdspath or gdsdir / (component.name + ".gds")
    gdspath = pathlib.Path(gdspath)
    gdsdir = gdspath.parent
    gdsdir.mkdir(exist_ok=True, parents=True)

    component.write_gds(
        str(gdspath), unit=unit, precision=precision, auto_rename=auto_rename,
    )
    component.path = gdspath
    return gdspath
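A minimal usage sketch for write_gds. The component below is a stand-in object, not the real gdsfactory API; it only mimics the two attributes the function touches (name and write_gds), so the default-path derivation can be seen in isolation:

import pathlib

class _FakeComponent:
    name = "demo"
    def write_gds(self, path, unit=1e-6, precision=1e-9, auto_rename=False):
        pathlib.Path(path).touch()  # stand-in for the real GDS writer

gdspath = write_gds(_FakeComponent(), gdsdir=pathlib.Path("/tmp/gds_demo"))
print(gdspath)  # /tmp/gds_demo/demo.gds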
Example #3
def get_sparameters_path(
    component: Component,
    dirpath: PosixPath = CONFIG["sp"],
    layer2material: Dict[Tuple[int, int], str] = layer2material_default,
    layer2nm: Dict[Tuple[int, int], int] = layer2nm_default,
    **kwargs,
) -> PosixPath:
    """Returns Sparameters filepath.

    Args:
        component: gdsfactory Component.
        dirpath: directory where the Sparameters file is written.
        layer2material: GDSlayer to material alias (see aliases in pp.sp.write)
        layer2nm: GDSlayer to thickness (nm)
    """
    dirpath = pathlib.Path(dirpath)
    dirpath = dirpath / component.function_name if component.function_name else dirpath
    dirpath.mkdir(exist_ok=True, parents=True)
    material2nm = {
        layer2material[layer]: layer2nm[layer]
        for layer in layer2nm.keys() if layer in component.get_layers()
    }
    suffix = dict2name(**material2nm)
    if kwargs:
        suffix += "_" + dict2name(**kwargs)
    return dirpath / f"{component.get_name_long()}_{suffix}.dat"
Example #4
def prepare_coco128(
    data_path: PosixPath,
    dirname: str = "coco128",
) -> None:
    """
    Prepare the coco128 dataset for testing.

    Args:
        data_path (PosixPath): root path of coco128 dataset.
        dirname (str): the directory name of coco128 dataset. Default: 'coco128'.
    """
    logger = logging.getLogger(__name__)

    if not data_path.is_dir():
        logger.info(f"Create a new directory: {data_path}")
        data_path.mkdir(parents=True, exist_ok=True)

    zip_path = data_path / "coco128.zip"
    coco128_url = "https://github.com/zhiqwang/yolov5-rt-stack/releases/download/v0.3.0/coco128.zip"
    if not zip_path.is_file():
        logger.info(f"Downloading coco128 datasets form {coco128_url}")
        torch.hub.download_url_to_file(coco128_url,
                                       zip_path,
                                       hash_prefix="a67d2887")

    coco128_path = data_path / dirname
    if not coco128_path.is_dir():
        logger.info(f"Unzipping dataset to {coco128_path}")
        with ZipFile(zip_path, "r") as zip_obj:
            zip_obj.extractall(data_path)
Example #5
def vcopy(source, destination):
    '''Copy files to the specified destination.'''

    path_source = PosixPath(source)
    path_destination = PosixPath(destination)

    # Creating the destination folder if it does not exist
    if not path_destination.exists() or not path_destination.is_dir():
        path_destination.mkdir(mode=0o755)
        chown(path_destination, user=0, group=0)

    # Collect the names of the non-hidden files to copy
    files = list()
    for entry in path_source.iterdir():
        if entry.is_file():
            filename = str(entry.relative_to(path_source))

            if filename[0] != '.':
                files.append(filename)

    # Copying files
    for file in files:
        path_file_source = path_source / file
        path_file_destination = path_destination / file

        if not path_file_destination.exists(
        ) or not path_file_destination.is_file():
            copy(path_file_source, path_file_destination)
            path_file_destination.chmod(0o755)
            chown(path_file_destination, user=0, group=0)
Example #6
def gen_contents(db, branch_name: str, component_name: str, dist_dir: str):
    repopath = branch_name + '/' + component_name
    basedir = PosixPath(dist_dir).joinpath(branch_name).joinpath(
        component_name)
    basedir.mkdir(0o755, parents=True, exist_ok=True)
    cur = db.cursor()
    cur.execute(
        "SELECT architecture FROM pv_repos "
        "WHERE architecture != 'all' AND path=%s", (repopath, ))
    allarch = [r[0] for r in cur]
    for arch in allarch:
        cur.execute(
            """
            SELECT df.path || '/' || df.name AS f, string_agg(DISTINCT (
              coalesce(dp.section || '/', '') || dp.package), ',') AS p
            FROM pv_packages dp
            INNER JOIN pv_package_files df USING (package, version, repo)
            INNER JOIN pv_repos pr ON pr.name=dp.repo
            WHERE pr.path=%s AND df.ftype='reg'
            AND pr.architecture IN (%s, 'all') AND dp.debtime IS NOT NULL
            GROUP BY df.path, df.name""", (repopath, arch))
        filename = str(basedir.joinpath('Contents-%s.gz' % arch))
        with gzip.open(filename, 'wb', 9) as f:
            for path, package in cur:
                f.write(
                    (path.ljust(55) + ' ' + package + '\n').encode('utf-8'))
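For reference, each line written above follows the Debian Contents index format: the file path left-justified to a 55-column field, a space, then the comma-separated section/package list. An illustrative output line (spacing approximate):

usr/bin/gzip                                            utils/gzip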
Example #7
def prepare_coco128(
    data_path: PosixPath,
    dirname: str = 'coco128',
) -> None:
    """
    Prepare the coco128 dataset for testing.

    Args:
        data_path (PosixPath): root path of coco128 dataset.
        dirname (str): the directory name of coco128 dataset. Default: 'coco128'.
    """
    if not data_path.is_dir():
        print(f'Create a new directory: {data_path}')
        data_path.mkdir(parents=True, exist_ok=True)

    zip_path = data_path / 'coco128.zip'
    coco128_url = 'https://github.com/zhiqwang/yolov5-rt-stack/releases/download/v0.3.0/coco128.zip'
    if not zip_path.is_file():
        print(f'Downloading coco128 dataset from {coco128_url}')
        torch.hub.download_url_to_file(coco128_url, zip_path, hash_prefix='a67d2887')

    coco128_path = data_path / dirname
    if not coco128_path.is_dir():
        print(f'Unzipping dataset to {coco128_path}')
        with ZipFile(zip_path, 'r') as zip_obj:
            zip_obj.extractall(data_path)
Example #8
def apply_all(
    datadir,
    arrname,
    outdir,
    workspace,
    fold_results=None,
    single_results=None,
    and_submit=False,
):
    """Generate BDT response arrays for all ROOT files in DATADIR."""
    import glob
    import shutil
    import pycondor

    if len(single_results) > 0 and len(fold_results) > 0:
        raise ValueError("Cannot use -f and -s together with apply-single")
    results_flags = None
    if len(fold_results) > 0:
        results_flags = "-f {}".format(" -f ".join(fold_results))
    elif len(single_results) > 0:
        results_flags = "-s {}".format(" -s ".join(single_results))
    else:
        raise ValueError("-f or -s required")

    ws = PosixPath(workspace).resolve()

    outpath = PosixPath(outdir).resolve()
    outpath.mkdir(exist_ok=True)

    datapath = PosixPath(datadir).resolve(strict=True)
    all_files = glob.glob(f"{datapath}/*.root")
    arglist = [f"{f} {arrname} {outpath} {results_flags}" for f in all_files]

    condor_dag = pycondor.Dagman(name="dag_train_scan", submit=str(ws / "sub"))
    condor_job_scan = pycondor.Job(
        name="job_apply_all",
        universe="vanilla",
        getenv=True,
        notification="Error",
        extra_lines=["notify_user = [email protected]"],
        executable=shutil.which("tdub"),
        submit=str(ws / "sub"),
        error=str(ws / "err"),
        output=str(ws / "out"),
        log=str(ws / "log"),
        dag=condor_dag,
    )
    for run in arglist:
        condor_job_scan.add_arg(f"apply single {run}")

    if and_submit:
        condor_dag.build_submit()
    else:
        condor_dag.build()
Example #9
File: file.manage.py  Project: apsong/JS3
    def run(self):
        if not self.user_authorize('system', 'manage'):
            return
        while True:
            task = self.task_q.get()
            logging.info(task)

            if task[0] == 'UPLOAD':
                p = PosixPath(task[1])
                if p.is_dir():
                    target_dir = os.path.join(task[2], p.name)
                    if self.do_mkdir(p.name, task[2]):
                        children = [d for d in p.iterdir()]
                        children.sort()
                        for f in children:
                            self.task_q.put(['UPLOAD', str(f), target_dir])
                else:
                    self.do_upload(str(p), task[2])
            elif task[0] == 'DOWNLOAD':
                R = self._stat(task[1])
                if R["fileType"] == 1:  #dir
                    p = PosixPath(task[2]).joinpath(R["name"] == "" and "ROOT"
                                                    or R["name"])
                    p.mkdir()
                    target_dir = os.path.join(task[2], p.name)
                    for r in self._stat2(task[1]):
                        self.task_q.put(['DOWNLOAD', r["path"], target_dir])
                else:
                    self.do_download(task[1], task[2])
            elif task[0] == 'MKDIR':
                self.do_mkdir(os.path.basename(task[1]),
                              os.path.dirname(task[1]))
            elif task[0] == 'STAT':
                self.do_stat(task[1])
            elif task[0] == 'LS':
                self.do_ls(task[1])
            elif task[0] == 'LS-R':
                self.do_ls_r(task[1])
            elif task[0] == 'RENAME':
                self.do_rename(task[1], task[2])
            elif task[0] == 'REMOVE':
                self.do_delete(task[1])
            elif task[0] == 'EXIT':
                self.close()
                logging.info("Exit.")
                self.task_q.task_done()
                return
            else:
                msg = "Invalid task: %s" % str(task)
                logging.error(msg)
                TestClient.ERRORS.append(msg)

            self.task_q.task_done()
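A hypothetical driver sketch for the worker loop above. The client object, queue contents, and remote paths are illustrative assumptions; each task is a plain list whose first element selects the operation:

client.task_q.put(['MKDIR', '/remote/backup'])
client.task_q.put(['UPLOAD', '/local/photos', '/remote/backup'])
client.task_q.put(['LS', '/remote/backup'])
client.task_q.put(['EXIT'])   # worker closes the connection and returns
client.task_q.join()          # block until every task is marked done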
Example #10
def path_settings(directory):
    if system() == 'Windows':
        appdata = WindowsPath(os.getenv('APPDATA'))
        dest = appdata.joinpath(directory)
    else:
        dest = PosixPath('/etc').joinpath(directory)

    dest.mkdir(parents=True, exist_ok=True)

    return dest
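Usage sketch: on a POSIX system this resolves to /etc/<directory> (created if missing, which typically requires root), and on Windows to %APPDATA%\<directory>:

settings_dir = path_settings('myapp')
(settings_dir / 'settings.ini').touch()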
Example #11
def misc_drdscomps(datadir, outdir, thesis):
    """Generate plots comparing DR and DS (with BDT cuts shown)."""
    import tdub.internal.drds as tdid

    curdir = PosixPath.cwd().resolve()
    if outdir is not None:
        outdir = PosixPath(outdir).resolve()
    else:
        outdir = curdir
    outdir.mkdir(exist_ok=True, parents=True)
    os.chdir(outdir)
    tdid.bdt_cut_plots(datadir, thesis=thesis)
    os.chdir(curdir)
Example #13
class KeyMaterialStore:
    """
    Context manager for safe access to key material store.
    """
    _old_umask: int
    _store_path: Path

    def __init__(self, store_path: str) -> None:
        self._store_path = PosixPath(store_path)

    def __enter__(self) -> 'KeyMaterialStore':
        self._old_umask = umask(0o077)
        self._store_path.mkdir(parents=True, exist_ok=True)
        if stat(self._store_path).st_mode & 0o077:
            umask(self._old_umask)
            raise Exception(
                f'Key store "{self._store_path}" has lax permissions')
        umask(0o177)
        return self

    def __exit__(
        self,
        exception_type: Optional[Type[BaseException]],
        exception_value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> None:
        umask(self._old_umask)

    def path(self) -> Path:
        """
        Returns the path to the key material store.

        :return: Path to the key material store
        """
        return self._store_path

    def resolve_path(self, relative_path: str, lax: bool = False) -> Path:
        """
        Resolves paths in the key material store, checking for lax permissions.

        :param relative_path: Relative path inside the key material store
        :param lax: Skip the permissions check if `True`
        :return: Resolved path
        """
        path = (PosixPath(self._store_path) / relative_path).resolve(strict=True)
        if not lax and stat(path).st_mode & 0o177:
            raise Exception(
                f'Key material "{relative_path}" has lax permissions.')
        return path
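A minimal usage sketch for the context manager above (the path is illustrative). On entry the store directory is created owner-only (0o700) and the umask is tightened to 0o177, so files written inside come out as 0o600 and pass the resolve_path permission check:

with KeyMaterialStore('/tmp/demo-keystore') as store:
    (store.path() / 'id_ed25519').write_bytes(b'...')  # created as 0o600
    key_path = store.resolve_path('id_ed25519')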
Example #14
def init_web_editor_config(download_dir: PosixPath, static_url: str):

    download_dir.mkdir(exist_ok=True, parents=True)

    WEB_EDITOR_DOWNLOAD = {
        "to": download_dir,
        "tinymce": {
            "url":
            "https://download.tiny.cloud/tinymce/community/tinymce_5.10.3.zip",
            "target": "tinymce/js/tinymce",
        },
        "monaco": {
            "url":
            "https://registry.npmjs.org/monaco-editor/-/monaco-editor-0.32.1.tgz",
            "target": "package/min",
        },
    }

    WEB_EDITOR_CONFIG = {
        "tinymce": {
            "js": [
                join(static_url, "tinymce/tinymce.min.js"),
                join(static_url,
                     "djangoeditorwidgets/tinymce/tinymce.config.js"),
                join(static_url,
                     "djangoeditorwidgets/tinymce/tinymce.init.js"),
            ],
            "css": {
                "all": [
                    join(static_url,
                         "djangoeditorwidgets/tinymce/tinymce.custom.css"),
                ]
            },
        },
        "monaco": {
            "js": [
                join(static_url, "monaco/vs/loader.js"),
                join(static_url,
                     "djangoeditorwidgets/monaco/monaco.config.js"),
            ],
            "css": {
                "all": [
                    join(static_url,
                         "djangoeditorwidgets/monaco/monaco.custom.css"),
                ]
            },
        },
    }

    return WEB_EDITOR_DOWNLOAD, WEB_EDITOR_CONFIG
Example #15
def make_output_dirs(root_dir: PosixPath) -> None:
    target_paths = [
        'timeline',
    ]

    try:
        root_dir.mkdir()
    except FileExistsError:
        pass

    for target in target_paths:
        try:
            target_path = root_dir / target
            target_path.mkdir()
        except FileExistsError:
            continue
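The try/except FileExistsError pattern above can be collapsed with mkdir's exist_ok flag. A near-equivalent sketch (exist_ok only tolerates an existing directory, while the except clauses above would also swallow a same-named regular file):

def make_output_dirs_alt(root_dir: PosixPath) -> None:
    root_dir.mkdir(exist_ok=True)
    for target in ['timeline']:
        (root_dir / target).mkdir(exist_ok=True)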
Example #16
def geotiff_to_png(geotiff_path):
    p = PosixPath(geotiff_path)
    png_dir = PosixPath("sys/tmp/png") / p.parent
    png_dir.mkdir(parents=True, exist_ok=True)
    png_filename = str(png_dir / p.name) + ".png"

    logger.debug(f"Converting GeoTIFF '{geotiff_path}' to PNG '{png_filename}")

    # TODO: this probably suppresses too much; we really just want to suppress rasterio's warning:
    # rasterio/__init__.py:229: NotGeoreferencedWarning: Dataset has no geotransform set. The identity matrix may be returned.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        with rasterio.open(geotiff_path, 'r') as geotiff:
            write_png(geotiff, png_filename)
    logger.debug(f"Conversion complete")

    return [png_filename, png_filename + ".json"]
Example #17
def create_condor_workspace(name: str | os.PathLike,
                            overwrite: bool = False) -> PosixPath:
    """Create a condor workspace given a name.

    This will create a new directory containing `log`, `out`, and
    `err` directories inside. The `workspace` argument to the
    :py:func:`~condor_preamble` function assumes creation of a workspace
    via this function.

    Missing parent directories will always be created.

    Parameters
    ----------
    name : str or os.PathLike
        the desired filesystem path for the workspace
    overwrite: bool
        if True, an existing workspace will be overwritten

    Raises
    ------
    OSError
        if the filesystem path exists and `overwrite` is False

    Returns
    -------
    pathlib.PosixPath
        filesystem path to the workspace

    Examples
    --------
    >>> import tdub.batch as tb
    >>> import shutil
    >>> ws = tb.create_condor_workspace("./some/ws")
    >>> with open(ws / "condor.sub", "w") as f:
    ...     preamble = tb.condor_preamble(ws, shutil.which("tdub"), to_file=f)
    ...     tb.add_condor_arguments("train-single ......", f)

    """
    ws = PosixPath(name).resolve()
    if overwrite and ws.exists():
        shutil.rmtree(ws)
    ws.mkdir(exist_ok=False, parents=True)
    (ws / "log").mkdir()
    (ws / "err").mkdir()
    (ws / "out").mkdir()
    return ws
Example #18
def apply_single(infile, arrname, outdir, fold_results=None, single_results=None):
    """Generate BDT response array for INFILE and save to .npy file.

    We generate the .npy files using either single training results
    (-s flag) or folded training results (-f flag).

    """
    if len(single_results) > 0 and len(fold_results) > 0:
        raise ValueError("Cannot use -f and -s together with apply-single")

    from tdub.ml_apply import build_array, FoldedTrainSummary, SingleTrainSummary
    from tdub.data import SampleInfo
    from tdub.data import selection_branches
    from tdub.frames import raw_dataframe
    import numpy as np

    outdir = PosixPath(outdir).resolve()
    outdir.mkdir(parents=True, exist_ok=True)

    trs = None
    if len(fold_results) > 0:
        trs = [FoldedTrainSummary(p) for p in fold_results]
    elif len(single_results) > 0:
        trs = [SingleTrainSummary(p) for p in single_results]
    else:
        raise ValueError("-f or -s required")

    necessary_branches = ["OS", "elmu", "reg2j1b", "reg2j2b", "reg1j1b"]
    for res in trs:
        necessary_branches += res.features
        necessary_branches += selection_branches(res.selection_used)
    necessary_branches = sorted(set(necessary_branches), key=str.lower)

    log.info("Loading necessary branches:")
    for nb in necessary_branches:
        log.info(f" - {nb}")

    stem = PosixPath(infile).stem
    sampinfo = SampleInfo(stem)
    tree = f"WtLoop_{sampinfo.tree}"
    log.info(f"Using tree {tree}")
    df = raw_dataframe(infile, tree=tree, branches=necessary_branches)
    npyfilename = outdir / f"{stem}.{arrname}.npy"
    result_arr = build_array(trs, df)
    np.save(npyfilename, result_arr)
Example #19
def train_shapes(datadir, outdir):
    """Generate shape comparison plots."""
    if outdir is None:
        outdir = PosixPath.cwd()
    else:
        outdir = PosixPath(outdir)
        outdir.mkdir(exist_ok=True, parents=True)

    import tdub.internal.shapecomps as tdisc
    import tdub.ml_train as tdmlt
    import tdub.config
    tdub.config.init_meta_table()
    meta_table = tdub.config.PLOTTING_META_TABLE
    datadir = PosixPath(datadir)
    region = (datadir / "region.txt").read_text().strip()
    df, y, w = tdmlt.load_prepped(datadir)
    var_region_binning = tdmlt.var_and_binning_for_region(df, region, meta_table)
    for v, r, b in var_region_binning:
        tdisc.dist_comparison_plot(v, r, b, df, y, w, meta_table, outdir=outdir)
Example #20
def run():
    # now we run the training
    from keras import Model
    from keras.layers import GRU, Dense, Input, Masking, Embedding

    in_node = Input(shape=(MAX_LENGTH, ))
    # we'll use an embedding layer to represent each person as a
    # vector
    embedding = Embedding(MAX_EMBED + 1 * 2, 1, mask_zero=True)(in_node)
    # a gru can translate this variable number of people into a fixed
    # size representation
    gru = GRU(5)(embedding)
    dense = Dense(5)(gru)
    # note that we don't want to use any activation functions for the
    # final output given that this is a regression problem
    out = Dense(1)(dense)

    model = Model(inputs=[in_node], outputs=[out])
    model.compile(loss='mse', optimizer='adam')

    # this function runs the name generator infinitely to produce
    # training data
    def make_arrays(generator, max_length=MAX_LENGTH, n_samples=400):
        ar = np.zeros((n_samples, max_length), dtype=int)
        targ = np.zeros((n_samples, 1), dtype=float)
        for n, samp in zip(cycle(range(n_samples)), generator()):
            emb = index_participents(samp)
            ar[n, :] = emb
            targ[n, 0] = len(samp)
            if n + 1 == n_samples:
                yield ar, targ

    model.fit_generator(make_arrays(name_generator),
                        steps_per_epoch=100,
                        epochs=EPOCHS)

    outdir = Path('models')
    outdir.mkdir(exist_ok=True)

    model.save(outdir / 'deep.h5')
Example #21
    def store_page(self, page_spec):

        # Extract page info
        page = page_spec['page']
        scan = page_spec['scan']
        side = page_spec['side']

        # Define aliases for relevant settings
        geometry = self._settings.get_geometry()

        # Extract source and target file
        page_path = page_spec['page-path']

        # Print settings
        msg = \
            "DEBUG [Page] Store page:\n" + \
            "\n" + \
            "  - page:        {}\n".format(page) + \
            "  - scan:        {}\n".format(scan) + \
            "  - side:        {}\n".format(side) + \
            "  - geometry:    {}\n".format(geometry) + \
            "  - target file: {}\n".format(page_path) + \
            "\n"
        Logger.debug(msg)

        page = self.get_page(page_spec)

        # When the page has not been found return False
        if not isinstance(page, (str, np.ndarray)):
            return False

        # Ensure that the page directory exists
        page_dir = PosixPath(page_path).parent
        if not page_dir.exists():
            Logger.debug("Pages: Creating page directory: {}".format(str(page_dir)))
            page_dir.mkdir(parents=True, exist_ok=True)

        # Save image
        Logger.debug("Pages: Storing image: {}".format(page_path))
        cv2.imwrite(page_path, page)
Example #22
def copySnippets(args):
    print("Checking for the snippets configuration directory")
    vimDir = PosixPath("~/.config/nvim/").expanduser()
    if not vimDir.exists():
        print("Neovim config dir not found, need to install...")
        return
    snippetsDir = PosixPath("~/.config/nvim/UltiSnips").expanduser()
    if not snippetsDir.exists():
        print("Directory not created, creating...")
        snippetsDir.mkdir(parents=True, exist_ok=True)
    try:
        copyfile(
            PosixPath("./snippets/html.snippets"),
            PosixPath("~/.config/nvim/UltiSnips/html.snippets").expanduser())
        copyfile(
            PosixPath("./snippets/typescript.snippets"),
            PosixPath(
                "~/.config/nvim/UltiSnips/typescript.snippets").expanduser())
        copyfile(
            PosixPath("./snippets/scss.snippets"),
            PosixPath("~/.config/nvim/UltiSnips/scss.snippets").expanduser())
    except SameFileError:
        print("Same files detected, snips may be symlinked. Ignoring...")
Example #23
    def load_config(self):
        basedir = PosixPath(environ['HOME']) / '.config' / 'fxi' / 'apps'
        basedir.mkdir(exist_ok=True, parents=True)
        # coconut-compiled; equivalent to:
        #   filename = self.title.lower().replace(' ', '_') + '.json'
        filename = (_coconut_partial(_coconut.operator.add, {1: '.json'}, 2))(
            (((self.title).lower()).replace(' ', '_')))
        path = basedir / filename

        self.config_path = path

        if not path.exists():
            with path.open('w') as file_object:
                pass
            self.config = {}
            return

        with path.open('r') as file_object:
            content = file_object.read()

            if not content:
                self.config = {}
                return

            self.config = json.loads(content)
Example #24
def make_output_dir(output_image_dir_path: PosixPath, clean=False):
    if output_image_dir_path.exists() and clean:
        shutil.rmtree(str(output_image_dir_path))
    output_image_dir_path.mkdir()
Example #25
model = load_model(argv[1])

x = []
y = []

for n in range(len(people) * 2):
    randos = sample(people, min(n, len(people)))
    if n > len(people):
        randos += sample(people, n - len(people))
    test_arr = index_participents(randos)
    predicted = model.predict(test_arr)
    x.append(n)
    y.append(predicted.flatten())

fig = Figure((4, 3))
Canvas(fig)
plot = fig.add_subplot(111)
plot.set_xlabel('ground truth')
plot.set_ylabel('model')
plot.grid()
for ax in [plot.xaxis, plot.yaxis]:
    ax.set_ticks(np.arange(0, 20))
plot.plot(x, y, '.')
plot.axvspan(10, 20, color='red', alpha=0.5)

out_path = Path('figures')
out_path.mkdir(exist_ok=True)

fig.savefig(out_path / 'test.pdf', bbox_inches='tight')
Example #26
def absoluteFileLocation(output_dir, base):
    out_dir = PosixPath(output_dir)  # todo: configurable
    if not out_dir.is_dir():
        out_dir.mkdir()
    return (out_dir / PosixPath(base + ".spec.ts")).absolute()
Example #27
    def create_folder(self, path: pathlib.PosixPath):
        path.mkdir(parents=True, exist_ok=True)

        self.logger.debug(f"Folder {path} created.")
Example #28
class Expresso2office():
   def __init__(self, dir_csv="./output", dir_vcf="."):
      logging.debug(f"Passando aqui")
      self.dir_csv = PosixPath(dir_csv).expanduser()
      self.dir_vcf = PosixPath(dir_vcf).expanduser()
      self.format_date = "%m/%d/%Y"
      self.format_time = "%H:%M"
      self.format_date_time = f"{self.format_date} {self.format_time}"

   def checa_diretorios(self):
      logging.debug(f"Checking that the source directory exists: {self.dir_vcf.resolve()}")
      if self.dir_vcf.is_dir():
         logging.info(f"Processing vcf files from directory: {self.dir_vcf.resolve()}")
      else:
         logging.info(f"Directory with the vcf files does not exist: {self.dir_vcf.resolve()}")
         exit(1)

      if not self.dir_csv.is_dir():
         logging.info(f"Directory for the csv files does not exist; it will be created: {self.dir_csv.resolve()}")
         try:
            self.dir_csv.mkdir()
         except OSError:
            logging.info(f"Creation of directory {self.dir_csv.resolve()} failed")
            exit(1)
         else:
            logging.info(f"Successfully created directory: {self.dir_csv.resolve()}")
    
   def convert2csv(self, file_vcf):
      try:
         data = Path(file_vcf).read_text()
         for cal in vobject.readComponents(data):
            file_csv = PurePath(self.dir_csv, f"address-{self.dir_vcf.name}.csv")
            logging.info("-" * 40)
            logging.info(f"Convertendo o arquivo VCF: {file_vcf}")
            logging.info(f"Arquivo cvs gerado: {file_csv}")
            with Path(file_csv).open(mode='a') as csv_out:
               csv_writer = csv.writer(csv_out, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
               if os.path.isfile(file_csv) and os.path.getsize(file_csv) == 0:
                  csv_writer.writerow(["firstName","middleName","lastName","company","jobTitle","workPhone","workPhone2","companyPhone","homePhone","homePhone2","mobilePhone","email","email2"])
               for line in cal.lines():
                  #import ipdb; ipdb.set_trace()
                  logging.debug(f"{file_vcf} name: {line.name}")
                  logging.debug(f"{file_vcf} behavior: {line.behavior}")
                  logging.debug(f"{file_vcf} encoded: {line.encoded}")
                  logging.debug(f"{file_vcf} group: {line.group}")
                  logging.debug(f"{file_vcf} params: {line.params}")
                  logging.debug(f"{file_vcf} serialize: {line.serialize()}")
                  logging.debug(f"{file_vcf} singletonparams: {line.singletonparams}")
                  logging.debug(f"{file_vcf} value: {line.value}")
                  logging.debug(f"{file_vcf} varlueRepr: {line.valueRepr()}")
                  if line.name == "VERSION":
                     try:
                        version = line.value
                     except:
                        version = ""
                  if line.name == "PRODID":
                     try:
                        prodid = line.value               
                     except:
                        prodid = ""
                  if line.name == "FN":
                     try:
                        full_name = line.value
                     except:
                        full_name = ""
                  if line.name == "N":
                     try:
                        first_name = line.serialize().split(":")[1].split(";")[1].lstrip()
                     except:
                        first_name = ""
                     try:
                        middle_name = line.serialize().split(":")[1].split(";")[2].lstrip()
                     except:
                        middle_name = ""
                     try:
                        last_name = line.serialize().split(":")[1].split(";")[0].lstrip()
                     except:
                        last_name = ""
                  if line.name == "UID":
                     try:
                        uid = line.value
                     except:
                        uid = ""
                  if line.name == "ORG":
                     try:
                        company = line.value[0]
                     except:
                        company = ""
                  if line.name == "TITLE":
                     try:
                        job_title = line.value
                     except:
                        job_title = ""
                  if line.name == "TEL":
                     if line.params['TYPE'] == ['WORK']:
                        try:
                           work_phone = line.value
                        except:
                           work_phone = ""
                     if line.params['TYPE'] == ['HOME']:
                        try:
                           home_phone = line.value
                        except:
                           home_phone = ""
                     if line.params['TYPE'] == ['CELL', 'WORK']:
                        try:
                           company_phone = line.value
                        except:
                           company_phone = ""
                     if line.params['TYPE'] == ['CELL', 'HOME']:
                        try:
                           mobile_phone = line.value
                        except:
                           mobile_phone = ""
                     if line.params['TYPE'] == ['FAX', 'WORK']:
                        try:
                           work_phone2 = line.value
                        except:
                           work_phone2 = ""
                     if line.params['TYPE'] == ['FAX', 'HOME']:
                        try:
                           home_phone2 = line.value
                        except:
                           home_phone2 = ""
                  if line.name == "EMAIL":
                     if line.params['TYPE'] == ["WORK"]:
                        try:
                           email = line.value
                        except:
                           email = ""
                     if line.params['TYPE'] == ["HOME"]:
                        try:
                           email2 = line.value
                        except:
                           email2 = ""
               csv_writer.writerow([first_name, middle_name, last_name, company, job_title, work_phone, work_phone2, company_phone, home_phone, home_phone2, mobile_phone, email, email2])
      except Exception as e:
         logging.fatal(f"Excessao nao mapeada: {e}")

   def realizar_parse(self):
      os.chdir(self.dir_vcf)
      for file_vcf in glob.glob("*.vcf"):
         self.convert2csv(file_vcf)
Example #29
    def init_logger(self):

        # Store log messages until logger is configured
        log_messages = []

        # Init log level from ~/.bookblock or the defaults
        config = self.config
        log_dir    = config.get('logger', 'log_dir')
        log_name   = config.get('logger', 'log_name')
        log_level  = config.get('logger', 'log_level')
        log_enable = config.getint('logger', 'log_enable')
        #| log_maxfiles = config.getint('logger', 'log_maxfiles')

        # Ensure that log directory exists
        log_dir_path = PosixPath(log_dir).expanduser()
        print("log_dir_path:", log_dir_path)
        if not Path(log_dir_path).exists():
            log_messages.append("Creating log directory: {}".format(log_dir))
            log_dir_path.mkdir(parents=True, exist_ok=True)

        # Substitute the log file name format patterns with actual values
        # %y -> year, %m -> month, %d -> day, %_ -> next log file number
        log_file = log_name.replace('%_', '@@NUMBER@@')
        log_file = strftime(log_file)
        log_file = '{}/{}'.format(str(log_dir_path), log_file)
        n = 0
        while True:
            log_file2 = log_file.replace('@@NUMBER@@', str(n))
            if not os.path.exists(log_file2):
                log_file = log_file2
                break
            n += 1
            if n > 10000:  # prevent possible flooding
                raise Exception('Too many log files, remove them')
            
        # Open the log file
        # and substitute it for the kivy log file (~/.kivy/logs/...)
        FileHandler.filename = str(log_file)
        if FileHandler.fd is not None:
            FileHandler.fd.close()
        FileHandler.fd = open(log_file, 'w')
        log_messages.append('Logger: Record log in %s' % log_file)

        # Log level
        # Command line option --debug overwrites settings
        log_level_option = self._settings.get_debug_level()
        if log_level_option is not None:
            log_level = log_level_option

        # Convert to logging debug level
        log_level_code = LOG_LEVELS.get(log_level.lower())

        # When log_level is not one of 
        # trace, debug, info, warning, error, critical, 
        # None is returned.
        if log_level_code is None:
            log_levels = ['trace', 'debug', 'info', 'warning', 'error', 'critical']
            print("ERROR Undefined log level: {}\n".format(log_level) +
                  "Defined are only the following: {}.".format(', '.join(log_levels)),
                  file=sys.stderr)
            exit(-1)

        # Set log level
        Logger.setLevel(log_level_code)

        # En- / Disable logger
        Logger.logfile_activated = bool(log_enable)

        # TODO Purge old logs
        # See site-packages/kivy/logger.py, class FileHandler, method purge_logs() 
        # for an example of how to purge old logs.

        # Log stored log messages
        for msg in log_messages:
            Logger.info(msg)
            
        # Log some general information about Python, Kivy etc.
        
        # Kivy's default logging level is info.
        # In order to suppress the initial INFO messages printed when kivy is loaded,
        # until the log level for `bookblock' is set,
        # I set the level to WARNING in the file ~/.kivy/config.ini
        # > [kivy]
        # > #log_level = info
        # > log_level = warning
        # Some of the suppressed information is printed now
        # when the bookblock log level is lower or equal to INFO

        Logger.info('Kivy: v%s' % kivy.__version__)
        Logger.info('Kivy: Installed at "{}"'.format(kivy.__file__))
        Logger.info('Python: v{}'.format(sys.version))
        Logger.info('Python: Interpreter at "{}"'.format(sys.executable))
        Logger.info('Bookblock: Installed at "{}"'.format(dirname(dirname(__file__))))
        Logger.info('Bookblock: To avoid the Kivy startup INFO messages '
                    'change the kivy log level to WARNING '
                    'in ~/.kivy/config.ini')
        Logger.info('Bookblock: To avoid further messages from Bookblock '
                    'adapt the Bookblock log level '
                    'in ~/.bookblock')
        Logger.info('Bookblock: For more debug information '
                    'change the kivy log level in ~/.kivy/config.ini '
                    'and the Bookblock log level in ~/.bookblock/config.ini '
                    'to TRACE, DEBUG, or INFO.')
Example #30
"""
ledge.config
~~~~~~~~~~~~

This module implements interface for getting and setting the "ledge" cli application's configuration.
The configuration is stored in the ~/.ledge/config.yaml file
"""

import yaml
from pathlib import PosixPath

ledge_dir = PosixPath('~/.ledge').expanduser()
if not ledge_dir.exists():
    ledge_dir.mkdir()

config_file = ledge_dir.joinpath('config.yaml')
if not config_file.exists():
    config_file.touch()


def get(key):
    """Get the config value by providing the key"""
    with config_file.open('r') as f:
        conf = yaml.safe_load(f) or {}
        return conf.get(key)


def set(key, value):
    """Set a config value by providing a key-value pair"""
    with config_file.open('r') as f:
        conf = yaml.safe_load(f) or {}
    conf[key] = value
    with config_file.open('w') as f:
        yaml.safe_dump(conf, f)
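A quick usage sketch for the two helpers above, round-tripping a value through ~/.ledge/config.yaml:

set('editor', 'vim')
assert get('editor') == 'vim'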
Example #31
def gen_release(db, branch_name: str, component_name_list: list, dist_dir: str,
                conf: PVConf):
    branch_dir = PosixPath(dist_dir).joinpath(branch_name)
    branch_dir.mkdir(0o755, parents=True, exist_ok=True)

    cur = db.cursor()
    meta_data_list = dict.fromkeys(component_name_list)
    for component_name in component_name_list:
        cur.execute("SELECT architecture FROM pv_repos WHERE path=%s",
                    (branch_name + '/' + component_name, ))
        meta_data_list[component_name] = [r[0] for r in cur] or ['all']
    cur.close()
    # Now we have this structure:
    # meta_data_list['main'] = ['amd64', 'arm64', ...]

    r_basic_info = {
        'Origin': conf['origin'],
        'Label': conf['label'],
        'Suite': branch_name,
        'Codename': conf['codename'],
        'Description': conf['desc'],
    }
    r_template = deb822.Release(r_basic_info)
    now = datetime.now(tz=timezone.utc)
    r_template['Date'] = now.strftime(date_format)
    if 'ttl' in conf:
        ttl = int(conf['ttl'])
        r_template['Valid-Until'] = (now +
                                     timedelta(days=ttl)).strftime(date_format)

    r = r_template.copy()

    r['Architectures'] = ' '.join(
        sorted(set.union(
            *map(set, meta_data_list.values())))) if meta_data_list else 'all'
    r['Components'] = ' '.join(sorted(component_name_list))
    hash_list = []
    for c in meta_data_list:
        for a in meta_data_list[c]:
            has_contents = False
            for filename in (
                    'binary-%s/Packages' % a,
                    'binary-%s/Packages.xz' % a,
                    'Contents-%s' % a,
                    'Contents-%s.gz' % a,
            ):
                path = branch_dir.joinpath(c).joinpath(filename)
                try:
                    size = path.stat().st_size
                except FileNotFoundError:
                    continue
                fullpath = str(PurePath(c).joinpath(filename))
                hash_list.append({
                    'sha256': sha256_file(str(path)),
                    'size': size,
                    'name': fullpath
                })
                if filename.startswith('Contents'):
                    if filename.endswith('.gz') and not has_contents:
                        with gzip.open(str(path), 'rb') as f:
                            size, sha256 = size_sha256_fp(f)
                        hash_list.append({
                            'sha256': sha256,
                            'size': size,
                            'name': os.path.splitext(fullpath)[0]
                        })
                    else:
                        has_contents = True

    null_name = 'placeholder'
    null_path = branch_dir.joinpath(null_name)
    if len(hash_list) == 0:
        open(null_path, 'wb').close()  # touch an empty file
        hash_list.append({
            'sha256': sha256_file(str(null_path)),
            'size': 0,
            'name': null_name
        })
    else:
        if os.path.exists(str(null_path)):
            os.remove(str(null_path))

    hash_list.sort(key=lambda x: x['name'])
    r['SHA256'] = hash_list
    release_fn = branch_dir.joinpath('Release')
    with open(str(release_fn), 'w', encoding='UTF-8') as f:
        f.write(str(r))
    subprocess.check_call([
        GPG_MAIN, '--batch', '--yes', '--clearsign', '-o',
        str(branch_dir.joinpath('InRelease')),
        str(release_fn)
    ])
    release_fn.unlink()