Example 1
def getfilepathextlist(_pathname: str) -> list:
    """Return the path, filename, and extension as a list

    >>> getfilepathextlist('/etc/mime.types')
    ['/etc', 'mime', '.types']
    >>> getfilepathextlist('/bin/sh')
    ['/bin', 'sh', '']
    """
    pathname, filename = path_split(_pathname)
    name, ext = path_splitext(filename)
    return [pathname, name, ext]
Example 2
def make_vars_py(csvfile=__VARS_CSV__, pyfile=__VARS_PYFILE__):
    """
    @param csvfile: csv file with vars to make py from
    @type csvfile: str
    @param pyfile: filepath to write to
    @type pyfile: str
    @return: None
    @rtype: None
    """

    # Avoid unicode-escape issues in the generated text,
    # e.g. a pasted "C:\..." path instead of escaped text; double-escaping is error-prone.
    pyfile = pyfile.replace("\\", "/")
    csvfile = csvfile.replace("\\", "/")

    rvars = extract_csv_vars(csvfile)
    tmp = StringIO()

    srcfile = __file__
    from os.path import split as path_split
    module = path_split(srcfile)[1]

    msg_body = """Python file to map pynames to recipe variable instances.
The list of variables should (almost) never change, except if
a different protocol is used to generate pynames from var names.
Instead of needing to import and generate at runtime, just import
this python file. This file will automatically raise ValueError
if it detects that the corresponding csv file is out of date."""

    header = make_module_header(srcfile, msg_body)
    tmp.write(header)

    tmp.write("from os.path import getmtime as _getmtime, dirname as _dirname, join as _join\n")
    tmp.write("\n# Filenames as passed to makevars.make_vars_py()\n")
    tmp.write("__curdir = _dirname(__file__)\n")
    tmp.write("__module = _join(__curdir, \"%s\")\n" % path_split(pyfile)[1])
    tmp.write("__csv = _join(__curdir, \"vars_db\", \"%s\")\n" % path_split(csvfile)[1])
    tmp.write("\nif _getmtime(__csv) > _getmtime(__module):\n")
    tmp.write("    raise ImportError(\"Warning! Pyfile is outdated. Regenerate from %s.\")\n\n" % module)
    tmp.write("from pbslib.recipemaker import RecipeVariable\n\n\n")

    for pyname, varname in rvars:
        tmp.write(pyname)
        tmp.write(' = RecipeVariable("')
        tmp.write(varname)
        tmp.write('")\n')
    with open(pyfile, 'w') as f:
        f.write(tmp.getvalue())
Example 3
def glob(session, glob_pattern):
    """
    Find files matching given pattern through a session.

    :param session: session to run the command on
    :type session: ShellSession
    :param str glob_pattern: pattern of filenames where `*`, `?` and such match
                             any string or not-empty-string and so on.
    :returns: file names matching pattern, including path (like glob.glob)
    :rtype: [str]
    :raises: :py:class:`RuntimeError` if the ls command returned a status
             other than 0 or 2

    Much like :py:func:`glob.glob` but implemented using `ls`.

    The glob matching is done locally, not by `ls`, since quoting turns the
    glob characters '*' and '?' into regular characters, so they are no longer
    interpreted by the shell.
    """
    path_part, name_pattern = path_split(glob_pattern)
    all_filenames = ls(session, path_part)

    # convert pattern: escape \ . ? *
    pattern = re.escape(name_pattern)

    # convert pattern: convert shell globs to regex
    pattern = pattern.replace(r'\*', '.*').replace(r'\?', '.')

    # convert pattern: require match whole file name; compile for speed-up
    pattern = re.compile(pattern + '$')
    return [
        join(path_part, filename) for filename in all_filenames
        if pattern.match(filename)
    ]
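
The pattern conversion above can be exercised on its own; a minimal sketch, assuming a made-up listing in place of the ls() output:

import re
from os.path import join, split as path_split

all_filenames = ['run_1.log', 'run_2.log', 'notes.txt']  # hypothetical ls() result
path_part, name_pattern = path_split('/var/data/run_?.log')

pattern = re.escape(name_pattern)                           # escape regex metacharacters
pattern = pattern.replace(r'\*', '.*').replace(r'\?', '.')  # re-enable shell globs
pattern = re.compile(pattern + '$')                         # require whole-name match

print([join(path_part, f) for f in all_filenames if pattern.match(f)])
# ['/var/data/run_1.log', '/var/data/run_2.log']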
Example 4
    async def refresh_backup(self, decoded_restored: list,
                             tgback_file_path: str) -> None:
        backup_death_at = time() + 5_356_800

        user = await self._TelegramClient.get_entity('me')
        user = self.__prepare_user_entity(user)

        decoded_restored[2] = backup_death_at
        decoded_restored[3] = user.username
        decoded_restored[4] = user.id
        decoded_restored[5] = user.last_name

        dump(encode_restored(decoded_restored), tgback_file_path)
        ext = '.png' if tgback_file_path[-4:] != '.png' else ''
        await self._TelegramClient.send_file(
            'me',
            open(tgback_file_path + ext, 'rb'),
            caption=self.__notify.format('updated', tgback_file_path,
                                         'Updated', ctime(),
                                         ctime(backup_death_at), '', VERSION))
        backup_name = path_split(tgback_file_path)[-1]
        await self._TelegramClient.send_message(
            'me',
            f'Hello! Please, update your backup `{backup_name}`\n\n**One week left!!**',
            schedule=timedelta(seconds=4_752_000))
Example 5
  def __init__(self, loop, syncer, config):
    self.host = config['rpc']['host']
    self.port = config['rpc']['port']
    self.loop = loop
    self.syncer = syncer
    self.global_message_queue = self.syncer.queues['RPCManager']
    #self.allowed_clients
    self.requests = {}
    self.up = True
    self.logger = logging.getLogger("RPCManager")

    rpc_manager_location = __file__
    web_wallet_dir = join(path_split(rpc_manager_location)[0], "web_wallet")
    self.app = web.Application(loop=self.loop)
    
    self.app.router.add_static('/', web_wallet_dir)
    self.app.router.add_route('*', '/rpc', self.handle)
    self.server = self.loop.create_server(self.app.make_handler(), self.host, self.port)
    asyncio.ensure_future(self.server, loop=loop)
    asyncio.ensure_future(self.check_queue())

    methods.add(self.ping)
    methods.add(self.getconnectioncount)
    methods.add(self.getheight)
    methods.add(self.getblocktemplate)
    methods.add(self.validatesolution)
    methods.add(self.getbalancestats)
    methods.add(self.getbalancelist)
    methods.add(self.getbalance)
    methods.add(self.sendtoaddress)
    methods.add(self.getnewaddress)
    methods.add(self.dumpprivkey)
    methods.add(self.importprivkey)
    methods.add(self.getsyncstatus)
Example 6
def ccs2ccsflat(flt_ctxt, in_obj):
    self = flt_ctxt.ctxt_wrapped

    ccsf_dirname, ccsf_basename = path_split(CCS_FLATTEN)
    # order of priority when searching for the binary (descending order):
    # - same as the root of the package (debugging purposes)
    # - directories-part as per specification in setup.cfg
    # - PATH env variable (if defined)
    ccs_flatten = which(ccsf_basename, dirname_x(__file__, 2), ccsf_dirname,
                        '')
    if not ccs_flatten:
        raise FilterError(self, "ccs_flatten binary seems unavailable")

    # XXX currently ccs_flatten does not handle stdin (tempfile.mkstemp?)
    # XXX conversion is not idempotent, should prevent using ccs-flat as input
    #     (specifically, explicit ordering will get broken in subsequent round)
    in_file = in_obj('file')
    if not isinstance(in_file, basestring):
        # convert '-'/'@0' already converted to fileobj back to '-'
        if in_file is not stdin:
            raise RuntimeError("Only stdin ('-') supported")
        in_file = '-'
    command = [ccs_flatten, in_file]
    log.info("running `{0}'".format(' '.join(command)))
    try:
        proc = OneoffWrappedStdinPopen(command, stdout=PIPE, stderr=PIPE)
    except OSError:
        raise FilterError(self, "error running ccs_flatten binary")
    out, err = proc.communicate()
    if proc.returncode != 0 or (out == '' and err != ''):
        raise FilterError(self, "ccs_flatten {0}\n\t{1}",
                          ("exit code: {0}" if proc.returncode > 0 else
                           "killing signal: {0}").format(abs(proc.returncode)),
                          err)
    return ('bytestring', out)
Example 7
def get_module_for_file(filepath):
    path_to, filename = path_split(filepath)
    modulename, ext = splitext(filename)
    return '.'.join(
        chain(recursively_get_package_components(path_to),
              (modulename, ))  # chain
    )  # join
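
A rough standalone sketch of the same idea; recursively_get_package_components is stubbed here for illustration (the real helper presumably walks up the package tree):

from itertools import chain
from os.path import split as path_split, splitext

def recursively_get_package_components(path_to):
    # stub: treat every path component as a package (illustration only)
    return [part for part in path_to.split('/') if part]

path_to, filename = path_split('pkg/sub/mod.py')
modulename, ext = splitext(filename)
print('.'.join(chain(recursively_get_package_components(path_to), (modulename,))))
# pkg.sub.mod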
Example 8
def do_zip_reduction(file_name, absolute_folder_with_file_name_in_it, output_file_name):
    """ Removes the single root folder from the zip file, shifts everything up one level. """
    prior_directory = os.path.abspath(".")
    try:
        # move to the folder containing the file (simplifies other logic)
        os.chdir(absolute_folder_with_file_name_in_it)
        
        # Extract zip
        with zipfile.ZipFile(file_name) as f:
            file_paths = [some_file.filename for some_file in f.filelist]
            # determine the root folder...
            extraction_folder = path_split(os.path.commonprefix(file_paths))[0]
            if not extraction_folder:
                log.error("This zip file is already prepared for deployment, there was no common root folder in the zip file.")
                EXIT(1)
            else:
                log.info("Rebuilding zip file without root directiory.")
                # extract
                f.extractall()

        # get and read in the entire contents of the zip into a new zip, but one directory level up.
        with zipfile.ZipFile(output_file_name, mode='w', compression=zipfile.ZIP_DEFLATED) as z:
            for zip_path, file_path in get_file_paths_for_zipping(extraction_folder):
                with open(file_path, 'rb') as f:
                    z.writestr(zip_path, f.read())
        
        # delete the extraction folder
        shutil.rmtree(extraction_folder)
        
    finally:
        # we always want to return to the original directory.
        os.chdir(prior_directory)
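
The root-folder detection above combines os.path.commonprefix with path_split; path_split trims the partial component that a character-wise common prefix can leave behind. A standalone check with hypothetical entry names:

from os.path import commonprefix, split as path_split

file_paths = ['proj-1.0/setup.py', 'proj-1.0/src/main.py']  # made-up zip entries
# commonprefix is character-wise ('proj-1.0/s'); path_split drops the partial tail
extraction_folder = path_split(commonprefix(file_paths))[0]
print(extraction_folder)  # proj-1.0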
Example 9
def find_old_files(wincom, reference):
    """
    Build a list of all of the outdated files in the directory.
    Rather than calling os.path.getmtime for each file in the typehint folder,
    it is easier and faster just to cache all the results in a .csv to parse
    and rebuild on each update.

    Feed in a dict of wincom paths to os.stat st_mtime, and a dict
    of the same for reference. Reference paths should be in format
    where '\\'.join((wincom_to_typehint(base), head)) produces
    the correct reference path to the base and head names produced
    by os.path.split() on the corresponding wincom name.


    @param wincom: the dict of wincom files and timestamps
    @type wincom: dict[str, float]
    @param reference: the dict of typehint files and timestamps
    @type reference: dict[str, float]
    @return: list[(src, dst)]
    @rtype: list[(str, str)]
    """
    old = []
    for w_path, w_mtime in wincom.items():
        base, head = path_split(w_path)
        ref_name = "\\".join((wincom_to_typehint(base), head))
        ref_mtime = reference.get(ref_name, 0)
        if w_mtime > ref_mtime:
            if ref_name.startswith("\\"):
                ref_name = ref_name.lstrip("\\")
            ref_target = "\\".join((typehint_dir, ref_name))
            old.append((w_path, ref_target))
    return old
Example 10
    def new(cls, path, ckan_version, port=None):
        """
        Return an Environment object with settings for a new project.
        No directories or containers are created by this call.

        :params path: location for new project directory, may be relative
        :params ckan_version: release of CKAN to install
        :params port: preferred port for local instance

        Raises DatacatsError if a directory or project with the same
        name already exists.
        """
        workdir, name = path_split(abspath(expanduser(path)))

        if not valid_name(name):
            raise DatacatsError('Please choose an environment name starting'
                ' with a letter and including only lowercase letters'
                ' and digits')
        if not isdir(workdir):
            raise DatacatsError('Parent directory for environment'
                ' does not exist')

        datadir = expanduser('~/.datacats/' + name)
        target = workdir + '/' + name

        if isdir(datadir):
            raise DatacatsError('Environment data directory {0} already exists',
                (datadir,))
        if isdir(target):
            raise DatacatsError('Environment directory already exists')

        environment = cls(name, target, datadir, ckan_version, port)
        environment._generate_passwords()
        return environment
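
The expanduser/abspath/path_split chain turns a possibly-relative path into a parent directory and a project name; a small sketch (output depends on the machine; a POSIX home is assumed):

from os.path import abspath, expanduser, split as path_split

workdir, name = path_split(abspath(expanduser('~/projects/mysite')))
print(workdir, name)  # e.g. /home/alice/projects mysite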
Example 11
def yielder(root_dir_to_crawl):
    logging.debug("Crawling {}".format(root_dir_to_crawl))

    counter = 0
    for (dirpath, dirnames, filenames) in walk(root_dir_to_crawl):
        if path_split(dirpath)[-1].startswith("_"):
            continue
        logging.debug(dirpath)
        for filename in filenames:
            file_full_path = path_join(dirpath, filename)
            if filename.endswith(".zip"):
                # no files bigger than 1GB, nobody got time for that...
                if get_size_mbs(file_full_path) > 1024:
                    continue

                counter += 1
                yield CollectRequest(
                    file_full_path=file_full_path,
                    my_proc_filters=deepcopy(proc_filters_template),
                )

            else:
                print(("UNKNOWN FILE - {}".format(file_full_path)))

    print(("Yielder done - counter={}".format(counter)))
Example 12
    async def send_file(self, destination, fp, *, filename=None, content=None, tts=False, embed=None):
        """
        discord.py's send_file with embed support
        """

        channel_id, guild_id = await self.bot._resolve_destination(destination)

        try:
            with open(fp, 'rb') as f:
                buffer = io.BytesIO(f.read())
                if filename is None:
                    _, filename = path_split(fp)
        except TypeError:
            buffer = fp

        content = str(content) if content is not None else None

        if embed is not None:
            embed = embed.to_dict()

        data = await self.bot.http.send_file(channel_id, buffer, guild_id=guild_id,
                                             filename=filename, content=content, tts=tts, embed=embed)
        channel = self.bot.get_channel(data.get('channel_id'))
        message = self.bot.connection._create_message(channel=channel, **data)
        return message
Example 13
def get_timestamp_from_filename(filename):
    filename_part = path_split(filename)[1] # [1] gets tail
    namepart, extension = splitext(filename_part)
    try:
        return int(namepart)
    except ValueError:
        return 0
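
Assuming path_split and splitext are the usual os.path imports, the two-step split reads an integer timestamp out of names like 1618033988.png:

from os.path import split as path_split, splitext

name, ext = splitext(path_split('/var/shots/1618033988.png')[1])
print(name, ext)  # 1618033988 .png
print(int(name))  # 1618033988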
Example 14
def ccs2ccsflat(flt_ctxt, in_obj):
    self = flt_ctxt.ctxt_wrapped

    ccsf_dirname, ccsf_basename = path_split(CCS_FLATTEN)
    # order of priority when searching for the binary (descending order):
    # - same as the root of the package (debugging purposes)
    # - directories-part as per specification in setup.cfg
    # - PATH env variable (if defined)
    ccs_flatten = which(ccsf_basename, dirname_x(__file__, 2), ccsf_dirname, '')
    if not ccs_flatten:
        raise FilterError(self, "ccs_flatten binary seems unavailable")

    # XXX currently ccs_flatten does not handle stdin (tempfile.mkstemp?)
    # XXX conversion is not idempotent, should prevent using ccs-flat as input
    #     (specifically, explicit ordering will get broken in subsequent round)
    in_file = in_obj('file')
    if not isinstance(in_file, basestring):
        # convert '-'/'@0' already converted to fileobj back to '-'
        if in_file is not stdin:
            raise RuntimeError("Only stdin ('-') supported")
        in_file = '-'
    command = [ccs_flatten, in_file]
    log.info("running `{0}'".format(' '.join(command)))
    try:
        proc = OneoffWrappedStdinPopen(command, stdout=PIPE, stderr=PIPE)
    except OSError:
        raise FilterError(self, "error running ccs_flatten binary")
    out, err = proc.communicate()
    if proc.returncode != 0 or (out == '' and err != ''):
        raise FilterError(self, "ccs_flatten {0}\n\t{1}",
                          ("exit code: {0}" if proc.returncode > 0 else
                           "killing signal: {0}").format(abs(proc.returncode)),
                          err)
    return ('bytestring', out)
Example 15
 def from_data_file(cls, datafile):
     """
     @type datafile: kla.analyzer.KLARawDataFile
     """
     save_name = path_split(datafile.filename)[1]
     save_name = save_name.replace(save_name[save_name.rfind("."):], ".xlsx")
     return cls(datafile.filename, save_name, None, datafile.metadata)
Example 16
    def new(cls, path, ckan_version, port=None):
        """
        Return an Environment object with settings for a new project.
        No directories or containers are created by this call.

        :params path: location for new project directory, may be relative
        :params ckan_version: release of CKAN to install
        :params port: preferred port for local instance

        Raises DatacatsError if a directory or project with the same
        name already exists.
        """
        workdir, name = path_split(abspath(expanduser(path)))

        if not valid_name(name):
            raise DatacatsError(
                'Please choose an environment name starting'
                ' with a letter and including only lowercase letters'
                ' and digits')
        if not isdir(workdir):
            raise DatacatsError('Parent directory for environment'
                                ' does not exist')

        datadir = expanduser('~/.datacats/' + name)
        target = workdir + '/' + name

        if isdir(datadir):
            raise DatacatsError(
                'Environment data directory {0} already exists', (datadir, ))
        if isdir(target):
            raise DatacatsError('Environment directory already exists')

        environment = cls(name, target, datadir, ckan_version, port)
        environment._generate_passwords()
        return environment
Example 17
def read_summary(folder_path):
    """Scan the given folder"""
    if not exists(folder_path) or not isdir(
            folder_path):  # if the target folder does not exist
        raise FileNotFoundError(
            f'Folder {folder_path} not found')  # raise an error
    result = {}  # initialize the result dict
    for path, _, files in walk(
            folder_path):  # walk the subfolders of the requested folder
        for file in files:  # walk the files in each subfolder
            file_size_to = int('1' + '0' * len(str(getsize(join(path, file))))
                               )  # compute the size bucket (next power of ten) for the key
            file_type = file.split('.')[-1]  # read the extension
            # if the extension is not yet in the key's list; this also creates the key if missing
            if not (file_type in result.setdefault(file_size_to,
                                                   ([0], []))[1]):
                result[file_size_to][1].append(
                    file_type)  # add the extension to the list
            result[file_size_to][0][0] += 1  # increment the key's counter
    for key, res in result.items():  # iterate over the dict
        result[key] = (
            *res[0], res[1]
        )  # unpack the counter lists to match the required output shape
    with open(path_split(abspath(folder_path))[1] + '_summary.json',
              'w') as f:  # open the target file
        dump(result, f,
             ensure_ascii=False)  # dump the result into it
    return result  # return the result
Example 18
def full_path_split(path):
    result = []
    fn = None
    while fn != '':
        path, fn = path_split(path)
        result.append(fn)
    result.reverse()
    return result
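
With the function above (and os.path.split bound as path_split), POSIX paths split into one element per component; note the leading empty string, and that the root '/' itself is dropped:

print(full_path_split('/usr/local/bin/python3'))
# ['', 'usr', 'local', 'bin', 'python3']
print(full_path_split('a/b/c.txt'))
# ['', 'a', 'b', 'c.txt']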
Example 19
    def insert(self, node):
        node.node_id = self.__get_new_node_id()
        dirname, basename = path_split(node.path)
        parent_node = self.node_from_path(dirname)
        query = 'INSERT INTO Nodes (backup_id,version,nodename,node_id,parent_node_id, inode_number) VALUES (?,?,?,?,?,?)'
        self.storage.db_execute(query, (self.backup_id, self.versions[0], basename, node.node_id, parent_node.node_id, node.inode_number))

        self.cached_nodes[node.path] = node
Example 20
def getfilenameexttup(_pathname: str) -> tuple:
    """Return the filename (as a tuple) without path

    >>> getfilenameexttup('/etc/mime.types')
    ('mime', '.types')
    >>> getfilenameexttup('/bin/sh')
    ('sh', '')
    """
    return path_splitext(path_split(_pathname)[1])
Example 21
    def dialog_populate(self, event=None):
        """
            Dynamically populates & updates the treeview, listbox,
            and keeps track of the full paths corresponding to each
            item in the listbox
        """
        if not self.treeview.focus():
            return

        self.treeview.column("#0", width=1000)

        existing_children = self.treeview.get_children(self.treeview.focus())
        for child in existing_children:
            self.treeview.delete(child)

        self.list_box.delete(0, "end")
        self.selection_paths.clear()

        focus_item = self.treeview.focus()
        path = self.climb(focus_item)

        if self.show_hidden:
            children = self.list_dir(path, force=True)
        else:
            children = self.list_dir(path)

        for child in children:
            if isdir(path + child):

                self.treeview.insert(focus_item,
                                     index="end",
                                     text=child,
                                     image=self.folder_icon)

                if self.select_dirs:
                    self.list_box.insert("end", child)
                    self.selection_paths.append(path + child)

            elif isfile(path + child):

                if self.include_files:
                    self.treeview.insert(focus_item,
                                         index="end",
                                         text=child,
                                         image=self.file_icon)

                if self.select_files:
                    self.list_box.insert("end", child)
                    self.list_box.itemconfig("end", {"bg": "#EAEAEA"})
                    self.selection_paths.append(path + child)

        if isfile(normpath(path)):
            (head, tail) = path_split(normpath(path))
            head = sub("\\\\", "/", head)

            self.list_box.insert("end", tail)
            self.selection_paths.append(head + "/" + tail)
            self.list_box.itemconfig("end", {"bg": "#EAEAEA"})
Example 22
def getfilepath(_pathname: str) -> str:
    """Return the path without filename

    >>> getfilepath(r'/etc/mime.types')
    '/etc'
    >>> getfilepath('/bin/sh')
    '/bin'
    """
    return path_split(_pathname)[0]
Example 23
def getfilepathext(_pathname: str) -> tuple:
    """Return the path and filename+extension as a tuple

    >>> getfilepathext('/etc/mime.types')
    ('/etc', 'mime.types')
    >>> getfilepathext('/bin/sh')
    ('/bin', 'sh')
    """
    return path_split(_pathname)
Example 24
 def env_name(cls, prefix):
     if not prefix:
         return None
     if paths_equal(prefix, context.root_prefix):
         return 'base'
     maybe_envs_dir, maybe_name = path_split(prefix)
     for envs_dir in context.envs_dirs:
         if paths_equal(envs_dir, maybe_envs_dir):
             return maybe_name
     return prefix
Example 25
 def ignore_path_down(path):
     nonlocal ignore
     ret = set()
     while path not in ignore and path != self.path:
         ret.add(path)
         if pathExists(path):
             self.h_data[path] = int(file_info(path).st_mtime)
         path = path_split(path)[0]
     ignore |= ret
     return ret
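
The path = path_split(path)[0] step is the usual idiom for walking a path upward toward the root; in isolation:

from os.path import split as path_split

path = '/srv/app/static/css'
while path not in ('/', ''):
    print(path)
    path = path_split(path)[0]
# /srv/app/static/css
# /srv/app/static
# /srv/app
# /srv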
Example 26
    def on_any_event(self, event):
        "event-listener checking if the affected files are the cert-files we're interested in"
        if event.is_directory:
            return

        filename = path_split(event.src_path)[-1]
        if isinstance(event, FileMovedEvent):
            filename = path_split(event.dest_path)[-1]

        if filename in ['cert.pem', 'key.pem']:
            # all cases except for FileModified need re-configure
            if isinstance(event, (FileCreatedEvent, FileMovedEvent, FileDeletedEvent)):
                ChangeHandler.reexec_config()
            # file modification needs only a nginx reload without config.py
            elif isinstance(event, FileModifiedEvent):
                ChangeHandler.reload_nginx()
        # cert files have been moved away, re-configure
        elif isinstance(event, FileMovedEvent) and path_split(event.src_path)[-1] in ['cert.pem', 'key.pem']:
            ChangeHandler.reexec_config()
Example 27
def env_name(prefix):
    if not prefix:
        return None
    if paths_equal(prefix, context.root_prefix):
        return ROOT_ENV_NAME
    maybe_envs_dir, maybe_name = path_split(prefix)
    for envs_dir in context.envs_dirs:
        if paths_equal(envs_dir, maybe_envs_dir):
            return maybe_name
    return prefix
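
The lookup works because a conda environment prefix is just <envs_dir>/<name>, so a single path_split recovers both parts; with a hypothetical prefix:

from os.path import split as path_split

maybe_envs_dir, maybe_name = path_split('/opt/conda/envs/ml')
print(maybe_envs_dir, maybe_name)  # /opt/conda/envs ml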
Example 29
 def __get_identifiers_from(self, arbitrary_path):
     mount_path = arbitrary_path
     manager = self.system_bus.get_object('org.freedesktop.Hal', '/org/freedesktop/Hal/Manager')
     while True:
         devices = manager.FindDeviceStringMatch('volume.mount_point', mount_path, dbus_interface='org.freedesktop.Hal.Manager')
         if len(devices) == 1:
             break
         (mount_path, tail) = path_split(mount_path) #@UnusedVariable
     [device_name] = devices
     relative_path = arbitrary_path.replace(mount_path, '', 1).lstrip('/')
     return device_name, relative_path
Example 30
def safe_json(obj: object, filepath: str, **kwargs):
    temp = StringIO()
    kwargs['indent'] = kwargs.get('indent', 4)
    json_dump(obj, temp, **kwargs)

    head, _ = path_split(filepath)
    if not path_exists(head):
        makedirs(head)

    with open(filepath, 'w') as f:
        f.write(temp.getvalue())
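
The snippet relies on aliased imports it does not show; a plausible header (names inferred, not confirmed by the source) plus a usage call:

from io import StringIO
from json import dump as json_dump
from os import makedirs
from os.path import exists as path_exists, split as path_split

# with safe_json defined as above:
safe_json({'answer': 42}, '/tmp/demo/out.json', indent=2)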
Example 31
def getfilenameext(_pathname: str) -> str:
    """Return the filename with extension, but without path

    The path is removed from the given string

    >>> getfilenameext('/etc/mime.types')
    'mime.types'
    >>> getfilenameext('/bin/sh')
    'sh'
    """
    return path_split(_pathname)[1]
Example 32
def main(args):
    parser = argparse.ArgumentParser(
            description=dedent("""\
                Build RPM using mock

                If mock is not installed locally,
                Vagrant will be used to build and
                configure a mock build VM

                Requirements:
                  mock (on an EL7 host, from epel-release)
                or
                  vagrant, rsync
            """),
            formatter_class=argparse.RawDescriptionHelpFormatter,
            )
    parser.add_argument(
            'spec_file', type=argparse.FileType('r'),
            help="which RPM spec file to build",
            )

    args = parser.parse_args(args)

    spec_file = abspath(args.spec_file.name)
    packaging_dir, spec_file_name = path_split(spec_file)
    # We assume that the spec always lives one dir level below the source
    source = dirname(packaging_dir)

    # Get build options
    rpmbuild_defines = get_rpmbuild_defines()
    logger.info('defines %s', rpmbuild_defines)
    rendered_spec_file = render_spec_file(spec_file, rpmbuild_defines)

    # Fetch any defined Sources (this does not include the repo containing the
    # spec file itself; the local copy is to be used.)
    chdir(source)
    sources = get_sources(rendered_spec_file)
    sources += get_dependency_urls(rendered_spec_file)
    for url in sources:
        logger.info('checking %s', url)
        download_if_newer(url)

    chdir(SCRIPT_DIR)

    if has_cmd('mock'):
        # run locally
        rpm = build(source, rendered_spec_file)
        shutil.copy(rpm, SCRIPT_DIR)
    elif has_cmd('vagrant'):
        # install and run mock in a vagrant CentOS 7 box
        build_in_vagrant(source, spec_file_name)

    else:
        raise RuntimeError('need either mock or vagrant to build')
Example 33
def getfilename(_pathname: str) -> str:
    """Return the filename without path or extension

    The path and extension are removed from the given string

    >>> getfilename('/etc/mime.types')
    'mime'
    >>> getfilename('/bin/sh')
    'sh'
    """
    return path_splitext(path_split(_pathname)[1])[0]
Example 34
def _get_cache_name(filename):
    """
    @param filename: filename to get cache name for
    @type filename: str
    @return: the name of the corresponding pickle cache
    @rtype: str
    """

    _, tail = path_split(filename)
    name, _ = path_splitext(tail)
    cache = '\\'.join((pickle_cache, name + '.pickle'))
    return cache
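
A quick trace of the same steps, assuming Windows-style paths (to match the snippet's backslash joins) and a hypothetical pickle_cache directory:

from os.path import split as path_split, splitext as path_splitext

pickle_cache = r'C:\cache'  # hypothetical; a module-level constant in the original
_, tail = path_split(r'C:\data\vars.csv')  # ('C:\\data', 'vars.csv') on Windows
name, _ = path_splitext(tail)              # ('vars', '.csv')
print('\\'.join((pickle_cache, name + '.pickle')))  # C:\cache\vars.pickle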
Example 35
def _split_path(path):
    """
    A wrapper around the normal split function that ignores any trailing /.

    :return: A tuple of the form (dirname, last) where last is the last element
             in the path.
    """
    # Get around a quirk in path_split where a / at the end will make the
    # dirname (split[0]) the entire path
    path = path[:-1] if path[-1] == '/' else path
    split = path_split(path)
    return split
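
The quirk being worked around is easy to see directly:

from os.path import split as path_split

print(path_split('/var/log/'))  # ('/var/log', '') -- trailing slash leaves an empty tail
print(path_split('/var/log'))   # ('/var', 'log')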
Example 36
def load_img(db, directory, fetch_taxonomy=False, taxid=None):

    dir, bas = path_split(directory)
    if not bas:
        dir, bas = path_split(dir)

    fasta_file = path_join(directory, bas + '.fna')
    gff_file = path_join(directory, bas + '.gff')
    kegg_file = path_join(directory, bas + '.ko.tab.txt')
    xref_file = path_join(directory, bas + '.xref.tab.txt')

    gff_file = fix_img_gff_errors(gff_file)

    xref_dict = None
    if isfile(kegg_file):
        xref_dict = add_gene_dbxref(xref_file, kegg_file)

    with open(fasta_file) as seq_handle:
        seq_dict = SeqIO.to_dict(SeqIO.parse(seq_handle, "fasta"))

    saved = []
    for rec in GFF.parse(gff_file, seq_dict):
        for i in range(len(rec.features)):
            feat = rec.features[i]
            try:
                dbxrefs = xref_dict[feat.qualifiers['ID'][0]]
                try:
                    rec.features[i].qualifiers['db_xref'].extend(dbxrefs)
                except KeyError:
                    rec.features[i].qualifiers['db_xref'] = dbxrefs
            except KeyError:
                # this gene has no xrefs
                pass

            #print rec.features[i]

        saved.append(add_sequence_dbxref(rec, taxid, bas))

    db.load(saved, fetch_NCBI_taxonomy=fetch_taxonomy)
Example 37
def find_path_obj(user, old_path):
    path = []
    while len(old_path) > 1:
        old_path, tmp = path_split(old_path)
        path.append(tmp)

    del_fold = Fold.objects.filter(user=user, name=path.pop())
    print(path)
    while len(path):
        del_fold = del_fold[0]
        del_fold = del_fold.fold_set.filter(user=user, name=path.pop())

    return del_fold
Example 38
    def on_any_event(self, event):
        "event-listener checking if the affected files are the cert-files we're interested in"
        if event.is_directory:
            return

        filename = path_split(event.src_path)[-1]
        if isinstance(event, FileMovedEvent):
            filename = path_split(event.dest_path)[-1]

        if filename in ['cert.pem', 'key.pem']:
            # all cases except for FileModified need re-configure
            if isinstance(
                    event,
                (FileCreatedEvent, FileMovedEvent, FileDeletedEvent)):
                ChangeHandler.reexec_config()
            # file modification needs only a nginx reload without config.py
            elif isinstance(event, FileModifiedEvent):
                ChangeHandler.reload_nginx()
        # cert files have been moved away, re-configure
        elif isinstance(event, FileMovedEvent) and path_split(
                event.src_path)[-1] in ['cert.pem', 'key.pem']:
            ChangeHandler.reexec_config()
Example 39
def load_img(db, directory, fetch_taxonomy=False, taxid=None):

    dir, bas = path_split(directory)
    if not bas:
        dir, bas = path_split(dir)

    fasta_file = path_join(directory, bas + '.fna')
    gff_file = path_join(directory, bas + '.gff')
    kegg_file = path_join(directory, bas + '.ko.tab.txt')
    xref_file = path_join(directory, bas + '.xref.tab.txt')

    gff_file = fix_img_gff_errors(gff_file)

    xref_dict = None
    if isfile(kegg_file):
        xref_dict = add_gene_dbxref(xref_file, kegg_file)

    with open(fasta_file) as seq_handle:
        seq_dict = SeqIO.to_dict(SeqIO.parse(seq_handle, "fasta"))

    saved = []
    for rec in GFF.parse(gff_file, seq_dict):
        for i in range(len(rec.features)):
            feat = rec.features[i]
            try:
                dbxrefs = xref_dict[feat.qualifiers['ID'][0]]
                try:
                    rec.features[i].qualifiers['db_xref'].extend(dbxrefs)
                except KeyError:
                    rec.features[i].qualifiers['db_xref'] = dbxrefs
            except KeyError:
                # this gene has no xrefs
                pass

            #print rec.features[i]

        saved.append(add_sequence_dbxref(rec, taxid, bas))

    db.load(saved, fetch_NCBI_taxonomy=fetch_taxonomy)
Example 40
def main():
    global CEF_VER

    from argparse import ArgumentParser
    from os.path import isdir, split as path_split, splitext
    from glob import glob

    parser = ArgumentParser()
    parser.add_argument('path', metavar='PATH')
    parser.add_argument('-i', '--inplace', dest='inplace', action='store_true')
    parser.add_argument('-c',
                        '--cef-ver',
                        dest='cef_ver',
                        choices=['1', '3'],
                        default='0')
    args = parser.parse_args()

    if args.cef_ver == '0':
        if 'cef1' in args.path:
            CEF_VER = '1'
        elif 'cef3' in args.path:
            CEF_VER = '3'
        else:
            return parser.error('specify cef version')
    else:
        CEF_VER = args.cef_ver

    for path in glob(args.path):
        if isdir(path):
            print 'Skipping directory {}'.format(path)
            continue

        print 'Current file: {}'.format(path)
        with open(path) as f:
            new_header = replace_all(f.read())

        if not args.inplace:
            path += '_new'

        pre = ''
        if not new_header.startswith('module'):
            head, fname = path_split(path)
            mname = splitext(fname)[0]
            # str.lstrip/rstrip strip character sets, not substrings,
            # so remove the prefix/suffix explicitly
            if mname.startswith('cef_'):
                mname = mname[len('cef_'):]
            if mname.endswith('_capi'):
                mname = mname[:-len('_capi')]
            inc = '{}.{}'.format('internal',
                                 mname) if 'internal' in path else mname
            pre = 'module deimos.cef{}.{};\n\n'.format(CEF_VER, inc)

        with open(path, 'w') as f:
            f.write(pre)
            f.write(new_header)
Example 41
File: list.py Project: lpirl/yana
    def invoke_on_note(self, args, note):
        self.listed_paths.append(note.abspath)

        print_colored("%u " % len(self.listed_paths), True)
        pathname, filename = path_split(note.path)
        print_default("%s%s" % (pathname, pathsep))
        print_highlighted("%s%s" % (filename, linesep))

        if args.tags:
            tags = note.tags
            if tags:
                print_default("\n")
                for tag in tags:
                    print_colored_2("\t#%s\n" % tag)
                print_default("\n")
Example 43
def index(request):
    if request.is_ajax():  # show note list
        if 'path' in request.GET.keys():
            get = request.GET['path']
            local_path = request.session.get('path')
            folds = []
            q_list = []

            while len(get) > 1:  # could be handled with string splitting
                get, fold = path_split(get)
                folds.insert(0, fold)

            for path in folds:
                local_path = local_path[path]

            notes = local_path["YmFzZQ=="]
            for note in notes:
                q_list.append(Q(**{"id": note}))
            notes = Note.objects.filter(reduce(operator.or_, q_list))

            return render(request, "note_list.html", {"notes": notes})
        elif 'note' in request.GET.keys():

            content = Note.objects.get(id=request.GET['note'])
            files = File.objects.filter(note__id=request.GET['note'])

            return render(request, "show_content.html", {"content": content, "files": files})

    else:
        paths = []
        folds = {}
        fold_obj = Fold.objects.filter(user=request.user)

        for fold in fold_obj:
            notes = Note.objects.filter(fold__user=request.user, fold=fold)

            path = get_path(fold)
            tmp = folds
            while path:
                path_name = path.pop()
                if path_name not in tmp.keys():
                    tmp.update({path_name: {"YmFzZQ==": []}})
                    for note in notes:
                        tmp[path_name]["YmFzZQ=="].append(note.id)
                tmp = tmp[path_name]

        request.session['path'] = folds
        return render(request, 'index.html', {"folds": folds})
Example 44
    def return_execution_error(self, frame=1):
        """ Return the current function being executed.
        @param frame: the integer frame in stacktrace to use (e.g., 0 would indicate this function).
        @return
            - function: the function being executed, as well as file name and line being read.
        """

        result = {'error': None}

        # parse info from execution
        exc_type, exc_obj, exc_tb = exc_info()
        fname = path_split(exc_tb.tb_frame.f_code.co_filename)[frame]
        result['error'] = ("%s, %s, %s" % (str(exc_type), str(fname), str(exc_tb.tb_lineno)))

        # return
        return result
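
The exc_info/path_split idiom in isolation, here indexing with [1] to take the file's basename:

from os.path import split as path_split
from sys import exc_info

try:
    1 / 0
except ZeroDivisionError:
    exc_type, exc_obj, exc_tb = exc_info()
    fname = path_split(exc_tb.tb_frame.f_code.co_filename)[1]
    print('%s, %s, %s' % (exc_type, fname, exc_tb.tb_lineno))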
Example 45
def import_py_vars(pyfile):
    """
    @param pyfile: pyfile to import
    @type pyfile: str
    @return: dict
    @rtype: dict[str, RecipeVariable]
    """
    from os.path import split as path_split, splitext as path_splitext
    from sys import path as sys_path
    from importlib import import_module

    py_vars_dir, py_vars_name = path_split(pyfile)
    py_vars_name, ext = path_splitext(py_vars_name)
    sys_path.append(py_vars_dir)
    var_module = import_module(py_vars_name)
    return {k : v for k, v in var_module.__dict__.items() if not k.startswith("_")}
Example 46
def main(args):
    if not isdir(args['input_dir']):
        logging.error("crawldir {} not a dir! Quiting.".format(
            args['crawl-dir']))
        exit(-1)

    make_sure_dir_exists(path_split(args['output_file'])[0])
    with open(args['output_file'], "w") as out_file:
        imported_libs_counter = Counter()
        main_proc_filters = deepcopy(proc_filters_template)
        step = args['step']
        items_to_yield = list(yielder(args['input_dir']))
        current = 0
        proc_counter = 0

        while current < len(items_to_yield):
            with closing(Pool(args['num_cores'])) as pool, tqdm(
                    total=min(len(items_to_yield) - current, step)) as pbar:
                for my_proc_filters, my_imported_lib_counter, out_gnn_jsons in pool.imap_unordered(
                        graceful_get_records,
                        islice(items_to_yield, current, current + step),
                        chunksize=args['chunksize']):

                    imported_libs_counter += my_imported_lib_counter
                    for out_rep in out_gnn_jsons:
                        out_file.write("{}\n".format(out_rep))
                        proc_counter += 1

                    for index in range(0, len(proc_filters_template)):
                        main_proc_filters[index][1] += my_proc_filters[index][
                            1]

                    pbar.update()
                    del my_proc_filters

                print("before close")
                pool.close()
                print("before join")
                pool.join()

            current += step
            print(("Step done - current={}".format(current)))

    print("Records done")
    print(main_proc_filters)
    print("#Procs = {}".format(proc_counter))
    print("Output file written")
Example 47
    def _commit_log(self):
        fpth, mode = self._get_log_name()

        dirname = path_split(fpth)[0]
        try:
            makedirs(dirname)
        except FileExistsError:
            pass

        # just in case the log is really big, avoid derping
        # the whole thing into memory at once.
        with open(fpth, mode) as f:
            self._logbuf.seek(0, 0)  # beginning of file
            for line in self._logbuf:
                f.write(line)

        del self._logbuf
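
On Python 3 the try/except around makedirs can also be written with exist_ok; a minimal equivalent of the directory-preparation step (the log path is made up):

from os import makedirs
from os.path import split as path_split

fpth = '/tmp/demo/logs/app.log'
dirname = path_split(fpth)[0]
makedirs(dirname, exist_ok=True)  # same effect as catching FileExistsError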
Example 48
    def process_csv(self, file, chart_name="KLA"):
        """
        Analyzing data is ugly. Analyze 'file', where 'file' is a csv file
         corresponding to a batch data report with KLA data.
        """
        print("Opening new worksheet")
        xl, wb, ws, cells = xlObjs(file, visible=False)
        with HiddenXl(xl, True):
            # XXX what if cell not found?
            do_cell = cells.Find(What="DOPV(%)", After=cells(1, 1), SearchOrder=xlByRows)
            xcol = do_cell.Column + 1
            end_row = do_cell.End(xlDown).Row
            print("Performing data analysis")
            self._insert_time_col(ws, cells, xcol)
            self._insert_ln_col(ws, cells, xcol + 2)

            print("Creating data plot")

            # XXX possible in one call?
            ws.Columns(xcol + 3).Insert(Shift=xlToRight)
            ws.Columns(xcol + 3).Insert(Shift=xlToRight)

            ln_x, ln_y, lin_x, lin_y = _MakeNamedRanges(wb, ws, cells, 2, end_row, xcol - 1).get_ranges()

            # ln v time for specific chart
            chart = CreateChart(ws, xlXYScatter)
            CreateDataSeries(chart, ln_x, ln_y)
            FormatChart(chart, None, chart_name + "-LN(100-DOPV)", "Time(hour)", "-LN(DO PV (%))", True, False)

            # do v time
            chart2 = CreateChart(ws, xlXYScatter)
            CreateDataSeries(chart2, lin_x, lin_y)
            FormatChart(chart2, None, chart_name + "DO PV", "Time(hour)", "DO (%)", True, False)

            # uncomment to move to move chart to new sheet
            # xlLocationAsNewSheet = 1
            # chart.Location(1)

            save_name = file.replace(file[file.rfind("."):], '.xlsx')

            # uncomment to save in raw data  folder
            # wb.SaveAs(save_name, AddToMru=False)

            wb.SaveAs(self._path + path_split(save_name)[1], AddToMru=False, FileFormat=xlOpenXMLWorkbook)

        return save_name
Example 49
def main():
    global CEF_VER
    
    from argparse import ArgumentParser
    from os.path import isdir, split as path_split, splitext
    from glob import glob

    parser = ArgumentParser()
    parser.add_argument('path', metavar='PATH')
    parser.add_argument('-i', '--inplace', dest='inplace', action='store_true')
    parser.add_argument('-c', '--cef-ver', dest='cef_ver', choices=['1', '3'], default='0')
    args = parser.parse_args()

    if args.cef_ver == '0':
        if 'cef1' in args.path:
            CEF_VER = '1'
        elif 'cef3' in args.path:
            CEF_VER = '3'
        else:
            return parser.error('specify cef version')
    else:
        CEF_VER = args.cef_ver

    for path in glob(args.path):
        if isdir(path):
            print 'Skipping directory {}'.format(path)
            continue

        print 'Current file: {}'.format(path)
        with open(path) as f:
            new_header = replace_all(f.read())

        if not args.inplace:
            path += '_new'

        pre = ''
        if not new_header.startswith('module'):
            head, fname = path_split(path)
            mname = splitext(fname)[0]
            # str.lstrip/rstrip strip character sets, not substrings,
            # so remove the prefix/suffix explicitly
            if mname.startswith('cef_'):
                mname = mname[len('cef_'):]
            if mname.endswith('_capi'):
                mname = mname[:-len('_capi')]
            inc = '{}.{}'.format('internal', mname) if 'internal' in path else mname
            pre = 'module deimos.cef{}.{};\n\n'.format(CEF_VER, inc)

        with open(path, 'w') as f:
            f.write(pre)
            f.write(new_header)
Example 50
def exec_code(path):
    print("Handling \"{}\"...".format(path))
    folder, filename = path_split(path)

    cur_path = getcwd()
    chdir(folder)

    with open(filename, 'r', encoding="utf-8") as file:
        code = file.read().strip()

    stdout = StringIO()

    repeats = int(MAX_EXEC_TIME / count_time(code, stdout=stdout))

    exec_time = count_time(code, repeats=clamp(repeats, *REPEATS_CLAMP))

    chdir(cur_path)
    return code, stdout.getvalue().strip(), format_time(exec_time)
Example 51
    def on_moved(self, event):
        # Screenshots are moved in by the operating system
        # using a temporary name of the form ".NAME-wwww".
        path_parts = path_split(event.src_path)
        filename = path_parts[-1]
        m = re.search(r'^\.(.+)-\w{4}$', filename)

        # If the current file matches this naming.
        if m:
            # Get the path of the screenshot-to-be.
            path = path_join(path_parts[0], m.group(1))
            # Wait for it to finish moving (with time out).
            for i in range(1000):
                time.sleep(1)
                if isfile(path):
                    # Process the screenshot.
                    self.process(path)
                    return
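
The temporary-name pattern can be checked standalone, with a made-up macOS-style temp name:

import re
from os.path import join as path_join, split as path_split

src_path = '/Users/me/Desktop/.Screen Shot-Ab3d'
path_parts = path_split(src_path)
m = re.search(r'^\.(.+)-\w{4}$', path_parts[-1])
if m:
    print(path_join(path_parts[0], m.group(1)))
    # /Users/me/Desktop/Screen Shot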
Example 52
def read_compiler_logs(tar, root_dir, tar_files):
    res = {
        'compiler': {
            'dyninst': {
                'c': {
                    'path': '',
                    'name': 'Unknown',
                    'version': ''
                },
                'cxx': {
                    'path': '',
                    'name': 'Unknown',
                    'version': ''
                }
            },
            'testsuite': {
                'c': {
                    'path': '',
                    'name': 'Unknown',
                    'version': ''
                },
                'cxx': {
                    'path': '',
                    'name': 'Unknown',
                    'version': ''
                }
            }
        }
    }
    for d in ('dyninst', 'testsuite'):
        logfile = "{0:s}/{1:s}/build/compilers.conf".format(root_dir, d)
        if logfile in tar_files:
            compiler_log = tar.extractfile(logfile)
            compiler_results = read_properties(
                TextIOWrapper(compiler_log, encoding='utf-8'))
            for lang in ('c', 'cxx'):
                path = compiler_results['{0:s}_path'.format(lang)]
                (dir_name, base) = path_split(path)
                res['compiler'][d][lang]['path'] = dir_name
                res['compiler'][d][lang]['name'] = base
                res['compiler'][d][lang]['version'] = compiler_results[
                    '{0:s}_version'.format(lang)]
    return res
Example 53
    def __init__(self):
        super().__init__()

        self.setWindowTitle(path_split(__file__)[1])

        layout = QVBoxLayout()

        self.text_edit_input = QPlainTextEdit()
        self.text_edit_output = QPlainTextEdit()

        self.label_error = QLabel()
        self.label_error.setStyleSheet("QLabel { color : red; }")
        self.label_error.setTextInteractionFlags(Qt.TextSelectableByMouse)
        self.label_error.setSizePolicy(QSizePolicy.Expanding,
                                       QSizePolicy.Preferred)

        self.button_detail_error = QPushButton('...')
        self.button_detail_error.setFixedSize(20, 20)
        self.button_detail_error.setToolTip('Detail error')
        self.button_detail_error.hide()

        self.last_error_message = None
        self.last_detail_error_message = None

        self.button_detail_error.clicked.connect(
            self.show_detail_error_message)
        self.text_edit_input.textChanged.connect(self.input_text_changed)

        splitter = QSplitter()
        splitter.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        splitter.addWidget(self.text_edit_input)
        splitter.addWidget(self.text_edit_output)

        layout.addWidget(splitter)

        layout_error = QHBoxLayout()
        layout_error.addWidget(self.label_error)
        layout_error.addWidget(self.button_detail_error)

        layout.addLayout(layout_error)

        self.setLayout(layout)
Example 54
def _path_splitter(s, _d_match=re.compile(r'\.\d').match):
    """Split a string into its path components. Assumes a string is a path."""
    # If a PathLib object, use its functionality to perform the split.
    if has_pathlib and isinstance(s, PurePath):
        s = py23_str(s)
    path_parts = deque()
    p_appendleft = path_parts.appendleft
    # Continue splitting the path from the back until we have reached
    # '..' or '.', or until there is nothing left to split.
    path_location = s
    while path_location != os_curdir and path_location != os_pardir:
        parent_path = path_location
        path_location, child_path = path_split(parent_path)
        if path_location == parent_path:
            break
        p_appendleft(child_path)

    # This last append is the base path.
    # Only append if the string is non-empty.
    if path_location:
        p_appendleft(path_location)

    # Now, split off the file extensions using a similar method to above.
    # Continue splitting off file extensions until we reach a decimal number
    # or there are no more extensions.
    # We are not using built-in functionality of PathLib here because of
    # the recursive splitting up to a decimal.
    base = path_parts.pop()
    base_parts = deque()
    b_appendleft = base_parts.appendleft
    while True:
        front = base
        base, ext = path_splitext(front)
        if _d_match(ext) or not ext:
            # Reset base to before the split if the split is invalid.
            base = front
            break
        b_appendleft(ext)
    b_appendleft(base)

    # Return the split parent paths and then the split basename.
    return ichain(path_parts, base_parts)
Example 55
def safe_pickle(obj, filepath, **kwargs):
    """
    @param obj: Any pickleable object
    @type obj:  T
    @param filepath: filepath to save the pickle file
    @type filepath: str
    @param kwargs: dict of pickle kwargs
    @type kwargs: dict
    @return: None
    @rtype: None
    """
    temp = BytesIO()
    pickle_dump(obj, temp, **kwargs)

    head, _ = path_split(filepath)
    if not path_exists(head):
        makedirs(head)

    with open(filepath, 'wb') as f:
        f.write(temp.getvalue())
Example 56
def write_conda_meta(info, dst_dir, final_urls_md5s):
    user_requested_specs = info.get('user_requested_specs', info['specs'])
    cmd = path_split(sys.argv[0])[-1]
    if len(sys.argv) > 1:
        cmd = "%s %s" % (cmd, " ".join(sys.argv[1:]))

    builder = [
        "==> %s <==" % time.strftime('%Y-%m-%d %H:%M:%S'),
        "# cmd: %s" % cmd,
    ]
    dists = tuple(Dist(url) for url, _ in final_urls_md5s)

    builder.extend("+%s" % dist.full_name for dist in dists)
    if user_requested_specs:
        update_specs = [str(MatchSpec(s)) for s in user_requested_specs]
        builder.append("# update specs: %s" % update_specs)
    builder.append("\n")

    if not isdir(join(dst_dir, 'conda-meta')):
        os.makedirs(join(dst_dir, 'conda-meta'))
    with open(join(dst_dir, 'conda-meta', 'history'), 'w') as fh:
        fh.write("\n".join(builder))
Example 57
def s_path_join(path, *paths):
    """
    Wrapper for the os.path.join function, splits every path component to
    replace it with a system specific path separator. This is for consistent
    path separators (and also systems that don't use either '\' or '/')


    Parameter
    ----------------------------------------------------------------------------
    :params path, paths: string
        see os.path.join

    :return: string
    ----------------------------------------------------------------------------
        a joined path, see os.path.join

    >>> s_path_join('./z/d/', '../a/b/c/f')  # output shown for POSIX
    './z/d/../a/b/c/f'
    """
    p_splits = list(path_split(path))
    for r in map(path_split, paths):
        p_splits.extend(r)
    return path_join(*p_splits)