Code example #1
File: update.py  Project: hatnote/pacetrack
    def save(self):
        """save to campaign_dir/data/YYYYMM/state_YYMMDD_HHMMSS.json
        and campaign_dir/data/YYYYMM/state_full_YYMMDD_HHMMSS.json"""
        if not self.goal_results or not self.article_results:
            raise RuntimeError(
                'only intended to be called after a full results population with from_api()'
            )
        save_timestamp = datetime.datetime.utcnow().isoformat()

        result_path = self.campaign.base_path + self.timestamp.strftime(
            STATE_PATH_TMPL)
        mkdir_p(os.path.split(result_path)[0])

        result_data = {
            'campaign_name': self.campaign.name,
            'timestamp': self.timestamp,
            'save_date': save_timestamp,
            'campaign_results': self.campaign_results,
            'goal_results': self.goal_results,
            'title_list': self.campaign.article_title_list
        }
        with atomic_save(result_path) as f:
            json.dump(result_data, f, indent=2, sort_keys=True, default=str)

        full_result_fn = self.timestamp.strftime(STATE_FULL_PATH_TMPL)
        full_result_path = self.campaign.base_path + full_result_fn
        result_data['article_results'] = [
            attr.asdict(a) for a in self.article_list
        ]
        with atomic_save(full_result_path) as f:
            gzf = gzip.GzipFile(filename=full_result_fn, fileobj=f)
            json.dump(result_data, gzf, default=str)
            gzf.close()

        return
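
All of the snippets here appear to use atomic_save from boltons.fileutils, which writes to a temporary file and then renames it over the destination so readers never see a partially written file. Below is a minimal sketch of the gzip-plus-JSON pattern from example #1, adjusted so it also works on Python 3 (where json.dump emits str rather than bytes); the save_state helper and the state.json.gz path are hypothetical:

import gzip
import io
import json

from boltons.fileutils import atomic_save


def save_state(data, path='state.json.gz'):  # hypothetical helper and path
    # atomic_save yields a binary temp file; on clean exit it is renamed over `path`
    with atomic_save(path) as f:
        gzf = gzip.GzipFile(fileobj=f, mode='wb')  # compress on the way out
        # json.dump writes str on Python 3, so wrap the gzip stream in a text layer
        with io.TextIOWrapper(gzf, encoding='utf-8') as txt:
            json.dump(data, txt, default=str)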
Code example #2
def update_index(pypier_path):
    new_index_bytes, new_readme_bytes = generate_index(pypier_path)
    index_path = pypier_path + '/packages/index.html'
    readme_path = pypier_path + '/packages/README.md'
    with fileutils.atomic_save(index_path) as f:
        f.write(new_index_bytes)
    with fileutils.atomic_save(readme_path) as f:
        f.write(new_readme_bytes)
    return
Code example #3
def _main():
    start_time = time.time()
    with open('projects.yaml') as f:
        projects = yaml.load(f)['projects']

    entries = []

    for p in projects:
        print 'processing', p['name']
        info = dict(p)
        if info.get('skip'):
            continue

        info['url'] = info.get('url', info.get('gh_url'))

        if info.get('gh_url'):
            gh_info = get_gh_project_info(info['gh_url'])
            info.update(gh_info)

        info['is_zerover'] = info.get('is_zerover', not info.get('emeritus', False))

        entries.append(info)

    from pprint import pprint

    pprint(entries)

    res = {'projects': entries,
           'gen_date': datetime.datetime.utcnow().isoformat(),
           'gen_duration': time.time() - start_time}

    with atomic_save('projects.json') as f:
        f.write(json.dumps(res, indent=2, sort_keys=True, default=_json_default))

    return
Code example #4
    def write_yaml(self, stat: OutputProduct, tmp_filenames: List[Path], yaml_filename=None, multiband=True):
        output_filenames = [self.output_filename_tmpname[tmp_filename]
                            for tmp_filename in tmp_filenames]

        uris = [output_filename.absolute().as_uri()
                for output_filename in output_filenames]

        def layer(index):
            if multiband:
                return index + 1
            return 1

        band_uris = {name: {'layer': layer(index), 'path': uris[index]}
                     for index, name in enumerate(stat.product.measurements)}

        datasets = self._find_source_datasets(stat, uri=None, band_uris=band_uris)

        if yaml_filename is None:
            yaml_filename = str(output_filenames[0].with_suffix('.yaml'))

        # Write to Yaml
        if len(datasets) == 1:  # I don't think there should ever be more than 1 dataset in here...
            _LOG.info('writing dataset yaml for %s to %s', stat, yaml_filename)
            with fileutils.atomic_save(yaml_filename) as yaml_dst:
                yaml_dst.write(datasets.values[0])
        else:
            _LOG.error('Unexpected more than 1 dataset %r being written at once, '
                       'investigate!', datasets)
Code example #5
File: scan.py  Project: sixy6e/digitalearthau
def build_pathset(collection: Collection,
                  cache_path: Path = None,
                  log=_LOG) -> dawg.CompletionDAWG:
    """
    Build a combined set (in dawg form) of all dataset paths in the given index and filesystem.

    Optionally use the given cache directory to cache repeated builds.
    """
    locations_cache = cache_path.joinpath(query_name(
        collection.query), 'locations.dawg') if cache_path else None
    if locations_cache:
        fileutils.mkdir_p(str(locations_cache.parent))

    log = log.bind(collection_name=collection.name)
    if locations_cache and not cache_is_too_old(locations_cache):
        path_set = dawg.CompletionDAWG()
        log.debug("paths.trie.cache.load", file=locations_cache)
        path_set.load(str(locations_cache))
    else:
        log.info("paths.trie.build")
        path_set = dawg.CompletionDAWG(
            chain(collection.iter_index_uris(), collection.iter_fs_uris()))
        log.info("paths.trie.done")
        if locations_cache is not None:
            log.debug("paths.trie.cache.create", file=locations_cache)
            with fileutils.atomic_save(str(locations_cache)) as f:
                path_set.write(f)
    return path_set
Code example #6
    def write_yaml(self, stat: OutputProduct, tmp_filename: Path):
        output_filename = self.output_filename_tmpname[tmp_filename]

        uri = output_filename.absolute().as_uri()

        def band_info(measurement_name):
            return {
                'layer': list(stat.product.measurements).index(measurement_name) + 1,
                'path': uri
            }

        band_uris = {
            name: band_info(name)
            for name in stat.product.measurements
        }

        datasets = self._find_source_datasets(stat,
                                              uri=uri,
                                              band_uris=band_uris)

        yaml_filename = str(output_filename.with_suffix('.yaml'))

        # Write to Yaml
        if len(datasets) == 1:  # I don't think there should ever be more than 1 dataset in here...
            _LOG.info('writing dataset yaml for %s to %s', stat, yaml_filename)
            with fileutils.atomic_save(yaml_filename) as yaml_dst:
                yaml_dst.write(datasets.values[0])
        else:
            _LOG.error(
                'Unexpected more than 1 dataset %r being written at once, '
                'investigate!', datasets)
Code example #7
File: project.py  Project: johnpneumann/MarkdownClerk
def generate_project_daily(location, template_file, settings_file):
    """Generate a new file for each day of the project.

    Args:
        location (:obj:`str`): The location for the project structure.
        template_file (:obj:`str`): The template file to use for the generation.
        settings_file (:obj:`str`): The settings file to use for the markdown.

    Returns:
        int: Status code of success or failure. Anything except 0 is a failure.

    """
    with io.open(settings_file) as fopen:
        data = yaml.safe_load(fopen.read())
    project_vars = data['vars']
    weeks = data['settings']['weeks']
    days = 7 if data['settings']['days'] > 7 else data['settings']['days']

    template_dir = os.path.dirname(template_file)
    jinja_env = Environment(loader=FileSystemLoader(template_dir), trim_blocks=True)
    template = jinja_env.get_template(os.path.basename(template_file))
    for week in range(1, weeks + 1):
        LOGGER.info('Generating week %s files', week)
        for day in range(1, days + 1):
            day_path = os.path.join(location, 'week{:02d}'.format(week), 'day{:02d}'.format(day))
            if _create_dir(day_path):
                return 1
            readme_path = os.path.join(day_path, 'README.md')
            try:
                with fileutils.atomic_save(readme_path) as fopen:
                    fopen.writelines(template.render(**project_vars).encode('utf-8'))
            except (OSError, IOError):
                LOGGER.exception('Failed to write data to %s', readme_path, exc_info=True)
                return 1
    return 0
Code example #8
 def fin():
     mlog.debug('Closing database: {}'.format(db_file))
     with open(cached_db_file, 'r') as ff:
         mlog.debug('Reverting to cached database: {}'.format(cached_db_file))
         with atomic_save(db_file) as f:
             json.dump(json.load(ff), f)
     os.remove(cached_db_file)
Code example #9
def render(plist, pdir):
    "generate the list markdown from the yaml listing"
    topic_map = plist.get_projects_by_type('topic')
    topic_toc_text = format_tag_toc(topic_map)
    projects_by_topic = format_all_categories(topic_map)

    plat_map = plist.get_projects_by_type('platform')
    plat_toc_text = format_tag_toc(plat_map)
    projects_by_plat = format_all_categories(plat_map)

    context = {
        'TOPIC_TOC': topic_toc_text,
        'TOPIC_TEXT': projects_by_topic,
        'PLATFORM_TOC': plat_toc_text,
        'PLATFORM_TEXT': projects_by_plat,
        'TOTAL_COUNT': len(plist.project_list)
    }

    templates_path = pdir + '/templates/'
    if not os.path.isdir(templates_path):
        raise APACLIError('expected "templates" directory at %r' %
                          templates_path)

    for filename in iter_find_files(templates_path, '*.tmpl.md'):
        tmpl_text = open(filename).read()
        target_filename = os.path.split(filename)[1].replace('.tmpl', '')
        output_text = tmpl_text.format(**context)
        with atomic_save(pdir + '/' + target_filename) as f:
            f.write(output_text.encode('utf8'))

    return
Code example #10
def main():
    plist = ProjectList.from_path('projects.yaml')
    print([p for p in plist.project_list if not p.desc])
    topic_map = plist.get_projects_by_type('topic')
    topic_toc_text = format_tag_toc(topic_map)
    projects_by_topic = format_all_categories(topic_map)

    plat_map = plist.get_projects_by_type('platform')
    plat_toc_text = format_tag_toc(plat_map)
    projects_by_plat = format_all_categories(plat_map)

    context = {
        'TOPIC_TOC': topic_toc_text,
        'TOPIC_TEXT': projects_by_topic,
        'PLATFORM_TOC': plat_toc_text,
        'PLATFORM_TEXT': projects_by_plat,
        'TOTAL_COUNT': len(plist.project_list)
    }

    for filename in iter_find_files(TEMPLATES_PATH, '*.tmpl.md'):
        tmpl_text = open(filename).read()
        target_filename = os.path.split(filename)[1].replace('.tmpl', '')
        output_text = tmpl_text.format(**context)
        with atomic_save(target_filename) as f:
            f.write(output_text.encode('utf8'))

    return
Code example #11
File: update.py  Project: hatnote/pacetrack
def render_home(ptcs):
    ctx = glom(ptcs, {'campaigns': [T.get_summary_ctx()]})
    index_html = ASHES_ENV.render('index.html', ctx)
    index_path = STATIC_PATH + '/index.html'
    with atomic_save(index_path) as f:
        f.write(index_html.encode('utf-8'))
    return
Code example #12
File: update.py  Project: hatnote/pacetrack
    def update(self, force=False, _act=None):
        "does it all"
        final_update_log_path = STATIC_PATH + 'campaigns/%s/update.log' % self.id
        _act['name'] = self.name
        _act['id'] = self.id
        _act['log_path'] = final_update_log_path
        now = datetime.datetime.utcnow()
        with atomic_save(final_update_log_path) as f:
            cur_update_sink = build_stream_sink(f)
            old_sinks = tlog.sinks
            tlog.set_sinks(old_sinks + [cur_update_sink])
            try:
                self.load_article_list()
                self.load_latest_state()

                next_fetch = now if not self.latest_state else self.latest_state.timestamp + self.fetch_frequency
                if not force and next_fetch > now:
                    tlog.critical('skip_fetch').success(
                        '{cid} not out of date, skipping until next fetch at {next_fetch}. ',
                        cid=self.id,
                        next_fetch=next_fetch)
                    return

                self.record_state()  # defaults to now
                self.load_latest_state()
                self.prune_by_frequency()
                self.render_report()
                self.render_article_list()
            finally:
                tlog.set_sinks(old_sinks)
        return
Code example #13
def normalize(plist, pfile):
    """normalize project and tag order, checking for duplicates
    and format divergences, overwrites the yaml listing"""
    plist.normalize()
    new_yaml = plist.to_yaml()
    with atomic_save(pfile) as f:
        f.write(new_yaml.encode('utf8'))
    return
Code example #14
File: conf.py  Project: wurstfabrik/wurst-cli
    def save(self):
        if self._data is None:
            return

        with atomic_save(self._path) as outfp:
            self._data.prune()
            data = toml.dumps(self._data.to_dict()).encode("utf8")
            outfp.write(data)
Code example #15
File: cli.py  Project: mcgyver5/apatite
def _pull_single_repo(proj, repo_dir, rm_cached=False):
    # TODO: turn rm_cached into a flag
    # TODO: shouldn't this be the callable submitted to the executor?
    vcs, url = proj.clone_info
    if url is None:
        print_err('project "%s" has unsupported vcs type for repo url: %r' %
                  (proj.name, proj.repo_url))
        return  # TODO
    target_dir = repo_dir + proj.name_slug + '/'
    cwd = repo_dir
    mode = 'clone'
    if os.path.exists(target_dir):
        if not rm_cached:
            mode = 'update'
            cwd = target_dir
        else:
            # sanity check we're in the right place
            if os.path.exists(target_dir + '../.apatite_repo_dir'):
                shutil.rmtree(target_dir)
            else:
                raise Exception('non-apatite path blocking apatite clone: %r' %
                                target_dir)

    try:
        cmd_tmpl = VCS_TMPLS[vcs][mode]
    except KeyError:
        raise Exception('unsupported operation %r with vcs %r' % (mode, vcs))
    cmd = format_list(cmd_tmpl, cmd=vcs, url=url, target_dir=target_dir)
    # avoid asking for github username and password, since closing stdin doesn't work

    cur_dt = datetime.datetime.utcnow()
    started = time.time()
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            cwd=cwd,
                            env={'GIT_TERMINAL_PROMPT': '0'})
    proc.project_name = proj.name_slug
    proc.target_dir = target_dir
    CUR_PROCS.append(proc)
    stdout, stderr = proc.communicate()
    CUR_PROCS.remove(proc)
    proc_res = ProcessResult(
        returncode=proc.returncode,
        stdout=stdout.decode('utf8'),
        stderr=stderr,
        start_time=started,
        end_time=datetime.datetime.utcnow())  # likely a bug: start_time is a time.time() float, so end_time - start_time raises TypeError
    if proc_res.returncode != 0:
        print_err('%r exited with code %r, stderr:' %
                  (proc.args, proc_res.returncode))
        print_err(proc_res.stderr)
    else:
        with atomic_save(target_dir + '/.apatite_last_pulled') as f:
            f.write(cur_dt.isoformat().partition('.')[0].encode('utf8'))

    return proc_res
Code example #16
File: update.py  Project: hatnote/pacetrack
    def render_report(self):
        start_state = [{
            'name': k,
            'result': v
        } for k, v in self.start_state.goal_results.items()]
        start_state.sort(key=lambda g: g['name'])
        latest_state = [{
            'name': k,
            'result': v
        } for k, v in self.latest_state.goal_results.items()]
        latest_state.sort(key=lambda g: g['name'])
        combined = [{
            'start': s[0],
            'latest': s[1]
        } for s in izip_longest(start_state, latest_state)]
        # TODO: Also combine goals, so you can show info about targets, etc.

        ctx = {
            'id': self.id,
            'name': self.name,
            'lang': self.lang,
            'description': self.description,
            'contacts': self.contacts,
            'wikiproject_name': self.wikiproject_name,
            'campaign_start_date': self.campaign_start_date.isoformat(),
            'campaign_end_date': self.campaign_end_date.isoformat(),
            'date_created': self.date_created.isoformat(),
            'date_updated': datetime.datetime.utcnow().strftime(UPDATED_DT_FORMAT),
            'goals': self.goals,
            'article_count': len(self.article_title_list),
            'start_state_goal': start_state,
            'latest_state_goal': latest_state,
            'combined_state': combined
        }
        campaign_static_path = STATIC_PATH + 'campaigns/%s/' % self.id
        mkdir_p(campaign_static_path)
        report_html = ASHES_ENV.render('campaign.html', ctx)
        report_path = campaign_static_path + 'index.html'
        report_json_path = campaign_static_path + 'campaign.json'
        with atomic_save(report_path) as html_f, atomic_save(
                report_json_path) as json_f:
            html_f.write(report_html)
            json.dump(ctx, json_f, indent=2, sort_keys=True)
        return
Code example #17
File: cli.py  Project: mcgyver5/apatite
def normalize(plist, pfile):
    """normalize project and tag order, checking for duplicates
    and format divergences, overwrites the yaml listing"""
    plist.normalize()
    new_yaml = plist.to_yaml()
    # say no to trailing whitespace
    new_yaml = '\n'.join([line.rstrip() for line in new_yaml.splitlines()])
    with atomic_save(pfile) as f:
        f.write(new_yaml.encode('utf8'))
    return
Code example #18
File: update.py  Project: hatnote/pacetrack
 def render_article_list(self):
     all_results = self._get_all_results()
     for goal in self.goals:
         goal['slug'] = slugify(goal['name'])
     ctx = {
         'name': self.name,
         'lang': self.lang,
         'description': self.description,
         'contacts': self.contacts,
         'wikiproject_name': self.wikiproject_name,
         'campaign_start_date': self.campaign_start_date.isoformat(),
         'campaign_end_date': self.campaign_end_date.isoformat(),
         'date_created': self.date_created.isoformat(),
         'date_updated': datetime.datetime.utcnow().strftime(UPDATED_DT_FORMAT),
         'article_count': len(self.article_title_list),
         'all_results': all_results,
         'goals': [{'name': 'Article', 'slug': 'title'}] + sorted(self.goals, key=lambda s: s['name'])
     }
     campaign_static_path = STATIC_PATH + 'campaigns/%s/' % self.id
     article_list_html = ASHES_ENV.render('articles.html', ctx)
     article_list_path = campaign_static_path + 'articles.html'
     article_list_json_path = campaign_static_path + 'articles.json'
     mkdir_p(os.path.split(article_list_path)[0])
     with atomic_save(article_list_path) as html_f, atomic_save(
             article_list_json_path) as json_f:
         html_f.write(article_list_html.encode('utf-8'))
         json.dump(ctx, json_f, indent=2, sort_keys=True)
     return
Code example #19
def append_credentials(pgpass, dbcreds):
    """ Append credentials to pgpass file """
    try:
        with pgpass.open() as fin:
            data = fin.read()
    except IOError:
        data = ''

    with atomic_save(str(pgpass.absolute()), file_perms=0o600, text_mode=True) as fout:
        if data:
            fout.write(data)
            if not data.endswith('\n'):
                fout.write('\n')
        fout.write(':'.join(dbcreds) + '\n')
Code example #20
def _main():
    start_time = time.time()
    with open(PROJ_PATH + '/projects.yaml') as f:
        projects = yaml.load(f)['projects']
    #projects = [p for p in projects if p['name'] == 'scikit-learn']
    #if not projects:
    #    return
    try:
        with open(PROJ_PATH + '/projects.json') as f:
            cur_data = json.load(f)
            cur_projects = cur_data['projects']
            cur_gen_date = isoparse(cur_data['gen_date'])
    except (IOError, KeyError):
        cur_projects = []
        cur_gen_date = None

    if cur_gen_date:
        fetch_outdated = (datetime.datetime.utcnow() -
                          cur_gen_date) > datetime.timedelta(seconds=3600)
    else:
        fetch_outdated = True
    cur_names = sorted([c['name'] for c in cur_projects])
    new_names = sorted([n['name'] for n in projects])

    tpr = os.getenv('TRAVIS_PULL_REQUEST')
    if tpr and tpr != 'false':
        print('Pull request detected. Skipping data update until merged.')
        return
    if fetch_outdated or cur_names != new_names or os.getenv(
            'ZV_DISABLE_CACHING'):
        entries = fetch_entries(projects)
    else:
        print('Current data already up to date, exiting.')
        return

    pprint(entries)

    res = {
        'projects': entries,
        'gen_date': datetime.datetime.utcnow().isoformat(),
        'gen_duration': time.time() - start_time
    }

    with atomic_save(PROJ_PATH + '/projects.json') as f:
        f.write(
            json.dumps(res, indent=2, sort_keys=True, default=_json_default))

    return
Code example #21
def append_credentials(pgpass, dbcreds):
    """ Append credentials to pgpass file """
    try:
        with pgpass.open() as fin:
            data = fin.read()
    except IOError:
        data = ''

    # The permissions on .pgpass must disallow any access to world or group.
    # Hence, chmod 0600 ~/.pgpass. If the permissions are less strict than this, the file will be ignored.
    with atomic_save(str(pgpass.absolute()), file_perms=0o600, text_mode=True) as fout:
        if data:
            fout.write(data)
            if not data.endswith('\n'):
                fout.write('\n')

        fout.write(':'.join(dbcreds) + '\n')
        print('\nUpdated DEA Database Password in ~/.pgpass file.')
Code example #22
    def write_yaml(self, stat: OutputProduct, tmp_filename: Path):
        output_filename = self.output_filename_tmpname[tmp_filename]

        datasets = self._find_source_datasets(stat,
                                              uri=output_filename.as_uri())

        yaml_filename = str(output_filename.with_suffix('.yaml'))

        # Write to Yaml
        if len(datasets) == 1:  # I don't think there should ever be more than 1 dataset in here...
            _LOG.info('writing dataset yaml for %s to %s', stat, yaml_filename)
            with fileutils.atomic_save(yaml_filename) as yaml_dst:
                yaml_dst.write(datasets.values[0])
        else:
            _LOG.error(
                'Unexpected more than 1 dataset %r being written at once, '
                'investigate!', datasets)
Code example #23
def create_maildir_directories(basedir, is_folder=False):
    os_makedirs(basedir, 0o700, exist_ok=True)
    new_path = None
    for subdir_name in ('tmp', 'cur', 'new'):
        subdir_path = os.path.join(basedir, subdir_name)
        os_makedirs(subdir_path, 0o700, exist_ok=True)
        if subdir_name == 'new':
            new_path = subdir_path

    # The maildir++ description [1] mentions a "maildirfolder" file for each
    # subfolder. Dovecot does not create such a file but doing so seems
    # harmless.
    # http://www.courier-mta.org/imap/README.maildirquota.html
    if is_folder:
        maildirfolder_path = os.path.join(basedir, 'maildirfolder')
        # never overwrite an existing "maildirfolder" file (just being overcautious)
        # in Python 3 we could also use "open(..., 'xb')" and catch FileExistsError
        with atomic_save(maildirfolder_path, overwrite=False) as fp:
            pass
    return new_path
Code example #24
 def urlretrieve(self, url, dest, print_progress=True):
     resp = self._session.get(url, stream=True)
     total_size, total_chunks = 0, 0
     with fileutils.atomic_save(dest) as f:
         content_iter = resp.iter_content(1024)
         while 1:
             size_str = bytes2human(total_size, 1).rjust(7)
             msg = ('%s downloaded from %s\r' % (size_str, url))
             if print_progress and (total_chunks % 20) == 0:
                 sys.stdout.write(msg)
                 sys.stdout.flush()
             try:
                 chunk = next(content_iter)
             except StopIteration:
                 if print_progress:
                     sys.stdout.write('\n')
                 break
             total_size += len(chunk)
             total_chunks += 1
             f.write(chunk)
     return
Code example #25
File: fc_app.py  Project: GeoscienceAustralia/fc
def dataset_to_geotif_yaml(dataset: xarray.Dataset,
                           odc_dataset: Dataset,
                           filename: Union[Path, str],
                           variable_params=None):
    """
    Write the dataset out as a set of geotifs with metadata in a yaml file.
    There will be one geotiff file per band.
    The band name is added into the file name.
    e.g. ls8_fc.tif -> ls8_fc_BS.tif

    :param dataset:
    :param filename: Output filename
    :param variable_params: dict of variable_name: {param_name: param_value, [...]}
                            Used to get band names.

    """

    bands = variable_params.keys()
    abs_paths, _, yml = tif_filenames(filename, bands)

    Path(filename).parent.mkdir(parents=True, exist_ok=True)

    # Write out the yaml file
    with fileutils.atomic_save(str(yml)) as stream:
        yaml.safe_dump(odc_dataset.metadata_doc, stream, encoding='utf8')

    # Iterate over the bands
    for key, bandfile in abs_paths.items():
        slim_dataset = dataset[[key]]  # create a one band dataset
        attrs = slim_dataset[key].attrs.copy()  # To get nodata in
        del attrs['crs']  # its format is poor
        del attrs['units']  # its format is poor
        slim_dataset[key] = dataset.data_vars[key].astype('int16', copy=True)
        write_geotiff(bandfile,
                      slim_dataset.isel(time=0),
                      profile_override=attrs)
Code example #26
 def write(self):  # TODO: need way to get contents.
     'write contents to file'
     contents = self.get_contents()
     with atomic_save(self.path) as file:
         file.write(contents)
     return
Code example #27
File: puzzle.py  Project: mykter/lichess-puzzle-stats
def save(path, data):
    with atomic_save(path, text_mode=True) as f:
        json.dump(data, f)
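
Most examples here call .encode(...) before writing because the file yielded by atomic_save is opened in binary mode by default; example #27 above and the pgpass examples pass text_mode=True instead. A minimal sketch contrasting the two modes, assuming boltons.fileutils.atomic_save and a hypothetical notes.txt destination:

from boltons.fileutils import atomic_save

# Default: the managed temporary file is binary, so text must be encoded explicitly.
with atomic_save('notes.txt') as f:
    f.write(u'hello, world\n'.encode('utf-8'))

# text_mode=True: the temporary file is opened in text mode and accepts str directly.
with atomic_save('notes.txt', text_mode=True) as f:
    f.write(u'hello, world\n')

The pgpass examples additionally pass file_perms=0o600 so the replacement file is created with owner-only permissions, which .pgpass requires.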
Code example #28
def pypier(args, reqs):
    '''
    only supports pure-python repos for now
    '''
    parser = argparse.ArgumentParser(prog='pypier')
    parser.add_argument('cmd',
                        choices=('config', 'publish', 'pip-index') + PIP_CMDS)
    cmd = parser.parse_args(args[1:2]).cmd
    cache = reqs.cache
    executor = reqs.executor
    site_config = reqs.site_config
    pypier_repo = site_config['pypier']['repo']
    pypier_repo_ro = site_config['pypier']['repo_ro']
    if cmd == 'config':
        print
        print 'PyPIER repos:'
        print '  ', pypier_repo_ro, '(fetch)'
        print '  ', pypier_repo, '(publish)'
    elif cmd == 'publish':
        parser.add_argument('--dry-run', action='store_true')
        arg_vals = parser.parse_args(args[1:])
        setup_dir = find_project_dir(os.getcwd(), 'setup.py')
        pypier_read_write = cache.workon_project_git('pypier', pypier_repo)
        executor.python('setup.py', 'sdist').redirect(cwd=setup_dir)
        # TODO manylinux wheels?  OSX wheels?
        version = executor.python('setup.py', version=None).batch()[0].strip()
        output = [
            fn for fn in os.listdir(setup_dir + '/dist/') if version in fn
        ]
        name = output[0].split('-', 1)[0]
        # typical artifact: foo-ver.tar.gz
        dst = pypier_read_write.path + '/packages/' + name + '/'
        fileutils.mkdir_p(dst)
        # TODO: instead of just looking for anything in the dist
        # directory, query setup for the version and check for that.
        for result in output:
            if os.path.exists(os.path.join(dst, result)):
                raise EnvironmentError(
                    "{} has already been published".format(result))
        for result in output:
            shutil.copy(setup_dir + '/dist/' + result, dst)
        with fileutils.atomic_save(os.path.join(dst, 'pkg_info.json')) as f:
            pkg_info = get_pkg_info(executor, setup_dir)
            pkg_info_json = json.dumps(pkg_info, indent=2, sort_keys=True)
            f.write(pkg_info_json + '\n')
        update_index(pypier_read_write.path)
        source_metadata = get_source_metadata(executor, setup_dir)
        commit_msg = 'PyPIER publish: {}\n\n{}\n'.format(
            ', '.join(output),
            json.dumps(source_metadata, indent=2, sort_keys=True))
        pypier_read_write.push(commit_msg, dry_run=arg_vals.dry_run)
    elif cmd == 'pip-index':
        pypier_read_only = cache.pull_project_git('pypier', pypier_repo_ro)
        link_path = pypier_read_only + '/packages/index.html'
        print link_path  # NOTE: this print command is the primary purpose
    elif cmd in PIP_CMDS:
        pypier_read_only = cache.pull_project_git('pypier', pypier_repo_ro)
        link_path = pypier_read_only + '/packages/index.html'
        #env = dict(os.environ)
        #env['PIP_FIND_LINKS'] = ' '.join(
        #    [link_path] + env.get('PIP_FIND_LINKS', '').split())
        # TODO: figure out clean way to extend env
        # TODO: remove ALL_PROXY='' once urllib3 + requests
        #       do a release and don't pre-emptively die
        #       on socks5h:// proxy
        executor.patch_env(PIP_FIND_LINKS=link_path,
                           ALL_PROXY='').command(['python', '-m', 'pip'] +
                                                 args[1:]).redirect(
                                                     stdout=sys.stdout,
                                                     stderr=sys.stderr)
    else:
        # argparse should catch this above
        raise ValueError('unrecognized sub-command %r' % cmd)
Code example #29
File: file_keys.py  Project: darrida/pocket_protector
 def write(self):
     'write contents to file'
     contents = self.get_contents()
     with atomic_save(self.path) as file:
         file.write(contents.encode('utf8'))
     return
Code example #30
File: bethmetamod.py  Project: harvimt/bethmetamod
async def main(loop):
	download_timer = Timer()
	extract_timer = Timer()
	mod_converge_timer = Timer()
	apply_converge_timer = Timer()
	purge_timer = Timer()
	preproc_timer = Timer()
	postproc_timer = Timer()
	load_order_timer = Timer()
	enable_plugins_timer = Timer()

	mod_list = [
		# Install outside of Data/
		FastExit(),
		FourGBPatch(),
		OBSE(),
		ENB(),
		ENBoost(),
		MoreHeap(),
		# OBSE Plugins
		OneTweak(),
		# OBSETester(),
		MenuQue(),
		ConScribe(),
		Pluggy(),
		NifSE(),
		# Performance
		Streamline(),
		OSR(),
		# Necessary Tweaks
		ATakeAllAlsoBooks(),
		DarnifiedUI(),
		DarnifiedUIConfigAddon(),
		HarvestFlora(),
		HarvestContainers(),
		# Textures
		QTP3R(),
		GraphicImprovementProject(),
		ZiraHorseCompilationModpack(),
		RingRetexture(),
		KafeisArmoredCirclets(),
		KoldornsSewerTextures2(),
		KoldornsCaveTextures2(),
		MEAT(),
		BomretTexturePackForShiveringIslesWithUSIP(),
		# Gameplay
		RealisticLeveling(),
		HUDStatusBars(),
		UV3(),
		# Install Last
		INITweaks(),
		ArchiveInvalidationInvalidated(),
	]
	converged_paths = {}
	path_mod_owner = {}  # which mod owns which path
	for path in recurse_files(Config.VANILLA_DIR):
		converged_paths[str(path).lower()] = Config.VANILLA_DIR / path

	log.info('downloading')
	with download_timer:
		for mod in mod_list:
			async with aiohttp.ClientSession(loop=loop) as session:
				await mod.download(session)

	if False:  # stop after download?
		log.info('stopping after download')
		return

	log.info('extracting')
	with extract_timer:
		for mod in mod_list:
			await mod.extract()

	if False:  # stop after extract?
		log.info('stopping after extract')
		return

	log.info('pre-processing')
	with preproc_timer:
		for mod in mod_list:
			await mod.preprocess()

	log.info('calculating convergence for each mod')
	with mod_converge_timer:
		for mod in mod_list:
			log.info(f'converging {mod.mod_name}')
			for source_path, dest_path in mod.modify():
				if not isinstance(dest_path, Path):
					raise Exception(f'{dest_path} is not a Path!')
				elif dest_path.is_absolute():
					raise Exception(f'{dest_path} is not absolute')
				if not isinstance(source_path, Path):
					raise Exception(f'{source_path} is not a Path!')
				elif not source_path.is_absolute():
					raise Exception(f'{source_path} is not absolute')
				elif not source_path.exists():
					raise Exception(f'{source_path} does not exist, bad modify code')
				elif not source_path.is_file():
					raise Exception(f'{source_path} is not a regular file.')

				converged_paths[str(dest_path).lower()] = source_path
				path_mod_owner[str(dest_path).lower()] = mod.mod_name

	log.info('applying convergence')
	with apply_converge_timer:
		for dest_path, source_path in converged_paths.items():
			dest_path = Config.game.root_dir / dest_path
			if not dest_path.exists() or not samefile(str(dest_path), str(source_path)):
				if dest_path.exists():
					dest_path.unlink()  # FIXME move to purged dir?
				dest_path.parent.mkdir(exist_ok=True, parents=True)
				try:
					create_hardlink(str(source_path), str(dest_path))
				except FileNotFoundError:
					raise Exception(f'failed to hard link {source_path} to {dest_path} {source_path} (or {dest_path.parent}) not found')

	log.info('purging')
	with purge_timer:
		try:
			with Config.mod_ownership_path.open('rb') as f:
				old_path_mod_owner = json.load(f)
		except FileNotFoundError:
			old_path_mod_owner = {}
		purged_root = Config.PURGED_DIR / datetime.now().isoformat().replace(':', '')
		for path in recurse_files(Config.game.root_dir):

			if (
				str(path).lower() not in converged_paths and
				not path.suffix.lower() in {'.ini', '.cfg', '.json', '.log'} and
				#TODO don't purge xml files unless they're menu files
				not path.parts[0].lower() in {'obmm', 'mopy'}
			):
				if str(path).lower() in old_path_mod_owner:
					(Config.game.root_dir / path).unlink()
				else:
					purged_path = purged_root / path
					purged_path.parent.mkdir(exist_ok=True, parents=True)
					(Config.game.root_dir / path).rename(purged_path)

		log.info('purging empty directories')
		for d in recurse_dirs(Config.game.root_dir):
			if dir_is_empty(d):
				d.rmdir()

	log.info('postprocessing')
	with postproc_timer:
		for mod in mod_list:
			await mod.postprocess()
	log.info('Done Applying Changes')

	log.info('modifying load order')
	with load_order_timer:
		boss_uninstall_string = get_regkey('HKLM', r'SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\BOSS', 'UninstallString')
		boss_install_location = Path(shlex.split(boss_uninstall_string)[0]).parent
		boss_exe_path = boss_install_location / 'boss.exe'

		proc = await asyncio.create_subprocess_exec(
			str(boss_exe_path), '-s', '-g', Config.game.BOSS_NAME,
			cwd=str(boss_install_location),
			stderr=sys.stderr,
			stdout=sys.stdout,
		)
		await proc.wait()

	log.info('enabling all .esp and .esm files')
	with enable_plugins_timer:
		PLUGINS_HEADER = textwrap.dedent('''
		# This file is used to tell Oblivion which data files to load.
		# WRITE YOUR OWN PYTHON SCRIPT TO MODIFY THIS FILE (lol)
		# Please do not modify this file by hand.
		''').strip()

		with atomic_save(str(Config.game.app_data_path / 'plugins.txt')) as f:
			with io.TextIOWrapper(f, 'ascii') as ef:
				ef.write(PLUGINS_HEADER)
				ef.write('\n')
				for esm in Config.game.root_dir.glob('Data/*.esm'):
					ef.write(esm.name)
					ef.write('\n')
				for esp in Config.game.root_dir.glob('Data/*.esp'):
					ef.write(esp.name)
					ef.write('\n')

	log.info('saving which mod owns which file')
	with atomic_save(str(Config.mod_ownership_path)) as f:
		with io.TextIOWrapper(f, 'ascii') as ef:
			json.dump(path_mod_owner, ef)

	log.info(f'download_timer = {download_timer}')
	log.info(f'extract_timer = {extract_timer}')
	log.info(f'mod_converge_timer = {mod_converge_timer}')
	log.info(f'apply_converge_timer = {apply_converge_timer}')
	log.info(f'purge_timer = {purge_timer}')
	log.info(f'preproc_timer = {preproc_timer}')
	log.info(f'postproc_timer = {postproc_timer}')
	log.info(f'load_order_timer = {load_order_timer}')
	log.info(f'enable_plugins_timer = {enable_plugins_timer}')
Code example #31
File: cli.py  Project: mcgyver5/apatite
def render(plist, pdir, pfile):
    "generate the list markdown from the yaml listing"
    normalize(pfile=pfile, plist=plist)
    topic_map = plist.get_projects_by_type('topic')
    topic_toc_text = format_tag_toc(topic_map)
    projects_by_topic = format_all_categories(topic_map)

    plat_map = plist.get_projects_by_type('platform')
    plat_toc_text = format_tag_toc(plat_map)
    projects_by_plat = format_all_categories(plat_map)

    context = {
        'TOPIC_TOC': topic_toc_text,
        'TOPIC_TEXT': projects_by_topic,
        'PLATFORM_TOC': plat_toc_text,
        'PLATFORM_TEXT': projects_by_plat,
        'TOTAL_COUNT': len(plist.project_list)
    }

    templates_path = pdir + '/templates/'
    if not os.path.isdir(templates_path):
        raise APACLIError('expected "templates" directory at %r' %
                          templates_path)

    for filename in iter_find_files(templates_path, '*.tmpl.md'):
        tmpl_text = open(filename).read()
        target_filename = os.path.split(filename)[1].replace('.tmpl', '')
        output_text = tmpl_text.format(**context)
        with atomic_save(pdir + '/' + target_filename) as f:
            f.write(output_text.encode('utf8'))

    feed_tmpl_path = templates_path + '/atom.xml'
    if os.path.exists(feed_tmpl_path):

        def _stderr_log_func(level, name, message):
            import sys
            sys.stderr.write('%s - %s - %s\n' % (level.upper(), name, message))
            sys.stderr.flush()

        ashes_env = AshesEnv([templates_path], log_func=_stderr_log_func)
        proj_dict_list = []
        for proj in plist.project_list:
            cur = proj.to_dict()
            cur['name_slug'] = proj.name_slug
            cur['date_added_utc'] = proj.date_added.isoformat() + 'Z'
            cur['urls'] = get_url_list(proj)
            proj_dict_list.append(cur)
        cur_dt = datetime.datetime.utcnow().replace(
            microsecond=0).isoformat() + 'Z'
        res = ashes_env.render('atom.xml', {
            'projects': sorted(proj_dict_list,
                               key=lambda x: x['date_added'],
                               reverse=True),
            'last_generated_utc': cur_dt
        })
        with atomic_save(pdir + '/atom.xml') as f:
            f.write(res.encode('utf8'))

    return