Code Example #1
File: freeze.py Project: cevmartinez/clld
def create_release_func(args,
                        tag=None,
                        name=None,
                        dataset=None):  # pragma: no cover
    """
    Create a release of a GitHub repository.

    .. seealso:: https://developer.github.com/v3/repos/releases/#create-a-release
    """
    if github3 is None:
        args.log.critical(
            "github3 is not installed. Can't access the GitHub API.")
        return

    token = os.environ.get('GITHUB_TOKEN')
    if token is None:
        args.log.critical('Environment variable GITHUB_TOKEN must be set.')
        return

    req = args.env['request']
    dataset = dataset or req.dataset
    today = date.today()
    tag = tag or 'v%s-%s' % (today.year, today.month)

    repo = github3.login(token=token).repository('clld', args.module.__name__)
    return repo.create_release(
        tag,
        name='%s %s' % (
            name or dataset.name, tag[1:] if tag.startswith('v') else tag),
        body='<p>%s</p>' % TxtCitation(None).render(dataset, req).encode('utf8'))
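
The tag defaults to a year-month string built from the current date, and the release name strips the leading "v" again. A small illustrative sketch of the values this produces (the dataset name "WALS Online" is just a placeholder):

from datetime import date

today = date(2015, 3, 17)                                   # example date
tag = 'v%s-%s' % (today.year, today.month)                  # -> 'v2015-3'
name = '%s %s' % ('WALS Online',
                  tag[1:] if tag.startswith('v') else tag)  # -> 'WALS Online 2015-3'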
Code Example #2
File: download.py Project: Woseseltops/clld
    def create(self, req, filename=None, verbose=True):
        p = self.abspath(req)
        if not p.dirname().exists():
            p.dirname().mkdir()
        tmp = path('%s.tmp' % p)

        if self.rdf:
            # we do not create archives with a readme for rdf downloads, because each
            # RDF entity points to the dataset and the void description of the dataset
            # covers all relevant metadata.
            with closing(GzipFile(tmp, 'w')) as fp:
                self.before(req, fp)
                for i, item in enumerate(
                        page_query(self.query(req), verbose=verbose)):
                    self.dump(req, fp, item, i)
                self.after(req, fp)
        else:
            with ZipFile(tmp, 'w', ZIP_DEFLATED) as zipfile:
                if not filename:
                    fp = StringIO()
                    self.before(req, fp)
                    for i, item in enumerate(
                            page_query(self.query(req), verbose=verbose)):
                        self.dump(req, fp, item, i)
                    self.after(req, fp)
                    fp.seek(0)
                    zipfile.writestr(self.name, fp.read())
                else:
                    zipfile.write(filename, self.name)
                zipfile.writestr(
                    'README.txt', """
{0} data download
{1}

Data of {0} is published under the following license:
{2}

It should be cited as

{3}
""".format(
                        req.dataset.name,
                        '=' * (len(req.dataset.name.encode('utf8'))
                               + len(' data download')),
                        req.dataset.license,
                        TxtCitation(None).render(req.dataset, req).encode('utf8')))
        if p.exists():
            p.remove()
        tmp.move(p)
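
Both versions of create() stream query results through page_query, whose definition is not shown here. A hypothetical sketch of a helper with the behavior these examples rely on, fetching a SQLAlchemy query in fixed-size slices so the full result set never has to sit in memory (the real clld implementation may differ):

def page_query(q, n=1000, verbose=False):
    # Hypothetical: yield the results of query q in pages of n rows.
    offset = 0
    while True:
        page = q.limit(n).offset(offset).all()
        if not page:
            break
        if verbose:
            print('fetched rows %s to %s' % (offset, offset + len(page)))
        for item in page:
            yield item
        offset += n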
Code Example #3
    def create(self, req, filename=None, verbose=True):
        p = self.abspath(req)
        if not p.parent.exists():  # pragma: no cover
            p.parent.mkdir()
        tmp = Path('%s.tmp' % p.as_posix())

        if self.rdf:
            # we do not create archives with a readme for rdf downloads, because each
            # RDF entity points to the dataset and the void description of the dataset
            # covers all relevant metadata.
            #
            # TODO: write test for the file name things!?
            #
            with closing(
                    GzipFile(filename=Path(tmp.stem).stem,
                             fileobj=tmp.open('wb'))) as fp:
                self.before(req, fp)
                for i, item in enumerate(
                        page_query(self.query(req), verbose=verbose)):
                    self.dump(req, fp, item, i)
                self.after(req, fp)
        else:
            with ZipFile(tmp.as_posix(), 'w', ZIP_DEFLATED) as zipfile:
                if not filename:
                    fp = self.get_stream()
                    self.before(req, fp)
                    for i, item in enumerate(
                            page_query(self.query(req), verbose=verbose)):
                        self.dump(req, fp, item, i)
                    self.after(req, fp)
                    zipfile.writestr(self.name, self.read_stream(fp))
                else:  # pragma: no cover
                    zipfile.write(filename, self.name)
                zipfile.writestr(
                    'README.txt',
                    README.format(
                        req.dataset.name,
                        '=' * (len(req.dataset.name) + len(' data download')),
                        req.dataset.license,
                        TxtCitation(None).render(req.dataset,
                                                 req)).encode('utf8'))
        if p.exists():  # pragma: no cover
            remove(p)
        move(tmp, p)
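
This newer variant replaces the explicit StringIO of example #2 with get_stream()/read_stream() helpers, so the non-RDF branch works the same under Python 2 and Python 3. Their implementations are not shown; a minimal sketch of what such helpers could look like, assuming the buffered text must end up as bytes for ZipFile.writestr (hypothetical, not the actual clld code):

from io import StringIO

def get_stream(self):
    # Hypothetical: an in-memory text buffer for the before/dump/after hooks.
    return StringIO()

def read_stream(self, fp):
    # Hypothetical: rewind the buffer and return its content as UTF-8 bytes.
    fp.seek(0)
    res = fp.read()
    if isinstance(res, str):
        res = res.encode('utf8')
    return res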
Code Example #4
File: freeze.py Project: cevmartinez/clld
def freeze_readme(dataset, req):  # pragma: no cover
    return FREEZE_README.format(
        dataset.name,
        '=' * (len(dataset.name.encode('utf8')) + len(' data dump')),
        dataset.license,
        TxtCitation(None).render(dataset, req), dataset.domain, dataset.id)
Code Example #5
def format_readme(req, dataset):
    return README.format(
        dataset.name,
        '=' * (len(dataset.name) + len(' data download')),
        dataset.license,
        TxtCitation(None).render(dataset, req))
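
README itself is a module-level template that is not shown in this example, but example #2 inlines an equivalent string, so the constant presumably looks roughly like this (four positional placeholders: dataset name, an "=" underline, license, citation):

README = """
{0} data download
{1}

Data of {0} is published under the following license:
{2}

It should be cited as

{3}
"""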
Code Example #6
    def create(self, req, filename=None, verbose=True):
        p = self.abspath(req)
        if not p.parent.exists():  # pragma: no cover
            p.parent.mkdir()
        tmp = Path('%s.tmp' % p)

        language_url_pattern = self.route_url_pattern(req, 'language')

        with ZipFile(tmp.as_posix(), 'w', ZIP_DEFLATED) as zipfile:
            tables = []
            for param in DBSession.query(Parameter).options(joinedload(Parameter.domain)):
                fname = '%s-%s.csv' % (req.dataset.id, param.id)
                zipfile.writestr(fname, self.get_values(param, language_url_pattern))
                tables.append({
                    '@type': 'Table',
                    'url': fname,
                    'notes': [
                        {
                            '@id': req.resource_url(param),
                            'dc:identifier': param.id,
                            'dc:title': param.name,
                            'dc:description': param.description or ''}] + [
                        {
                            '@type': 'DomainElement',
                            'name': de.name,
                            'description': de.description,
                            'numeric': de.number
                        } for de in param.domain
                    ],
                })

            md = CsvmJsonAdapter.csvm_basic_doc(req, tables=tables)
            md.update({
                '@type': 'TableGroup',
                'dc:language': list(self.get_languages(req, language_url_pattern)),
                'tableSchema': {
                    "columns": [
                        {
                            "name": "ID",
                            "datatype": "string",
                            "required": True
                        },
                        {
                            "name": "Language_ID",
                            "datatype": "string",
                            "required": True
                        },
                        {
                            "name": "Parameter_ID",
                            "datatype": "string",
                            "required": True
                        },
                        {
                            "name": "Contribution_ID",
                            "datatype": "string",
                            "required": True
                        },
                        {
                            "name": "Value",
                            "datatype": "string",
                            "required": True
                        },
                        {
                            "name": "Source",
                            "datatype": "string",
                        },
                        {
                            "name": "Comment",
                            "datatype": "string",
                        },
                    ],
                    "primaryKey": "ID",
                    'aboutUrl': self.route_url_pattern(req, 'value', '{ID}'),
                },
            })
            zipfile.writestr(
                '%s.csv-metadata.json' % req.dataset.id, json.dumps(md, indent=4))
            bib = Database([
                rec.bibtex() for rec in DBSession.query(Source).order_by(Source.name)])
            zipfile.writestr('%s.bib' % req.dataset.id, ('%s' % bib).encode('utf8'))
            zipfile.writestr(
                'README.txt',
                README.format(
                    req.dataset.name,
                    '=' * (
                        len(req.dataset.name)
                        + len(' data download')),
                    req.dataset.license,
                    TxtCitation(None).render(req.dataset, req)).encode('utf8'))
        if p.exists():  # pragma: no cover
            remove(p)
        move(tmp, p)
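
The tableSchema above declares the columns every per-parameter CSV is expected to provide. get_values() is not shown; a hypothetical sketch of a writer producing rows that match that schema, using only the standard library csv module:

import csv
from io import StringIO

COLUMNS = ['ID', 'Language_ID', 'Parameter_ID', 'Contribution_ID',
           'Value', 'Source', 'Comment']

def values_csv(rows):
    # Hypothetical: rows is an iterable of dicts keyed by the column names above.
    out = StringIO()
    writer = csv.DictWriter(out, fieldnames=COLUMNS)
    writer.writeheader()
    for row in rows:
        writer.writerow(row)
    return out.getvalue()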