Example #1
 def _query_citing_records(self, show_duplicates=False, session=None):
     """Returns records which cites this one."""
     if not session:
         session = db.session
     index_ref = self._get_index_ref()
     if not index_ref:
         raise Exception("There is no index_ref for this object")
     citation_query = session.query(RecordMetadata).with_entities(
         RecordMetadata.id, RecordMetadata.json['control_number'])
     citation_filter = referenced_records(RecordMetadata.json).contains(
         [index_ref])
     filter_deleted_records = or_(
         not_(type_coerce(RecordMetadata.json,
                          JSONB).has_key('deleted')),  # noqa: W601
         not_(RecordMetadata.json['deleted'] == cast(True, JSONB)))
     only_literature_collection = type_coerce(
         RecordMetadata.json,
         JSONB)['_collections'].contains(['Literature'])
     citations = citation_query.filter(citation_filter,
                                       filter_deleted_records,
                                       only_literature_collection)
     if not show_duplicates:
         # This just hides duplicates; it can still show citations
         # which do not have a proper PID in the PID store.
         # Duplicated data should be removed with the CLI command.
         citations = citations.distinct(
             RecordMetadata.json['control_number'])
     return citations
Example #2
    def query(self, req):
        Family = sa.orm.aliased(Language, flat=True)
        Father = sa.orm.aliased(Language, flat=True)
        _Country = sa.orm.aliased(Country, name='_country')

        query = req.db.query(
                Languoid.id, Family.id.label('family_id'), Father.id.label('parent_id'),
                Languoid.name,
                Languoid.bookkeeping,
                sa.type_coerce(Languoid.level, sa.Text).label('level'),
                sa.type_coerce(Languoid.status, sa.Text).label('status'),
                Languoid.latitude, Languoid.longitude,
                sa.select([Identifier.name])
                    .where(LanguageIdentifier.identifier_pk == Identifier.pk)
                    .where(LanguageIdentifier.language_pk == Language.pk)
                    .where(Identifier.type == 'iso639-3')
                    .label('iso639P3code'),
                Languoid.description, Languoid.markup_description,
                Languoid.child_family_count, Languoid.child_language_count, Languoid.child_dialect_count,
                sa.select([sa.literal_column("string_agg(_country.id, ' ' ORDER BY _country.id)")])
                    .where(Languoidcountry.country_pk == _Country.pk)
                    .where(Languoidcountry.languoid_pk == Languoid.pk)
                    .label('country_ids'),
            ).select_from(Languoid).filter(Languoid.active)\
            .outerjoin(Family, Family.pk == Languoid.family_pk)\
            .outerjoin(Father, Father.pk == Languoid.father_pk)\
            .order_by(Languoid.id)
        return query
Example #3
def get_query_records_to_index(pid_types):
    """
    Return a query for retrieving all non-deleted records by pid_type.

    Args:
        pid_types(List[str]): a list of pid types

    Return:
        SQLAlchemy query for non-deleted records with pid type in `pid_types`.
    """
    query = (
        db.session.query(PersistentIdentifier.object_uuid).join(
            RecordMetadata,
            type_coerce(
                PersistentIdentifier.object_uuid,
                String) == type_coerce(RecordMetadata.id, String)).filter(
                    PersistentIdentifier.pid_type.in_(pid_types),
                    PersistentIdentifier.object_type == 'rec',
                    PersistentIdentifier.status == PIDStatus.REGISTERED,
                    or_(
                        not_(
                            type_coerce(RecordMetadata.json,
                                        JSONB).has_key('deleted')),
                        RecordMetadata.json["deleted"] == cast(False, JSONB))
                    # noqa: F401
                ))
    return query
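A minimal usage sketch for the query above; the pid types and the indexing function are assumptions, not part of the original snippet:

# Hypothetical caller: stream UUIDs of registered, non-deleted records
# for the given pid types and hand each one to an indexing routine.
query = get_query_records_to_index(['lit', 'aut'])
for (uuid,) in query.yield_per(1000):
    index_record(uuid)  # hypothetical indexing function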
Example #4
 def _query_citing_records(self, show_duplicates=False):
     """Returns records which cites this one."""
     index_ref = self._get_index_ref()
     if not index_ref:
         raise Exception("There is no index_ref for this object")
     citation_query = RecordMetadata.query.with_entities(RecordMetadata.id,
                                                         RecordMetadata.json['control_number'])
     citation_filter = referenced_records(RecordMetadata.json).contains([index_ref])
     filter_deleted_records = or_(not_(type_coerce(RecordMetadata.json, JSONB).has_key('deleted')),  # noqa: W601
                                  not_(RecordMetadata.json['deleted'] == cast(True, JSONB)))
     only_literature_collection = type_coerce(RecordMetadata.json, JSONB)['_collections'].contains(['Literature'])
     filter_superseded_records = or_(
         not_(type_coerce(RecordMetadata.json, JSONB).has_key('related_records')),  # noqa: W601
         not_(type_coerce(RecordMetadata.json, JSONB)['related_records'].contains([{'relation': 'successor'}]))
     )
     citations = citation_query.filter(citation_filter,
                                       filter_deleted_records,
                                       filter_superseded_records,
                                       only_literature_collection)
     if not show_duplicates:
         # This just hides duplicates; it can still show citations
         # which do not have a proper PID in the PID store.
         # Duplicated data should be removed with the CLI command.
         citations = citations.distinct(RecordMetadata.json['control_number'])
     return citations
Example #5
def get_query_records_to_index(pid_types):
    """
    Return a query for retrieving all non-deleted records by pid_type.

    Args:
        pid_types(List[str]): a list of pid types

    Return:
        SQLAlchemy query for non-deleted records with pid type in `pid_types`.
    """
    query = (
        db.session.query(PersistentIdentifier.object_uuid).join(RecordMetadata, type_coerce(PersistentIdentifier.object_uuid, String) == type_coerce(RecordMetadata.id, String))
        .filter(
            PersistentIdentifier.pid_type.in_(pid_types),
            PersistentIdentifier.object_type == 'rec',
            PersistentIdentifier.status == PIDStatus.REGISTERED,
            or_(
                not_(
                    type_coerce(RecordMetadata.json, JSONB).has_key('deleted')
                ),
                RecordMetadata.json["deleted"] == cast(False, JSONB)
            )
            # noqa: F401
        )
    )
    return query
Example #6
    def get(self, bag):
        query = g.tran.query(db.Document).filter_by(_deleted='infinity', _id=bag[ID])
        query = query.filter(
            or_(db.Document.data.contains(type_coerce({"user_id": g.user.id}, JSONB)),
                db.Document.data.contains(type_coerce({"executor_id": g.user.id}, JSONB)),
                type_coerce(db.Document.approval['roles_id'], JSONB).has_any(array(g.user.roles_id)) if len(
                    g.user.roles_id) > 0 else None))

        doc = query.one()
        return {'docs': orm_to_json(doc)}
Example #7
 def status(cls):
     current_datetime = datetime.datetime.now()
     return case([
         (current_datetime < cls.start_date,
          type_coerce(EventStatusEnum.open, Enum(EventStatusEnum))),
         (and_(current_datetime > cls.start_date,
               current_datetime < cls.finish_date),
          type_coerce(EventStatusEnum.ongoing, Enum(EventStatusEnum))),
         (current_datetime > cls.finish_date,
          type_coerce(EventStatusEnum.closed, Enum(EventStatusEnum))),
     ],
                 else_=type_coerce(EventStatusEnum.undefined,
                                   Enum(EventStatusEnum)))
Example #8
    def test_type_coerce_auto_label_label_style_none(self):
        table1 = self.table1

        self.assert_compile(
            select(
                type_coerce(table1.c.name, Integer),
                type_coerce(table1.c.name, String),
                table1.c.name,
            ).set_label_style(LABEL_STYLE_NONE),
            # ideally type_coerce wouldn't label at all...
            "SELECT some_table.name AS name, "
            "some_table.name AS name, "
            "some_table.name FROM some_table",
        )
Example #9
    def test_type_coerce_auto_label_label_style_disambiguate(self):
        table1 = self.table1

        self.assert_compile(
            select(
                type_coerce(table1.c.name, Integer),
                type_coerce(table1.c.name, String),
                table1.c.name,
            ),
            # ideally type_coerce wouldn't label at all...
            "SELECT some_table.name AS name, "
            "some_table.name AS name__1, "
            "some_table.name AS name_1 FROM some_table",
        )
Example #10
    def test_type_coerce_auto_label(self):
        table1 = self.table1

        self.assert_compile(
            select([
                type_coerce(table1.c.name, Integer),
                type_coerce(table1.c.name, String),
                table1.c.name,
            ]),
            # ideally type_coerce wouldn't label at all...
            "SELECT some_table.name AS name, "
            "some_table.name AS name, "
            "some_table.name FROM some_table",
        )
Example #11
def create_journal_dict():
    """
    Returns a dictionary that is populated with refextract's journal KB from the database.

        { SOURCE: DESTINATION }

    which represents that ``SOURCE`` is translated to ``DESTINATION`` when found.

    Note that refextract expects ``SOURCE`` to be normalized, which means removing
    all non-alphanumeric characters, collapsing all contiguous whitespace to one
    space and uppercasing the resulting string.
    """
    only_journals = type_coerce(RecordMetadata.json,
                                JSONB)["_collections"].contains(["Journals"])
    only_not_deleted = not_(
        type_coerce(RecordMetadata.json, JSONB).has_key("deleted")  # noqa
    ) | not_(  # noqa
        type_coerce(RecordMetadata.json, JSONB)["deleted"] == cast(
            True, JSONB))
    entity_short_title = RecordMetadata.json["short_title"]
    entity_journal_title = RecordMetadata.json["journal_title"]["title"]
    entity_title_variants = RecordMetadata.json["title_variants"]

    titles_query = RecordMetadata.query.with_entities(
        entity_short_title,
        entity_journal_title).filter(only_journals, only_not_deleted)

    title_variants_query = RecordMetadata.query.with_entities(
        entity_short_title,
        entity_title_variants).filter(only_journals, only_not_deleted)

    title_dict = {}

    for (short_title, journal_title) in titles_query.all():
        title_dict[normalize_title(short_title)] = short_title
        title_dict[normalize_title(journal_title)] = short_title

    for (short_title, title_variants) in title_variants_query.all():
        if title_variants is None:
            continue

        sub_dict = {
            normalize_title(title_variant): short_title
            for title_variant in title_variants
        }

        title_dict.update(sub_dict)

    return title_dict
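The docstring above defines the normalization refextract expects; below is a minimal sketch of a normalize_title helper under one plausible reading of that description (the real helper may differ):

import re

def normalize_title(title):
    # Replace runs of non-alphanumeric characters (including whitespace)
    # with a single space, then uppercase, as described in the docstring
    # of create_journal_dict.
    title = re.sub(r'[^A-Za-z0-9]+', ' ', title or '')
    return title.strip().upper()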
Example #12
    def find_by_holding(cls, **kwargs):
        """Find item versions based on their holdings information.

        Every given kwarg will be queried as a key-value pair in the item's
        holdings.

        :returns: List[(UUID, version_id)] with `version_id` as used by
                  `RecordMetadata.version_id`.
        """
        def _get_filter_clause(obj, key, value):
            val = obj[key].astext
            CASTS = {
                bool: lambda x: cast(x, BOOLEAN),
                int: lambda x: cast(x, INTEGER),
                datetime.date: lambda x: cast(x, DATE),
            }
            if (not isinstance(value, six.string_types) and
                    isinstance(value, collections.Sequence)):
                if len(value) == 2:
                    return CASTS[type(value[0])](val).between(*value)
                raise ValueError('Too few/many values for a range query. '
                                 'Range query requires two values.')
            return CASTS.get(type(value), lambda x: x)(val) == value

        RecordMetadataVersion = version_class(RecordMetadata)

        data = type_coerce(RecordMetadataVersion.json, JSONB)
        path = ('_circulation', 'holdings')

        subquery = db.session.query(
            RecordMetadataVersion.id.label('id'),
            RecordMetadataVersion.version_id.label('version_id'),
            func.json_array_elements(data[path]).label('obj')
        ).subquery()

        obj = type_coerce(subquery.c.obj, JSONB)

        query = db.session.query(
            RecordMetadataVersion.id,
            RecordMetadataVersion.version_id
        ).filter(
            RecordMetadataVersion.id == subquery.c.id,
            RecordMetadataVersion.version_id == subquery.c.version_id,
            *(_get_filter_clause(obj, k, v) for k, v in kwargs.items())
        )

        for result in query:
            yield result
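A hedged usage sketch for find_by_holding: kwargs are matched as key-value pairs against each holding, and a two-element sequence triggers a BETWEEN range query. The Item class and the holding keys below are assumptions:

import datetime

# Equality match on a holding key:
results = list(Item.find_by_holding(barcode='B-1234'))

# Range match: a two-element sequence becomes a BETWEEN clause:
results = list(Item.find_by_holding(
    retention_period=(datetime.date(2019, 1, 1),
                      datetime.date(2019, 12, 31))))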
Example #13
    def tree(cls, include_self=False, with_steps=False, with_terminal=False):
        child, parent = (sa.orm.aliased(cls, name=n)
                         for n in ('child', 'parent'))
        tree_1 = sa.select([child.id.label('child_id')])
        if include_self:
            parent_id = child.id
        else:
            parent_id = child.parent_id
            tree_1.append_whereclause(parent_id != None)
        tree_1.append_column(parent_id.label('parent_id'))
        if with_steps:
            steps = 0 if include_self else 1
            tree_1.append_column(sa.literal(steps).label('steps'))
        if with_terminal:
            if include_self:
                terminal = sa.type_coerce(child.parent_id == None, sa.Boolean)
            else:
                terminal = sa.literal(False)
            tree_1.append_column(terminal.label('terminal'))
        tree_1 = tree_1.cte('tree', recursive=True)

        tree_2 = sa.select([tree_1.c.child_id, parent.parent_id])\
            .select_from(tree_1.join(parent, parent.id == tree_1.c.parent_id))\
            .where(parent.parent_id != None)
        if with_steps:
            tree_2.append_column(tree_1.c.steps + 1)
        if with_terminal:
            gparent = sa.orm.aliased(Languoid, name='grandparent')
            tree_2.append_column(gparent.parent_id == None)
            tree_2 = tree_2.select_from(tree_2.froms[-1].outerjoin(
                gparent, gparent.id == parent.parent_id))
        return tree_1.union_all(tree_2)
Example #14
 def _col_after_parent_attach(col, table):
     e = CheckConstraint(
         type_coerce(col, type_).in_(x.value for x in type_.enum
                                     if x not in type_.exclude_values),
         'valid_enum_{}'.format(col.name))
     e.info['alembic_dont_render'] = True
     assert e.table is table
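For context, a sketch of how a hook like this is typically registered, using SQLAlchemy's 'after_parent_attach' DDL event (the column and the wiring are assumptions; in the snippet, type_ is a closed-over enum type):

import sqlalchemy as sa
from sqlalchemy import event

# _col_after_parent_attach matches the (target, parent) signature of the
# 'after_parent_attach' event, so it can be registered on a Column that
# has not yet been attached to its Table.
col = sa.Column('state', sa.SmallInteger())
event.listen(col, 'after_parent_attach', _col_after_parent_attach)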
Example #15
def get_master_calibration_image(image, calibration_type, master_selection_criteria,
                                 use_only_older_calibrations=False, db_address=_DEFAULT_DB):
    calibration_criteria = CalibrationImage.type == calibration_type.upper()
    calibration_criteria &= CalibrationImage.instrument_id == image.instrument.id
    calibration_criteria &= CalibrationImage.is_master.is_(True)

    for criterion in master_selection_criteria:
        # We have to cast to strings according to the sqlalchemy docs for version 1.3:
        # https://docs.sqlalchemy.org/en/latest/core/type_basics.html?highlight=json#sqlalchemy.types.JSON
        calibration_criteria &= cast(CalibrationImage.attributes[criterion], String) ==\
                                type_coerce(getattr(image, criterion), JSON)

    # During real-time reduction, we want to avoid using different master calibrations for the same block,
    # therefore we make sure that the calibration frame used was created before the block start time.
    if use_only_older_calibrations and image.block_start is not None:
        calibration_criteria &= CalibrationImage.datecreated < image.block_start

    with get_session(db_address=db_address) as db_session:
        calibration_images = db_session.query(CalibrationImage).filter(calibration_criteria).all()

    # Exit if no calibration file found
    if len(calibration_images) == 0:
        return None

    # Find the closest date
    date_deltas = np.abs(np.array([i.dateobs - image.dateobs for i in calibration_images]))
    closest_calibration_image = calibration_images[np.argmin(date_deltas)]
    calibration_file = os.path.join(closest_calibration_image.filepath, closest_calibration_image.filename)

    if abs(min(date_deltas)) > datetime.timedelta(days=30):
        msg = "The closest calibration file in the database was created more than 30 days before or after " \
              "the image being reduced."
        logger.warning(msg, image=image, extra_tags={'master_calibration': os.path.basename(calibration_file)})

    return calibration_file
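The comparison inside the loop above reduces to the pattern sketched below (reusing the CalibrationImage model from the snippet; the 'ccdsum' value is hypothetical): the stored JSON field is cast to String on the SQL side, while the Python value is coerced to JSON, so both sides render as comparable text per the SQLAlchemy 1.3 JSON docs referenced in the comment.

from sqlalchemy import cast, String, type_coerce
from sqlalchemy.types import JSON

criterion = (cast(CalibrationImage.attributes['ccdsum'], String) ==
             type_coerce('1 1', JSON))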
Example #16
 def bind_expression(self, bindvalue):
     # convert the bind's type from PGPString to
     # String, so that it's passed to psycopg2 as is without
     # a dbapi.Binary wrapper
     bindvalue = type_coerce(bindvalue, String)
     return func.tsrange(bindvalue)
Example #17
def listing(bag):
    query = g.tran.query(db.Payments._id).filter_by(
        _deleted='infinity', company_id=g.session['company_id'])
    payment_vars = vars(db.Payments)
    for payment_var in payment_vars:
        if isinstance(payment_vars[payment_var], InstrumentedAttribute):
            query = query.add_column(payment_vars[payment_var])

    if "filter" in bag:
        if "data" in bag["filter"] and isinstance(bag["filter"]["data"], dict):
            query = query.filter(
                db.Payments.data.contains(
                    type_coerce(bag["filter"]["data"], JSONB)))
            del bag["filter"]["data"]
        query = query.filter_by(**bag["filter"])

    if "order_by" in bag:
        query = query.order_by(*bag["order_by"])
    else:
        query = query.order_by(db.Payments._created.desc())

    count = query.count()
    if "limit" in bag:
        query = query.limit(bag["limit"])
    if "offset" in bag:
        query = query.offset(bag["offset"])
    if 'with_related' in bag and bag['with_related'] is True:
        query = find_relations(query)
    payments = query.all()
    payments = orm_to_json(payments)
    return {'payments': payments, 'count': count}
Example #18
def close_expired_jobs(notify):
    now = datetime.datetime.utcnow()
    today = now.strftime("%Y-%m-%d")

    record_json = type_coerce(RecordMetadata.json, JSONB)
    before_deadline_date = record_json["deadline_date"].astext.cast(
        DateTime) < today
    only_jobs_collection = record_json["_collections"].contains(["Jobs"])
    only_not_closed = not_(record_json["status"].astext == "closed")
    only_not_deleted = or_(
        not_(record_json.has_key("deleted")),  # noqa: W601
        not_(record_json["deleted"] == cast(True, JSONB)),
    )
    expired_jobs = RecordMetadata.query.filter(only_jobs_collection,
                                               only_not_deleted,
                                               only_not_closed,
                                               before_deadline_date).all()
    expired_job_records = [
        JobsRecord(job.json, model=job) for job in expired_jobs
    ]
    for job_record in expired_job_records:
        job_record["status"] = "closed"
        job_record.update(dict(job_record))

    db.session.commit()

    if notify:
        for job_record in expired_job_records:
            send_job_deadline_reminder(dict(job_record))

    LOGGER.info("Closed expired jobs",
                notify=notify,
                num_records=len(expired_jobs))
Example #19
def get_literature_recids_for_orcid(orcid):
    """Return the Literature recids that were claimed by an ORCiD.

    We record the fact that the Author record X has claimed the Literature
    record Y by storing in Y an author object with a ``$ref`` pointing to X
    and the key ``curated_relation`` set to ``True``. Therefore this method
    first searches the DB for the Author records for the one containing the
    given ORCiD, and then uses its recid to search in ES for the Literature
    records that satisfy the above property.

    Args:
        orcid (str): the ORCiD.

    Return:
        list(int): the recids of the Literature records that were claimed
        by that ORCiD.

    """
    orcid_object = '[{"schema": "ORCID", "value": "%s"}]' % orcid
    # this first query is written in a way that can use the index on (json -> ids)
    author_rec_uuid = db.session.query(RecordMetadata.id)\
        .filter(type_coerce(RecordMetadata.json, JSONB)['ids'].contains(orcid_object)).one().id
    author_recid = db.session.query(PersistentIdentifier.pid_value).filter(
        PersistentIdentifier.object_type == 'rec',
        PersistentIdentifier.object_uuid == author_rec_uuid,
        PersistentIdentifier.pid_type == 'aut',
    ).one().pid_value

    query = Q('match', authors__curated_relation=True) & Q('match', authors__recid=author_recid)
    search_by_curated_author = LiteratureSearch().query('nested', path='authors', query=query)\
                                                 .params(_source=['control_number'], size=9999)

    return [el['control_number'] for el in search_by_curated_author]
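A sketch of the author object shape the docstring describes, as it would appear inside a claimed Literature record (the exact nesting and values are assumptions):

# Hypothetical author entry stored in Literature record Y after it has
# been claimed by Author record X:
claimed_author = {
    'curated_relation': True,
    'record': {'$ref': 'https://inspirehep.net/api/authors/999108'},
    'full_name': 'Doe, Jane',
}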
Example #20
    def save(self, bag):
        if '_created' in bag:
            del bag["_created"]
        if '_deleted' in bag:
            del bag["_deleted"]
        if 'date' not in bag['data']:
            bag["data"]['date'] = str(datetime.now())

        if '_id' in bag:
            query = g.tran.query(db.Document).filter_by(_id=bag['_id']) \
                .filter(db.Document.data.contains(type_coerce({"user_id": g.user.id}, JSONB)),
                        db.Document.document_status != 'committed')
            document = query.first()
            if document is None:
                raise CbsException(USER_NO_ACCESS)
        if g.user.roles_id is None:
            raise CbsException(GENERIC_ERROR,
                               u'Вам необходимо получить права')  # i.e. "You need to obtain permissions"
        else:
            pg_db = PostgresDatabase()
            bag['type'] = 'Document'
            bag['document_status'] = 'draft'
            bag['data']['user_id'] = g.user.id
            if len(g.user.roles_id) > 0:
                bag['approval']['approval_roles'] = []
                for role_id in bag['approval']['roles_id']:
                    bag['approval']['approval_roles'].append({
                        'role_id': role_id
                    })
            _id, _rev = pg_db.store(bag, new_edits=True)
        return {"ok": True, "id": _id, "rev": _rev}
Example #21
    def _win(self):
        with get_session() as s:
            ktyp_id = get_ktyp(s, "winning").id

        return type_coerce(
            and_(Game.ktyp_id != None, Game.ktyp_id == ktyp_id,
                 Game.end <= self.end), Integer)
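Note that type_coerce only changes how SQLAlchemy types the expression on the Python side; unlike cast, it emits no CAST in the rendered SQL. A sketch of the contrast, with names assumed from the snippet above:

from sqlalchemy import Integer, and_, cast, type_coerce

expr = and_(Game.ktyp_id != None, Game.ktyp_id == ktyp_id)
as_int = type_coerce(expr, Integer)   # no CAST rendered; handled as int in Python
as_sql_cast = cast(expr, Integer)     # renders CAST(... AS INTEGER) in the SQL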
Example #22
def match_references_by_uuids(literature_uuids):
    record_json = type_coerce(RecordMetadata.json, JSONB)
    has_references = record_json.has_key("references")  # noqa: W601
    selected_uuids = RecordMetadata.id.in_(literature_uuids)
    not_deleted = or_(  # exclude deleted records in case some were deleted after the UUIDs were fetched by the caller
        not_(record_json.has_key("deleted")),  # noqa: W601
        not_(record_json["deleted"] == cast(True, JSONB)),
    )
    with_references_query = RecordMetadata.query.filter(
        selected_uuids, has_references, not_deleted
    )

    for record_metadata in with_references_query.all():
        references = record_metadata.json["references"]
        match_result = match_references(references)

        if not match_result["any_link_modified"]:
            continue

        literature = LiteratureRecord(record_metadata.json, model=record_metadata)
        literature["references"] = dedupe_list(match_result["matched_references"])
        literature.update(dict(literature))

        db.session.commit()
        added_recids = match_result["added_recids"]
        removed_recids = match_result["removed_recids"]
        LOGGER.info(
            "References are matched",
            uuid=record_metadata.id,
            recid=record_metadata.json["control_number"],
            added_recids=added_recids,
            added_recid_count=len(added_recids),
            removed_recids=removed_recids,
            removed_recid_count=len(removed_recids),
        )
Example #23
    def test_update_returning_w_type_coerce_expression(self, connection):
        table = self.tables.tables
        connection.execute(
            table.insert(),
            [
                {
                    "persons": 5,
                    "goofy": "somegoofy1"
                },
                {
                    "persons": 3,
                    "goofy": "somegoofy2"
                },
            ],
        )

        result = connection.execute(table.update().where(
            table.c.persons > 4).values(goofy="newgoofy").returning(
                type_coerce(table.c.goofy, String)))
        eq_(result.fetchall(), [("FOOnewgoofy", )])

        result2 = connection.execute(
            select(table.c.id, table.c.goofy).order_by(table.c.id))
        eq_(
            result2.fetchall(),
            [(1, "FOOnewgoofyBAR"), (2, "FOOsomegoofy2BAR")],
        )
Example #24
def match_references_by_uuids(literature_uuids):
    record_json = type_coerce(RecordMetadata.json, JSONB)
    has_references = record_json.has_key("references")  # noqa: W601
    selected_uuids = RecordMetadata.id.in_(literature_uuids)
    with_references_query = RecordMetadata.query.filter(selected_uuids, has_references)
    for record_metadata in with_references_query.all():
        references = record_metadata.json.get("references")
        match_result = match_references(references)
        if match_result["any_link_modified"]:
            literature = LiteratureRecord(record_metadata.json, model=record_metadata)
            literature["references"] = match_result["matched_references"]
            literature.update(dict(literature))

            db.session.commit()
            LOGGER.info("MATCHER-after-commit")
            added_recids = match_result["added_recids"]
            removed_recids = match_result["removed_recids"]
            LOGGER.info(
                "References are matched",
                uuid=record_metadata.id,
                recid=record_metadata.json["control_number"],
                added_recids=added_recids,
                added_recid_count=len(added_recids),
                removed_recids=removed_recids,
                removed_recid_count=len(removed_recids),
            )
Example #25
def get_literature_recids_for_orcid(orcid):
    """Return the Literature recids that were claimed by an ORCiD.

    We record the fact that the Author record X has claimed the Literature
    record Y by storing in Y an author object with a ``$ref`` pointing to X
    and the key ``curated_relation`` set to ``True``. Therefore this method
    first searches the DB for the Author records for the one containing the
    given ORCiD, and then uses its recid to search in ES for the Literature
    records that satisfy the above property.

    Args:
        orcid (str): the ORCiD.

    Return:
        list(int): the recids of the Literature records that were claimed
        by that ORCiD.

    """
    orcid_object = '[{"schema": "ORCID", "value": "%s"}]' % orcid
    # this first query is written in a way that can use the index on (json -> ids)
    author_rec_uuid = db.session.query(RecordMetadata.id)\
        .filter(type_coerce(RecordMetadata.json, JSONB)['ids'].contains(orcid_object)).one().id
    author_recid = db.session.query(PersistentIdentifier.pid_value).filter(
        PersistentIdentifier.object_type == 'rec',
        PersistentIdentifier.object_uuid == author_rec_uuid,
        PersistentIdentifier.pid_type == 'aut',
    ).one().pid_value

    query = Q('match', authors__curated_relation=True) & Q(
        'match', authors__recid=author_recid)
    search_by_curated_author = LiteratureSearch().query('nested', path='authors', query=query)\
                                                 .params(_source=['control_number'], size=9999)

    return [el['control_number'] for el in search_by_curated_author]
Example #26
 def bind_expression(self, bindvalue):
     """Convert the bind's type from PGPString to String, so that it's
     passed to psycopg2 as is without convert the bind's type from
     PGPString to a dbapi.Binary wrapper."""
     bindvalue = type_coerce(bindvalue, String)
     return func.pgp_sym_encrypt(bindvalue, self.passphrase,
                                 'compress-algo=1, cipher-algo=aes256')
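For context, a sketch of the fuller custom type this method usually belongs to, following the PGPString recipe from the SQLAlchemy documentation (details such as the column spec are assumptions):

from sqlalchemy import func, type_coerce, String
from sqlalchemy.types import UserDefinedType

class PGPString(UserDefinedType):
    def __init__(self, passphrase):
        self.passphrase = passphrase

    def get_col_spec(self, **kw):
        # pgcrypto's pgp_sym_encrypt returns bytea.
        return 'BYTEA'

    def bind_expression(self, bindvalue):
        # As above: coerce to String so psycopg2 sends plain text.
        bindvalue = type_coerce(bindvalue, String)
        return func.pgp_sym_encrypt(bindvalue, self.passphrase,
                                    'compress-algo=1, cipher-algo=aes256')

    def column_expression(self, col):
        # Decrypt transparently when the column is SELECTed.
        return func.pgp_sym_decrypt(col, self.passphrase)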
Example #27
    def test_nested_type_trans(self):
        customer = self.tables.customer
        order = self.tables.order
        item = self.tables.item

        class SpecialType(TypeDecorator):
            impl = Integer

            def process_result_value(self, value, dialect):
                return str(value) + "_processed"

        sub_sub_stmt = nested(select([type_coerce(item.c.price, SpecialType)]).\
                                    where(item.c.order_id ==
                                            order.c.id)).label('i')
        sub_stmt = nested(select([sub_sub_stmt]).where(order.c.customer_id ==
                                            customer.c.id)).label('o')
        stmt = select([sub_stmt]).where(customer.c.id == 1)
        r = config.db.execute(stmt)
        row = r.fetchone()
        sub_result = row['o']
        sub_sub_result = sub_result.fetchone()['i']
        eq_(
            list(sub_sub_result),
            [('9.99_processed',), ('19.99_processed',)]
        )
Example #28
 def distance_to(cls, other_catchment):
     return type_coerce(
         1e-6 * ((Descriptors.centroid_ngr_x - other_catchment.descriptors.centroid_ngr_x) *
                 (Descriptors.centroid_ngr_x - other_catchment.descriptors.centroid_ngr_x) +
                 (Descriptors.centroid_ngr_y - other_catchment.descriptors.centroid_ngr_y) *
                 (Descriptors.centroid_ngr_y - other_catchment.descriptors.centroid_ngr_y)),
         Float())
Example #29
    def list_dids(self, scope, filters, type='collection', ignore_case=False, limit=None,
                  offset=None, long=False, recursive=False, session=None):
        # Currently for sqlite only add, get and delete are implemented.
        if not self.json_implemented(session):
            raise NotImplementedError

        query = session.query(models.DidMeta)
        if scope is not None:
            query = query.filter(models.DidMeta.scope == scope)
        filters.pop('name', None)
        for k, v in iteritems(filters):
            if session.bind.dialect.name == 'oracle':
                query = query.filter(text("json_exists(meta,'$.%s?(@==''%s'')')" % (k, v)))
            else:
                query = query.filter(cast(models.DidMeta.meta[k], String) == type_coerce(v, JSON))

        if long:
            for row in query.yield_per(5):
                yield {
                    'scope': row.scope,
                    'name': row.name,
                    'did_type': 'Info not available in JSON Plugin',
                    'bytes': 'Info not available in JSON Plugin',
                    'length': 'Info not available in JSON Plugin'
                }
        else:
            for row in query.yield_per(5):
                yield row.name
Example #30
 def bind_expression(self, bindvalue):
     # convert the bind's type from PGPString to
     # String, so that it's passed to psycopg2 as is without
     # a dbapi.Binary wrapper
     bindvalue = type_coerce(bindvalue, String)
     return func.tsrange(bindvalue)
Example #31
def get(self, bag):
    query = g.tran.query(db.Companies) \
        .filter_by(_deleted='infinity', _id=bag['id'])
    doc_vars = vars(db.Companies)
    for var in doc_vars:
        if isinstance(doc_vars[var], InstrumentedAttribute):
            query = query.add_column(doc_vars[var])

    if 'with_related' in bag and bag['with_related'] is True:
        company_status_value = g.tran.query(
            func.row_to_json(text('enums.*'))).select_from(db.Enums) \
            .filter_by(_deleted='infinity', name='company_status') \
            .filter(db.Enums.data['key'].cast(TEXT) == cast(db.Companies.company_status, TEXT)) \
            .as_scalar().label('company_status_value')

        company_type_value = g.tran.query(
            func.row_to_json(text('enums.*'))).select_from(db.Enums) \
            .filter_by(_deleted='infinity', name='company_type') \
            .filter(db.Enums.data['key'].cast(TEXT) == cast(db.Companies.company_type, TEXT)) \
            .as_scalar().label('company_type_value')

        entry_user = g.tran.query(func.json_build_object(
            "id", db.User.id, "username", db.User.username, "email", db.User.email, "rec_date", db.User.rec_date,
            "data", db.User.data, "role", db.User.role)).select_from(db.User).filter_by(
            id=db.Companies.entry_user_id) \
            .as_scalar().label('entry_user')

        typeofownership = g.tran.query(func.row_to_json(text('typeofownership.*'))).select_from(db.Typeofownership) \
            .filter_by(_deleted='infinity', _id=db.Companies.typeofownership_id).as_scalar() \
            .label('typeofownership')

        dircountry = g.tran.query(func.row_to_json(text('dircountry.*'))).select_from(db.DirCountry) \
            .filter_by(_deleted='infinity', _id=db.Companies.dircountry_id).as_scalar() \
            .label('dircountry')

        dircoate = g.tran.query(func.row_to_json(text('dircoate.*'))).select_from(db.DirCoate) \
            .filter_by(_deleted='infinity', _id=db.Companies.dircoate_id).as_scalar() \
            .label('dircoate')

        roles = g.tran.query(func.jsonb_agg(func.row_to_json(text('roles.*')))).select_from(db.Roles) \
            .filter_by(_deleted='infinity') \
            .filter(type_coerce(db.Companies.roles_id, JSONB).has_any(array([db.Roles._id]))) \
            .as_scalar().label('roles')

        company_user_ids = g.tran.query(db.Companyemployees.user_id).filter_by(_deleted='infinity',
                                                                               company_id=bag['id']).all()
        company_users = []
        for user_id in company_user_ids:
            user = g.tran.query(func.json_build_object(
                "id", db.User.id, "username", db.User.username, "email", db.User.email, "rec_date", db.User.rec_date,
                "data", db.User.data, "role", db.User.role)).select_from(db.User) \
                .filter_by(id=user_id).first()
            company_users.append(user)

        query = query.add_columns(company_status_value, company_type_value, entry_user, company_users, typeofownership,
                                  dircountry, roles, dircoate)

    company = query.one()
    return {'doc': orm_to_json(company)}
Example #32
def get_courses(order_by='course_rating', order_direction=DESCENDING, limit=100, offset=0, **kwargs):
    numeric_columns = { 'course_rating', 'instructor_rating', 'workload' }
    all_columns = keys | numeric_columns | { 'grade' }

    exact_keys = { key for key in keys if key in kwargs and kwargs[key] != AVERAGE }
    group_keys = keys - kwargs.keys()

    order_by_name = order_by if (order_by in all_columns and kwargs.get(order_by, '') != AVERAGE) else 'course_rating'

    query = select(
            [courses.columns[key] for key in group_keys] +
            [type_coerce(func.avg(courses.columns[key]), Float).label(key) for key in numeric_columns] +
            [type_coerce(func.avg(case(
                { 'A': 4.0, 'B': 3.0, 'C': 2.0, 'NR': 0.0 },
                value=courses.columns.grade,
                else_=0.0,
            )), Float).label('grade')]
        ).where(
            and_(*[courses.columns[key] == kwargs[key] for key in exact_keys])
        ).group_by(
            *[courses.columns[key] for key in group_keys]
        ).order_by(
            desc(order_by_name) if order_direction == DESCENDING else asc(order_by_name)
        ).limit(min(100, max(1, limit))).offset(max(0, offset))

    results = query.execute().fetchall()

    dict_result = []
    for result in results:
        item = dict(result.items())
        for key in exact_keys:
            item[key] = kwargs[key]
        grade = item['grade']
        if grade >= 3.5:
            grade = 'A'
        elif grade >= 2.5:
            grade = 'B'
        elif grade >= 1.5:
            grade = 'C'
        else:
            grade = 'NR'
        item['grade'] = grade
        dict_result.append(item)

    return dict_result
Example #33
 def _col_after_parent_attach(col, table):
     int_col = type_coerce(col, SmallInteger)
     e = CheckConstraint(
         int_col.in_(x.value for x in type_.enum
                     if x not in type_.exclude_values),
         f'valid_enum_{col.name}')
     e.info['alembic_dont_render'] = True
     assert e.table is table
Example #34
    def test_limit_preserves_typing_information(self):
        class MyType(TypeDecorator):
            impl = Integer

        stmt = select([type_coerce(column("x"), MyType).label("foo")]).limit(1)
        dialect = oracle.dialect()
        compiled = stmt.compile(dialect=dialect)
        assert isinstance(compiled._create_result_map()["foo"][-1], MyType)
Example #35
    def test_limit_preserves_typing_information(self):
        class MyType(TypeDecorator):
            impl = Integer

        stmt = select([type_coerce(column("x"), MyType).label("foo")]).limit(1)
        dialect = oracle.dialect()
        compiled = stmt.compile(dialect=dialect)
        assert isinstance(compiled._create_result_map()["foo"][-1], MyType)
Example #36
def select_list(userid, form):
    # Find the unique violation types and the number of reporters. This will be
    # joined against the Report model to get the violations/reporters for each
    # selected report.
    subq = (
        ReportComment.dbsession.query(
            ReportComment.reportid,
            sa.func.count(),
            sa.type_coerce(
                sa.func.array_agg(ReportComment.violation.distinct()),
                ARRAY(sa.Integer, as_tuple=True)).label('violations'))
        .filter(ReportComment.violation != 0)
        .group_by(ReportComment.reportid)
        .subquery())

    # Find reports, joining against the aforementioned subquery, and eager-load
    # the reports' owners.
    q = (
        Report.dbsession.query(Report, subq)
        .options(joinedload(Report.owner))
        .join(subq, Report.reportid == subq.c.reportid)
        .reset_joinpoint())

    # For each type of report, eagerly load the content reported and the
    # content's owner. Also, keep track of the Login model aliases used for each
    # report type so they can be filtered against later.
    login_aliases = []
    for column_name in _report_types:
        login_alias = aliased(Login)
        login_aliases.append(login_alias)
        q = (
            q
            .outerjoin(getattr(Report, column_name))
            .outerjoin(login_alias)
            .options(contains_eager(column_name + '.owner', alias=login_alias))
            .reset_joinpoint())

    # Filter by report status. form.status can also be 'all', in which case no
    # filter is applied.
    if form.status == 'closed':
        q = q.filter_by(is_closed=True)
    elif form.status == 'open':
        q = q.filter_by(is_closed=False)

    # If filtering by the report's content's owner, iterate over the previously
    # collected Login model aliases to compare against Login.login_name.
    if form.submitter:
        submitter = legacy.login_name(form.submitter)
        q = q.filter(sa.or_(l.login_name == submitter for l in login_aliases))

    # If filtering by violation type, see if the violation is in the array
    # aggregate of unique violations for this report.
    if form.violation and form.violation != '-1':
        q = q.filter(sa.literal(int(form.violation)) == sa.func.any(subq.c.violations))

    q = q.order_by(Report.opened_at.desc())
    return [(report, report_count, map(_convert_violation, violations))
            for report, _, report_count, violations in q.all()]
Example #37
def select_list(userid, form):
    # Find the unique violation types and the number of reporters. This will be
    # joined against the Report model to get the violations/reporters for each
    # selected report.
    subq = (
        ReportComment.dbsession.query(
            ReportComment.reportid,
            sa.func.count(),
            sa.type_coerce(
                sa.func.array_agg(ReportComment.violation.distinct()),
                ARRAY(sa.Integer, as_tuple=True)).label('violations'))
        .filter(ReportComment.violation != 0)
        .group_by(ReportComment.reportid)
        .subquery())

    # Find reports, joining against the aforementioned subquery, and eager-load
    # the reports' owners.
    q = (
        Report.dbsession.query(Report, subq)
        .options(joinedload(Report.owner))
        .join(subq, Report.reportid == subq.c.reportid)
        .reset_joinpoint())

    # For each type of report, eagerly load the content reported and the
    # content's owner. Also, keep track of the Login model aliases used for each
    # report type so they can be filtered against later.
    login_aliases = []
    for column_name in _report_types:
        login_alias = aliased(Login)
        login_aliases.append(login_alias)
        q = (
            q
            .outerjoin(getattr(Report, column_name))
            .outerjoin(login_alias)
            .options(contains_eager(column_name + '.owner', alias=login_alias))
            .reset_joinpoint())

    # Filter by report status. form.status can also be 'all', in which case no
    # filter is applied.
    if form.status == 'closed':
        q = q.filter_by(is_closed=True)
    elif form.status == 'open':
        q = q.filter_by(is_closed=False)

    # If filtering by the report's content's owner, iterate over the previously
    # collected Login model aliases to compare against Login.login_name.
    if form.submitter:
        submitter = d.get_sysname(form.submitter)
        q = q.filter(sa.or_(l.login_name == submitter for l in login_aliases))

    # If filtering by violation type, see if the violation is in the array
    # aggregate of unique violations for this report.
    if form.violation and form.violation != '-1':
        q = q.filter(sa.literal(int(form.violation)) == sa.func.any(subq.c.violations))

    q = q.order_by(Report.opened_at.desc())
    return [(report, report_count, map(_convert_violation, violations))
            for report, _, report_count, violations in q.all()]
Example #38
    def test_crit_against_int_coerce_type(self):
        name = self.tables.data_table.c.name
        col = self.tables.data_table.c['data']

        self._test_index_criteria(
            and_(name == 'r6', cast(col["a"], String) == type_coerce(5, JSON)),
            "r6",
            test_literal=False
        )
Example #39
 def __getitem__(self, index):
     super_ = super(ARRAY_D.Comparator, self).__getitem__(index)
     if not isinstance(index, slice) and self.type.dimensions > 1:
         super_ = type_coerce(
             super_,
             ARRAY_D(self.type.item_type,
                     dimensions=self.type.dimensions - 1,
                     zero_indexes=self.type.zero_indexes))
     return super_
Example #40
    def test_crit_against_int_coerce_type(self):
        name = self.tables.data_table.c.name
        col = self.tables.data_table.c['data']

        self._test_index_criteria(and_(
            name == 'r6',
            cast(col["a"], String) == type_coerce(5, JSON)),
                                  "r6",
                                  test_literal=False)
Example #41
 def in_(self, other):
     if isinstance(other, collections.Iterable):
         ret = []
         for elem in other:
             if isinstance(elem, EnumSymbol):
                 elem = type_coerce(elem, DeclEnum)
             ret.append(elem)
         other = ret
     return types.Enum.Comparator.in_(self, other)
Example #42
    def _fifteenrune(self):
        with get_session() as s:
            ktyp_id = get_ktyp(s, "winning").id

        return type_coerce(and_(
            Game.ktyp_id != None,
            Game.ktyp_id == ktyp_id,
            Game.end <= self.end,
            self._rune(15)), Integer)
Example #43
 def apply(self, q, cuts):
     """ Apply a set of filters, which can be given as a set of tuples in
     the form (ref, operator, value), or as a string in query form. If it
     is ``None``, no filter will be applied. """
     info = []
     for (ref, operator, value) in self.parse(cuts):
         info.append({'ref': ref, 'operator': operator, 'value': value})
         table, column = self.cube.model[ref].bind(self.cube)
         q = self.ensure_table(q, table)
         q = q.where(column == type_coerce(value, column.type))
     return info, q
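A hedged usage sketch for apply(): per the docstring, cuts may be given as (ref, operator, value) tuples or as a query string. The helper instance name, the refs, and the cut syntax below are assumptions:

# Tuple form: (ref, operator, value) triples.
info, q = cuts_helper.apply(q, [('time.year', ':', 2014)])

# String form: the same cut expressed in query syntax.
info, q = cuts_helper.apply(q, 'time.year:2014')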
Example #44
def delete_records_without_control_number():
    """
    Find all records without a control number and delete them.
    """
    # Find all records without control_number.
    records = RecordMetadata.query.filter(not_(
        type_coerce(RecordMetadata.json, JSONB).has_key(
            'control_number'))).all()

    for record in records:
        _delete_record(record)
Example #45
def get_bpm_filename(instrument_id, ccdsum, db_address=_DEFAULT_DB):
    with get_session(db_address=db_address) as db_session:
        criteria = (CalibrationImage.type == 'BPM', CalibrationImage.instrument_id == instrument_id,
                    cast(CalibrationImage.attributes['ccdsum'], String) == type_coerce(ccdsum, JSON))
        bpm_query = db_session.query(CalibrationImage).filter(*criteria)
        bpm = bpm_query.order_by(desc(CalibrationImage.dateobs)).first()

        if bpm is not None:
            bpm_path = os.path.join(bpm.filepath, bpm.filename)
        else:
            bpm_path = None
    return bpm_path
Example #46
    def test_ambiguous_column_by_col_plus_label(self):
        users = self.tables.users

        users.insert().execute(user_id=1, user_name='john')
        result = select(
            [users.c.user_id,
                type_coerce(users.c.user_id, Integer).label('foo')]).execute()
        row = result.first()
        eq_(
            row[users.c.user_id], 1
        )
        eq_(
            row[1], 1
        )
Example #47
def list_codes():
    if request_wants_json():
        with transaction():
            result = db.session.query(
                Code.id, Code.value,
                type_coerce(Code.user_id, db.Boolean)
            ).select_from(Code).outerjoin(User)
            return jsonify(rows=[{
                'id': code_id,
                'code': value,
                'requested': requested,
            } for code_id, value, requested in result])
    else:
        return render_template('list_codes.html')
Example #48
    def setup_mappers(cls):
        mapper(cls.classes.Person, cls.tables.person, properties=dict(
            pets=relationship(
                cls.classes.Pet, primaryjoin=(
                    orm.foreign(cls.tables.pets.c.person_id) ==
                    sa.cast(
                        sa.type_coerce(cls.tables.person.c.id, Integer),
                        Integer
                    )
                )
            )
        ))

        mapper(cls.classes.Pet, cls.tables.pets)
Example #49
    def default(self, o):
        if isinstance(o, unicode):
            return str(o)

        if isinstance(o, list):
            return [GeoJSONEncoder.default(self, item) for item in o]

        if isinstance(o, dict):
            resp = {}
            for key in o.keys():
                val = GeoJSONEncoder.default(self, o[key])
                resp[key] = val
            return resp

        if isinstance(o, geoalchemy2.WKBElement):
            str_data = db.session.scalar(geoalchemy2.functions.ST_AsGeoJSON(o))
            return flask.json.loads(str_data)

        if isinstance(o, geoalchemy2.RasterElement):
            try:
                gv = type_coerce(ST_DumpAsPolygons(o), GeomvalType()).label("gvs")
                q1 = db.session.query(gv)
                q2 = db.session.query(geoalchemy2.func.ST_AsGeoJSON(geoalchemy2.func.ST_Transform(q1.subquery().c.gvs.geom, 3857)))
                geoms = q2.all()

                q3 = db.session.query(q1.subquery().c.gvs.val)
                vals = q3.all()

                i = 0
                dat = []
                for geom in geoms:
                    val, poly = vals[i][0], flask.json.loads(geom[0])
                    dat.append((poly, val))
                    i += 1

                #js = flask.json.dumps(dat, cls=GeoJSONEncoder)
                return dat
            except Exception:
                pass

        if isinstance(o, RasterTile):
            resp = {}
            for key in o.__dict__:
                if key.startswith('_') or key.startswith('__'):
                    continue
                resp[key] = GeoJSONEncoder.default(self, o.__dict__[key])
            return flask.json.dumps(resp, cls=JSONEncoder)

        return flask.json.dumps(o, cls=JSONEncoder)
Example #50
 def signature_timestamp(self):
     return type_coerce(case(
         [(
             File.signed == True,
             text("""
                 (
                     to_timestamp(
                         signature_metadata->>'timestamp',
                         'YYYY-MM-DD"T"HH24:MI:SS.US'
                     )::timestamp without time zone
                 )
             """)
         )],
         else_=text('NULL')
     ), UTCDateTime)
Example #51
def get_all_unlinked_references():
    """Return a list of dict, in which each dictionary corresponds to one reference object
    and the status of core or non core"""
    query = (
        RecordMetadata.query
        .filter(
            type_coerce(RecordMetadata.json, JSONB)['_collections']
            .contains(['Literature'])
        )
        .with_entities(RecordMetadata.json)
    )

    for record in query.yield_per(1000):
        core = record.json.get('core')
        for reference in record.json.get('references', []):
            if 'record' not in reference:
                yield {'core': core, 'reference': reference}
Example #52
def add_claims_procedure_stems(claim_lines, codes):
    """
    Args:
    claim_lines: Medical Claim Lines table

    Returns: sqlalchemy selectable that runs get_claims_base_data and adds the procedure stem
    """
    base_without_stems = get_claims_base_data(claim_lines, codes)\
        .correlate(None)\
        .alias('base_without_stems')
    stem = sa.type_coerce(
        base_without_stems.c.procedure_name_array,
        pg.ARRAY(sa.Text))[1].label('procedure_name_stem')
    columns = [
        base_without_stems.c.servicing_provider_npi,
        base_without_stems.c.procedure_code,
        base_without_stems.c.procedure_name,
        stem]
    return sa.select(columns)
Example #53
def normalize_journal_titles(obj, eng):
    """Normalize the journal titles

    Normalize the journal titles stored in the `journal_title` field of each object
    contained in `publication_info`.

    Note:
        The DB is queried in order to get the `$ref` of each journal and add it in
        `journal_record`.

    TODO:
        Refactor: it must be checked that `normalize_journal_title` is appropriate.

    Args:
        obj: a workflow object.
        eng: a workflow engine.

    Returns:
       None
    """
    publications = obj.data.get('publication_info')

    if not publications:
        return None

    for index, publication in enumerate(publications):
        if 'journal_title' in publication:
            normalized_title = normalize_journal_title(publication['journal_title'])
            obj.data['publication_info'][index]['journal_title'] = normalized_title
            ref_query = RecordMetadata.query.filter(
                RecordMetadata.json['_collections'].op('?')('Journals')).filter(
                cast(RecordMetadata.json['short_title'], String) == type_coerce(normalized_title, JSON))
            result = db.session.execute(ref_query).fetchone()

            if result:
                obj.data['publication_info'][index]['journal_record'] = result.records_metadata_json['self']
Example #54
 def _set_table(self, column, table):
     e = CheckConstraint(type_coerce(column, self).in_(x.value for x in self.enum),
                         'valid_enum_{}'.format(column.name))
     e.info['alembic_dont_render'] = True
     assert e.table is table
Example #55
		def __ne__(self, other):
			if isinstance(other, bool):
				other = type_coerce(other, NPBoolean)
			return types.Boolean.Comparator.__ne__(self, other)
Example #56
def _upgrade_data(migrate_engine):
    # Rename duplicates to be unique.
    meta = sqlalchemy.schema.MetaData(migrate_engine)

    # ORM tables
    metadef_namespaces = Table('metadef_namespaces', meta, autoload=True)
    metadef_objects = Table('metadef_objects', meta, autoload=True)
    metadef_properties = Table('metadef_properties', meta, autoload=True)
    metadef_tags = Table('metadef_tags', meta, autoload=True)
    metadef_resource_types = Table('metadef_resource_types', meta,
                                   autoload=True)

    # Fix duplicate metadef_namespaces
    # Update the non-first record(s) with a unique namespace value
    dbrecs = _upgrade_metadef_namespaces_get_duplicates(migrate_engine)
    for row in dbrecs:
        s = (metadef_namespaces.update()
             .where(metadef_namespaces.c.id > row['id'])
             .where(metadef_namespaces.c.namespace == row['namespace'])
             )
        if migrate_engine.name == 'sqlite':
            s = (s.values(namespace=(row['namespace'] + '-DUPL-' +
                                     type_coerce(metadef_namespaces.c.id,
                                                 String)),
                          display_name=(row['namespace'] + '-DUPL-' +
                                        type_coerce(metadef_namespaces.c.id,
                                                    String))))
        else:
            s = s.values(namespace=func.concat(row['namespace'],
                                               '-DUPL-',
                                               metadef_namespaces.c.id),
                         display_name=func.concat(row['namespace'],
                                                  '-DUPL-',
                                                  metadef_namespaces.c.id))
        s.execute()

    # Fix duplicate metadef_objects
    dbrecs = _upgrade_metadef_objects_get_duplicates(migrate_engine)
    for row in dbrecs:
        s = (metadef_objects.update()
             .where(metadef_objects.c.id > row['id'])
             .where(metadef_objects.c.namespace_id == row['namespace_id'])
             .where(metadef_objects.c.name == str(row['name']))
             )
        if migrate_engine.name == 'sqlite':
            s = (s.values(name=(row['name'] + '-DUPL-'
                          + type_coerce(metadef_objects.c.id, String))))
        else:
            s = s.values(name=func.concat(row['name'], '-DUPL-',
                                          metadef_objects.c.id))
        s.execute()

    # Fix duplicate metadef_properties
    dbrecs = _upgrade_metadef_properties_get_duplicates(migrate_engine)
    for row in dbrecs:
        s = (metadef_properties.update()
             .where(metadef_properties.c.id > row['id'])
             .where(metadef_properties.c.namespace_id == row['namespace_id'])
             .where(metadef_properties.c.name == str(row['name']))
             )
        if migrate_engine.name == 'sqlite':
            s = (s.values(name=(row['name'] + '-DUPL-' +
                                type_coerce(metadef_properties.c.id, String)))
                 )
        else:
            s = s.values(name=func.concat(row['name'], '-DUPL-',
                                          metadef_properties.c.id))
        s.execute()

    # Fix duplicate metadef_tags
    dbrecs = _upgrade_metadef_tags_get_duplicates(migrate_engine)
    for row in dbrecs:
        s = (metadef_tags.update()
             .where(metadef_tags.c.id > row['id'])
             .where(metadef_tags.c.namespace_id == row['namespace_id'])
             .where(metadef_tags.c.name == str(row['name']))
             )
        if migrate_engine.name == 'sqlite':
            s = (s.values(name=(row['name'] + '-DUPL-' +
                                type_coerce(metadef_tags.c.id, String)))
                 )
        else:
            s = s.values(name=func.concat(row['name'], '-DUPL-',
                                          metadef_tags.c.id))
        s.execute()

    # Fix duplicate metadef_resource_types
    dbrecs = _upgrade_metadef_resource_types_get_duplicates(migrate_engine)
    for row in dbrecs:
        s = (metadef_resource_types.update()
             .where(metadef_resource_types.c.id > row['id'])
             .where(metadef_resource_types.c.name == str(row['name']))
             )
        if migrate_engine.name == 'sqlite':
            s = (s.values(name=(row['name'] + '-DUPL-' +
                                type_coerce(metadef_resource_types.c.id,
                                            String)))
                 )
        else:
            s = s.values(name=func.concat(row['name'], '-DUPL-',
                                          metadef_resource_types.c.id))
        s.execute()
Example #57
 def _col_after_parent_attach(col, table):
     e = CheckConstraint(type_coerce(col, type_).in_(x.value for x in type_.enum if x not in type_.exclude_values),
                         'valid_enum_{}'.format(col.name))
     e.info['alembic_dont_render'] = True
     assert e.table is table
Example #58
 def isnot(self, other):
     if other is None:
         return types.Boolean.Comparator.isnot(self, other)
     if isinstance(other, bool):
         other = type_coerce(other, NPBoolean)
     return types.Boolean.Comparator.__ne__(self, other)
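The comparator methods above (here and in Example #55) typically live on a custom Boolean type; a sketch of the assumed surrounding class:

import sqlalchemy.types as types
from sqlalchemy import type_coerce

class NPBoolean(types.Boolean):
    class Comparator(types.Boolean.Comparator):
        def __eq__(self, other):
            # Coerce plain Python bools to NPBoolean, mirroring the
            # __ne__ and isnot methods shown in Examples #55 and #58.
            if isinstance(other, bool):
                other = type_coerce(other, NPBoolean)
            return types.Boolean.Comparator.__eq__(self, other)

    comparator_factory = Comparator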