Example 1
0
def _ensure_view(conn, fields, name, replace_existing, where_expression):
    """
    Ensure a view exists for the given fields
    """
    # Dynamic views get a 'dv_' prefix to distinguish them from views created
    # as part of the schema itself. They exist mainly as a debugging convenience.
    view_name = tables.schema_qualified('dv_{}_dataset'.format(name))
    view_present = _pg_exists(conn, view_name)

    # Dropping before recreating leaves a window of time with no view;
    # acceptable since this path is primarily intended for development.
    if view_present and replace_existing:
        _LOG.debug('Dropping view: %s (replace=%r)', view_name,
                   replace_existing)
        conn.execute('drop view %s' % view_name)
        view_present = False

    if view_present:
        _LOG.debug('View exists: %s (replace=%r)', view_name, replace_existing)
    else:
        _LOG.debug('Creating view: %s', view_name)
        # One labelled column per search field, selected from the joined
        # dataset/dataset_type/metadata_type tables.
        labelled_columns = [
            field.alchemy_expression.label(field.name)
            for field in fields.values()
        ]
        joined_source = tables.DATASET.join(tables.DATASET_TYPE).join(
            tables.METADATA_TYPE)
        conn.execute(
            tables.CreateView(
                view_name,
                select(labelled_columns).select_from(joined_source).where(
                    where_expression)))

    # Clean up any view left over from the previous (unprefixed) naming scheme.
    legacy_name = tables.schema_qualified('{}_dataset'.format(name))
    if _pg_exists(conn, legacy_name):
        _LOG.debug('Dropping legacy view: %s', legacy_name)
        conn.execute('drop view %s' % legacy_name)
Example 2
0
def _check_field_index(conn,
                       fields,
                       name_prefix,
                       filter_expression,
                       should_exist=True,
                       concurrently=False,
                       replace_existing=False,
                       index_type=None):
    """
    Check the status of a given index: add or remove it as needed
    """
    # A composite index has no single obvious type, so the caller must supply one.
    if index_type is None:
        if len(fields) > 1:
            raise ValueError('Must specify index type for composite indexes.')
        index_type = fields[0].postgres_index_type

    field_name = '_'.join(f.name.lower() for f in fields)

    # Dynamic indexes are prefixed "dix_"; regular schema indexes use "ix_".
    index_name = 'dix_{prefix}_{field_name}'.format(
        prefix=name_prefix.lower(), field_name=field_name)
    # Name from the previous naming scheme, still checked so it can be cleaned up.
    legacy_name = 'dix_field_{prefix}_dataset_{field_name}'.format(
        prefix=name_prefix.lower(),
        field_name=field_name,
    )

    expressions = [f.alchemy_expression for f in fields]
    index = Index(
        index_name,
        *expressions,
        postgresql_where=filter_expression,
        postgresql_using=index_type,
        # Don't lock the table (in the future we'll allow indexing new fields...)
        postgresql_concurrently=concurrently)

    exists = _pg_exists(conn, tables.schema_qualified(index_name))
    legacy_exists = _pg_exists(conn, tables.schema_qualified(legacy_name))

    # Dropping before recreating leaves a window of time without indexes;
    # this path is primarily intended for development.
    if replace_existing or not should_exist:
        if exists:
            _LOG.debug('Dropping index: %s (replace=%r)', index_name,
                       replace_existing)
            index.drop(conn)
            exists = False
        if legacy_exists:
            _LOG.debug('Dropping legacy index: %s (replace=%r)', legacy_name,
                       replace_existing)
            Index(legacy_name, *expressions).drop(conn)
            legacy_exists = False

    if should_exist:
        if exists or legacy_exists:
            _LOG.debug('Index exists: %s  (replace=%r)', index_name,
                       replace_existing)
        else:
            _LOG.info('Creating index: %s', index_name)
            index.create(conn)