def upgrade(migrate_engine):
    print(__doc__)
    metadata.bind = migrate_engine
    HistoryDatasetAssociation_table = Table("history_dataset_association", metadata, autoload=True)
    # Load existing tables
    metadata.reflect()
    # Add a temporary index on history_dataset_association.extension to speed up the updates below
    i = Index('ix_hda_extension', HistoryDatasetAssociation_table.c.extension)
    try:
        i.create()
    except Exception:
        log.exception("Adding index 'ix_hda_extension' to history_dataset_association table failed.")

    # Re-type generic 'qual' datasets as 454 or Solexa quality scores based on the dataset peek
    cmd = "UPDATE history_dataset_association SET extension = 'qual454' WHERE extension = 'qual' and peek like \'>%%\'"
    try:
        migrate_engine.execute(cmd)
    except Exception:
        log.exception("Resetting extension qual to qual454 in history_dataset_association failed.")
    cmd = "UPDATE history_dataset_association SET extension = 'qualsolexa' WHERE extension = 'qual' and peek not like \'>%%\'"
    try:
        migrate_engine.execute(cmd)
    except Exception:
        log.exception("Resetting extension qual to qualsolexa in history_dataset_association failed.")
    # Drop the temporary index now that the updates are done
    try:
        i.drop()
    except Exception:
        log.exception("Dropping index 'ix_hda_extension' to history_dataset_association table failed.")
def upgrade(migrate_engine):
    display_migration_details()
    metadata.bind = migrate_engine
    db_session = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, autocommit=True ) )
    HistoryDatasetAssociation_table = Table( "history_dataset_association", metadata, autoload=True )
    # Load existing tables
    metadata.reflect()
    # Add a temporary index on history_dataset_association.extension to speed up the updates below
    i = Index( 'ix_hda_extension', HistoryDatasetAssociation_table.c.extension )
    try:
        i.create()
    except Exception as e:
        log.debug( "Adding index 'ix_hda_extension' to history_dataset_association table failed: %s" % ( str( e ) ) )

    # Re-type generic 'qual' datasets as 454 or Solexa quality scores based on the dataset peek
    cmd = "UPDATE history_dataset_association SET extension = 'qual454' WHERE extension = 'qual' and peek like \'>%%\'"
    try:
        db_session.execute( cmd )
    except Exception as e:
        log.debug( "Resetting extension qual to qual454 in history_dataset_association failed: %s" % ( str( e ) ) )
    cmd = "UPDATE history_dataset_association SET extension = 'qualsolexa' WHERE extension = 'qual' and peek not like \'>%%\'"
    try:
        db_session.execute( cmd )
    except Exception as e:
        log.debug( "Resetting extension qual to qualsolexa in history_dataset_association failed: %s" % ( str( e ) ) )
    # Drop the temporary index now that the updates are done
    try:
        i.drop()
    except Exception as e:
        log.debug( "Dropping index 'ix_hda_extension' to history_dataset_association table failed: %s" % ( str( e ) ) )
Example #3
    def __init__(self, engine, fieldsmap=None):
        """
        NOTE: `engine` must be using a gevent-safe db connector.
        """
        self.connection = engine.connect()
        metadata.create_all(engine)
        self.fieldsmap = (fieldsmap if fieldsmap is not None
                          else DEFAULT_FIELDSMAP.copy())

        # create explicitly specified indexes
        indexed = self.fieldsmap.pop('_indexed')
        for fieldnames in indexed:
            if isinstance(fieldnames, StringTypes):
                fieldnames = [fieldnames]
            idxname = '_'.join(['ix_messages'] + list(fieldnames))
            col_objs = [getattr(messages.c, fieldname)
                        for fieldname in fieldnames]
            idx = Index(idxname, *col_objs)
            try:
                idx.create(engine)
            except OperationalError:
                pass

        # create indexes for the used `fieldN` fields
        used_fields = set()
        for typemap in self.fieldsmap.values():
            used_fields.update(typemap.values())
        for field in used_fields:
            idxname = 'ix_messages_field%d' % field
            idx = Index(idxname, getattr(messages.c, 'field%d' % field))
            try:
                idx.create(engine)
            except OperationalError:
                pass
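A note on the OperationalError-swallowing pattern above: newer SQLAlchemy releases also let Index.create() take a checkfirst flag so the CREATE INDEX is skipped when the index already exists, which avoids the bare try/except. A minimal sketch, assuming the engine and reflected messages table from the snippet, an illustrative column name, and a SQLAlchemy version where checkfirst is honored for indexes (worth verifying against the version in use):

# Minimal sketch: idempotent index creation via checkfirst instead of
# swallowing OperationalError. Column name is illustrative.
from sqlalchemy import Index

idx = Index('ix_messages_field1', messages.c.field1)
idx.create(engine, checkfirst=True)  # no-op if an index with this name exists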
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    resource_providers = Table('resource_providers', meta, autoload=True)
    columns_to_add = [
            ('root_provider_id',
                Column('root_provider_id', Integer,
                       ForeignKey('resource_providers.id'))),
            ('parent_provider_id',
                Column('parent_provider_id', Integer,
                       ForeignKey('resource_providers.id'))),
    ]
    for col_name, column in columns_to_add:
        if not hasattr(resource_providers.c, col_name):
            resource_providers.create_column(column)

    indexed_columns = set()
    for idx in resource_providers.indexes:
        for c in idx.columns:
            indexed_columns.add(c.name)

    if 'root_provider_id' not in indexed_columns:
        index = Index('resource_providers_root_provider_id_idx',
                resource_providers.c.root_provider_id)
        index.create()
    if 'parent_provider_id' not in indexed_columns:
        index = Index('resource_providers_parent_provider_id_idx',
                resource_providers.c.parent_provider_id)
        index.create()
def downgrade(migrate_engine):
    meta.bind = migrate_engine

    records_table = Table("records", meta, autoload=True)

    name_idx = Index("rec_name_index", records_table.c.name)
    name_idx.create()
Example #6
def upgrade(migrate_engine):
    metadata.bind = migrate_engine
    print(__doc__)
    metadata.reflect()
    try:
        if migrate_engine.name == 'mysql':
            # Strip slug index prior to creation so we can do it manually.
            slug_index = None
            for ix in Page_table.indexes:
                if ix.name == 'ix_page_slug':
                    slug_index = ix
            Page_table.indexes.remove(slug_index)
        Page_table.create()
        if migrate_engine.name == 'mysql':
            # Create slug index manually afterward.
            i = Index("ix_page_slug", Page_table.c.slug, mysql_length=200)
            i.create()
    except Exception:
        log.exception("Could not create page table")
    try:
        PageRevision_table.create()
    except Exception:
        log.exception("Could not create page_revision table")

    # Add 1 column to the user table
    User_table = Table("galaxy_user", metadata, autoload=True)
    col = Column('username', String(255), index=True, unique=True, default=False)
    col.create(User_table, index_name='ix_user_username', unique_name='username')
    assert col is User_table.c.username
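The MySQL branch above exists because a plain index on a TEXT column fails on MySQL without a prefix length. A standalone sketch of the same technique, with illustrative table, column, and connection URL; mysql_length is a dialect-specific keyword that other backends simply ignore:

# Minimal sketch: prefix-length index on a TEXT column. Only the MySQL dialect
# consumes mysql_length; other dialects create a normal full-value index.
from sqlalchemy import MetaData, Table, Index, create_engine

engine = create_engine("mysql://user:pass@localhost/galaxy")  # placeholder URL
metadata = MetaData(bind=engine)
page = Table("page", metadata, autoload=True)  # assumes a TEXT "slug" column
Index("ix_page_slug", page.c.slug, mysql_length=200).create(engine)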
def loadTable(datapath, datatable, delimiter, dtype, engine, indexCols=[], skipLines=1, chunkSize=100000, **kwargs):
    cnt = 0
    with open(datapath) as fh:
        while cnt < skipLines:
            fh.readline()
            cnt += 1
        cnt = 0
        tmpstr = ''
        for l in fh:
            tmpstr += l
            cnt += 1
            if cnt%chunkSize == 0:
                print "Loading chunk #%i"%(int(cnt/chunkSize))
                dataArr = numpy.genfromtxt(StringIO(tmpstr), dtype=dtype, delimiter=delimiter, **kwargs)
                engine.execute(datatable.insert(), [dict((name, numpy.asscalar(l[name])) for name in l.dtype.names) for l in dataArr])
                tmpstr = ''
        #Clean up the last chunk
        if len(tmpstr) > 0:
            dataArr = numpy.genfromtxt(StringIO(tmpstr), dtype=dtype, delimiter=delimiter, **kwargs)
            try:
                engine.execute(datatable.insert(), [dict((name, numpy.asscalar(l[name])) for name in l.dtype.names) for l in dataArr])
            # If the file only has one line, genfromtxt returns a 0-d array, which cannot be iterated
            except TypeError:
                engine.execute(datatable.insert(), [dict((name, numpy.asscalar(dataArr[name])) for name in dataArr.dtype.names),])

    for col in indexCols:
        if hasattr(col, "__iter__"):
            print "Creating index on %s"%(",".join(col))
            colArr = (datatable.c[c] for c in col)
            i = Index('%sidx'%''.join(col), *colArr)
        else:
            print "Creating index on %s"%(col)
            i = Index('%sidx'%col, datatable.c[col])

        i.create(engine)
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    build_requests = Table('build_requests', meta, autoload=True)

    columns_to_add = [
            ('instance_uuid',
                Column('instance_uuid', String(length=36))),
            ('instance',
                Column('instance', Text())),
    ]
    for (col_name, column) in columns_to_add:
        if not hasattr(build_requests.c, col_name):
            build_requests.create_column(column)

    for index in build_requests.indexes:
        if [c.name for c in index.columns] == ['instance_uuid']:
            break
    else:
        index = Index('build_requests_instance_uuid_idx',
                build_requests.c.instance_uuid)
        index.create()

    inspector = reflection.Inspector.from_engine(migrate_engine)
    constrs = inspector.get_unique_constraints('build_requests')
    constr_names = [constr['name'] for constr in constrs]
    if 'uniq_build_requests0instance_uuid' not in constr_names:
        UniqueConstraint('instance_uuid', table=build_requests,
                name='uniq_build_requests0instance_uuid').create()
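The for/else existence check above generalizes to a small helper; a minimal sketch using the same Inspector API to look up existing indexes by their column lists (table and index names are illustrative):

# Minimal sketch: create an index only if no existing index covers the same columns.
from sqlalchemy import MetaData, Table, Index
from sqlalchemy.engine import reflection

def ensure_index(migrate_engine, table_name, index_name, column_names):
    meta = MetaData(bind=migrate_engine)
    table = Table(table_name, meta, autoload=True)
    inspector = reflection.Inspector.from_engine(migrate_engine)
    existing = [ix['column_names'] for ix in inspector.get_indexes(table_name)]
    if list(column_names) in existing:
        return  # an equivalent index already exists
    Index(index_name, *[table.c[name] for name in column_names]).create(migrate_engine)

# e.g. ensure_index(migrate_engine, 'build_requests',
#                   'build_requests_instance_uuid_idx', ['instance_uuid'])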
Example #9
def upgrade(migrate_engine):
    metadata.bind = migrate_engine

    print(__doc__)
    metadata.reflect()

    Visualization_table = Table( "visualization", metadata, autoload=True )
    Visualization_revision_table = Table( "visualization_revision", metadata, autoload=True )

    # Create dbkey columns.
    x = Column( "dbkey", TEXT )
    y = Column( "dbkey", TEXT )
    x.create( Visualization_table )
    y.create( Visualization_revision_table )
    # Manually create indexes for compatibility w/ mysql_length.
    xi = Index( "ix_visualization_dbkey", Visualization_table.c.dbkey, mysql_length=200)
    xi.create()
    yi = Index( "ix_visualization_revision_dbkey", Visualization_revision_table.c.dbkey, mysql_length=200)
    yi.create()
    assert x is Visualization_table.c.dbkey
    assert y is Visualization_revision_table.c.dbkey

    all_viz = migrate_engine.execute( "SELECT visualization.id as viz_id, visualization_revision.id as viz_rev_id, visualization_revision.config FROM visualization_revision \
                    LEFT JOIN visualization ON visualization.id=visualization_revision.visualization_id" )
    for viz in all_viz:
        viz_id = viz['viz_id']
        viz_rev_id = viz['viz_rev_id']
        if viz[Visualization_revision_table.c.config]:
            dbkey = loads(viz[Visualization_revision_table.c.config]).get('dbkey', "").replace("'", "\\'")
            migrate_engine.execute("UPDATE visualization_revision SET dbkey='%s' WHERE id=%s" % (dbkey, viz_rev_id))
            migrate_engine.execute("UPDATE visualization SET dbkey='%s' WHERE id=%s" % (dbkey, viz_id))
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    records_table = Table("records", meta, autoload=True)

    index = Index("designate_recordset_id", records_table.c.designate_recordset_id)
    index.create()
Example #11
def _mk_index_on(engine, ts_name):
    fc_table = introspect_table(engine, "{}_RegressionIndicator".format(ts_name))

    fast_fc_lookup = Index('idx_fast_ri_lookup', fc_table.c.RegressionID)
    try:
        fast_fc_lookup.create(engine)
    except (sqlalchemy.exc.OperationalError, sqlalchemy.exc.ProgrammingError) as e:
        logger.warning("Skipping index creation on {}, because of {}".format(fc_table.name, e.message))
Example #12
File: pyhis_dao.py  Project: twdb/txhis
    def _create_indexes(self):
        # create an index on timeseries values if it doesn't exist
        try:
            i = Index('ix_timeseries_values_id',
                      model.DataValue.__table__.c.timeseries_id)
            i.create(self.engine)
        except OperationalError:
            pass
def upgrade(migrate_engine):
    """Add an index to make the scheduler lookups of compute_nodes and joined
    compute_node_stats more efficient.
    """
    meta = MetaData(bind=migrate_engine)
    cn_stats = Table(TABLE_NAME, meta, autoload=True)
    idx = Index(IDX_NAME, cn_stats.c.compute_node_id, cn_stats.c.deleted)
    idx.create(migrate_engine)
Example #14
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    schedules = Table('schedules', meta, autoload=True)

    index = Index(INDEX_NAME, schedules.c.next_run)
    index.create(migrate_engine)
Example #15
def _mk_index_on(engine, ts_name):
    fc_table = introspect_table(engine, "{}_FieldChangeV2".format(ts_name))

    fast_fc_lookup = Index('idx_fast_fieldchange_lookup', fc_table.c.StartOrderID)
    try:
        fast_fc_lookup.create(engine)
    except (sqlalchemy.exc.OperationalError, sqlalchemy.exc.ProgrammingError) as e:
        logger.warning("Skipping index creation on {}, because of {}".format(fc_table.name, e.message))
Example #16
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    jobs = Table('jobs', meta, autoload=True)

    index = Index(INDEX_NAME, jobs.c.hard_timeout)
    index.create(migrate_engine)
def upgrade(migrate_engine):
    table, index = _get_table_index(migrate_engine)
    if index:
        LOG.info('Skipped adding compute_nodes_uuid_idx because an '
                 'equivalent index already exists.')
        return
    index = Index('compute_nodes_uuid_idx', table.c.uuid, unique=True)
    index.create(migrate_engine)
Example #18
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    images = Table('images', meta, autoload=True)

    index = Index(INDEX_NAME, images.c.owner)
    index.create(migrate_engine)
def upgrade(migrate_engine):
    metadata.bind = migrate_engine
    print(__doc__)
    metadata.reflect()
    i = Index("ix_hda_ta_history_dataset_association_id", HistoryDatasetAssociationTagAssociation_table.c.history_dataset_association_id)
    try:
        i.create()
    except Exception:
        log.exception("Adding index 'ix_hdata_history_dataset_association_id' to table 'history_dataset_association_tag_association' table failed.")
def upgrade(migrate_engine):
    meta, table, index = _get_table_index(migrate_engine)
    if index:
        LOG.info('Skipped adding %s because an equivalent index'
                 ' already exists.', INDEX_NAME)
        return
    columns = [getattr(table.c, col_name) for col_name in INDEX_COLUMNS]
    index = Index(INDEX_NAME, *columns)
    index.create(migrate_engine)
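The _get_table_index helper used by the last two snippets is not shown. A plausible, hypothetical sketch of what such a helper might do, matching the (meta, table, index) return shape used directly above and assuming TABLE_NAME and INDEX_COLUMNS are module-level constants:

# Hypothetical sketch of _get_table_index: reflect the table and return any
# existing index whose column list matches INDEX_COLUMNS, else None.
from sqlalchemy import MetaData, Table

def _get_table_index(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    table = Table(TABLE_NAME, meta, autoload=True)
    for idx in table.indexes:
        if [c.name for c in idx.columns] == list(INDEX_COLUMNS):
            return meta, table, idx
    return meta, table, None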
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine

    metrics = Table("metrics", meta, autoload=True)
    metrics_timestamp_index = Index("metrics_timestamp_index", metrics.columns.timestamp)
    metrics_timestamp_index.create(bind=migrate_engine)
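Under the same sqlalchemy-migrate convention, the matching downgrade reflects the table again and drops the index; a minimal sketch mirroring the names above:

# Minimal sketch of the corresponding downgrade: bind migrate_engine, reflect,
# and drop the index created by upgrade().
from sqlalchemy import MetaData, Table, Index

def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    metrics = Table("metrics", meta, autoload=True)
    Index("metrics_timestamp_index", metrics.columns.timestamp).drop(bind=migrate_engine)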
Example #22
def build_markov(cursor, cmdchar, ctrlchan, speaker=None, initial_run=False, debug=False):
    """ Builds a markov dictionary."""
    if initial_run:
        cursor.query(Babble_last).delete()
    lastrow = cursor.query(Babble_last).first()
    if not lastrow:
        lastrow = Babble_last(last=0)
        cursor.add(lastrow)
    if debug:
        t = time.time()
    curr, markov = generate_markov(cursor, cmdchar, ctrlchan, speaker, lastrow, initial_run)
    if debug:
        print('Generated markov in %f' % (time.time() - t))
        t = time.time()
    data, count_data = build_rows(cursor, markov, initial_run)
    if debug:
        print('Rows built in %f' % (time.time() - t))
    if initial_run:
        if debug:
            t = time.time()
        cursor.execute('DROP INDEX IF EXISTS ix_babble_key')
        cursor.execute(Babble.__table__.delete())
        cursor.execute(Babble_count.__table__.delete())
        if debug:
            print('Dropped index and cleared tables in %f' % (time.time() - t))
    if debug:
        t = time.time()
    if initial_run and cursor.bind.dialect.name == 'postgresql':
        # Crazy magic to insert a ton of data really fast, drops runtime in half on large datasets.
        raw_cursor = cursor.connection().connection.cursor()
        prev = 0
        for i in range(20000, len(data), 20000):
            args_str = '\n'.join([raw_cursor.mogrify("INSERT INTO babble (source,target,key,word,freq) VALUES(%s,%s,%s,%s,%s);", x).decode() for x in data[prev:i]])
            raw_cursor.execute(args_str)
            prev = i
        args_str = '\n'.join([raw_cursor.mogrify("INSERT INTO babble (source,target,key,word,freq) VALUES(%s,%s,%s,%s,%s);", x).decode() for x in data[prev:]])
        raw_cursor.execute(args_str)
    else:
        data = [{'source': x[0], 'target': x[1], 'key': x[2], 'word': x[3], 'freq': x[4]} for x in data]
        cursor.bulk_insert_mappings(Babble, data)
    cursor.bulk_insert_mappings(Babble_count, count_data)
    if debug:
        print('Inserted rows in %f' % (time.time() - t))
    if curr is not None:
        lastrow.last = curr
    if initial_run:
        if debug:
            t = time.time()
        key_index = Index('ix_babble_key', Babble.key)
        key_index.create(cursor.connection())
        if debug:
            print('Created index in %f' % (time.time() - t))
    if debug:
        t = time.time()
    cursor.commit()
    if debug:
        print('Committed in %f' % (time.time() - t))
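The postgresql branch above boils down to one trick: psycopg2's cursor.mogrify() renders a statement with its parameters already bound, so thousands of INSERTs can be concatenated and sent in a few round trips. A minimal standalone sketch, assuming a raw psycopg2 connection and rows shaped like the babble tuples:

# Minimal sketch of the batched-INSERT trick, isolated from build_markov().
def bulk_insert_babble(raw_conn, rows, chunk=20000):
    cur = raw_conn.cursor()
    for start in range(0, len(rows), chunk):
        stmts = '\n'.join(
            cur.mogrify(
                "INSERT INTO babble (source,target,key,word,freq) "
                "VALUES (%s,%s,%s,%s,%s);", row).decode()
            for row in rows[start:start + chunk])
        if stmts:  # an empty log table produces an empty chunk
            cur.execute(stmts)
    raw_conn.commit()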
def downgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    load_tables = dict((table_name, Table(table_name, meta, autoload=True))
                       for table_name in INDEXES.keys())
    for table_name, indexes in INDEXES.items():
        table = load_tables[table_name]
        for index_name, column in indexes:
            index = Index(index_name, table.c[column])
            index.create()
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    images = Table('images', meta, autoload=True)

    created_index = Index(CREATED_AT_INDEX, images.c.created_at)
    created_index.create(migrate_engine)
    updated_index = Index(UPDATED_AT_INDEX, images.c.updated_at)
    updated_index.create(migrate_engine)
Example #25
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    t = Table('bm_nodes', meta, autoload=True)
    uuid_col = Column('uuid', String(36))
    t.create_column(uuid_col)

    uuid_ux = Index('uuid_ux', t.c.uuid, unique=True)
    uuid_ux.create(migrate_engine)
Example #26
def upgrade(migrate_engine):
    metadata.bind = migrate_engine
    display_migration_details()
    metadata.reflect()
    i = Index( "ix_hda_ta_history_dataset_association_id", HistoryDatasetAssociationTagAssociation_table.c.history_dataset_association_id )
    try:
        i.create()
    except Exception as e:
        print(str(e))
        log.debug( "Adding index 'ix_hda_ta_history_dataset_association_id' to table 'history_dataset_association_tag_association' failed: %s" % str( e ) )
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    # Based on migration_get_by_instance_and_status
    # from: nova/db/sqlalchemy/api.py
    t = Table('migrations', meta, autoload=True)
    i = Index('migrations_instance_uuid_and_status_idx', t.c.deleted,
            t.c.instance_uuid, t.c.status)
    i.create(migrate_engine)
Example #28
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    for table in ['block_device_mapping',
                  'consoles',
                  'volumes']:
        t = Table(table, meta, autoload=True)
        i = Index('%s_instance_uuid_idx' % table, t.c.instance_uuid)
        i.create(migrate_engine)
Example #29
File: babble.py  Project: tjcsl/cslbot
def build_markov(cursor, cmdchar, ctrlchan, speaker=None, initial_run=False, debug=False):
    """Builds a markov dictionary."""
    if initial_run:
        cursor.query(Babble_last).delete()
    lastrow = cursor.query(Babble_last).first()
    if not lastrow:
        lastrow = Babble_last(last=0)
        cursor.add(lastrow)
    t = time.time()  # for debug
    messages = get_messages(cursor, cmdchar, ctrlchan, speaker, lastrow.last)
    # FIXME: count can be too low if speaker is not None
    curr = messages[-1].id if messages else None
    markov = generate_markov(cursor, 1, messages, initial_run)
    markov2 = generate_markov(cursor, 2, messages, initial_run)
    if debug:
        print('Generated markov in %f' % (time.time() - t))
        t = time.time()
    data, count_data = build_rows(cursor, 1, markov, initial_run)
    data2, count_data2 = build_rows(cursor, 2, markov2, initial_run)
    if debug:
        print('Rows built in %f' % (time.time() - t))
    if initial_run:
        t = time.time()  # for debug
        delete_tables(cursor)
        if debug:
            print('Tables deleted in %f' % (time.time() - t))
    t = time.time()  # for debug
    if initial_run and cursor.bind.dialect.name == 'postgresql':
        postgres_hack(cursor, 1, data)
        postgres_hack(cursor, 2, data2)
    else:
        data = [{'source': x[0], 'target': x[1], 'key': x[2], 'word': x[3], 'freq': x[4]} for x in data]
        cursor.bulk_insert_mappings(Babble, data)
        data2 = [{'source': x[0], 'target': x[1], 'key': x[2], 'word': x[3], 'freq': x[4]} for x in data2]
        cursor.bulk_insert_mappings(Babble2, data2)
    cursor.bulk_insert_mappings(Babble_count, count_data)
    cursor.bulk_insert_mappings(Babble_count, count_data2)
    if debug:
        print('Inserted rows in %f' % (time.time() - t))
    if curr is not None:
        lastrow.last = curr
    if initial_run:
        if debug:
            t = time.time()
        key_index = Index('ix_babble_key', Babble.key)
        key_index2 = Index('ix_babble2_key', Babble2.key)
        key_index.create(cursor.connection())
        key_index2.create(cursor.connection())
        if debug:
            print('Created index in %f' % (time.time() - t))
    t = time.time()  # for debug
    cursor.commit()
    if debug:
        print('Committed in %f' % (time.time() - t))
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)

    # Based on instance_get_all_host_and_node
    # from: nova/db/sqlalchemy/api.py
    index = Index(INDEX_NAME,
                  instances.c.host, instances.c.node, instances.c.deleted)
    index.create(migrate_engine)
            'subdomain': item.get('subdomain', None),
            'domain': item.get('domain', None),
            'tld': item.get('tld', None),
            'query': item.get('query', None),
        } for item_old_name, item in v['list'].items()]
        psycopg2.extras.execute_batch(
            cursor,
            """INSERT INTO warninglists(list_name, list_version, address, hash, subdomain, domain, tld, query) VALUES (%(list_name)s, %(list_version)s, %(address)s, %(hash)s, %(subdomain)s, %(domain)s, %(tld)s, %(query)s)""",
            tbi)
        raw_conn.commit()
    else:
        logging.debug(f"{name}, {version} already in db - SKIPPING")

cursor.close()
conn.close()
raw_conn.close()

try:
    warninglists_address_idx.create(engine)
except Exception:
    logging.error("warninglists_address_idx already exists")
try:
    warninglists_hash_idx.create(engine)
except Exception:
    logging.error("warninglists_hash_idx already exists")
try:
    warninglists_domain_idx.create(engine)
except Exception:
    logging.error("warninglists_domain_idx already exists")
engine.dispose()
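The execute_batch call near the top of this script is psycopg2's own batching helper; a minimal standalone sketch (the DSN and the single sample row are placeholders):

# Minimal sketch: psycopg2.extras.execute_batch groups many parameter sets into
# batched statements, reducing round trips compared with one execute() per row.
import psycopg2
import psycopg2.extras

conn = psycopg2.connect("dbname=misp user=misp")  # placeholder DSN
cur = conn.cursor()
rows = [{'list_name': 'example-list', 'list_version': 1, 'address': '203.0.113.1',
         'hash': None, 'subdomain': None, 'domain': None, 'tld': None, 'query': None}]
psycopg2.extras.execute_batch(
    cur,
    "INSERT INTO warninglists(list_name, list_version, address, hash, subdomain, domain, tld, query) "
    "VALUES (%(list_name)s, %(list_version)s, %(address)s, %(hash)s, %(subdomain)s, %(domain)s, %(tld)s, %(query)s)",
    rows)
conn.commit()
cur.close()
conn.close()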
def upgrade(migrate_engine):
    meta.bind = migrate_engine
    index = Index('idx_meter_rid_cname', Meter.resource_id,
                  Meter.counter_name)
    index.create(bind=migrate_engine)
Example #33
    if args.config:
        success = load_config(args.config)
        if success:
            logger.config_logging(file_name="migrate_database.log",
                                  log_level=CONFIG["log_level"],
                                  dir_name=CONFIG["log_path"],
                                  day_rotate=False,
                                  when="D",
                                  interval=1,
                                  max_size=20,
                                  backup_count=5,
                                  console=True)

            LOG.info("migrate start")
            engine, _ = ApplicationsTable.init_engine_and_session()
            application_name_index.create(bind=engine)

            engine, _ = TasksTable.init_engine_and_session()
            task_name_index.create(bind=engine)
            task_start_at_index.create(bind=engine)
            task_end_at_index.create(bind=engine)
            task_status_index.create(bind=engine)

            engine, _ = WorkflowsTable.init_engine_and_session()
            workflow_name_index.create(bind=engine)

            engine, _ = WorksTable.init_engine_and_session()
            work_name_index.create(bind=engine)
            work_start_at_index.create(bind=engine)
            work_end_at_index.create(bind=engine)
            work_status_index.create(bind=engine)
                  str(e))

    try:
        HistoryDatasetAssociationTagAssociation_table.drop()
        HistoryDatasetAssociationTagAssociation_table.create()
    except OperationalError as e:
        # Handle the error that results from an index name that is too long; this
        # occurs in MySQL.
        if str(e).find("CREATE INDEX") != -1:
            # Manually create index.
            i = Index(
                "ix_hda_ta_history_dataset_association_id",
                HistoryDatasetAssociationTagAssociation_table.c.
                history_dataset_association_id)
            try:
                i.create()
            except Exception as e:
                print(str(e))
                log.debug(
                    "Adding index 'ix_hda_ta_history_dataset_association_id' to table 'history_dataset_association_tag_association' table failed: %s"
                    % str(e))
    except Exception as e:
        print(str(e))
        log.debug(
            "Recreating history_dataset_association_tag_association table failed: %s"
            % str(e))

    # Create page_tag_association table.
    try:
        PageTagAssociation_table.create()
    except Exception as e:
Example #35
def _update_col(column, table, data_type, columns):
    """
    Update the column based on the database operation.
    :param column: Base column.
    :type column: BaseColumn
    :param columns: Existing column names in the database for the given table.
    :type columns: list
    :returns: SQLAlchemy column object.
    :rtype: Column
    """
    from stdm.data.configuration.columns import BoundsColumn

    alchemy_column = Column(column.name, data_type, **_base_col_attrs(column))

    idx_name = None
    if column.index:
        idx_name = u'idx_{0}_{1}'.format(column.entity.name, column.name)
    unique_name = None
    if column.unique:
        unique_name = u'unq_{0}_{1}'.format(column.entity.name, column.name)

    if column.action == DbItem.CREATE:
        # Ensure the column does not exist otherwise an exception will be thrown
        if column.name not in columns:
            alchemy_column.create(table=table, unique_name=unique_name)

            # Create check constraints accordingly
            if isinstance(column, BoundsColumn) and \
                    column.can_create_check_constraints():
                # Create check constraint if need be
                chk_const = check_constraint(column, alchemy_column, table)
                if chk_const is not None:
                    chk_const.create()

    elif column.action == DbItem.ALTER:
        # Ensure the column exists before altering
        if column.name in columns:
            col_attrs = _base_col_attrs(column)
            col_attrs['table'] = table
            alchemy_column.alter(**col_attrs)

    elif column.action == DbItem.DROP:
        # Ensure the column exists before dropping
        if column.name in columns:
            _clear_ref_in_entity_relations(column)
            # Use drop cascade command
            drop_cascade_column(column.entity.name, column.name)

    # Ensure column is added to the table
    if alchemy_column.table is None:
        alchemy_column._set_parent(table)
    # add different type of index for columns with index
    if column.index:
        _bind_metadata(metadata)
        inspector = reflection.Inspector.from_engine(metadata.bind)
        indexes_list = inspector.get_indexes(column.entity.name)
        indexes = [i['name'] for i in indexes_list if not i['unique']]
        # get_indexes does not return gist indexes, so try/except is needed.
        try:
            if idx_name not in indexes:

                if column.TYPE_INFO == 'GEOMETRY':
                    idx = Index(idx_name,
                                alchemy_column,
                                postgresql_using='gist')
                    idx.create()

                else:
                    idx = Index(idx_name,
                                alchemy_column,
                                postgresql_using='btree')
                    idx.create()
        except Exception:
            pass

    return alchemy_column
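The gist/btree branch above relies on SQLAlchemy's postgresql_using keyword to choose the index access method. A minimal standalone sketch with illustrative table and column names, assuming a PostGIS-enabled database for the geometry column:

# Minimal sketch: GiST index for a geometry column, btree for a scalar column.
from sqlalchemy import MetaData, Table, Index, create_engine

engine = create_engine("postgresql://user:pass@localhost/stdm")  # placeholder URL
meta = MetaData(bind=engine)
parcels = Table("parcels", meta, autoload=True)  # illustrative table

Index("idx_parcels_geom", parcels.c.geom, postgresql_using="gist").create(engine)
Index("idx_parcels_name", parcels.c.name, postgresql_using="btree").create(engine)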
def upgrade(migrate_engine):
    metadata.bind = migrate_engine
    print(__doc__)
    metadata.reflect()

    Visualiation_table = Table("visualization", metadata, autoload=True)
    # Create visualization_user_share_association table.
    try:
        VisualizationUserShareAssociation_table.create()
    except Exception:
        log.exception(
            "Creating visualization_user_share_association table failed.")

    # Add columns & create indices for supporting sharing to visualization table.
    deleted_column = Column("deleted", Boolean, default=False, index=True)
    importable_column = Column("importable",
                               Boolean,
                               default=False,
                               index=True)
    slug_column = Column("slug", TEXT)
    published_column = Column("published", Boolean, index=True)

    try:
        # Add column.
        deleted_column.create(Visualiation_table,
                              index_name="ix_visualization_deleted")
        assert deleted_column is Visualiation_table.c.deleted

        # Fill column with default value.
        cmd = "UPDATE visualization SET deleted = %s" % engine_false(
            migrate_engine)
        migrate_engine.execute(cmd)
    except Exception:
        log.exception("Adding deleted column to visualization table failed.")

    try:
        # Add column.
        importable_column.create(Visualiation_table,
                                 index_name='ix_visualization_importable')
        assert importable_column is Visualiation_table.c.importable

        # Fill column with default value.
        cmd = "UPDATE visualization SET importable = %s" % engine_false(
            migrate_engine)
        migrate_engine.execute(cmd)
    except Exception:
        log.exception(
            "Adding importable column to visualization table failed.")

    try:
        slug_column.create(Visualiation_table)
        assert slug_column is Visualiation_table.c.slug
    except Exception:
        log.exception("Adding slug column to visualization table failed.")

    try:
        if migrate_engine.name == 'mysql':
            # Have to create index manually.
            cmd = "CREATE INDEX ix_visualization_slug ON visualization ( slug ( 100 ) )"
            migrate_engine.execute(cmd)
        else:
            i = Index("ix_visualization_slug", Visualiation_table.c.slug)
            i.create()
    except Exception:
        log.exception("Adding index 'ix_visualization_slug' failed.")

    try:
        # Add column.
        published_column.create(Visualiation_table,
                                index_name='ix_visualization_published')
        assert published_column is Visualiation_table.c.published

        # Fill column with default value.
        cmd = "UPDATE visualization SET published = %s" % engine_false(
            migrate_engine)
        migrate_engine.execute(cmd)
    except Exception:
        log.exception("Adding published column to visualization table failed.")

    # Create visualization_tag_association table.
    try:
        VisualizationTagAssociation_table.create()
    except Exception:
        log.exception("Creating visualization_tag_association table failed.")

    # Create visualization_annotation_association table.
    try:
        VisualizationAnnotationAssociation_table.create()
    except Exception:
        log.exception(
            "Creating visualization_annotation_association table failed.")

    # Need to create index for visualization annotation manually to deal with errors.
    try:
        if migrate_engine.name == 'mysql':
            # Have to create index manually.
            cmd = "CREATE INDEX ix_visualization_annotation_association_annotation ON visualization_annotation_association ( annotation ( 100 ) )"
            migrate_engine.execute(cmd)
        else:
            i = Index("ix_visualization_annotation_association_annotation",
                      VisualizationAnnotationAssociation_table.c.annotation)
            i.create()
    except Exception:
        log.exception(
            "Adding index 'ix_visualization_annotation_association_annotation' failed."
        )
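engine_false() is called above but not defined in the snippet; a plausible, hypothetical sketch of such a helper, returning a dialect-appropriate SQL literal for boolean false so the raw UPDATE statements run on PostgreSQL, MySQL, and SQLite alike:

# Hypothetical sketch of engine_false(); the real helper may differ.
def engine_false(migrate_engine):
    if migrate_engine.name in ('postgres', 'postgresql'):
        return 'FALSE'
    elif migrate_engine.name in ('mysql', 'sqlite'):
        return 0
    else:
        raise Exception('Unknown database type: %s' % migrate_engine.name)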
Example #37
def build_markov(cursor, cmdchar, ctrlchan, speaker=None, initial_run=False, debug=False):
    """ Builds a markov dictionary."""
    if initial_run:
        cursor.query(Babble_last).delete()
    lastrow = cursor.query(Babble_last).first()
    if not lastrow:
        lastrow = Babble_last(last=0)
        cursor.add(lastrow)
    if debug:
        t = time.time()
    curr, markov = generate_markov(cursor, cmdchar, ctrlchan, speaker, lastrow, initial_run)
    if debug:
        print('Generated markov in %f' % (time.time() - t))
        t = time.time()
    data, count_data = build_rows(cursor, markov, initial_run)
    if debug:
        print('Rows built in %f' % (time.time() - t))
    if initial_run:
        if debug:
            t = time.time()
        if cursor.bind.dialect.name == 'mysql':
            cursor.execute('DROP INDEX ix_babble_key ON babble')
        else:
            cursor.execute('DROP INDEX IF EXISTS ix_babble_key')
        cursor.execute(Babble.__table__.delete())
        cursor.execute(Babble_count.__table__.delete())
    if debug:
        t = time.time()
    if initial_run and cursor.bind.dialect.name == 'postgresql':
        # Crazy magic to insert a ton of data really fast, drops runtime in half on large datasets.
        raw_cursor = cursor.connection().connection.cursor()
        prev = 0
        for i in range(20000, len(data), 20000):
            args_str = '\n'.join([raw_cursor.mogrify("INSERT INTO babble (source,target,key,word,freq) VALUES(%s,%s,%s,%s,%s);", x).decode() for x in data[prev:i]])
            # Don't die on empty log table.
            if args_str:
                raw_cursor.execute(args_str)
            prev = i
        args_str = '\n'.join([raw_cursor.mogrify("INSERT INTO babble (source,target,key,word,freq) VALUES(%s,%s,%s,%s,%s);", x).decode() for x in data[prev:]])
        # Don't die on empty log table.
        if args_str:
            raw_cursor.execute(args_str)
    else:
        data = [{'source': x[0], 'target': x[1], 'key': x[2], 'word': x[3], 'freq': x[4]} for x in data]
        cursor.bulk_insert_mappings(Babble, data)
    cursor.bulk_insert_mappings(Babble_count, count_data)
    if debug:
        print('Inserted rows in %f' % (time.time() - t))
    if curr is not None:
        lastrow.last = curr
    if initial_run:
        if debug:
            t = time.time()
        key_index = Index('ix_babble_key', Babble.key)
        key_index.create(cursor.connection())
        if debug:
            print('Created index in %f' % (time.time() - t))
    if debug:
        t = time.time()
    cursor.commit()
    if debug:
        print('Committed in %f' % (time.time() - t))
Example #38
def _add_index(migrate_engine, table, index_name, idx_columns):
    index = Index(index_name, *[getattr(table.c, col) for col in idx_columns])
    index.create()
Example #39
"""
Run migrations to create models.
"""
import os
import config
import models
from sqlalchemy import create_engine, Index
from sqlalchemy.orm import sessionmaker

# file's directory
PWD = os.path.dirname(os.path.realpath(__file__))

# create the engine
engine = create_engine('sqlite:///{}/{}'.format(PWD, config.sqlite_file),
                       echo=True)

# migrate the db tables
models.Base.metadata.create_all(engine)

# add index for external_id
external_id_index = Index('external_id_index', models.Email.external_id)
external_id_index.create(bind=engine)
Example #40
def create_one_to_many_relationship_if_not_exists(parenttable, childtable):
    if parenttable.name.lower() + '_id' in childtable.columns:
        return
    col = Column(parenttable.name.lower() + '_id', Integer)  # , ForeignKey(parenttable.name + '.id'))
    col.create(childtable)
    ind = Index('ix_' + childtable.name.lower() + '_' + parenttable.name.lower() + '_id', col, unique=False)
    ind.create()
Example #41
def create_source_id_index(session):
    engine = session.get_bind()
    patient_source_id_idx = Index(Encounter.SRC_INDEX, Encounter.source_id)
    patient_source_id_idx.create(engine)
    session.commit()
Example #42
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    instances = Table('fixed_ips', meta, autoload=True)
    index = Index('address', instances.c.address)
    index.create(migrate_engine)