Example #1
def downgrade(migrate_engine):
    meta.bind = migrate_engine

    instance_table = Table('instances', meta, autoload=True)

    instance_table.c.datastore_version_id.alter(nullable=True)

    update(
        table=instance_table,
        whereclause="datastore_version_id='%s'" % LEGACY_VERSION_ID,
        values=dict(datastore_version_id=None)
    ).execute()

    datastores_table = Table('datastores',
                             meta,
                             autoload=True)
    datastore_versions_table = Table('datastore_versions',
                                     meta,
                                     autoload=True)

    delete(
        table=datastore_versions_table,
        whereclause="id='%s'" % LEGACY_VERSION_ID
    ).execute()
    delete(
        table=datastores_table,
        whereclause="id='%s'" % LEGACY_DATASTORE_ID
    ).execute()
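For contrast with the legacy migrate-style call above, a minimal sketch of the same downgrade against the generative expression API, using bound parameters instead of interpolating LEGACY_VERSION_ID into the WHERE string (assumes the same module-level meta and LEGACY_* constants):

def downgrade(migrate_engine):
    meta.bind = migrate_engine
    instance_table = Table('instances', meta, autoload=True)
    datastore_versions_table = Table('datastore_versions', meta, autoload=True)
    datastores_table = Table('datastores', meta, autoload=True)

    instance_table.c.datastore_version_id.alter(nullable=True)

    with migrate_engine.begin() as conn:
        # bound parameters instead of string interpolation
        conn.execute(
            update(instance_table)
            .where(instance_table.c.datastore_version_id == LEGACY_VERSION_ID)
            .values(datastore_version_id=None))
        conn.execute(
            delete(datastore_versions_table)
            .where(datastore_versions_table.c.id == LEGACY_VERSION_ID))
        conn.execute(
            delete(datastores_table)
            .where(datastores_table.c.id == LEGACY_DATASTORE_ID))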
Example #2
def rulesCallback(ip, port, rules):
	serverid = getServerId(ip, port)

	for rule in rules:
		if rule[0] == 'tickrate':
			db.execute(
				update(tbl_server_history)
				.where(
					and_(
						tbl_server_history.c.id==serverid,
						tbl_server_history.c.date==tm,
					)
				)
				.values(
					tickrate=rule[1]
				)
			)
		elif rule[0] == 'ent_count':
			db.execute(
				update(tbl_server_history)
				.where(
					and_(
						tbl_server_history.c.id==serverid,
						tbl_server_history.c.date==tm,
					)
				)
				.values(
					ent_count=rule[1].replace(',','')
				)
			)
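The two branches above differ only in the column being written; a consolidated sketch under the same assumptions (db, tm, tbl_server_history, and and_ as in the example) builds the .values() mapping from a small dispatch table:

def rulesCallback(ip, port, rules):
    serverid = getServerId(ip, port)
    # map each rule name to (column name, value transform)
    handlers = {
        'tickrate': ('tickrate', lambda v: v),
        'ent_count': ('ent_count', lambda v: v.replace(',', '')),
    }
    for rule in rules:
        if rule[0] in handlers:
            column, transform = handlers[rule[0]]
            db.execute(
                update(tbl_server_history)
                .where(and_(tbl_server_history.c.id == serverid,
                            tbl_server_history.c.date == tm))
                .values(**{column: transform(rule[1])}))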
Example #3
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    instance_table = Table('instances', meta, autoload=True)

    if has_instances_wo_datastore_version(instance_table):
        instances = find_all_instances_wo_datastore_version(instance_table)
        image_id = find_image("mysql")

        datastores_table = Table('datastores',
                                 meta,
                                 autoload=True)
        datastore_versions_table = Table('datastore_versions',
                                         meta,
                                         autoload=True)

        version_id = create_legacy_version(datastores_table,
                                           datastore_versions_table,
                                           image_id)
        for instance in instances:
            update(
                table=instance_table,
                whereclause="id='%s'" % instance.id,
                values=dict(datastore_version_id=version_id)
            ).execute()

    instance_table.c.datastore_version_id.alter(nullable=False)
Example #4
    def release(self):
        with self._session() as session:
            query = update(QueuedURL).values({QueuedURL.status: Status.todo.value})\
                .where(QueuedURL.status == Status.in_progress.value)
            session.execute(query)
            query = update(QueuedFile).values({QueuedFile.status: Status.todo.value})\
                .where(QueuedFile.status == Status.in_progress.value)
            session.execute(query)
Example #5
def _update_nodes(nodemap, instances, migrations):
    """For each migration and matching instance record, update the node columns
    if the referenced host is single-node.

    Skip updates for multi-node hosts.  In that case, there's no way to
    determine which node on a host the record should be associated with.
    """
    q = select([migrations.c.id, migrations.c.source_compute,
               migrations.c.dest_compute, instances.c.uuid, instances.c.host,
               instances.c.node],

               whereclause=and_(migrations.c.source_compute != None,
                                migrations.c.dest_compute != None,
                                instances.c.deleted == False,
                                migrations.c.status != 'reverted',
                                migrations.c.status != 'error'),

               from_obj=migrations.join(instances,
                   migrations.c.instance_uuid == instances.c.uuid)
    )

    result = q.execute()
    for migration_id, src, dest, uuid, instance_host, instance_node in result:

        values = {}

        nodes = nodemap.get(src, [])

        if len(nodes) == 1:
            # the source host is a single-node, safe to update node
            node = nodes[0]
            values['source_node'] = node

            if src == instance_host and node != instance_node:
                # execute the statement so the instance record is updated
                update(instances).where(instances.c.uuid == uuid).\
                        values(node=node).execute()

        nodes = nodemap.get(dest, [])
        if len(nodes) == 1:
            # the dest host is a single-node, safe to update node
            node = nodes[0]
            values['dest_node'] = node

            if dest == instance_host and node != instance_node:
                # execute the statement so the instance record is updated
                update(instances).where(instances.c.uuid == uuid).\
                        values(node=node).execute()

        if values:
            q = update(migrations,
                   values=values,
                   whereclause=migrations.c.id == migration_id)
            q.execute()
Example #6
    def GenCode(self, kwargs):
        getCurrCode = select([CodeListing]).where(CodeListing.name_tbl == kwargs["table"])

        result = self.conn.execute(getCurrCode).fetchone()
        currentCode = 1
        if result is not None:
            currentCode = int(result.curCode) + 1
            self.conn.execute(update(CodeListing)
                              .where(CodeListing.code == int(result.code))
                              .values(curCode=currentCode))
        else:
            if "column" not in kwargs:
                kwargs["column"] = "code"

            description = ""
            headCode = 0
            code = CodeGen().getCodeLstingCode()


            if "description" in kwargs:
                description = kwargs["description"]

            elif "headCode" in kwargs:
                headCode=int(kwargs["headCode"])



            self.conn.execute(insert(CodeListing).values(curCode=currentCode, name_tbl=kwargs["table"], name_col=kwargs["column"],
                                                    description=description, headCode=headCode, code=code))

        return currentCode
Example #7
    def populate_files(self):
        with self._session() as session:
            datetime_ago = datetime.datetime.utcnow() - datetime.timedelta(days=3)
            query = session.query(IAItem.id).filter(
                or_(
                    IAItem.refresh_date.is_(None),
                    IAItem.public_date > datetime_ago
                ))

            for row in query:
                identifier = row[0]

                _logger.info('Populating item %s.', identifier)
                files = yield self._api.get_item_files(identifier)

                query = insert(File).prefix_with('OR IGNORE')
                values = []

                for filename, size in files:
                    values.append({
                        'ia_item_id': identifier,
                        'filename': filename,
                        'size': size,
                    })

                session.execute(query, values)

                query = update(IAItem).where(IAItem.id == identifier)
                session.execute(
                    query,
                    {'refresh_date': datetime.datetime.utcnow()}
                )

                session.commit()
Example #8
File: sqlfs.py  Project: RaHus/sqlfs
    def write(self, path, buf, offset):
        print("*** write", path, buf, offset)
        if getDepth(path) >= 3:
            pe = getParts(path)
        else:
            return -errno.ENOSYS

        if getDepth(path) == 4 and (pe[-2] == "data"):
            table = Table(pe[-3], self.meta, schema=pe[-4], autoload=True)
            values = buf.split(",")
            a = pe[-1].split("#_")
            # u = table.update().\
            #                where(table.c[str(a[0])]==bindparam('old'+str(a[0]))).\
            #                values(table.c[str(a[0])]=bindparam('new'+str(a[0])))
            dv = dict(zip(table.columns.keys(), values))
            dbg()
            ret = self.engine.execute(update(table, values=dv, whereclause=table.c[str(a[0])] == a[1]))
        elif getDepth(path) == 3 and (pe[-1] == "definition"):
            pass  # table = Table(pe[-2], self.meta, schema=pe[-3], autoload=True)
            # a = str(CreateTable(table).compile(self.engine))
        return len(buf)
Example #9
    def populate_daily_stats(self):
        with self._session() as session:
            query = delete(DailyStat)
            session.execute(query)

            query = session.query(IAItem.id, IAItem.public_date)

            for ia_item_id, public_date in query:
                date = public_date.date()
                total_size = 0

                rows = session.query(File.size)\
                    .filter_by(ia_item_id=ia_item_id)\
                    .filter(File.job_id.isnot(None))

                for size, in rows:
                    total_size += size

                session.execute(
                    insert(DailyStat).prefix_with('OR IGNORE'),
                    {'date': date}
                )

                query = update(DailyStat)\
                    .values({'size': DailyStat.size + total_size})\
                    .where(DailyStat.date == date)
                session.execute(query)
Example #10
def infoTimeout(info):
	print("Server timed out!", info)
	serverKey = "%s:%s" % info
	if serverKey in allTimeouts:
		del allTimeouts[serverKey]

	if serverKey not in numRetries or numRetries[serverKey] > 0:
		numRetries[serverKey] -= 1
		serverlistCallback(info)
	else:
		print("Server retry limit reached:", info)
		serverid = getServerId(info[0], info[1])
		db.execute(insert(tbl_server_history).prefix_with('IGNORE'),
			id=serverid,
			date=tm,
			status=0,
		)

		db.execute(
			update(tbl_servers)
			.where(tbl_servers.c.id == serverid)
			.values(
				last_sample=tm
			)
		)
Example #11
def update_experiment(start, end, session):
    ExperimentMeasurement = get_ExperimentMeasurement()
    _sample_id = ExperimentMeasurement.sample_id
    # Python's `and` and chained comparisons do not translate to SQL;
    # use and_() and Column.between() so the whole condition reaches
    # the database.
    session.execute(
        update(ExperimentMeasurement).where(
            and_(_sample_id == Sample.id,
                 _sample_id.between(start, end))).values(
                     experiment_id=Sample.experiment))
Example #12
    def check_in(self, url, new_status, increment_try_count=True,
                 url_result=None):
        with self._session() as session:
            values = {
                QueuedURL.status: new_status.value
            }

            if url_result:
                values.update(url_result.database_items())

            if increment_try_count:
                values[QueuedURL.try_count] = QueuedURL.try_count + 1

            # TODO: rewrite as a join for clarity
            subquery = select([URLString.id]).where(URLString.url == url)\
                .limit(1)
            query = update(QueuedURL).values(values)\
                .where(QueuedURL.url_string_id == subquery)

            session.execute(query)

            if new_status == Status.done and url_result and url_result.filename:
                query = insert(QueuedFile).prefix_with('OR IGNORE').values({
                    'queued_url_id': subquery
                })
                session.execute(query)
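The TODO in this snippet asks for a join in place of the scalar subquery. A hedged sketch of that rewrite, for backends that support UPDATE ... FROM (e.g. PostgreSQL, MySQL), where SQLAlchemy renders a multi-table update once the WHERE clause references the second table:

            query = update(QueuedURL).values(values)\
                .where(QueuedURL.url_string_id == URLString.id)\
                .where(URLString.url == url)
            session.execute(query)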
Example #13
    def write(self, path, buf, offset):
        print('*** write', path, buf, offset)

        if not self.layout(path):
            return -errno.ENOSYS  # error: not found

        if getDepth(path) >= 2:
            pe = getParts(path)
        else:
            return -errno.ENOSYS

        if getDepth(path) == 4 and (pe[-2] == 'data'):
            table = Table(pe[-3], self.meta, schema=pe[-4], autoload=True)
            if isinstance(buf, int):
                print("buff is int")
                return buf

            values = str(buf).split(',')
            a = pe[-1].split('#_')
            dv = dict(zip(table.columns.keys(), values))
            # try:
            ret = self.engine.execute(update(table, values=dv, whereclause=table.c[str(a[0])] == a[1]))
            # except IntegrityError:
            #     ret = self.engine.execute(insert(table, values=dv))
        elif getDepth(path) == 3 and (pe[-1] == 'define'):
            pass  # table = Table(pe[-2], self.meta, schema=pe[-3], autoload=True)
            # a = str(CreateTable(table).compile(self.engine))
        return len(str(buf))
Example #14
    def raw_update(self, where, row):
        assert 'writer' == self.role
        row['updated_ts'] = int(time.time())
        stmt = update(self.table). \
            where(where). \
            values(row)
        return self.execute(stmt)
Example #15
    def equip(self, item: Item) -> bool:
        result = session.execute(
            update(db.Equipment).where(
                db.Equipment.user_id == self.user.id).values(
                    right_hand=item.item_id))

        # execute() returns a result object, not a bool; report whether
        # any row was actually updated
        return result.rowcount > 0
Example #16
    def check_in(self,
                 url,
                 new_status,
                 increment_try_count=True,
                 url_result=None):
        with self._session() as session:
            values = {QueuedURL.status: new_status.value}

            if url_result:
                values.update(url_result.database_items())

            if increment_try_count:
                values[QueuedURL.try_count] = QueuedURL.try_count + 1

            # TODO: rewrite as a join for clarity
            subquery = select([URLString.id]).where(URLString.url == url)\
                .limit(1)
            query = update(QueuedURL).values(values)\
                .where(QueuedURL.url_string_id == subquery)

            session.execute(query)

            if new_status == Status.done and url_result and url_result.filename:
                query = insert(QueuedFile).prefix_with('OR IGNORE').values(
                    {'queued_url_id': subquery})
                session.execute(query)
Example #17
    def forcibly_preserve_special_notes_for_device(cls, req: CamcopsRequest,
                                                   device_id: int) -> None:
        """
        Force-preserve all special notes for a given device.

        WRITES TO DATABASE.

        For update methods, see also:
        http://docs.sqlalchemy.org/en/latest/orm/persistence_techniques.html
        """
        dbsession = req.dbsession
        new_era = req.now_era_format

        # METHOD 1: use the ORM, object by object
        #
        # noinspection PyProtectedMember
        # notes = dbsession.query(cls)\
        #     .filter(cls._device_id == device_id)\
        #     .filter(cls._era == ERA_NOW)\
        #     .all()
        # for note in notes:
        #     note._era = new_era

        # METHOD 2: use the Core, in bulk
        # You can use update(table)... or table.update()...;
        # http://docs.sqlalchemy.org/en/latest/core/dml.html#sqlalchemy.sql.expression.update  # noqa

        # noinspection PyUnresolvedReferences
        dbsession.execute(
            update(cls.__table__)
            .where(cls.device_id == device_id)
            .where(cls.era == ERA_NOW)
            .values(era=new_era)
        )
Example #18
def upgrade():
    session = sa.orm.Session(bind=op.get_bind())
    my_bd_l3outs = get_bd_l3out_values(session)
    for bd_l3out in my_bd_l3outs:
        bd_update = update(bridge_domains).values(
            {'limit_ip_learn_to_subnets': True}).where(
                bridge_domains.c.aim_id == bd_l3out['bd_aim_id'])
        session.execute(bd_update)
Example #19
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    modules = Table('modules', meta, autoload=True)
    is_nullable = migrate_engine.name == "sqlite"
    column = Column(COLUMN_NAME_1, Boolean(), nullable=is_nullable, default=0)
    modules.create_column(column)
    column = Column(COLUMN_NAME_2, Integer(), nullable=is_nullable, default=5)
    modules.create_column(column)
    column = Column(COLUMN_NAME_3, Boolean(), nullable=is_nullable, default=0)
    modules.create_column(column)
    modules.c.contents.alter(Text(length=4294967295))
    # mark all non-visible, auto-apply and all-tenant modules as is_admin
    update(table=modules,
           values=dict(is_admin=1),
           whereclause="visible=0 or auto_apply=1 or tenant_id is null"
           ).execute()
Example #20
def update_supplier(db: Session, id: int, supplier_update: schemas.SupplierUpdate):
    properties_to_update = {key: value for key, value in supplier_update.dict().items() if value is not None}
    update_statement = update(models.Supplier) \
                       .where(models.Supplier.SupplierID == id) \
                       .values(**properties_to_update)
    db.execute(update_statement)
    db.commit()
    return get_supplier(db, id)
Example #21
def update_player(pid, name, corp_id, runner_id):
    db = get_db()
    plr = metadata.tables["player"]
    with db.begin() as conn:
        conn.execute(
            update(plr).where(plr.c.id == pid).values(p_name=name,
                                                      corp_id=corp_id,
                                                      runner_id=runner_id))
Example #22
    def convert_check_in(self, file_id, status):
        with self._session() as session:
            values = {'status': status.value}

            query = update(QueuedFile).values(values) \
                .where(QueuedFile.id == file_id)

            session.execute(query)
Example #23
def cmd():
    """
    Updating a specific part of the JSON using an execute update command
    that creates an SQL command, partly bypassing the models.

    It needs more work - For the moment, I didn't manage to do the updates
    in this way.
    """
    # Importing the engine
    from sqla_json.sqla_management import engine
    from sqlalchemy.orm import sessionmaker
    from sqla_json.models.node import DbNode
    from sqlalchemy.orm.attributes import flag_modified
    from sqla_json import timezone
    # from sqlalchemy.dialects.postgresql import insert
    from sqlalchemy.sql.expression import update

    import json
    import time

    sec = timezone.now().second
    msec = timezone.now().microsecond

    # Creating the needed session
    Session = sessionmaker(bind=engine)
    session = Session()

    print "Finding all the nodes and updating them one by one"
    counter = 0
    start_time = time.time()

    # # The following works
    # session.execute(update(DbNode, values={DbNode.attributes: json.dumps(
    #         ['attr', msec, {'bar': ('baz', 'bulk_update_json_model_sql',
    #                                           sec, 2)}])})
    #                 )

    # And the following too
    def get_json():
        get_json.counter += 1
        return json.dumps(
            ['attr', msec + get_json.counter, {'bar': ('baz', 'bulk_update_json_model_sql',
                                                       sec + get_json.counter, 2)}])
    get_json.counter = 0
    # dbn.extras[2]['bar'][0] = "aaa"
    # session.execute(update(DbNode, values={DbNode.extras[2]['bar'][0]: "aaa"}))
    session.execute(update(DbNode, values={DbNode.extras[2]: "aaa"}))
    # DbNode.update().values(extras='b')
    # .values(DbNode.extras[2] = "aaa")


    session.commit()
    end_time = time.time()

    print("Updated the json fields (attributes and extras) "
          "of {} nodes.".format(counter))

    print "Elapsed time --- {} seconds --- ".format(end_time - start_time)
Example #24
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    ds_table = Table('datastores', meta, autoload=True)
    ds_version_table = Table('datastore_versions', meta, autoload=True)
    ds_version_table.create_column(
        Column('version', String(255), nullable=True))

    ds_versions = select(columns=[text("id"), text("name")],
                         from_obj=ds_version_table).execute()

    # Use the 'name' value as the initial 'version' value
    for version in ds_versions:
        update(table=ds_version_table,
               whereclause=text("id='%s'" % version.id),
               values=dict(version=version.name)).execute()

    # To change the unique constraint we need to drop the foreign key first
    # and add it back later
    constraint_names = db_utils.get_foreign_key_constraint_names(
        engine=migrate_engine,
        table='datastore_versions',
        columns=['datastore_id'],
        ref_table='datastores',
        ref_columns=['id'])
    db_utils.drop_foreign_key_constraints(
        constraint_names=constraint_names,
        columns=[ds_version_table.c.datastore_id],
        ref_columns=[ds_table.c.id])

    UniqueConstraint('datastore_id',
                     'name',
                     name='ds_versions',
                     table=ds_version_table).drop()
    UniqueConstraint('datastore_id',
                     'name',
                     'version',
                     name='ds_versions',
                     table=ds_version_table).create()

    db_utils.create_foreign_key_constraints(
        constraint_names=constraint_names,
        columns=[ds_version_table.c.datastore_id],
        ref_columns=[ds_table.c.id])
Example #25
    def clean_cookies(cls, session, username):
        """ clean password and cookie for username
        :param session: db session
        :param username: Cookie.username
        """

        session.execute(update(Cookie).
                        where(Cookie.username == username).
                        values(password='', cookie='', updated_at=text('NOW()')))
Example #26
def upgrade():
    session = sa.orm.Session(bind=op.get_bind())
    my_bd_l3outs = get_bd_l3out_values(session)
    for bd_l3out in my_bd_l3outs:
        bd_update = update(bridge_domains).values({
            'limit_ip_learn_to_subnets':
            True
        }).where(bridge_domains.c.aim_id == bd_l3out['bd_aim_id'])
        session.execute(bd_update)
Example #27
    def set_voice_category(self, xid: int, value: str) -> str:
        max_len = Channel.voice_category.property.columns[
            0].type.length  # type: ignore
        name = value[:max_len]
        query = (update(Channel).where(Channel.xid == xid).values(
            voice_category=name).execution_options(synchronize_session=False))
        DatabaseSession.execute(query)
        DatabaseSession.commit()
        return name
Example #28
    def populate_jobs(self):
        with self._session() as session:
            query = session.query(File.ia_item_id, File.filename, File.size)\
                .filter_by(job_id=None)

            for row in query:
                ia_item_id, filename, size = row

                filename_info = parse_filename(filename)

                if not filename_info:
                    continue

                job_ident = filename_info['ident'] or \
                    '{}{}'.format(filename_info['date'], filename_info['time'])

                query = insert(Job).prefix_with('OR IGNORE')
                value = {
                    'id': job_ident,
                    'domain': filename_info['domain'],
                }
                session.execute(query, [value])

                values = {}

                if filename_info['aborted']:
                    values['aborts'] = Job.aborts + 1

                if filename_info['extension'] == 'warc.gz':
                    values['warcs'] = Job.warcs + 1
                    values['size'] = Job.size + size
                elif filename_info['extension'] == 'json':
                    values['jsons'] = Job.jsons + 1

                if values:
                    query = update(Job).values(values)\
                        .where(Job.id == job_ident)
                    session.execute(query)

                query = update(File)\
                    .values({'job_id': job_ident})\
                    .where(File.ia_item_id == ia_item_id)\
                    .where(File.filename == filename)
                session.execute(query)
Example #29
    def convert_check_in(self, file_id, status):
        with self._session() as session:
            values = {
                'status': status.value
            }

            query = update(QueuedFile).values(values) \
                .where(QueuedFile.id == file_id)

            session.execute(query)
Example #30
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    instance_table = Table('instances', meta, autoload=True)
    datastore_versions_table = Table('datastore_versions',
                                     meta,
                                     autoload=True)

    if has_instances_wo_datastore_version(instance_table):
        instances = find_all_instances_wo_datastore_version(instance_table)
        image_id = find_image("mysql")

        datastores_table = Table('datastores',
                                 meta,
                                 autoload=True)

        version_id = create_legacy_version(datastores_table,
                                           datastore_versions_table,
                                           image_id)
        for instance in instances:
            update(
                table=instance_table,
                whereclause="id='%s'" % instance.id,
                values=dict(datastore_version_id=version_id)
            ).execute()

    constraint_names = db_utils.get_foreign_key_constraint_names(
        engine=migrate_engine,
        table='instances',
        columns=['datastore_version_id'],
        ref_table='datastore_versions',
        ref_columns=['id'])
    db_utils.drop_foreign_key_constraints(
        constraint_names=constraint_names,
        columns=[instance_table.c.datastore_version_id],
        ref_columns=[datastore_versions_table.c.id])

    instance_table.c.datastore_version_id.alter(nullable=False)

    db_utils.create_foreign_key_constraints(
        constraint_names=constraint_names,
        columns=[instance_table.c.datastore_version_id],
        ref_columns=[datastore_versions_table.c.id])
Example #31
    def __iter__(self):
        # First, set all the users in the SQL DB to inactive
        set_all_inactive = update(self.table)
        self.connection.execute(set_all_inactive, {'active': '0'})
        transaction.commit()

        for item in self.previous:
            try:
                # Then set the ones still contained in the LDAP to active
                u = update(self.table,
                           self.table.c.get('userid') == item.get('userid'))

                self.connection.execute(u, {'active': '1'})
                yield item

            except OperationalError as e:
                transaction.abort()
                self.logger.warn("SQL operational error: %s" % e)
            except:
Example #32
def get_load():

    header = {
        'Authorization': 'Basic ' + b64encode(config.api_credentials).decode()
    }

    # implicit string concatenation keeps stray indentation out of the URL
    url = ('https://www.mysportsfeeds.com/api/feed/pull/'
           'mlb/2017-regular/cumulative_player_stats.json?playerstats=AB,H,R,HR,ER')

    print('Getting data...', str(datetime.now()))
    resp = r.get(url=url, headers=header)
    print('Done.')

    data = json.loads(resp.text)['cumulativeplayerstats']

    players = data['playerstatsentry']

    engine = create_engine(config.db_uri)

    Base = declarative_base()

    class player_stats(Base):
        __tablename__ = 'player_stats'
        playerid = Column(Integer, primary_key=True)
        firstname = Column(String)
        lastname = Column(String)
        team = Column(String)
        homeruns = Column(Integer)
        last_updated = Column(DateTime)

    class entries(Base):
        __tablename__ = 'entries'
        name = Column(String, primary_key=True)
        playerid = Column(Integer, primary_key=True)

    session = sessionmaker()

    session.configure(bind=engine)

    s = session()

    print('Loading data...')
    now = str(datetime.now())
    for p in players:
        upd = update(table=player_stats)\
            .where(and_(player_stats.firstname == p['player']['FirstName'],
                        player_stats.lastname == p['player']['LastName'],
                        player_stats.team == p['team']['Name']))\
            .values(homeruns=p['stats']['Homeruns']['#text'], last_updated=now)
        s.execute(upd)

    s.commit()

    s.close()
    print('Done.')
Example #33
def switch_tournament_activity(tid):
    db = get_db()
    tourn = metadata.tables["tournament"]
    with db.begin() as conn:
        t = conn.execute(select(tourn).where(tourn.c.id == tid)).fetchone()
        setto = not t.active
        conn.execute(
            update(tourn).where(tourn.c.id == tid).values(active=setto))
Example #34
    def __iter__(self):
        # First, set all the users in the SQL DB to inactive
        set_all_inactive = update(self.table)
        self.connection.execute(set_all_inactive, {'active': '0'})
        transaction.commit()

        for item in self.previous:
            try:
                # Then set the ones still contained in the LDAP to active
                u = update(
                    self.table,
                    self.table.c.get('userid') == item.get('userid'))

                self.connection.execute(u, {'active': '1'})
                yield item

            except OperationalError as e:
                transaction.abort()
                self.logger.warn("SQL operational error: %s" % e)
            except:
Example #35
    def set_signature_for_document_id(self, document_id, systematic_node):
        '''
        Does what the method name says.
        '''
        if systematic_node is None:
            value = None
        else:
            value = "%s" % systematic_node.id
        query = update(self.doc_table).values(
            standort=value).where(self.doc_table.c.laufnr == document_id)
        self.connection.execute(query)
Example #36
    def set_motd(self, xid: int, message: Optional[str] = None) -> str:
        if message:
            max_len = Channel.motd.property.columns[
                0].type.length  # type: ignore
            motd = message[:max_len]
        else:
            motd = ""
        query = (update(Channel).where(Channel.xid == xid).values(
            motd=motd).execution_options(synchronize_session=False))
        DatabaseSession.execute(query)
        DatabaseSession.commit()
        return motd
Example #37
    async def _update_info(self, info: dict):
        self.log.info(f'updating info of symbol {info["name"]}')

        engine, session = await self._create_session()
        async with session() as session:
            await session.execute(
                update(StockInfo).where(
                    StockInfo.symbol == info['symbol']).values(info))

            await session.commit()

        await self._close(session, engine)
Example #38
def record_result(mid, corp_score, runner_score):
    db = get_db()
    table_match = metadata.tables["match"]
    with db.begin() as conn:
        match = conn.execute(
            select(table_match).where(table_match.c.id == mid)).fetchone()
        if get_player(match["corp_id"]).is_bye or get_player(
                match["runner_id"]).is_bye:
            return
        conn.execute(
            update(table_match).where(table_match.c.id == mid).values(
                corp_score=corp_score, runner_score=runner_score))
Example #39
    def _update_named_records(self, items, item_model, name_model, primary_key, primary_updates, name_updates):
        session = self.dbsession
        id_map = {}

        keyfn = itemgetter(primary_key)
        items.sort(key=keyfn)

        for key, group in groupby(items, keyfn):
            id_map[key] = list(group)

        source = set(id_map.keys())

        pk = getattr(item_model, primary_key)

        existing = set(x[0] for x in session.query(pk).all())

        to_update = existing & source

        if primary_updates:
            kw = {getattr(item_model, x): bindparam(x) for x in primary_updates}
            u = update(item_model.__table__).where(
                getattr(item_model, primary_key) == bindparam('pk')).values(kw)
            upd = (id_map[x][0] for x in to_update)
            cols = [primary_key] + primary_updates
            keyfn = itemgetter(*cols)
            cols = ['pk'] + primary_updates
            session.execute(u, [dict(list(zip(cols, keyfn(x)))) for x in upd])

        if name_model:
            kw = {getattr(name_model, x): bindparam(x) for x in name_updates}
            u = update(name_model.__table__).where(and_(
                getattr(name_model, primary_key) == bindparam('pk'),
                name_model.LangID == bindparam('langid'))).values(kw)

            upd = (y for x in to_update for y in id_map[x])
            cols = [primary_key, 'LangID'] + name_updates
            keyfn = itemgetter(*cols)
            cols = ['pk', 'langid'] + name_updates
            session.execute(u, [dict(list(zip(cols, keyfn(x)))) for x in upd])
Example #40
def upgrade():
    bind = op.get_bind()
    if isinstance(bind, MockEngineStrategy.MockConnection):
        log.warning("Using mock connection; skipping step")
        return
    # logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)  # for echo
    # bind._echo = True  # echo on, in a hacky way
    dbsession = orm.Session(bind=bind)

    # How to do this?
    # (1) One approach is to create a "cut-down" Patient class with just two
    # columns (pk_, uuid) and use the ORM. That works fine but is slightly
    # confusing in that we have two Patient classes across the code base.
    # (2) Or, we could use the main Patient class. However, a query on that,
    # like dbsession.query(Patient).filter(Patient.uuid.is_(None)), executes a
    # SELECT for all columns -- which may include columns that don't yet exist
    # in this database revision.
    # (3) Or, we could use SQLAlchemy Core, like this:

    # Some shorthand:
    # noinspection PyUnresolvedReferences
    patient_table = Patient.__table__  # type: Table
    # noinspection PyProtectedMember
    pk_col = patient_table.columns._pk  # type: Column
    uuid_col = patient_table.columns.uuid  # type: Column

    # SELECT patient._pk FROM patient WHERE patient.uuid IS NULL:
    pk_query = select([pk_col]).where(uuid_col.is_(None))
    rows = dbsession.execute(pk_query)
    pks_needing_uuid = [row[0] for row in rows]

    # Determine new UUIDs:
    update_values = [
        {"pk": _pk, "uuid": uuid.uuid4()}  # generates a random UUID
        for _pk in pks_needing_uuid
    ]

    if update_values:
        # UPDATE patient SET uuid=%(uuid)s WHERE patient._pk = %(pk)s:
        update_statement = (
            update(patient_table)
            .where(pk_col == bindparam("pk"))
            .values(uuid=bindparam("uuid"))
        )
        # ... with many parameter pairs:
        # https://docs.sqlalchemy.org/en/14/tutorial/data_update.html
        dbsession.execute(update_statement, update_values)

    # COMMIT:
    dbsession.commit()
Example #41
    def update_one(self, url, **kwargs):
        with self._session() as session:
            values = {}

            for key, value in kwargs.items():
                values[getattr(QueuedURL, key)] = value

            # TODO: rewrite as a join for clarity
            subquery = select([URLString.id]).where(URLString.url == url)\
                .limit(1)
            query = update(QueuedURL).values(values)\
                .where(QueuedURL.url_string_id == subquery)

            session.execute(query)
Example #42
    def update_one(self, url, **kwargs):
        with self._session() as session:
            values = {}

            for key, value in kwargs.items():
                values[getattr(URL, key)] = value

            # TODO: rewrite as a join for clarity
            subquery = select([URLString.id]).where(URLString.url == url)\
                .limit(1)
            query = update(URL).values(values)\
                .where(URL.url_str_id == subquery)

            session.execute(query)
Example #43
    def update(self, table, data, condition=None):

        if isinstance(table, str):
            table = self.get_table(table)

        if condition is None:
            condition = build_pkey_condition(table, data)

        elif isinstance(condition, dict):
            condition = build_condition_from_dict(table, condition)

        data = preprocess_table_data(table, data)
        stmt = update(table).where(condition).values(data[0])
        return self.execute(stmt)
Example #44
    def save(self, values, user, row=None):
        if not row:
            row = Slide()
            row.created = datetime.now()
            row.create_uid = user.id
        row.from_dict(values)
        row.updated = datetime.now()
        row.update_uid = user.id
        row.is_aktif = 1 if values.get('is_aktif') else 0
        row.disabled = 1 if values.get('disabled') else 0
        DBSession.add(row)
        DBSession.flush()
        if values.get('is_aktif'):
            # deactivate every other slide so only one stays active
            stmt = update(Slide).where(Slide.id != row.id).\
                   values(is_aktif=0)
            DBSession.execute(stmt)
            DBSession.flush()

        return row
Example #45
    def update(self, url, increment_try_count=False, **kwargs):
        assert isinstance(url, str)

        with self._session() as session:
            values = {}
            url_id_map = URLStrDBRecord.get_map(session, [url])
            url_str_id = url_id_map[url]

            for key, value in kwargs.items():
                values[getattr(URLDBRecord, key)] = value

            if increment_try_count:
                values[URLDBRecord.try_count] = URLDBRecord.try_count + 1

            query = update(URLDBRecord)\
                .values(values)\
                .where(URLDBRecord.url_str_id == url_str_id)

            session.execute(query)
Example #46
    def check_in(self, url, new_status, increment_try_count=True, **kwargs):
        with self._session() as session:
            values = {
                URL.status: new_status
            }

            for key, value in kwargs.items():
                values[getattr(URL, key)] = value

            if increment_try_count:
                values[URL.try_count] = URL.try_count + 1

            # TODO: rewrite as a join for clarity
            subquery = select([URLString.id]).where(URLString.url == url)\
                .limit(1)
            query = update(URL).values(values)\
                .where(URL.url_str_id == subquery)

            session.execute(query)
Example #47
def downgrade():
    connection = op.get_bind()
    for table_name in _table_names:
        # first update the adaptive_min_delta algo to adaptive algo
        table = sa.table(table_name,
            sa.Column('id', sa.Integer()),
            sa.Column('pairing_algorithm', EnumType(_NewPairingAlgorithm)))
        stmt = update(table).\
            where(table.c.pairing_algorithm == bindparam('from_algo')).\
            values(pairing_algorithm=bindparam('to_algo'))
        connection.execute(stmt, [{
            'from_algo': _NewPairingAlgorithm.adaptive_min_delta,
            'to_algo': _NewPairingAlgorithm.adaptive}])

        # then modify the enum type
        with op.batch_alter_table(table_name, naming_convention=convention) as batch_op:
            batch_op.alter_column('pairing_algorithm',
                type_=EnumType(_OldPairingAlgorithm),
                existing_type=EnumType(_NewPairingAlgorithm),
                existing_server_default=null(),
                existing_nullable=True)
Example #48
    def move_to(self, position):
        db.session.execute('SET CONSTRAINTS task_rank_key DEFERRED')

        query = (
            expression.update(Task)
            .where(Task.rank >= position)
            .where(Task.id != self.id)
            .values(rank=Task.rank + 1)
            .returning(
                Task.id,
                Task.created_at,
                Task.updated_at,
                Task.completed_at,
                Task.rank,
                Task.description,
            )
        )

        results = db.session.execute(query)

        self.rank = position
        db.session.add(self)
        return results
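Because of the returning() clause, the result handed back by execute() is iterable; a hypothetical usage sketch (the task instance, target rank, and commit policy are assumptions):

# move a task to rank 3, then inspect the rows that were pushed down
shifted = task.move_to(3)
for row in shifted:
    print(row.id, row.rank)
db.session.commit()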
Example #49
    def populate_json_files(self):
        with self._session() as session:
            query = session.query(File.ia_item_id, File.filename, File.job_id)\
                .filter(File.filename.endswith('.json'))\
                .filter(File.job_id.isnot(None))

            for identifier, filename, job_id in query:
                json_id = filename.replace('.json', '')

                if session.query(JSONMetadata.id).filter_by(id=json_id).scalar():
                    continue

                response = yield self._api.download_item_file(identifier, filename)

                try:
                    doc = json.loads(response.body.decode('utf-8', 'replace'))
                except ValueError:
                    _logger.exception('JSON error!')
                    continue

                url = doc.get('url')

                query = insert(JSONMetadata)
                values = {
                    'id': json_id,
                    'job_id': job_id,
                    'url': url,
                    'started_by': doc.get('started_by')
                }
                session.execute(query, [values])

                if job_id and url:
                    query = update(Job)\
                        .values({'url': url}).where(Job.id == job_id)
                    session.execute(query)

                session.commit()
Example #50
    def release(self):
        with self._session() as session:
            query = update(URL).values({URL.status: Status.todo})\
                .where(URL.status == Status.in_progress)
            session.execute(query)
Example #51
def update_instance_vips():
    update(table=instance_vip, whereclause="deleted = 0",
           values=dict(virtual_instance_id=instance_vip.c.instance_id)).execute()
Example #52
def infoCallback(serverinfo):
	serverKey = "%s:%s" % (serverinfo['server_ip'],serverinfo['query_port'])

	if serverKey in allTimeouts:
		allTimeouts[serverKey].cancel()
		del allTimeouts[serverKey]

	if serverKey in numRetries:
		del numRetries[serverKey]

	# print(serverinfo)

	serverid = getServerId(serverinfo['server_ip'],serverinfo['query_port'])


	serverversion = serverinfo['version']
	if serverversion == '1.0.0.0':
		gd = serverinfo['gamedata'].split('|')
		serverversion = gd[0]

	db.execute(insert(tbl_server_history).prefix_with('IGNORE'),
		id=serverid,
		date=tm,
		map=serverinfo['map'],
		numplayers=serverinfo['numplayers'],
		maxplayers=serverinfo['maxplayers'],
		numbots=serverinfo['numbots'],
		password=serverinfo['password'],
		tickrate=0,
		version=serverversion,
		status=1,
	)

	db.execute(
		update(tbl_servers)
		.where(tbl_servers.c.id == serverid)
		.values(
			last_sample=tm
		)
	)

	qc.startRulesQuery((serverinfo['server_ip'],serverinfo['query_port']))

	donameinsert = False
	res = db.execute(
			select([tbl_server_name_history],
				and_(
					tbl_server_name_history.c.id==serverid,
					tbl_server_name_history.c.date_end=='0000-00-00 00:00:00'
				)
			)
		)
	if res.rowcount == 0:
		donameinsert = True
	else:
		row = res.first()
		if row['server_name'].lower().strip() != serverinfo['server_name'].lower().strip():
			donameinsert = True
			db.execute(
				update(tbl_server_name_history)
				.where(
					and_(
						tbl_server_name_history.c.id==serverid,
						tbl_server_name_history.c.date_end=='0000-00-00 00:00:00'
					)
				)
				.values(
					date_end=datetime.datetime.now()
				)
			)

	if donameinsert:
		db.execute(insert(tbl_server_name_history),
			id=serverid,
			date_end='0000-00-00 00:00:00',
			date_start=datetime.datetime.now(),
			server_name=serverinfo['server_name'],
		)