def main():
    logging.basicConfig(filename='logs/process_pbp.log', level=logging.DEBUG,
                        format='%(asctime)s %(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S %p')
    config = json.loads(open('config.json').read())

    # connect to database
    username = config['username']
    password = config['password']
    host = config['host']
    database = config['database']
    engine = create_engine('mysql://' + username + ':' + password + '@' + host + '/' + database)
    conn = engine.connect()

    # get game_ids to process
    games_to_process = []
    missing_games_query = select([distinct(schema.pbp.c.GAME_ID)]).where(schema.pbp.c.HOME_PLAYER1 == None)
    for game in conn.execute(missing_games_query):
        games_to_process.append(game.GAME_ID)

    # add players on floor to database
    for game_id in games_to_process:
        try:
            pbp_query = select([schema.pbp]).where(schema.pbp.c.GAME_ID == game_id)
            results = conn.execute(pbp_query)
            pbp_data = DataFrame(results.fetchall())
            pbp_data.columns = results.keys()
            game_data = pbp.Lineups(pbp_data)
            pbp_with_lineups = game_data.get_players_on_floor_for_game()
            conn.execute(schema.pbp.insert(replace_string=""), pbp_with_lineups)
        except Exception:
            logging.error(utils.LogException())
def synonyms(self, tax_id=None, tax_name=None):
    if not bool(tax_id) ^ bool(tax_name):
        raise ValueError(
            'Exactly one of tax_id and tax_name may be provided.')

    names = self.names

    if tax_name:
        s1 = select([names.c.tax_id], names.c.tax_name == tax_name)
        res = s1.execute().fetchone()
        if res:
            tax_id = res[0]
        else:
            msg = '"{}" not found in names.tax_names'.format(tax_name)
            raise ValueError(msg)

    s = select([names.c.tax_name, names.c.is_primary],
               names.c.tax_id == tax_id)
    output = s.execute().fetchall()

    if not output:
        raise ValueError('"{}" not found in names.tax_id'.format(tax_id))

    return output
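A usage sketch for the XOR-validated lookup above; `taxonomy` is a hypothetical instance and the ids are illustrative, not taken from the original source:

    rows = taxonomy.synonyms(tax_name='Homo sapiens')   # [(tax_name, is_primary), ...]
    rows = taxonomy.synonyms(tax_id='9606')             # same rows, looked up by id
    # taxonomy.synonyms()                               # ValueError: exactly one required
    # taxonomy.synonyms(tax_id='9606', tax_name='man')  # ValueError: exactly one required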
def deleteModel(conn, metricId):
    """Delete the model by resetting model-specific attributes.

    This method will also make sure the data integrity is kept by removing any
    model-related data when necessary, either at the model level or at the
    server/instance level.

    :param conn: SQLAlchemy connection object
    :type conn: sqlalchemy.engine.Connection
    :param metricId: Metric uid
    """
    with conn.begin():
        # Save the server name before deleting the model; it is used later to
        # delete server annotations if necessary
        serverRows = conn.execute(select([schema.metric.c.server])
                                  .where(schema.metric.c.uid == metricId))
        server = serverRows.scalar()

        # Defer actual deletion to the htmengine deleteModel() implementation,
        # which will raise an ObjectNotFound exception appropriately,
        # terminating execution of *this* function without prejudice.
        htmengineDeleteModel(conn, metricId)

        # When deleting the server's last model, also delete all annotations
        # associated with the server
        delete = (schema.annotation
                  .delete()
                  .where((schema.annotation.c.server == server) &
                         ~schema.annotation.c.server.in_(
                             select([schema.metric.c.server])
                             .where(schema.metric.c.server == server)
                             .where(schema.metric.c.status != 0))))
        conn.execute(delete)
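A minimal call sketch for the function above; the connection setup mirrors the `repository.engineFactory()` pattern used in the metric-data test later in this section, and the uid value is hypothetical:

    with repository.engineFactory().connect() as conn:
        deleteModel(conn, metricId="2f1ab9a964cf44a9a12a6dcbe3f1c4c7")  # hypothetical uid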
def extractData(self):
    self.logger.info('Connecting to database')
    engine = create_engine(self.config['database'])
    meta = MetaData()
    meta.bind = engine
    self.job = Table('job', meta, autoload=True)
    self.job_status = Table('job_status', meta, autoload=True)
    self.job_status_type_description = Table('job_status_type_description',
                                             meta, autoload=True)
    job = self.job
    job_status = self.job_status

    s = self.job_status_type_description.select()
    self.type_db_values = s.execute().fetchall()
    self.queue_db_values = self.getJobSummary(job.c.queue)
    self.user_db_values = self.getJobSummary(job.c.userId, job.c.localUser)
    self.node_db_values = self.getJobSummary(job.c.workerNode)

    self.logger.info('Generating job list')
    node_job_status = self.config['node_job_status'].split(',')
    maximum = func.max(self.job_status.c.id).label('m')
    s1 = select([maximum]).group_by(job_status.c.jobId).alias('a')
    s2 = select([job.c.id, job.c.lrmsAbsLayerJobId, job.c.workerNode,
                 job_status.c.type, job_status.c.time_stamp]) \
        .select_from(job.join(job_status).join(s1, job_status.c.id == text('m'))) \
        .where(and_(job_status.c.type.in_(node_job_status)))
    self.job_db_values = s2.execute().fetchall()
    return {}
def primary_from_name(self, tax_name):
    """
    Return tax_id and primary tax_name corresponding to tax_name.
    """
    names = self.names

    s1 = select([names.c.tax_id, names.c.is_primary],
                names.c.tax_name == tax_name)
    log.debug(str(s1))
    res = s1.execute().fetchone()
    if res:
        tax_id, is_primary = res
    else:
        msg = '"{}" not found in names.tax_names'.format(tax_name)
        raise ValueError(msg)

    if not is_primary:
        s2 = select([names.c.tax_name],
                    and_(names.c.tax_id == tax_id,
                         names.c.is_primary == 1))
        tax_name = s2.execute().fetchone()[0]

    return tax_id, tax_name, bool(is_primary)
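A usage sketch, again with a hypothetical `taxonomy` instance not defined in the snippet:

    tax_id, primary_name, was_primary = taxonomy.primary_from_name('E. coli')
    # If 'E. coli' is only a synonym, primary_name holds the primary tax_name
    # for the same tax_id and was_primary is False.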
def test_sql_save_load_extra_fields(self):
    """Test if `SqlStore` reads and writes extra columns."""
    # extend the db
    self.conn.execute('alter table `%s` add column extra varchar(256)'
                      % self.store.table_name)
    # re-build store, as the table list is read upon `__init__`
    self.store = self._make_store(extra_fields={
        sqlalchemy.Column('extra', sqlalchemy.VARCHAR(length=128)): GET.foo.value,
    })
    # if this query does not error out, the column is defined
    q = sql.select([sqlfunc.count(self.store.t_store.c.extra)]).distinct()
    results = self.conn.execute(q)
    rows = results.fetchall()
    assert_equal(len(rows), 1)
    # create and save an object
    obj = SimplePersistableObject('an object')
    obj.foo = SimplePersistableObject('an attribute')
    id_ = self.store.save(obj)
    # check that the value has been saved
    q = sql.select([self.store.t_store.c.extra]).where(self.store.t_store.c.id == id_)
    # Oops, apparently the store.save call will close our
    # connection too.
    self.conn = self.store._engine.connect()
    results = self.conn.execute(q)
    rows = results.fetchall()
    assert_equal(len(rows), 1)
    assert_equal(rows[0][0], obj.foo.value)
def test_sql_create_extra_fields(self):
    """Test if `SqlStore` creates extra columns."""
    # extend the db (assigning the store, as the sibling test above does)
    self.store = self._make_store(
        self.db_url,
        extra_fields={sqlalchemy.Column('extra', sqlalchemy.VARCHAR(length=128)):
                      (lambda arg: arg.foo.value)})
    # if this query does not error out, the column is defined
    q = sql.select([sqlfunc.count(self.store.t_store.c.extra)]).distinct()
    results = self.conn.execute(q)
    # self.c.execute("select distinct count(extra) from %s" % self.store.table_name)
    rows = results.fetchall()
    assert_equal(len(rows), 1)
    # create and save an object
    obj = SimplePersistableObject('an object')
    obj.foo = SimplePersistableObject('an attribute')
    id_ = self.store.save(obj)
    # check that the value has been saved
    q = sql.select([self.store.t_store.c.extra]).where(self.store.t_store.c.id == id_)
    # self.c.execute("select extra from %s where id=%d"
    #                % (self.store.table_name, id_))
    results = self.conn.execute(q)
    rows = results.fetchall()
    assert_equal(len(rows), 1)
    assert_equal(rows[0][0], obj.foo.value)
def test_recursive(self):
    parts = table('parts',
                  column('part'),
                  column('sub_part'),
                  column('quantity'),
                  )

    included_parts = select([
        parts.c.sub_part,
        parts.c.part,
        parts.c.quantity]).\
        where(parts.c.part == 'our part').\
        cte(recursive=True)

    incl_alias = included_parts.alias()
    parts_alias = parts.alias()
    included_parts = included_parts.union(
        select([
            parts_alias.c.part,
            parts_alias.c.sub_part,
            parts_alias.c.quantity]).
        where(parts_alias.c.part == incl_alias.c.sub_part)
    )

    s = select([
        included_parts.c.sub_part,
        func.sum(included_parts.c.quantity).label('total_quantity')]).\
        select_from(included_parts.join(
            parts, included_parts.c.part == parts.c.part)).\
        group_by(included_parts.c.sub_part)

    self.assert_compile(s,
                        "WITH RECURSIVE anon_1(sub_part, part, quantity) "
                        "AS (SELECT parts.sub_part AS sub_part, parts.part "
                        "AS part, parts.quantity AS quantity FROM parts "
                        "WHERE parts.part = :part_1 UNION SELECT parts_1.part "
                        "AS part, parts_1.sub_part AS sub_part, parts_1.quantity "
                        "AS quantity FROM parts AS parts_1, anon_1 AS anon_2 "
                        "WHERE parts_1.part = anon_2.sub_part) "
                        "SELECT anon_1.sub_part, "
                        "sum(anon_1.quantity) AS total_quantity FROM anon_1 "
                        "JOIN parts ON anon_1.part = parts.part "
                        "GROUP BY anon_1.sub_part"
                        )

    # quick check that the "WITH RECURSIVE" varies per dialect
    self.assert_compile(s,
                        "WITH anon_1(sub_part, part, quantity) "
                        "AS (SELECT parts.sub_part AS sub_part, parts.part "
                        "AS part, parts.quantity AS quantity FROM parts "
                        "WHERE parts.part = :part_1 UNION SELECT parts_1.part "
                        "AS part, parts_1.sub_part AS sub_part, parts_1.quantity "
                        "AS quantity FROM parts AS parts_1, anon_1 AS anon_2 "
                        "WHERE parts_1.part = anon_2.sub_part) "
                        "SELECT anon_1.sub_part, "
                        "sum(anon_1.quantity) AS total_quantity FROM anon_1 "
                        "JOIN parts ON anon_1.part = parts.part "
                        "GROUP BY anon_1.sub_part",
                        dialect=mssql.dialect()
                        )
def create_type(sess, fqname):
    """Add a class or type."""
    t = tables.Type.__table__
    query = sql.select([t.c.id], t.c.fqname == fqname, t)
    result = sess.execute(query).fetchall()
    if result:
        return result[0][0]

    name = fqname.split('.')[-1]
    typ = tables.Type(name=name, fqname=fqname)
    with sess.begin():
        sess.add(typ)

    query = sql.select([t.c.id], t.c.fqname == fqname, t)
    result = sess.execute(query).fetchone()[0]
    return result
def test_recursive_union_no_alias_two(self):
    """
    pg's example:

        WITH RECURSIVE t(n) AS (
            VALUES (1)
          UNION ALL
            SELECT n+1 FROM t WHERE n < 100
        )
        SELECT sum(n) FROM t;
    """
    # I know, this is the PG VALUES keyword,
    # we're cheating here.  also yes we need the SELECT,
    # sorry PG.
    t = select([func.values(1).label("n")]).cte("t", recursive=True)
    t = t.union_all(select([t.c.n + 1]).where(t.c.n < 100))
    s = select([func.sum(t.c.n)])
    self.assert_compile(
        s,
        "WITH RECURSIVE t(n) AS "
        "(SELECT values(:values_1) AS n "
        "UNION ALL SELECT t.n + :n_1 AS anon_1 "
        "FROM t "
        "WHERE t.n < :n_2) "
        "SELECT sum(t.n) AS sum_1 FROM t"
    )
def test_all_aliases(self):
    orders = table('order', column('order'))
    s = select([orders.c.order]).cte("regional_sales")

    r1 = s.alias()
    r2 = s.alias()

    s2 = select([r1, r2]).where(r1.c.order > r2.c.order)

    self.assert_compile(
        s2,
        'WITH regional_sales AS (SELECT "order"."order" '
        'AS "order" FROM "order") '
        'SELECT anon_1."order", anon_2."order" '
        'FROM regional_sales AS anon_1, '
        'regional_sales AS anon_2 WHERE anon_1."order" > anon_2."order"'
    )

    s3 = select([orders]).select_from(orders.join(r1, r1.c.order == orders.c.order))

    self.assert_compile(
        s3,
        'WITH regional_sales AS '
        '(SELECT "order"."order" AS "order" '
        'FROM "order")'
        ' SELECT "order"."order" '
        'FROM "order" JOIN regional_sales AS anon_1 ON anon_1."order" = "order"."order"'
    )
def author(mit_id, conn):
    """
    Returns an author object for insertion into mongo summary collection.

    The format is as follows:

        {"_id": {"name": <name>, "mitid": <mitid>},
         "type": "author",
         "size": <num docs>,
         "downloads": <num downloads>,
         "countries": [{"country": <3 ltr code>, "downloads": <num downloads>},...],
         "dates": [{"date": <YYYY-MM-DD>, "downloads": <num>},...]}
    """
    requests_to_authors = requests.join(documents)\
                                  .join(documents_authors)\
                                  .join(authors)

    totals = select([
        authors.c.mit_id,
        authors.c.name,
        select([func.count()])
            .select_from(documents_authors.join(authors))
            .where(authors.c.mit_id == bindparam('mit_id'))
            .label('size'),
        select([func.count()])
            .select_from(requests_to_authors)
            .where(authors.c.mit_id == bindparam('mit_id'))
            .label('downloads')
        ])\
        .where(authors.c.mit_id == bindparam('mit_id'))

    countries = select([requests.c.country, func.count().label('downloads')])\
        .select_from(requests_to_authors)\
        .where(authors.c.mit_id == bindparam('mit_id'))\
        .group_by(requests.c.country)

    dates = select([
        func.date_trunc('day', requests.c.datetime).label('date'),
        func.count().label('downloads')])\
        .select_from(requests_to_authors)\
        .where(authors.c.mit_id == bindparam('mit_id'))\
        .group_by(func.date_trunc('day', requests.c.datetime))

    author_obj = {'type': 'author'}
    res = conn.execute(totals, mit_id=mit_id).first()
    author_obj['_id'] = {'name': res['name'], 'mitid': res['mit_id']}
    author_obj['size'] = res['size']
    author_obj['downloads'] = res['downloads']

    res = conn.execute(countries, mit_id=mit_id)
    for row in res:
        author_obj.setdefault('countries', [])\
            .append({'country': row['country'], 'downloads': row['downloads']})

    res = conn.execute(dates, mit_id=mit_id)
    for row in res:
        author_obj.setdefault('dates', [])\
            .append({'date': row['date'].strftime('%Y-%m-%d'),
                     'downloads': row['downloads']})

    return author_obj
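A usage sketch for `author()`; the `engine` and the pymongo `summary` collection are assumptions, not names defined in the snippet:

    with engine.connect() as conn:
        doc = author('900047894', conn)  # hypothetical MIT id
        # upsert into the mongo summary collection described in the docstring
        summary.replace_one({'_id': doc['_id']}, doc, upsert=True)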
def grant_permission_for_group(permission_name, group_name):
    execute(
        groups_permissions.insert().values(
            group_id=select([groups.c.group_id])
                .where(groups.c.group_name == group_name),
            permission_id=select([permissions.c.permission_id])
                .where(permissions.c.permission_name == permission_name)
        )
    )
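The insert above embeds two scalar subqueries, so no Python round-trip is needed to resolve the ids; it compiles to roughly the following SQL (a sketch, not output captured from the original source):

    # INSERT INTO groups_permissions (group_id, permission_id)
    # VALUES ((SELECT groups.group_id FROM groups
    #           WHERE groups.group_name = :group_name_1),
    #         (SELECT permissions.permission_id FROM permissions
    #           WHERE permissions.permission_name = :permission_name_1))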
def check_user_bought_product(self, user_uuid, product_uuid):
    """
    Returns a falsy value if the user has not bought the product with the
    given uuid, else returns the bought product's id.

    Keyword Arguments:
    user_uuid -- unique user's uuid,
    product_uuid -- unique product's uuid
    """
    sel = select([users.c.user_id]).where(users.c.user_uuid == user_uuid)
    user = self.conn.execute(sel).fetchone()
    if not user:
        return None
    sel = select([bought_products.c.bought_id]) \
        .select_from(products.join(bought_products)) \
        .where(and_(products.c.product_uuid == product_uuid,
                    bought_products.c.user_id == user[0]))
    return self.conn.execute(sel).scalar()
def create_bought_product(self, qty, user_uuid, product_uuid):
    """
    Create a new bought item given product_uuid and user_uuid; if the product
    or user with the given uuid doesn't exist, returns None.
    Returns the primary key that has been inserted.

    Keyword Arguments:
    qty -- amount of items bought (int),
    user_uuid -- unique user uuid (str),
    product_uuid -- unique product uuid (str)
    """
    user_id = self.conn.execute(select([users.c.user_id])
                                .where(users.c.user_uuid == user_uuid)).scalar()
    product_id = self.conn.execute(select([products.c.product_id])
                                   .where(products.c.product_uuid == product_uuid)).scalar()
    if product_id and user_id:
        ins = bought_products.insert() \
            .values(quantity=qty, user_id=user_id, product_id=product_id)
        trans = self.conn.begin()
        try:
            res = self.conn.execute(ins)
            trans.commit()
            return res.inserted_primary_key[0]
        except Exception:
            trans.rollback()
            # log before re-raising; in the original this line sat after the
            # raise and could never run, and sys.exc_info was missing its ()
            logging.error(sys.exc_info()[0])
            raise
    return None
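A usage sketch combining the two methods above; `store` is a hypothetical instance of the class they belong to, and the uuids are placeholders:

    bought_id = store.create_bought_product(qty=2,
                                            user_uuid='user-uuid-here',
                                            product_uuid='product-uuid-here')
    if bought_id and store.check_user_bought_product('user-uuid-here',
                                                     'product-uuid-here'):
        print('purchase recorded')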
def test_restart_simple_run(self):
    _do_run_simple_test_run(self, self.temp_dir, self.config, end_year=1983)
    runs_manager = RunManager(self.config)
    config = _get_run_config(temp_dir=self.temp_dir)
    runs_manager.update_environment_variables(run_resources=config)

    run_activity = runs_manager.services_db.get_table("run_activity")
    s = select([func.max(run_activity.c.run_id)])
    run_id = runs_manager.services_db.execute(s).fetchone()[0]

    s = select([run_activity.c.status],
               whereclause=run_activity.c.run_id == run_id)
    status = runs_manager.services_db.execute(s).fetchone()[0]
    expected = "done"
    self.assertEqual(status, expected)

    runs_manager.restart_run(run_id, restart_year=1981,
                             project_name="eugene_gridcell",
                             skip_urbansim=False)
    s = select([run_activity.c.status],
               whereclause=run_activity.c.run_id == run_id)
    status = runs_manager.services_db.execute(s).fetchone()[0]
    expected = "done"
    self.assertEqual(status, expected)

    # Restarting without running urbansim should not re-run that year.
    # TODO: test that no models are run this time.
    runs_manager.restart_run(run_id, restart_year=1982,
                             project_name="eugene_gridcell",
                             skip_urbansim=True)
    s = select([run_activity.c.status],
               whereclause=run_activity.c.run_id == run_id)
    status = runs_manager.services_db.execute(s).fetchone()[0]
    expected = "done"
    self.assertEqual(status, expected)

    self.cleanup_test_run()
def testMetricDataTimeStampQueryParams(self, uid):
    """
    This test makes MetricDataHandler GET calls with from and to params:
    _models/<uid>/data?from=<>&to=<>
    """
    with repository.engineFactory().connect() as conn:
        firstMetricData = conn.execute(
            sql.select([schema.metric_data])
            .where(schema.metric_data.c.uid == uid)
            .order_by(sql.expression.asc(schema.metric_data.c.timestamp))
            .limit(1)).fetchall()
        lastMetricData = conn.execute(
            sql.select([schema.metric_data])
            .where(schema.metric_data.c.uid == uid)
            .order_by(sql.expression.desc(schema.metric_data.c.timestamp))
            .limit(1)).fetchall()
    firstTimeStamp = firstMetricData[0].timestamp
    lastTimeStamp = lastMetricData[0].timestamp
    response = self.app.get("/%s/data?from=%s&to=%s"
                            % (uid, firstTimeStamp, lastTimeStamp),
                            headers=self.headers)
    assertions.assertSuccess(self, response)
    getAllModelsResult = utils.jsonDecode(response.body)
    for metricData in getAllModelsResult['data']:
        self.assertGreaterEqual(
            datetime.strptime(metricData[0], '%Y-%m-%d %H:%M:%S'),
            firstTimeStamp)
        self.assertLessEqual(
            datetime.strptime(metricData[0], '%Y-%m-%d %H:%M:%S'),
            lastTimeStamp)
def test_positional_binds_2(self):
    orders = table('orders',
                   column('order'),
                   )
    s = select([orders.c.order, literal("x")]).cte("regional_sales")
    s = select([s.c.order, literal("y")])
    dialect = default.DefaultDialect()
    dialect.positional = True
    dialect.paramstyle = 'numeric'

    s1 = select([orders.c.order]).where(orders.c.order == 'x').\
        cte("regional_sales_1")

    s1a = s1.alias()

    s2 = select([orders.c.order == 'y', s1a.c.order,
                 orders.c.order, s1.c.order]).\
        where(orders.c.order == 'z').\
        cte("regional_sales_2")

    s3 = select([s2])
    self.assert_compile(
        s3,
        'WITH regional_sales_1 AS (SELECT orders."order" AS "order" '
        'FROM orders WHERE orders."order" = :1), regional_sales_2 AS '
        '(SELECT orders."order" = :2 AS anon_1, '
        'anon_2."order" AS "order", '
        'orders."order" AS "order", '
        'regional_sales_1."order" AS "order" FROM orders, '
        'regional_sales_1 '
        'AS anon_2, regional_sales_1 '
        'WHERE orders."order" = :3) SELECT regional_sales_2.anon_1, '
        'regional_sales_2."order" FROM regional_sales_2',
        checkpositional=('x', 'y', 'z'),
        dialect=dialect)
def test_suffixes(self):
    orders = table('order', column('order'))
    s = select([orders.c.order]).cte("regional_sales")
    s = s.suffix_with("pg suffix", dialect='postgresql')
    s = s.suffix_with('oracle suffix', dialect='oracle')
    stmt = select([orders]).where(orders.c.order > s.c.order)

    self.assert_compile(
        stmt,
        'WITH regional_sales AS (SELECT "order"."order" AS "order" '
        'FROM "order") SELECT "order"."order" FROM "order", '
        'regional_sales WHERE "order"."order" > regional_sales."order"'
    )

    self.assert_compile(
        stmt,
        'WITH regional_sales AS (SELECT "order"."order" AS "order" '
        'FROM "order") oracle suffix '
        'SELECT "order"."order" FROM "order", '
        'regional_sales WHERE "order"."order" > regional_sales."order"',
        dialect='oracle'
    )

    self.assert_compile(
        stmt,
        'WITH regional_sales AS (SELECT "order"."order" AS "order" '
        'FROM "order") pg suffix SELECT "order"."order" FROM "order", '
        'regional_sales WHERE "order"."order" > regional_sales."order"',
        dialect='postgresql'
    )
def upgrade():
    connection = op.get_bind()
    tasks_rows = connection.execute(
        select([
            tasks_table.c.id,
            tasks_table.c.title,
            tasks_table.c.description
        ]))
    tasks_by_id = {}
    for tasks_row in tasks_rows:
        tasks_by_id[tasks_row.id] = tasks_row

    task_group_tasks_rows = connection.execute(
        select([
            task_group_tasks_table.c.id,
            task_group_tasks_table.c.task_id
        ]))
    for task_group_tasks_row in task_group_tasks_rows:
        task_id = task_group_tasks_row.task_id
        if task_id:
            tasks_row = tasks_by_id[task_id]
            op.execute(
                task_group_tasks_table.update()
                .values(
                    title=tasks_row.title,
                    description=tasks_row.description
                )
                .where(
                    # match on the task_group_tasks row's own id; the original
                    # compared against task_id, which conflates the two keys
                    task_group_tasks_table.c.id == task_group_tasks_row.id
                ))
def get_users(user_id=None):
    if not user_id:
        page = int(request.args.get('page', 1))
        per_page = int(request.args.get('per_page', 10))
        stmt = select([users.c.id, users.c.username,
                       users.c.created_at, users.c.updated_at]) \
            .limit(per_page).offset((page - 1) * per_page)
        total_stmt = select([users.c.id])
        conn = engine.connect()
        result = conn.execute(stmt)
        total_result = conn.execute(total_stmt)
        # fetch while the connection is still open
        rows = result.fetchall()
        total = total_result.rowcount
        conn.close()
    else:
        stmt = select([users.c.id, users.c.username,
                       users.c.created_at, users.c.updated_at]) \
            .where(users.c.id == int(user_id))
        conn = engine.connect()
        result = conn.execute(stmt)
        rows = result.fetchall()
        total = result.rowcount
        conn.close()

    result_resp = {
        'total': total,
        'data': [dict(r) for r in rows]
    }
    resp = jsonify(result_resp)
    resp.status_code = 200
    return resp
def get_school_info(connection, school_id=None):
    if school_id is None:
        stmt = select([school])
        return connection.execute(stmt)
    else:
        stmt = select([school]).where(school.c.id == school_id)
        return connection.execute(stmt)
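A usage sketch, assuming the module-level `engine` that snippets like this usually pair with (not defined in the snippet itself); the id is a placeholder:

    with engine.connect() as connection:
        for row in get_school_info(connection):          # all schools
            print(row)
        one = get_school_info(connection, school_id=42)  # a single school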
def _get_trace_id(self, trace):
    # Get the hash.
    trace_hash = self.get_hash(trace)

    # Check the database.
    stmt = select([traces_table.c.id]).where(
        traces_table.c.trace_hash == trace_hash
    )
    row = self._execute(stmt).first()
    if row:
        return row.id

    # Attempt to insert a new trace.
    compressed = zlib.compress(str(trace))
    stmt = traces_table.insert().from_select([
        traces_table.c.trace_hash,
        traces_table.c.data,
    ], select([
        literal(trace_hash),
        literal(compressed, type_=Binary),
    ]).where(
        ~exists([traces_table.c.id]).where(
            traces_table.c.trace_hash == trace_hash
        )
    ))
    self._execute(stmt)
    return self._get_trace_id(trace)
def get_tx_addresses(tx=None):
    in_addresses = []
    out_addresses = []
    if tx['removed'] == True:
        in_addresses = ALL_VOUT.query.with_entities(
            ALL_VOUT.address, ALL_VOUT.value, ALL_VOUT.txin_tx_id,
            ALL_VOUT.txout_tx_hash).filter(
            ALL_VOUT.txin_tx_id == int(tx['id'])).order_by(ALL_VOUT.in_idx).all()
        out_addresses = ALL_VOUT.query.with_entities(
            ALL_VOUT.address, ALL_VOUT.value, ALL_VOUT.txin_tx_id,
            ALL_VOUT.txin_tx_hash).filter(
            ALL_VOUT.txout_tx_id == int(tx['id'])).order_by(ALL_VOUT.out_idx).all()
        return in_addresses, out_addresses

    s1 = select([STXO.address, STXO.value, STXO.txin_tx_id,
                 STXO.txout_tx_hash, STXO.in_idx]).where(STXO.txin_tx_id == int(tx['id']))
    s2 = select([VTXO.address, VTXO.value, VTXO.txin_tx_id,
                 VTXO.txout_tx_hash, VTXO.in_idx]).where(VTXO.txin_tx_id == int(tx['id']))
    q = s1.union(s2).alias('in_addresses')
    in_addresses = db_session.query(q).order_by('in_idx').all()

    s1 = select([STXO.address, STXO.value, STXO.txin_tx_id,
                 STXO.txout_tx_hash, STXO.out_idx]).where(STXO.txout_tx_id == tx['id'])
    s2 = select([VTXO.address, VTXO.value, VTXO.txin_tx_id,
                 VTXO.txout_tx_hash, VTXO.out_idx]).where(VTXO.txout_tx_id == tx['id'])
    q = s1.union(s2).alias('out_addresses')
    out_addresses = db_session.query(q).order_by('out_idx').all()

    return in_addresses, out_addresses
def _get_memory_id(self, mem):
    # Check the local cache.
    mem_hash = self.get_hash(mem)
    if mem_hash in self.memories:
        return self.memories[mem_hash]

    # Attempt to insert a new memory.
    # This is the expected case.
    stmt = memories_table.insert().from_select([
        memories_table.c.name_hash,
        memories_table.c.name,
    ], select([
        literal(mem_hash),
        literal(mem),
    ]).where(
        ~exists([memories_table.c.id]).where(
            memories_table.c.name_hash == mem_hash
        )
    ))
    self._execute(stmt)

    # Check the database.
    stmt = select([memories_table.c.id]).where(
        memories_table.c.name_hash == mem_hash
    )
    row = self._execute(stmt).first()
    ident = row.id
    self.memories[mem_hash] = ident
    return ident
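Both `_get_trace_id` and `_get_memory_id` use the same race-tolerant idiom: `INSERT ... FROM SELECT ... WHERE NOT EXISTS`, so the row is inserted only if no row with that hash exists yet, and concurrent workers cannot create duplicates. The generated SQL looks roughly like this (an illustration, not output captured from the original code):

    # INSERT INTO memories (name_hash, name)
    # SELECT :param_1, :param_2
    # WHERE NOT EXISTS (SELECT memories.id FROM memories
    #                    WHERE memories.name_hash = :name_hash_1)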
def start(conf):
    # connect to db
    db.engine = engine = engine_from_config(dict(conf.items('sqlalchemy')), prefix='')
    db.metadata.bind = engine
    conn = engine.connect()
    Session = sessionmaker(bind=engine)
    session = Session()

    profiles = []
    topics = []
    for user in session.query(User):
        for profile in user.profiles:
            if profile.origin == 5:
                profiles.append(profile.profile_id)
        for topic in user.topics:
            if topic.profile_id in profiles:
                topics.append(topic.topic_id)

    for topic_id in topics:
        print "checking", topic_id
        s = select([func.count(db.t_message.c.message_id)],
                   and_(db.t_message.c.origin == 5,
                        db.t_message.c.topic_id == topic_id))
        (count,) = conn.execute(s).fetchone()
        if count > 1000:
            (m_id,) = conn.execute(
                select([db.t_message.c.message_id],
                       db.t_message.c.topic_id == topic_id)
                .order_by(db.t_message.c.message_id.desc())
                .offset(1000).limit(1)).fetchone()
            print "purging", topic_id, count, m_id
            conn.execute(db.t_message.delete().where(
                and_(db.t_message.c.message_id < m_id,
                     db.t_message.c.topic_id == topic_id)))
async def create_conversion_batch(entity_name, entity_id, format, user_id):
    entity_name = entity_name.upper()
    if entity_name == 'AUTHOR':
        author = model.Author.__table__
        q = select([case([(author.c.first_name == None, author.c.last_name)],
                         else_=author.c.first_name + ' ' + author.c.last_name)]) \
            .where(author.c.id == entity_id)
    elif entity_name == 'SERIES':
        series = model.Series.__table__
        q = select([series.c.title]).where(series.c.id == entity_id)
    elif entity_name == 'BOOKSHELF':
        shelf = model.Bookshelf.__table__
        q = select([shelf.c.name]).where(shelf.c.id == entity_id)
    else:
        raise ValueError('Invalid entity name')

    format_id = await get_format_id(format)
    async with engine.acquire() as conn:
        batch = model.ConversionBatch.__table__
        res = await conn.execute(q)
        name = await res.scalar()
        name = "Books for %s %s" % (entity_name.lower(), name)
        res = await conn.execute(batch.insert()
                                 .values(name=name,
                                         for_entity=entity_name,
                                         entity_id=entity_id,
                                         format_id=format_id,
                                         created_by_id=user_id,
                                         modified_by_id=user_id,
                                         version_id=1)
                                 .returning(batch.c.id))
        return await res.scalar()
def tags(metadata):
    conn = metadata.bind.connect()
    session = DBSession()
    taggings = metadata.tables["taggings"]
    tags = metadata.tables["tags"]

    tag_result = conn.execute(select([tags]))
    for row in tag_result:
        tag = Tag(id=row["id"], name=row["name"])
        session.add(tag)
    session.flush()

    result = conn.execute(select([taggings]))
    for row in result:
        # get type
        tag_type = row["taggable_type"]
        if tag_type == "Group":
            Model = Group
        elif tag_type == "Person":
            Model = Profile
        elif tag_type == "Company":
            Model = Company
        else:
            # unknown taggable type; skip it rather than reuse the
            # Model from the previous iteration
            continue
        # get tag and taggable object by id
        tag = Tag.query.get(row["tag_id"])
        obj = Model.query.get(row["taggable_id"])
        if obj:
            obj.tags.append(tag)
    session.flush()
def create_mapper(tag_tbl, tag_domain_tbl, tag_predicate_tbl, tag_value_tbl,
                  tagging_tbl):
    "Mapper factory."
    m = mapper(Tag, tag_tbl,
               id_attribute='tag_id',
               slug_expression=lambda cls: as_slug_expression(
                   func.concatenate(cls.domain, ':', cls.predicate,
                                    '=', cls.value)),
               extension=TagMapperExtension(tag_domain_tbl,
                                            tag_predicate_tbl,
                                            tag_value_tbl),
               properties=dict(
                   tagged=relationship(Tagged,
                                       secondary=tagging_tbl,
                                       back_populates='tags'),
                   domain=column_property(
                       select([tag_domain_tbl.c.domain])
                       .where(tag_tbl.c.tag_domain_id ==
                              tag_domain_tbl.c.tag_domain_id)
                   ),
                   predicate=column_property(
                       select([tag_predicate_tbl.c.predicate])
                       .where(tag_tbl.c.tag_predicate_id ==
                              tag_predicate_tbl.c.tag_predicate_id)
                   ),
                   value=column_property(
                       select([tag_value_tbl.c.value])
                       .where(tag_tbl.c.tag_value_id ==
                              tag_value_tbl.c.tag_value_id)
                   ),
               )
               )
    return m
def test_delete_from_select(self):
    table_name = "__test_deletefromselect_table__"
    uuidstrs = []
    for unused in range(10):
        uuidstrs.append(uuid.uuid4().hex)
    conn = self.engine.connect()
    test_table = Table(table_name, self.meta,
                       Column('id', Integer, primary_key=True,
                              nullable=False, autoincrement=True),
                       Column('uuid', String(36), nullable=False))
    test_table.create()
    # Add 10 rows to table
    for uuidstr in uuidstrs:
        ins_stmt = test_table.insert().values(uuid=uuidstr)
        conn.execute(ins_stmt)

    # Delete 4 rows in one chunk
    column = test_table.c.id
    query_delete = sql.select([column], test_table.c.id < 5).order_by(column)
    delete_statement = utils.DeleteFromSelect(test_table, query_delete, column)
    result_delete = conn.execute(delete_statement)
    # Verify we delete 4 rows
    self.assertEqual(result_delete.rowcount, 4)

    query_all = sql.select([test_table]) \
        .where(test_table.c.uuid.in_(uuidstrs))
    rows = conn.execute(query_all).fetchall()
    # Verify we still have 6 rows in table
    self.assertEqual(len(rows), 6)
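`utils.DeleteFromSelect` exists because MySQL rejects a DELETE whose subquery reads from the table being deleted from. For contrast, the naive spelling of the same intent looks like this (a sketch, assuming the `test_table` defined in the test above; it works on backends that allow self-referential subqueries in DELETE):

    naive_delete = test_table.delete().where(
        test_table.c.id.in_(
            sql.select([test_table.c.id]).where(test_table.c.id < 5)))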
def get_role_permissions(role):
    connection = op.get_bind()
    role = connection.execute(
        select([roles_table.c.permissions_json])
        .where(roles_table.c.name == role)).fetchone()
    return json.loads(role.permissions_json)
def add_owners(engine, metadata):
    """
    Tag every object according to its owner:

      INSERT INTO tagged_object (tag_id, object_id, object_type)
      SELECT
        tag.id AS tag_id,
        slices.id AS object_id,
        'chart' AS object_type
      FROM slices
      JOIN tag
      ON tag.name = CONCAT('owner:', slices.created_by_fk)
      LEFT OUTER JOIN tagged_object
      ON tagged_object.tag_id = tag.id
        AND tagged_object.object_id = slices.id
        AND tagged_object.object_type = 'chart'
      WHERE tagged_object.tag_id IS NULL;

      SELECT
        tag.id AS tag_id,
        dashboards.id AS object_id,
        'dashboard' AS object_type
      FROM dashboards
      JOIN tag
      ON tag.name = CONCAT('owner:', dashboards.created_by_fk)
      LEFT OUTER JOIN tagged_object
      ON tagged_object.tag_id = tag.id
        AND tagged_object.object_id = dashboards.id
        AND tagged_object.object_type = 'dashboard'
      WHERE tagged_object.tag_id IS NULL;

      SELECT
        tag.id AS tag_id,
        saved_query.id AS object_id,
        'query' AS object_type
      FROM saved_query
      JOIN tag
      ON tag.name = CONCAT('owner:', saved_query.created_by_fk)
      LEFT OUTER JOIN tagged_object
      ON tagged_object.tag_id = tag.id
        AND tagged_object.object_id = saved_query.id
        AND tagged_object.object_type = 'query'
      WHERE tagged_object.tag_id IS NULL;
    """
    tag = metadata.tables["tag"]
    tagged_object = metadata.tables["tagged_object"]
    users = metadata.tables["ab_user"]
    slices = metadata.tables["slices"]
    dashboards = metadata.tables["dashboards"]
    saved_query = metadata.tables["saved_query"]
    columns = ["tag_id", "object_id", "object_type"]

    # create a custom tag for each user
    ids = select([users.c.id])
    insert = tag.insert()
    for (id_,) in engine.execute(ids):
        try:
            engine.execute(insert, name=f"owner:{id_}", type=TagTypes.owner)
        except IntegrityError:
            pass  # already exists

    charts = (
        select(
            [
                tag.c.id.label("tag_id"),
                slices.c.id.label("object_id"),
                literal(ObjectTypes.chart.name).label("object_type"),
            ]
        )
        .select_from(
            join(
                join(
                    slices,
                    tag,
                    tag.c.name == functions.concat("owner:", slices.c.created_by_fk),
                ),
                tagged_object,
                and_(
                    tagged_object.c.tag_id == tag.c.id,
                    tagged_object.c.object_id == slices.c.id,
                    tagged_object.c.object_type == "chart",
                ),
                isouter=True,
                full=False,
            )
        )
        .where(tagged_object.c.tag_id.is_(None))
    )
    query = tagged_object.insert().from_select(columns, charts)
    engine.execute(query)

    dashboards = (
        select(
            [
                tag.c.id.label("tag_id"),
                dashboards.c.id.label("object_id"),
                literal(ObjectTypes.dashboard.name).label("object_type"),
            ]
        )
        .select_from(
            join(
                join(
                    dashboards,
                    tag,
                    tag.c.name == functions.concat("owner:", dashboards.c.created_by_fk),
                ),
                tagged_object,
                and_(
                    tagged_object.c.tag_id == tag.c.id,
                    tagged_object.c.object_id == dashboards.c.id,
                    tagged_object.c.object_type == "dashboard",
                ),
                isouter=True,
                full=False,
            )
        )
        .where(tagged_object.c.tag_id.is_(None))
    )
    query = tagged_object.insert().from_select(columns, dashboards)
    engine.execute(query)

    saved_queries = (
        select(
            [
                tag.c.id.label("tag_id"),
                saved_query.c.id.label("object_id"),
                literal(ObjectTypes.query.name).label("object_type"),
            ]
        )
        .select_from(
            join(
                join(
                    saved_query,
                    tag,
                    tag.c.name == functions.concat("owner:", saved_query.c.created_by_fk),
                ),
                tagged_object,
                and_(
                    tagged_object.c.tag_id == tag.c.id,
                    tagged_object.c.object_id == saved_query.c.id,
                    tagged_object.c.object_type == "query",
                ),
                isouter=True,
                full=False,
            )
        )
        .where(tagged_object.c.tag_id.is_(None))
    )
    query = tagged_object.insert().from_select(columns, saved_queries)
    engine.execute(query)
def get_auditor_permissions():
    connection = op.get_bind()
    auditor_role = connection.execute(
        select([roles_table.c.id, roles_table.c.permissions_json])
        .where(roles_table.c.name == 'Auditor')).fetchone()
    return json.loads(auditor_role.permissions_json)
def visit_select(self, select, **kwargs):
    """Look for ``LIMIT`` and ``OFFSET`` in a select statement, and if so
    tries to wrap it in a subquery with ``rownum`` criterion.
    """

    if not getattr(select, '_oracle_visit', None):
        if not self.dialect.use_ansi:
            if self.stack and 'from' in self.stack[-1]:
                existingfroms = self.stack[-1]['from']
            else:
                existingfroms = None

            froms = select._get_display_froms(existingfroms)
            whereclause = self._get_nonansi_join_whereclause(froms)
            if whereclause is not None:
                select = select.where(whereclause)
                select._oracle_visit = True

        if select._limit is not None or select._offset is not None:
            # See http://www.oracle.com/technology/oramag/oracle/06-sep/o56asktom.html
            #
            # Generalized form of an Oracle pagination query:
            #   select ... from (
            #     select /*+ FIRST_ROWS(N) */ ...., rownum as ora_rn from (
            #         select distinct ... where ... order by ...
            #     ) where ROWNUM <= :limit+:offset
            #   ) where ora_rn > :offset
            # Outer select and "ROWNUM as ora_rn" can be dropped if limit=0

            # TODO: use annotations instead of clone + attr set ?
            select = select._generate()
            select._oracle_visit = True

            # Wrap the middle select and add the hint
            limitselect = sql.select([c for c in select.c])
            if select._limit and self.dialect.optimize_limits:
                limitselect = limitselect.prefix_with(
                    "/*+ FIRST_ROWS(%d) */" % select._limit)

            limitselect._oracle_visit = True
            limitselect._is_wrapper = True

            # If needed, add the limiting clause
            if select._limit is not None:
                max_row = select._limit
                if select._offset is not None:
                    max_row += select._offset
                if not self.dialect.use_binds_for_limits:
                    max_row = sql.literal_column("%d" % max_row)
                limitselect.append_whereclause(
                    sql.literal_column("ROWNUM") <= max_row)

            # If needed, add the ora_rn, and wrap again with offset.
            if select._offset is None:
                limitselect.for_update = select.for_update
                select = limitselect
            else:
                limitselect = limitselect.column(
                    sql.literal_column("ROWNUM").label("ora_rn"))
                limitselect._oracle_visit = True
                limitselect._is_wrapper = True

                offsetselect = sql.select(
                    [c for c in limitselect.c if c.key != 'ora_rn'])
                offsetselect._oracle_visit = True
                offsetselect._is_wrapper = True

                offset_value = select._offset
                if not self.dialect.use_binds_for_limits:
                    offset_value = sql.literal_column("%d" % offset_value)
                offsetselect.append_whereclause(
                    sql.literal_column("ora_rn") > offset_value)

                offsetselect.for_update = select.for_update
                select = offsetselect

    kwargs['iswrapper'] = getattr(select, '_is_wrapper', False)
    return compiler.SQLCompiler.visit_select(self, select, **kwargs)
from sqlalchemy import create_engine
from sqlalchemy import MetaData, Column, Table, PrimaryKeyConstraint
from sqlalchemy import String, DateTime, Float
from sqlalchemy.sql import select
import datetime

metadata = MetaData()
engine = create_engine('postgresql:///siki', echo=True)
conn = engine.connect()

pdata = Table('pdata', metadata,
              Column('id', String(20)),
              Column('easting', Float),
              Column('northing', Float),
              Column('elev', Float),
              Column('d', DateTime),
              PrimaryKeyConstraint('id', 'd'))
metadata.create_all(engine)

s = select([pdata])
result = conn.execute(s)
for row in result:
    print row
def query(self):
    # Fetch the base query for sample, and filter them on the database
    bs = block_size.c.block_size
    subquery = powa_getstatdata_sample("db", bindparam("server"))
    # Put the where clause inside the subquery
    subquery = subquery.where(column("datname") == bindparam("database"))
    query = subquery.alias()
    c = query.c

    cols = [c.srvid,
            to_epoch(c.ts),
            (sum(c.calls) / greatest(extract("epoch", c.mesure_interval),
                                     1)).label("calls"),
            (sum(c.runtime) / greatest(sum(c.calls),
                                       1.)).label("avg_runtime"),
            (sum(c.runtime) / greatest(extract("epoch", c.mesure_interval),
                                       1)).label("load"),
            total_read(c),
            total_hit(c)]

    if self.has_extension_version(self.path_args[0],
                                  'pg_stat_statements', '1.8'):
        cols.extend([
            (sum(c.plantime) / greatest(extract("epoch", c.mesure_interval),
                                        1)).label("planload"),
            (sum(c.wal_records) / greatest(extract("epoch",
                                                   c.mesure_interval),
                                           1)).label("wal_records"),
            (sum(c.wal_fpi) / greatest(extract("epoch", c.mesure_interval),
                                       1)).label("wal_fpi"),
            (sum(c.wal_bytes) / greatest(extract("epoch", c.mesure_interval),
                                         1)).label("wal_bytes")
        ])

    from_clause = query
    if self.has_extension(self.path_args[0], "pg_stat_kcache"):
        # Add system metrics from pg_stat_kcache,
        kcache_query = kcache_getstatdata_sample("db")
        kc = inner_cc(kcache_query)
        kcache_query = (kcache_query
                        .where((kc.srvid == bindparam("server")) &
                               (kc.datname == bindparam("database")))
                        .alias())
        kc = kcache_query.c

        def sum_per_sec(col):
            ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
            return (sum(col) / ts).label(col.name)

        total_sys_hit = (total_read(c) - sum(kc.reads) /
                         greatest(extract("epoch", c.mesure_interval), 1.)
                         ).label("total_sys_hit")
        total_disk_read = (sum(kc.reads) /
                           greatest(extract("epoch", c.mesure_interval), 1.)
                           ).label("total_disk_read")
        minflts = sum_per_sec(kc.minflts)
        majflts = sum_per_sec(kc.majflts)
        # nswaps = sum_per_sec(kc.nswaps)
        # msgsnds = sum_per_sec(kc.msgsnds)
        # msgrcvs = sum_per_sec(kc.msgrcvs)
        # nsignals = sum_per_sec(kc.nsignals)
        nvcsws = sum_per_sec(kc.nvcsws)
        nivcsws = sum_per_sec(kc.nivcsws)

        cols.extend([total_sys_hit, total_disk_read, minflts, majflts,
                     # nswaps, msgsnds, msgrcvs, nsignals,
                     nvcsws, nivcsws])
        from_clause = from_clause.join(kcache_query,
                                       kcache_query.c.ts == c.ts)

    return (select(cols)
            .select_from(from_clause)
            .where(c.calls != '0')
            .group_by(c.srvid, c.ts, bs, c.mesure_interval)
            .order_by(c.ts)
            .params(samples=100))
swift = dci_config.get_store('files')
engine = dci_config.get_engine(conf).connect()
_TABLE = models.FILES

# Calculate the total number of files to sync
file_list = os.walk(conf['FILES_UPLOAD_FOLDER'])

with tqdm.tqdm(total=sum(1 for _ in file_list)) as pbar:
    for dirname, dirnames, filenames in os.walk(conf['FILES_UPLOAD_FOLDER']):
        if not filenames:
            pbar.update(1)
            continue
        for filename in filenames:
            # Check if the file exists in the DB
            query = sql.select([_TABLE]).where(_TABLE.c.id == filename)
            result = engine.execute(query)

            # If not, do not sync; that's an orphan file
            if result.rowcount == 0:
                tqdm.tqdm.write("File %s not found, do not sync" % filename)
                continue

            # If the file exists, check if it is already present in swift
            # and then upload it to swift if needed
            if result.rowcount == 1:
                tqdm.tqdm.write("File %s found in DB" % filename)
                top_path = dirname[len(conf['FILES_UPLOAD_FOLDER']):]
                row = result.fetchone()
                swift_path = swift.build_file_path(row['team_id'],
                                                   row['job_id'],
                                                   filename)
'author': composite(Author,
    podcasts.c.author_name,
    podcasts.c.author_email,
    doc="""An instance of :class:`mediacore.model.authors.Author`.
           Although not actually a relation, it is implemented as if it were.
           This decision was made to make it easier to integrate with
           :class:`mediacore.model.auth.User` down the road."""),

'media': dynamic_loader(Media, backref='podcast', query_class=MediaQuery,
                        passive_deletes=True, doc="""\
    A query pre-filtered to media published under this podcast.
    Returns :class:`mediacore.model.media.MediaQuery`."""),

'media_count': column_property(
    sql.select(
        [sql.func.count(media.c.id)],
        media.c.podcast_id == podcasts.c.id,
    ).label('media_count'),
    deferred=True,
    doc="The total number of :class:`mediacore.model.media.Media` episodes."
),

'media_count_published': column_property(
    sql.select(
        [sql.func.count(media.c.id)],
        sql.and_(
            media.c.podcast_id == podcasts.c.id,
            media.c.reviewed == True,
            media.c.encoded == True,
            media.c.publishable == True,
            media.c.publish_on <= sql.func.current_timestamp(),
            sql.or_(
def validate_fields(config, request, fields, tables, active_user):
    """
    This function validates/normalizes form fields/command arguments.

    Arguments:

    config    - A configuration object.
    request   - A web request containing POST data.
    tables    - A list of table names.
    fields    - A list of structures in the following format:

                CLOUD_FIELDS = {
                    'auto_active_group': active_user.active_group, # or None.
                    'format': {
                        'cloud_name': 'lowerdash',
                        'group_name': 'lowerdash',
                        'cores_slider': 'ignore',
                        'csrfmiddlewaretoken': 'ignore',
                        'group': 'ignore',
                        'ram_slider': 'ignore',
                    },
                }

                Possible format strings are:

                ['opt1', 'opt2', ...] - A list of valid options.
                ('table', 'column')   - A list of valid options derived from the
                                        named table and column.
                boolean               - A value of True or False will be inserted
                                        into the output fields.
                dboolean              - Database boolean values are either 0 or 1;
                                        allow and convert true/false/yes/no.
                float                 - A floating point value.
                ignore                - Ignore missing mandatory fields or fields
                                        for undefined columns.
                integer               - An integer value.
                lowercase             - Make sure the input value is all lowercase
                                        (or error).
                lowerdash             - Make sure the input value is all lowercase,
                                        numerics, and dashes but can't start or
                                        end with a dash (or error).
                metadata              - Identifies a pair of fields (eg. "xxx" and
                                        "xxx_name") that contain a metadata string
                                        and a metadata filename. If the filename
                                        conforms to pre-defined patterns (eg. ends
                                        with ".yaml"), the string will be checked
                                        to conform with the associated file type.
                password              - A password value to be checked and hashed.
                password1             - A password value to be verified against
                                        password2, checked and hashed.
                password2             - A password value to be verified against
                                        password1, checked and hashed.
                reject                - Reject an otherwise valid field.
                uppercase             - Make sure the input value is all uppercase
                                        (or error).

    POSTed fields in the form "name.1", "name.2", etc. will be treated as array
    fields, returning the variable "name" as a list of strings.
    """
    from .view_utils import _validate_fields_pw_check
    from sqlalchemy import Table, MetaData
    from sqlalchemy.sql import select
    import cloudscheduler.lib.schema
    import re

    # Retrieve relevant (re: tables) schema.
    all_columns = []
    primary_key_columns = []
    Tables = {}
    Columns = {}
    for table_option in tables:
        table = table_option.split(',')
        try:
            Tables[table[0]] = Table(table[0], MetaData(bind=config.db_engine),
                                     autoload=True)
        except Exception:
            raise Exception('view_utils.validate_fields: "tables" parameter '
                            'contains an invalid table name "%s".' % table[0])

        if len(table) > 1 and table[1] == 'n':
            continue

        Columns[table[0]] = [[], []]
        for column in Tables[table[0]].c:
            if column.name not in all_columns:
                all_columns.append(column.name)
            if column.primary_key:
                Columns[table[0]][0].append(column.name)
                if column.name not in primary_key_columns:
                    primary_key_columns.append(column.name)
            else:
                Columns[table[0]][1].append(column.name)

    # Process fields parameter:
    Formats = {}
    Mandatory = []
    NotEmpty = []
    Options = {
        'accept_primary_keys_only': False,
        'auto_active_group': False,
        'auto_active_user': False,
        'unnamed_fields_are_bad': False,
    }
    for option_set in fields:
        for option in option_set:
            if option == 'format':
                for field in option_set[option]:
                    Formats[field] = option_set[option][field]
            elif option == 'mandatory':
                if isinstance(option_set[option], list):
                    Mandatory += option_set[option]
                else:
                    Mandatory.append(option_set[option])
            elif option == 'not_empty':
                if isinstance(option_set[option], list):
                    NotEmpty += option_set[option]
                else:
                    NotEmpty.append(option_set[option])
            else:
                Options[option] = option_set[option]

    # Process input fields.
    Fields = {}
    if request.method == 'POST':
        for field in sorted(request.POST):
            if Options['accept_primary_keys_only'] and \
                    field not in primary_key_columns:
                if not (field in Formats and Formats[field] == 'ignore'):
                    return 1, 'request contained superfluous parameter "%s".' % field, None, None, None

            if Options['unnamed_fields_are_bad'] and field not in Formats:
                return 1, 'request contained an unnamed/bad parameter "%s".' % field, None, None, None

            field_alias = field
            value = request.POST[field]
            if field in Formats:
                if isinstance(Formats[field], (list, tuple)):
                    if isinstance(Formats[field], tuple):
                        options = []
                        s = select([cloudscheduler.lib.schema.__dict__[Formats[field][0]]])
                        for row in config.db_connection.execute(s):
                            if Formats[field][1] in row and \
                                    (not row[Formats[field][1]] in options):
                                options.append(row[Formats[field][1]])
                    else:
                        options = Formats[field]

                    lower_value = value.lower()
                    value = None
                    for opt in options:
                        if lower_value == opt.lower():
                            value = opt
                            break

                    if not value:
                        return 1, 'value specified for "%s" must be one of the following options: %s.' % (
                            field, sorted(options)), None, None, None

                elif Formats[field] == 'dboolean':
                    lower_value = value.lower()
                    if lower_value == 'true' or lower_value == 'yes' or lower_value == '1':
                        value = 1
                    elif lower_value == 'false' or lower_value == 'no' or lower_value == '0':
                        value = 0
                    else:
                        return 1, 'boolean value specified for "%s" must be one of the following: true, false, yes, no, 1, or 0.' % field, None, None, None

                elif Formats[field] == 'float':
                    try:
                        float_value = float(value)
                    except ValueError:
                        return 1, 'value specified for "%s" must be a floating point value.' % field, None, None, None

                elif Formats[field] == 'integer':
                    try:
                        integer = int(value)
                    except ValueError:
                        return 1, 'value specified for "%s" must be an integer value.' % field, None, None, None

                elif Formats[field] == 'lowerdash':
                    if len(request.POST[field]) > 0 and \
                            re.match("^[a-z0-9\-]*$", request.POST[field]) and \
                            request.POST[field][0] != '-' and \
                            request.POST[field][-1] != '-':
                        value = request.POST[field]
                    else:
                        return 1, 'value specified for "%s" must be all lower case, numeric digits, and dashes but cannot start or end with dashes.' % field, None, None, None

                elif Formats[field] == 'lowercase':
                    value = request.POST[field].lower()
                    if request.POST[field] != value:
                        return 1, 'value specified for "%s" must be all lower case.' % field, None, None, None

                elif Formats[field] == 'mandatory':
                    if value.strip() == '':
                        return 1, 'value specified for "%s" must not be an empty string.' % field, None, None, None

                elif Formats[field] == 'metadata':
                    filename = '%s_name' % field
                    if filename in request.POST:
                        # Verify yaml files.
                        if (len(request.POST[filename]) > 4 and request.POST[filename][-4:] == '.yml') or \
                                (len(request.POST[filename]) > 5 and request.POST[filename][-5:] == '.yaml') or \
                                (len(request.POST[filename]) > 7 and request.POST[filename][-7:] == '.yml.j2') or \
                                (len(request.POST[filename]) > 8 and request.POST[filename][-8:] == '.yaml.j2'):
                            import yaml
                            try:
                                temp_data = yaml.load(value)
                            except yaml.scanner.ScannerError as ex:
                                return 1, 'yaml value specified for "%s (%s)" is invalid - scanner error - %s' % (
                                    field, filename, ex), None, None, None
                            except yaml.parser.ParserError as ex:
                                return 1, 'yaml value specified for "%s (%s)" is invalid - parser error - %s' % (
                                    field, filename, ex), None, None, None

                elif Formats[field] == 'password':
                    rc, value = _validate_fields_pw_check(request.POST[field])
                    if rc != 0:
                        return 1, value, None, None, None

                elif Formats[field] == 'password1':
                    field_alias = '%s2' % field[:-1]
                    pw2 = request.POST.get(field_alias)
                    if not pw2:
                        if not request.POST[field]:
                            continue
                        return 1, 'password update received a password but no verify password; both are required.', None, None, None
                    rc, value = _validate_fields_pw_check(request.POST[field], pw2=pw2)
                    if rc != 0:
                        return 1, value, None, None, None
                    field_alias = field[:-1]

                elif Formats[field] == 'password2':
                    field_alias = '%s1' % field[:-1]
                    if not request.POST.get(field_alias):
                        if not request.POST[field]:
                            continue
                        return 1, 'password update received a verify password but no password; both are required.', None, None, None
                    continue

                elif Formats[field] == 'reject':
                    return 1, 'request contained a rejected/bad parameter "%s".' % field, None, None, None

                elif Formats[field] == 'uppercase':
                    value = request.POST[field].upper()
                    if request.POST[field] != value:
                        return 1, 'value specified for "%s" must be all upper case.' % field, None, None, None

            if field_alias in all_columns:
                Fields[field_alias] = value
            else:
                array_field = field.split('.')
                if len(array_field) > 1 and (array_field[0] in all_columns or
                                             array_field[0] in Formats):
                    if array_field[0] not in Fields:
                        Fields[array_field[0]] = []
                    Fields[array_field[0]].append(value)
                else:
                    if field in Formats:
                        Fields[field] = value
                    else:
                        return 1, 'request contained a bad parameter "%s".' % field, None, None, None

    if Options['auto_active_group'] and 'group_name' not in Fields:
        Fields['group_name'] = active_user.active_group

    if Options['auto_active_user'] and 'username' not in Fields:
        Fields['username'] = active_user

    # Process boolean fields.
    for field in Formats:
        if Formats[field] == 'boolean':
            if request.POST.get(field):
                if request.POST[field] == 'invalid-unit-test':
                    Fields[field] = 'invalid-unit-test'
                else:
                    Fields[field] = True
            else:
                Fields[field] = False

    for field in primary_key_columns + Mandatory:
        if field not in Fields and (field not in Formats or
                                    Formats[field] != 'ignore'):
            return 1, 'request did not contain mandatory parameter "%s".' % field, None, None, None

    if NotEmpty:
        for field in Fields:
            if field in NotEmpty and Fields[field] == '':
                return 1, 'parameter "%s" contains an empty string which is specifically disallowed.' % field, None, None, None

    return 0, None, Fields, Tables, Columns
def get_jobs_status_from_components(user, topic_id, type_id):
    # List of meaningful job statuses for the global overview
    #
    # ie. if the current job status is running, we should retrieve the
    # status from the prior job.
    valid_status = ['failure', 'product-failure',
                    'deployment-failure', 'success']

    topic_id = v1_utils.verify_existence_and_get(topic_id, _TABLE, get_id=True)
    v1_utils.verify_team_in_topic(user, topic_id)

    # Get the list of all remotecis that are attached to a topic this type
    # belongs to
    fields = [models.REMOTECIS.c.id.label('remoteci_id'),
              models.REMOTECIS.c.name.label('remoteci_name'),
              models.TEAMS.c.id.label('team_id'),
              models.TEAMS.c.name.label('team_name'),
              models.TOPICS.c.name.label('topic_name'),
              models.COMPONENTS.c.id.label('component_id'),
              models.COMPONENTS.c.name.label('component_name'),
              models.COMPONENTS.c.type.label('component_type'),
              models.JOBS.c.id.label('job_id'),
              models.JOBS.c.status.label('job_status'),
              models.JOBS.c.created_at.label('job_created_at')]
    query = (sql.select(fields)
             .select_from(
                 sql.join(
                     models.REMOTECIS,
                     models.JOBS,
                     models.REMOTECIS.c.id == models.JOBS.c.remoteci_id,
                     isouter=True)
                 .join(
                     models.JOIN_JOBS_COMPONENTS,
                     models.JOIN_JOBS_COMPONENTS.c.job_id == models.JOBS.c.id)
                 .join(
                     models.COMPONENTS,
                     models.COMPONENTS.c.id == models.JOIN_JOBS_COMPONENTS.c.component_id)  # noqa
                 .join(
                     models.TOPICS,
                     models.TOPICS.c.id == models.COMPONENTS.c.topic_id)
                 .join(
                     models.TEAMS,
                     models.TEAMS.c.id == models.JOBS.c.team_id))
             .where(
                 sql.and_(
                     models.REMOTECIS.c.state == 'active',
                     models.TEAMS.c.external == True,  # noqa
                     models.JOBS.c.status.in_(valid_status),
                     models.JOBS.c.state != 'archived',
                     models.COMPONENTS.c.type == type_id,
                     models.TOPICS.c.id == topic_id))
             .order_by(
                 models.REMOTECIS.c.id,
                 models.JOBS.c.created_at.desc())
             .distinct(models.REMOTECIS.c.id))

    if not user.is_super_admin():
        query.append_whereclause(models.TEAMS.c.id.in_(user.teams))

    rcs = flask.g.db_conn.execute(query).fetchall()
    nb_row = len(rcs)

    return flask.jsonify({'jobs': rcs,
                          '_meta': {'count': nb_row}})
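The closing `order_by(remoteci_id, created_at.desc())` plus `distinct(remoteci_id)` pair relies on PostgreSQL's `DISTINCT ON` to keep only the newest meaningful job per remoteci. A minimal sketch of the pattern with an assumed `jobs` table (not part of this source):

    latest_per_remoteci = (sql.select([jobs.c.remoteci_id, jobs.c.status])
                           .order_by(jobs.c.remoteci_id,
                                     jobs.c.created_at.desc())
                           .distinct(jobs.c.remoteci_id))
    # compiles (on PostgreSQL) to:
    #   SELECT DISTINCT ON (jobs.remoteci_id) ...
    #   ORDER BY jobs.remoteci_id, jobs.created_at DESC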
def toonParams(m_email):
    class MyWindow(QDialog):
        def __init__(self, data_list, header, *args):
            QWidget.__init__(self, *args)
            self.setWindowTitle('Parameters wijzigen')
            self.setWindowIcon(QIcon('./images/logos/logo.jpg'))
            self.setWindowFlags(self.windowFlags() | Qt.WindowSystemMenuHint |
                                Qt.WindowMinMaxButtonsHint)
            self.setFont(QFont('Arial', 10))
            grid = QGridLayout()
            grid.setSpacing(20)
            table_model = MyTableModel(self, data_list, header)
            table_view = QTableView()
            table_view.setModel(table_model)
            font = QFont("Arial", 10)
            table_view.setFont(font)
            table_view.resizeColumnsToContents()
            table_view.setSelectionBehavior(QTableView.SelectRows)
            # table_view.clicked.connect(selectRow)
            table_view.clicked.connect(showSelection)
            grid.addWidget(table_view, 0, 0, 1, 7)
            lbl = QLabel()
            pixmap = QPixmap('./images/logos/verbinding.jpg')
            lbl.setPixmap(pixmap)
            grid.addWidget(lbl, 1, 0, 1, 2)
            logo = QLabel()
            pixmap = QPixmap('./images/logos/logo.jpg')
            logo.setPixmap(pixmap)
            grid.addWidget(logo, 1, 6, 1, 1, Qt.AlignRight)
            freshBtn = QPushButton('Verversen')
            freshBtn.clicked.connect(lambda: refresh(m_email, self))
            freshBtn.setFont(QFont("Arial", 10))
            freshBtn.setFixedWidth(100)
            freshBtn.setStyleSheet("color: black; background-color: gainsboro")
            grid.addWidget(freshBtn, 1, 5, 1, 1,
                           Qt.AlignRight | Qt.AlignBottom)
            sluitBtn = QPushButton('Sluiten')
            sluitBtn.clicked.connect(self.close)
            sluitBtn.setFont(QFont("Arial", 10))
            sluitBtn.setFixedWidth(100)
            sluitBtn.setStyleSheet("color: black; background-color: gainsboro")
            grid.addWidget(sluitBtn, 1, 4, 1, 1,
                           Qt.AlignRight | Qt.AlignBottom)
            grid.addWidget(
                QLabel('\u00A9 2017 all rights reserved [email protected]'),
                1, 2, 1, 1, Qt.AlignBottom)
            self.setGeometry(300, 50, 900, 900)
            self.setLayout(grid)

    class MyTableModel(QAbstractTableModel):
        def __init__(self, parent, mylist, header, *args):
            QAbstractTableModel.__init__(self, parent, *args)
            self.mylist = mylist
            self.header = header

        def rowCount(self, parent):
            return len(self.mylist)

        def columnCount(self, parent):
            return len(self.mylist[0])

        def data(self, index, role):
            # Check validity before indexing into the data.
            if not index.isValid():
                return None
            veld = self.mylist[index.row()][index.column()]
            if role == Qt.TextAlignmentRole and isinstance(veld, (float, int)):
                return Qt.AlignRight | Qt.AlignVCenter
            elif role != Qt.DisplayRole:
                return None
            if isinstance(veld, float):
                return '{:12.2f}'.format(veld)
            return veld

        def headerData(self, col, orientation, role):
            if orientation == Qt.Horizontal and role == Qt.DisplayRole:
                return self.header[col]
            return None

    header = ['ParamID', 'Item', 'Tarief', 'Verrekening', 'Ondergrens',
              'Bovengrens', 'Aanpassing', 'Lock', 'Tarieffactor']
    metadata = MetaData()
    params = Table('params', metadata,
                   Column('paramID', Integer(), primary_key=True),
                   Column('item', String),
                   Column('tarief', Float),
                   Column('verrekening', String),
                   Column('ondergrens', Float),
                   Column('bovengrens', Float),
                   Column('datum', String),
                   Column('lock', Boolean),
                   Column('tarieffactor', Float))
    engine = create_engine('postgresql+psycopg2://postgres@localhost/bisystem')
    con = engine.connect()
    sel = select([params]).order_by(params.c.paramID.asc())
    rp = con.execute(sel)
    data_list = [row for row in rp]

    def showSelection(idx):
        paramnr = idx.data()
        if idx.column() == 0:
            engine = create_engine(
                'postgresql+psycopg2://postgres@localhost/bisystem')
            con = engine.connect()
            selpar = select([params]).where(params.c.paramID == paramnr)
            rppar = con.execute(selpar).first()

            class MainWindow(QDialog):
                def __init__(self):
                    QDialog.__init__(self)
                    grid = QGridLayout()
                    grid.setSpacing(20)
                    self.setWindowTitle("Wijzigen parameters")
                    self.setWindowIcon(QIcon('./images/logos/logo.jpg'))
                    self.setFont(QFont('Arial', 10))
                    self.Item = QLabel()
                    q1Edit = QLineEdit(rppar[1])
                    q1Edit.setCursorPosition(0)
                    q1Edit.setFixedWidth(150)
                    q1Edit.setFont(QFont("Arial", 10))
                    q1Edit.textChanged.connect(self.q1Changed)
                    reg_ex = QRegExp("^.{0,20}$")
                    q1Edit.setValidator(QRegExpValidator(reg_ex, q1Edit))
                    self.Tarief = QLabel()
                    q2Edit = QLineEdit(str(round(float(rppar[2]), 2)))
                    q2Edit.setFixedWidth(100)
                    q2Edit.setAlignment(Qt.AlignRight)
                    q2Edit.setFont(QFont("Arial", 10))
                    q2Edit.textChanged.connect(self.q2Changed)
                    reg_ex = QRegExp(r"^[-+]?[0-9]*\.?[0-9]+$")
                    q2Edit.setValidator(QRegExpValidator(reg_ex, q2Edit))
                    self.Verrekening = QLabel()
                    q3Edit = QLineEdit(rppar[3])
                    q3Edit.setFixedWidth(200)
                    q3Edit.setFont(QFont("Arial", 10))
                    q3Edit.textChanged.connect(self.q3Changed)
                    reg_ex = QRegExp("^.{0,20}$")
                    q3Edit.setValidator(QRegExpValidator(reg_ex, q3Edit))
                    self.Ondergrens = QLabel()
                    q4Edit = QLineEdit(str(round(float(rppar[4]), 2)))
                    q4Edit.setFixedWidth(100)
                    q4Edit.setFont(QFont("Arial", 10))
                    q4Edit.setAlignment(Qt.AlignRight)
                    q4Edit.textChanged.connect(self.q4Changed)
                    reg_ex = QRegExp(r"^[-+]?[0-9]*\.?[0-9]+$")
                    q4Edit.setValidator(QRegExpValidator(reg_ex, q4Edit))
                    self.Bovengrens = QLabel()
                    q5Edit = QLineEdit(str(round(float(rppar[5]), 2)))
                    q5Edit.setFixedWidth(100)
                    q5Edit.setAlignment(Qt.AlignRight)
                    q5Edit.setFont(QFont("Arial", 10))
                    q5Edit.textChanged.connect(self.q5Changed)
                    reg_ex = QRegExp(r"^[-+]?[0-9]*\.?[0-9]+$")
                    q5Edit.setValidator(QRegExpValidator(reg_ex, q5Edit))
                    self.Tarieffactor = QLabel()
                    q6Edit = QLineEdit(str(round(float(rppar[8]), 2)))
                    q6Edit.setFixedWidth(100)
                    q6Edit.setAlignment(Qt.AlignRight)
                    q6Edit.setFont(QFont("Arial", 10))
                    q6Edit.textChanged.connect(self.q6Changed)
                    reg_ex = QRegExp(r"^[-+]?[0-9]*\.?[0-9]+$")
                    q6Edit.setValidator(QRegExpValidator(reg_ex, q6Edit))
                    lbl1 = QLabel('Parameternummer')
                    grid.addWidget(lbl1, 1, 0)
                    lbl2 = QLabel(str(paramnr))
                    grid.addWidget(lbl2, 1, 1)
                    lbl3 = QLabel('Item')
                    grid.addWidget(lbl3, 2, 0)
                    grid.addWidget(q1Edit, 2, 1, 1, 2)
                    lbl4 = QLabel('Tarief')
                    grid.addWidget(lbl4, 3, 0)
                    grid.addWidget(q2Edit, 3, 1)
                    lbl5 = QLabel('Verrekening')
                    grid.addWidget(lbl5, 4, 0)
                    grid.addWidget(q3Edit, 4, 1, 1, 2)
                    lbl6 = QLabel('Ondergrens')
                    grid.addWidget(lbl6, 5, 0)
                    grid.addWidget(q4Edit, 5, 1)
                    lbl7 = QLabel('Bovengrens')
                    grid.addWidget(lbl7, 6, 0)
                    grid.addWidget(q5Edit, 6, 1)
                    lbl8 = QLabel('Aanpassing')
                    grid.addWidget(lbl8, 7, 0)
                    lbl9 = QLabel(rppar[6])
                    grid.addWidget(lbl9, 7, 1)
                    lbl10 = QLabel('Lock: ' + str(rppar[7]))
                    grid.addWidget(lbl10, 7, 2)
                    lbl11 = QLabel('Tarieffactor')
                    grid.addWidget(lbl11, 8, 0)
                    grid.addWidget(q6Edit, 8, 1)
                    lbl = QLabel()
                    pixmap = QPixmap('./images/logos/verbinding.jpg')
                    lbl.setPixmap(pixmap)
                    grid.addWidget(lbl, 0, 0, 1, 2)
                    logo = QLabel()
                    pixmap = QPixmap('./images/logos/logo.jpg')
                    logo.setPixmap(pixmap)
                    grid.addWidget(logo, 0, 2, 1, 1, Qt.AlignRight)
                    grid.addWidget(
                        QLabel('\u00A9 2017 all rights reserved '
                               '[email protected]'),
                        10, 0, 1, 3, Qt.AlignCenter)
                    self.setLayout(grid)
                    self.setGeometry(500, 300, 150, 150)
                    applyBtn = QPushButton('Wijzig')
                    applyBtn.clicked.connect(self.accept)
                    grid.addWidget(applyBtn, 9, 2, 1, 1, Qt.AlignRight)
                    applyBtn.setFont(QFont("Arial", 10))
                    applyBtn.setFixedWidth(100)
                    applyBtn.setStyleSheet(
                        "color: black; background-color: gainsboro")
                    cancelBtn = QPushButton('Sluiten')
                    cancelBtn.clicked.connect(self.close)
                    grid.addWidget(cancelBtn, 9, 1, 1, 1, Qt.AlignRight)
                    cancelBtn.setFont(QFont("Arial", 10))
                    cancelBtn.setFixedWidth(100)
                    cancelBtn.setStyleSheet(
                        "color: black; background-color: gainsboro")

                def q1Changed(self, text):
                    self.Item.setText(text)

                def q2Changed(self, text):
                    self.Tarief.setText(text)

                def q3Changed(self, text):
                    self.Verrekening.setText(text)

                def q4Changed(self, text):
                    self.Ondergrens.setText(text)

                def q5Changed(self, text):
                    self.Bovengrens.setText(text)

                def q6Changed(self, text):
                    self.Tarieffactor.setText(text)

                def returnq1(self):
                    return self.Item.text()

                def returnq2(self):
                    return self.Tarief.text()

                def returnq3(self):
                    return self.Verrekening.text()

                def returnq4(self):
                    return self.Ondergrens.text()

                def returnq5(self):
                    return self.Bovengrens.text()

                def returnq6(self):
                    return self.Tarieffactor.text()

                @staticmethod
                def getData(parent=None):
                    dialog = MainWindow()
                    dialog.exec_()
                    return [dialog.returnq1(), dialog.returnq2(),
                            dialog.returnq3(), dialog.returnq4(),
                            dialog.returnq5(), dialog.returnq6()]

            mainWin = MainWindow()
            data = mainWin.getData()
            # If no field was edited there is nothing to update.
            if not any(data):
                return
            mf0 = data[0] if data[0] else rppar[1]
            mf1 = float(data[1]) if data[1] else rppar[2]
            mf2 = data[2] if data[2] else rppar[3]
            mf3 = float(data[3]) if data[3] else rppar[4]
            mf4 = float(data[4]) if data[4] else rppar[5]
            mf5 = float(data[5]) if data[5] else rppar[8]
            dt = str(datetime.datetime.now())[0:10]
            engine = create_engine(
                'postgresql+psycopg2://postgres@localhost/bisystem')
            con = engine.connect()
            updpar = (update(params).where(params.c.paramID == paramnr)
                      .values(item=mf0, tarief=mf1, verrekening=mf2,
                              ondergrens=mf3, bovengrens=mf4,
                              tarieffactor=mf5, datum=dt, lock=False))
            con.execute(updpar)
            con.close()
            wijzigOK()

    win = MyWindow(data_list, header)
    win.exec_()
    hoofdMenu(m_email)
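# refresh(), wijzigOK() and hoofdMenu() are helpers defined elsewhere in
# this application; their bodies are not shown here. Purely as an
# illustration, a minimal wijzigOK() that just confirms the update might
# look like this (hypothetical, not the application's actual code):
from PyQt5.QtWidgets import QMessageBox

def wijzigOK():
    msg = QMessageBox()
    msg.setWindowTitle('Parameters wijzigen')
    msg.setText('De parameter is gewijzigd.')  # "The parameter has been changed."
    msg.exec_()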
def _populate_metadata(meta, metadata_path=None, merge=False, prefer_new=False,
                       overwrite=False):
    if not metadata_path:
        metadata_path = CONF.metadata_source_path
    try:
        if isfile(metadata_path):
            json_schema_files = [metadata_path]
        else:
            json_schema_files = [f for f in os.listdir(metadata_path)
                                 if isfile(join(metadata_path, f))
                                 and f.endswith('.json')]
    except OSError as e:
        LOG.error(utils.exception_to_str(e))
        return

    if not json_schema_files:
        LOG.error(_LE("Json schema files not found in %s. Aborting."),
                  metadata_path)
        return

    namespaces_table = get_metadef_namespaces_table(meta)
    namespace_rt_table = get_metadef_namespace_resource_types_table(meta)
    objects_table = get_metadef_objects_table(meta)
    tags_table = get_metadef_tags_table(meta)
    properties_table = get_metadef_properties_table(meta)
    resource_types_table = get_metadef_resource_types_table(meta)

    for json_schema_file in json_schema_files:
        try:
            file = join(metadata_path, json_schema_file)
            with open(file) as json_file:
                metadata = json.load(json_file)
        except Exception as e:
            LOG.error(utils.exception_to_str(e))
            continue

        values = {
            'namespace': metadata.get('namespace'),
            'display_name': metadata.get('display_name'),
            'description': metadata.get('description'),
            'visibility': metadata.get('visibility'),
            'protected': metadata.get('protected'),
            'owner': metadata.get('owner', 'admin')
        }

        db_namespace = select([namespaces_table.c.id]).where(
            namespaces_table.c.namespace == values['namespace']).select_from(
                namespaces_table).execute().fetchone()

        if db_namespace and overwrite:
            LOG.info(_LI("Overwriting namespace %s"), values['namespace'])
            _clear_namespace_metadata(meta, db_namespace[0])
            db_namespace = None

        if not db_namespace:
            values.update({'created_at': timeutils.utcnow()})
            _insert_data_to_db(namespaces_table, values)
            db_namespace = select([namespaces_table.c.id]).where(
                namespaces_table.c.namespace == values['namespace']
            ).select_from(namespaces_table).execute().fetchone()
        elif not merge:
            LOG.info(_LI("Skipping namespace %s. It already exists in the "
                         "database."), values['namespace'])
            continue
        elif prefer_new:
            values.update({'updated_at': timeutils.utcnow()})
            _update_data_in_db(namespaces_table, values,
                               namespaces_table.c.id, db_namespace[0])

        namespace_id = db_namespace[0]

        for resource_type in metadata.get('resource_type_associations', []):
            rt_id = _get_resource_type_id(meta, resource_type['name'])
            if not rt_id:
                val = {
                    'name': resource_type['name'],
                    'created_at': timeutils.utcnow(),
                    'protected': True
                }
                _insert_data_to_db(resource_types_table, val)
                rt_id = _get_resource_type_id(meta, resource_type['name'])
            elif prefer_new:
                val = {'updated_at': timeutils.utcnow()}
                _update_data_in_db(resource_types_table, val,
                                   resource_types_table.c.id, rt_id)

            values = {
                'namespace_id': namespace_id,
                'resource_type_id': rt_id,
                'properties_target': resource_type.get('properties_target'),
                'prefix': resource_type.get('prefix')
            }
            namespace_resource_type = _get_namespace_resource_type_by_ids(
                meta, namespace_id, rt_id)
            if not namespace_resource_type:
                values.update({'created_at': timeutils.utcnow()})
                _insert_data_to_db(namespace_rt_table, values)
            elif prefer_new:
                values.update({'updated_at': timeutils.utcnow()})
                _update_rt_association(namespace_rt_table, values,
                                       rt_id, namespace_id)

        for property, schema in six.iteritems(metadata.get('properties', {})):
            values = {
                'name': property,
                'namespace_id': namespace_id,
                'json_schema': json.dumps(schema)
            }
            property_id = _get_resource_id(properties_table,
                                           namespace_id, property)
            if not property_id:
                values.update({'created_at': timeutils.utcnow()})
                _insert_data_to_db(properties_table, values)
            elif prefer_new:
                values.update({'updated_at': timeutils.utcnow()})
                _update_data_in_db(properties_table, values,
                                   properties_table.c.id, property_id)

        for object in metadata.get('objects', []):
            values = {
                'name': object['name'],
                'description': object.get('description'),
                'namespace_id': namespace_id,
                'json_schema': json.dumps(object.get('properties'))
            }
            object_id = _get_resource_id(objects_table, namespace_id,
                                         object['name'])
            if not object_id:
                values.update({'created_at': timeutils.utcnow()})
                _insert_data_to_db(objects_table, values)
            elif prefer_new:
                values.update({'updated_at': timeutils.utcnow()})
                _update_data_in_db(objects_table, values,
                                   objects_table.c.id, object_id)

        for tag in metadata.get('tags', []):
            values = {
                'name': tag.get('name'),
                'namespace_id': namespace_id,
            }
            # Tags are looked up by name; the metadata files carry no tag ids.
            tag_id = _get_resource_id(tags_table, namespace_id, tag['name'])
            if not tag_id:
                values.update({'created_at': timeutils.utcnow()})
                _insert_data_to_db(tags_table, values)
            elif prefer_new:
                values.update({'updated_at': timeutils.utcnow()})
                _update_data_in_db(tags_table, values,
                                   tags_table.c.id, tag_id)

        LOG.info(_LI("File %s loaded to database."), file)

    LOG.info(_LI("Metadata loading finished"))
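# Illustrative only: a minimal metadata definition file of the shape the
# loader above consumes. The keys mirror the ones read above; the values
# are made up.
#
# {
#     "namespace": "OS::Compute::ExampleNamespace",
#     "display_name": "Example Namespace",
#     "description": "A minimal example definition",
#     "visibility": "public",
#     "protected": false,
#     "resource_type_associations": [
#         {"name": "OS::Glance::Image", "prefix": "hw_"}
#     ],
#     "properties": {
#         "hw_cpu_cores": {"title": "vCPUs", "type": "integer"}
#     },
#     "objects": [],
#     "tags": [{"name": "example"}]
# }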
db_conn = engine.connect()
result = db_conn.execute(insert_stmt)
print(result.inserted_primary_key)  # print out the primary key it was assigned

keyword2 = 'pence'
search2 = mc.sentenceCount(keyword2, solr_filter=[
    mc.publish_date_query(datetime.date(2016, 9, 1),
                          datetime.date(2016, 10, 1)),
    'tags_id_media:1'])
print(search2['count'])  # prints the number of sentences found

insert_stmt = queries.insert().values(keywords=keyword2,
                                      sentcount=search2['count'])
str(insert_stmt)  # render the statement as a SQL string
db_conn = engine.connect()
result = db_conn.execute(insert_stmt)
print(result.inserted_primary_key)  # print out the primary key it was assigned

from sqlalchemy.sql import select

select_stmt = select([queries.c.sentcount])
results = db_conn.execute(select_stmt)
summation = 0
for row in results:
    summation = summation + row[0]
print(summation)
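# The Python-side loop above can also be pushed down into the database.
# A minimal alternative sketch, using the same queries table and db_conn
# as above, with SQLAlchemy's func.sum:
from sqlalchemy import func

total = db_conn.execute(select([func.sum(queries.c.sentcount)])).scalar()
print(total)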
def manage_user_groups(config, tables, user, groups, option=None):
    """
    Ensure that all the specified groups, and only the specified groups,
    have the specified user as a member. The specified user and groups
    have all been pre-verified.
    """
    from sqlalchemy.sql import select

    table = tables['csv2_user_groups']

    # If there is only one group, make it a list anyway.
    if groups:
        if isinstance(groups, str):
            group_list = groups.split(',')
        else:
            group_list = groups
    else:
        group_list = []

    # Retrieve the list of groups the user already has.
    db_groups = []
    s = select([table]).where(table.c.username == user)
    user_groups_list = qt(config.db_connection.execute(s))
    for row in user_groups_list:
        db_groups.append(row['group_name'])

    if not option or option == 'add':
        # Get the list of specified groups that the user doesn't already have.
        add_groups = diff_lists(group_list, db_groups)

        # Add the missing groups.
        for group in add_groups:
            rc, msg = config.db_session_execute(
                table.insert().values(username=user, group_name=group))
            if rc != 0:
                return 1, msg

    if not option:
        # Get the list of groups that the user currently has but were not
        # specified.
        remove_groups = diff_lists(db_groups, group_list)

        # Remove the extraneous groups.
        for group in remove_groups:
            rc, msg = config.db_session_execute(
                table.delete((table.c.username == user) &
                             (table.c.group_name == group)))
            if rc != 0:
                return 1, msg

    elif option == 'delete':
        # Get the list of groups that the user currently has and were
        # specified.
        remove_groups = diff_lists(group_list, db_groups, option='and')

        # Remove those groups.
        for group in remove_groups:
            rc, msg = config.db_session_execute(
                table.delete((table.c.username == user) &
                             (table.c.group_name == group)))
            if rc != 0:
                return 1, msg

    return 0, None
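# diff_lists() is not defined in this snippet. A plausible implementation,
# consistent with its three uses above (default: items of list1 not in
# list2; option='and': items common to both), might look like this
# hypothetical sketch:
def diff_lists(list1, list2, option=None):
    if option == 'and':
        return [item for item in list1 if item in list2]
    return [item for item in list1 if item not in list2]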
def read(self) -> list:
    try:
        # fetchall() so the annotated return type (a list of rows) holds.
        return self.conn.execute(select([self.table])).fetchall()
    except Exception as ex:
        print("Error while reading data from the weather database:", ex)
        return []
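# Usage sketch (the surrounding class and its constructor are not shown
# here, so the variable name is hypothetical):
#
# for row in weather_db.read():
#     print(dict(row))  # RowProxy objects convert cleanly to dicts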
def fetch_unique_states(self):
    command = select([self.geolocation.c.state]).distinct()
    result = self.connection.execute(command)
    return result
def package_dictize(pkg, context):
    '''
    Given a Package object, returns an equivalent dictionary.

    Normally this is the most recent version, but you can provide
    revision_id or revision_date in the context and it will filter to an
    earlier time.

    May raise NotFound if:
    * the specified revision_id doesn't exist
    * the specified revision_date was before the package was created
    '''
    model = context['model']
    is_latest_revision = not (context.get('revision_id') or
                              context.get('revision_date'))
    execute = _execute if is_latest_revision else _execute_with_revision
    # package
    if is_latest_revision:
        if isinstance(pkg, model.PackageRevision):
            pkg = model.Package.get(pkg.id)
        result = pkg
    else:
        package_rev = model.package_revision_table
        q = select([package_rev]).where(package_rev.c.id == pkg.id)
        result = execute(q, package_rev, context).first()
    if not result:
        raise logic.NotFound
    result_dict = d.table_dictize(result, context)
    # strip whitespace from title
    if result_dict.get('title'):
        result_dict['title'] = result_dict['title'].strip()

    # resources
    if is_latest_revision:
        res = model.resource_table
    else:
        res = model.resource_revision_table
    q = select([res]).where(res.c.package_id == pkg.id)
    result = execute(q, res, context)
    result_dict["resources"] = resource_list_dictize(result, context)
    result_dict['num_resources'] = len(result_dict.get('resources', []))

    # tags
    tag = model.tag_table
    if is_latest_revision:
        pkg_tag = model.package_tag_table
    else:
        pkg_tag = model.package_tag_revision_table
    q = select([tag, pkg_tag.c.state],
               from_obj=pkg_tag.join(tag, tag.c.id == pkg_tag.c.tag_id)
               ).where(pkg_tag.c.package_id == pkg.id)
    result = execute(q, pkg_tag, context)
    result_dict["tags"] = d.obj_list_dictize(result, context,
                                             lambda x: x["name"])
    result_dict['num_tags'] = len(result_dict.get('tags', []))

    # Add display_names to tags. At first a tag's display_name is just the
    # same as its name, but the display_name might get changed later (e.g.
    # translated into another language by the multilingual extension).
    for tag in result_dict['tags']:
        assert 'display_name' not in tag
        tag['display_name'] = tag['name']

    # extras
    if is_latest_revision:
        extra = model.package_extra_table
    else:
        extra = model.extra_revision_table
    q = select([extra]).where(extra.c.package_id == pkg.id)
    result = execute(q, extra, context)
    result_dict["extras"] = extras_list_dictize(result, context)

    # groups
    if is_latest_revision:
        member = model.member_table
    else:
        member = model.member_revision_table
    group = model.group_table
    q = select([group, member.c.capacity],
               from_obj=member.join(group, group.c.id == member.c.group_id)
               ).where(member.c.table_id == pkg.id)\
                .where(member.c.state == 'active') \
                .where(group.c.is_organization == False)
    result = execute(q, member, context)
    context['with_capacity'] = False
    ## no package counts, as we cannot fetch from the search index at the
    ## same time as indexing to it
    ## tags, extras and sub-groups are not included for speed
    result_dict["groups"] = group_list_dictize(result, context,
                                               with_package_counts=False)

    # owning organization
    if is_latest_revision:
        group = model.group_table
    else:
        group = model.group_revision_table
    q = select([group]
               ).where(group.c.id == pkg.owner_org) \
                .where(group.c.state == 'active')
    result = execute(q, group, context)
    organizations = d.obj_list_dictize(result, context)
    if organizations:
        result_dict["organization"] = organizations[0]
    else:
        result_dict["organization"] = None

    # relations
    if is_latest_revision:
        rel = model.package_relationship_table
    else:
        rel = model.package_relationship_revision_table
    q = select([rel]).where(rel.c.subject_package_id == pkg.id)
    result = execute(q, rel, context)
    result_dict["relationships_as_subject"] = \
        d.obj_list_dictize(result, context)
    q = select([rel]).where(rel.c.object_package_id == pkg.id)
    result = execute(q, rel, context)
    result_dict["relationships_as_object"] = \
        d.obj_list_dictize(result, context)

    # Extra properties from the domain object
    # We need an actual Package object for this, not a PackageRevision
    if isinstance(pkg, model.PackageRevision):
        pkg = model.Package.get(pkg.id)

    # isopen
    result_dict['isopen'] = pkg.isopen if isinstance(pkg.isopen, bool) \
        else pkg.isopen()

    # type
    # if null assign the default value to make searching easier
    result_dict['type'] = pkg.type or u'dataset'

    # license
    if pkg.license and pkg.license.url:
        result_dict['license_url'] = pkg.license.url
        result_dict['license_title'] = pkg.license.title.split('::')[-1]
    elif pkg.license:
        result_dict['license_title'] = pkg.license.title
    else:
        result_dict['license_title'] = pkg.license_id

    # creation and modification date
    result_dict['metadata_modified'] = pkg.metadata_modified.isoformat()
    result_dict['metadata_created'] = pkg.metadata_created.isoformat() \
        if pkg.metadata_created else None

    return result_dict
def apply(self, context, clear, split, check_for_existing=True, **kwargs):
    """Extract Candidates from a Context"""
    # For now, just handle Sentences
    if not isinstance(context, Sentence):
        raise NotImplementedError(
            "%s is currently only implemented for Sentence contexts." %
            self.__name__)

    # Do a first pass to collect all mentions by entity type / cid
    entity_idxs = dict((et, defaultdict(list))
                       for et in set(self.entity_types))
    L = len(context.words)
    for i in range(L):
        if context.entity_types[i] is not None:
            ets = context.entity_types[i].split(self.entity_sep)
            cids = context.entity_cids[i].split(self.entity_sep)
            for et, cid in zip(ets, cids):
                if et in entity_idxs:
                    entity_idxs[et][cid].append(i)

    # Form entity Spans
    entity_spans = defaultdict(list)
    entity_cids = {}
    for et, cid_idxs in iteritems(entity_idxs):
        for cid, idxs in iteritems(cid_idxs):
            while len(idxs) > 0:
                i = idxs.pop(0)
                char_start = context.char_offsets[i]
                char_end = char_start + len(context.words[i]) - 1
                # Extend the span over consecutive word indices
                while len(idxs) > 0 and idxs[0] == i + 1:
                    i = idxs.pop(0)
                    char_end = (context.char_offsets[i] +
                                len(context.words[i]) - 1)

                # Insert / load temporary span, also store map to entity CID
                tc = TemporarySpan(char_start=char_start, char_end=char_end,
                                   sentence=context)
                tc.load_id_or_insert(self.session)
                entity_cids[tc.id] = cid
                entity_spans[et].append(tc)

    # Generates and persists candidates
    candidate_args = {'split': split}
    for args in product(*[enumerate(entity_spans[et])
                          for et in self.entity_types]):

        # TODO: Make this work for higher-order relations
        if self.arity == 2:
            ai, a = args[0]
            bi, b = args[1]

            # Check for self-joins, "nested" joins (joins from a span to its
            # subspan), and flipped duplicate "symmetric" relations
            if not self.self_relations and a == b:
                continue
            elif not self.nested_relations and (a in b or b in a):
                continue
            elif not self.symmetric_relations and ai > bi:
                continue

        # Assemble candidate arguments
        for i, arg_name in enumerate(self.candidate_class.__argnames__):
            candidate_args[arg_name + '_id'] = args[i][1].id
            candidate_args[arg_name + '_cid'] = entity_cids[args[i][1].id]

        # Checking for existence
        if check_for_existing:
            q = select([self.candidate_class.id])
            for key, value in iteritems(candidate_args):
                q = q.where(getattr(self.candidate_class, key) == value)
            candidate_id = self.session.execute(q).first()
            if candidate_id is not None:
                continue

        # Add Candidate to session
        yield self.candidate_class(**candidate_args)
def fetch_unique_cities(self):
    command = select([self.geolocation.c.city]).distinct()
    result = self.connection.execute(command).fetchall()
    return result
def test_upgrade_bulk(self):
    dt = datetime.datetime(1970, 1, 1)
    self.connection.execute(
        self.subnets.insert(),
        dict(id="000", tenant_id="foo", _cidr="192.168.10.0/24",
             ip_policy_id="111"),
        dict(id="001", tenant_id="foo", _cidr="192.168.10.0/24",
             ip_policy_id=None),
        dict(id="002", tenant_id="foo", _cidr="fd00::/64",
             ip_policy_id=None))
    self.connection.execute(
        self.ip_policy.insert(),
        dict(id="111", tenant_id="foo", created_at=dt))
    self.connection.execute(
        self.ip_policy_cidrs.insert(),
        dict(id="221", created_at=dt, ip_policy_id="111",
             cidr="192.168.10.13/32"))
    with mock.patch("oslo_utils.uuidutils") as uuid, \
            mock.patch("oslo_utils.timeutils") as tu:
        tu.utcnow.return_value = dt
        uuid.generate_uuid.side_effect = ("5", "6", "7", "8", "9", "10")
        alembic_command.upgrade(self.config, '552b213c2b8c')

    results = self.connection.execute(
        select([self.ip_policy]).where(
            self.ip_policy.c.id == "111")).fetchall()
    self.assertEqual(len(results), 1)
    result = results[0]
    self.assertEqual(result["id"], "111")
    self.assertEqual(result["tenant_id"], "foo")
    self.assertEqual(result["created_at"], dt)

    results = self.connection.execute(
        select([self.ip_policy_cidrs]).where(
            self.ip_policy_cidrs.c.ip_policy_id == "111")).fetchall()
    self.assertEqual(len(results), 1)
    result = results[0]
    self.assertEqual(result["id"], "221")
    self.assertEqual(result["created_at"], dt)
    self.assertEqual(result["ip_policy_id"], "111")
    self.assertEqual(result["cidr"], "192.168.10.13/32")

    results = self.connection.execute(
        select([self.subnets]).where(
            self.subnets.c.ip_policy_id == "111")).fetchall()
    self.assertEqual(len(results), 1)
    self.assertEqual(results[0]["ip_policy_id"], "111")

    results = self.connection.execute(
        select([self.ip_policy]).where(
            self.ip_policy.c.id != "111")).fetchall()
    self.assertEqual(len(results), 2)
    for result in results:
        self.assertIn(int(result["id"]), range(5, 11))
        self.assertEqual(result["tenant_id"], "foo")
        self.assertEqual(result["created_at"], dt)

    results = self.connection.execute(
        select([self.ip_policy_cidrs]).where(
            self.ip_policy_cidrs.c.ip_policy_id != "111")).fetchall()
    self.assertEqual(len(results), 4)
    for result in results:
        self.assertIn(int(result["id"]), range(5, 11))
        self.assertEqual(result["created_at"], dt)
        self.assertIn(int(result["ip_policy_id"]), range(5, 11))
        self.assertIn(result["cidr"],
                      ("192.168.10.0/32", "192.168.10.255/32", "fd00::/128",
                       "fd00::ffff:ffff:ffff:ffff/128"))

    results = self.connection.execute(
        select([self.subnets]).where(
            self.subnets.c.ip_policy_id != "111")).fetchall()
    self.assertEqual(len(results), 2)
    for subnet in results:
        self.assertIn(int(subnet["ip_policy_id"]), range(5, 11))
def verify_registration_not_in_db(self, reg_id):
    with DBConnection(name='hpz') as connector:
        table = connector.get_table(self.reg_table)
        query = select([table]).where(table.c.registration_id == reg_id)
        result = connector.execute(query).fetchall()
        self.assertFalse(result, 'Registration is stored in db')
def test_upgrade_bulk(self):
    self.connection.execute(
        self.subnets.insert(),
        dict(id="000", _cidr="192.168.10.0/24", ip_policy_id=None),
        dict(id="001", _cidr="192.168.10.0/24", ip_policy_id="111"),
        dict(id="002", _cidr="192.168.10.0/24", ip_policy_id="112"),
        dict(id="003", _cidr="192.168.10.0/24", ip_policy_id="113"))
    dt = datetime.datetime(1970, 1, 1)
    self.connection.execute(
        self.ip_policy_cidrs.insert(),
        dict(id="221", created_at=dt, ip_policy_id="112",
             cidr="192.168.10.13/32"),
        dict(id="222", created_at=dt, ip_policy_id="113",
             cidr="192.168.10.0/32"),
        dict(id="223", created_at=dt, ip_policy_id="113",
             cidr="192.168.10.255/32"))
    alembic_command.upgrade(self.config, '45a07fac3d38')

    results = self.connection.execute(
        select([self.ip_policy_cidrs]).where(
            self.ip_policy_cidrs.c.ip_policy_id == None)).fetchall()  # noqa
    self.assertEqual(len(results), 0)

    results = self.connection.execute(
        select([self.ip_policy_cidrs]).where(
            self.ip_policy_cidrs.c.ip_policy_id == "111")).fetchall()
    self.assertEqual(len(results), 2)
    default_cidrs = ["192.168.10.0/32", "192.168.10.255/32"]
    self.assertIn(results[0]["cidr"], default_cidrs)
    self.assertIn(results[1]["cidr"], default_cidrs)
    self.assertNotEqual(results[0]["cidr"], results[1]["cidr"])

    results = self.connection.execute(
        select([self.ip_policy_cidrs]).where(
            self.ip_policy_cidrs.c.ip_policy_id == "112")).fetchall()
    self.assertEqual(len(results), 3)
    default_cidrs = ["192.168.10.0/32", "192.168.10.255/32",
                     "192.168.10.13/32"]
    for result in results:
        self.assertIn(result["cidr"], default_cidrs)
    self.assertNotEqual(results[0]["cidr"], results[1]["cidr"])
    self.assertNotEqual(results[0]["cidr"], results[2]["cidr"])
    self.assertNotEqual(results[1]["cidr"], results[2]["cidr"])

    results = self.connection.execute(
        select([self.ip_policy_cidrs]).where(
            self.ip_policy_cidrs.c.ip_policy_id == "113")).fetchall()
    self.assertEqual(len(results), 2)
    default_cidrs = ["192.168.10.0/32", "192.168.10.255/32"]
    self.assertIn(results[0]["cidr"], default_cidrs)
    self.assertIn(results[1]["cidr"], default_cidrs)
    self.assertTrue(results[0]["id"] == "222" or results[0]["id"] == "223")
    self.assertTrue(results[1]["id"] == "222" or results[1]["id"] == "223")
    self.assertEqual(results[0]["created_at"], dt)
    self.assertEqual(results[1]["created_at"], dt)
def test_upgrade_empty(self):
    alembic_command.upgrade(self.config, '1664300cb03a')
    results = self.connection.execute(
        select([self.ip_policy_cidrs])).fetchall()
    self.assertEqual(len(results), 0)
def test_geoalchemy2_core(bigquery_dataset):
    """Make sure the GeoAlchemy 2 Core Tutorial works, as adapted to only
    having geography

    https://geoalchemy-2.readthedocs.io/en/latest/core_tutorial.html

    Note:

    - BigQuery doesn't have ST_BUFFER
    """
    # Connect to the DB
    from sqlalchemy import create_engine
    engine = create_engine(f"bigquery:///{bigquery_dataset}")

    # Create the Table
    from sqlalchemy import Table, Column, String, MetaData
    from sqlalchemy_bigquery import GEOGRAPHY

    metadata = MetaData()
    lake_table = Table("lake", metadata,
                       Column("name", String),
                       Column("geog", GEOGRAPHY))
    lake_table.create(engine)

    # Insertions
    conn = engine.connect()
    conn.execute(lake_table.insert().values(
        name="Majeur",
        geog="POLYGON((0 0,1 0,1 1,0 1,0 0))",
    ))
    conn.execute(
        lake_table.insert(),
        [
            {"name": "Garde", "geog": "POLYGON((1 0,3 0,3 2,1 2,1 0))"},
            {"name": "Orta", "geog": "POLYGON((3 0,6 0,6 3,3 3,3 0))"},
        ],
    )

    # Selections
    from sqlalchemy.sql import select
    assert sorted((r.name, r.geog.desc[:4])
                  for r in conn.execute(select([lake_table]))) == [
        ("Garde", "0103"), ("Majeur", "0103"), ("Orta", "0103")]

    # Spatial query
    from sqlalchemy import func

    [[result]] = conn.execute(
        select([lake_table.c.name],
               func.ST_Contains(lake_table.c.geog, "POINT(4 1)")))
    assert result == "Orta"

    assert sorted((r.name, int(r.area)) for r in conn.execute(
        select([lake_table.c.name,
                lake_table.c.geog.ST_AREA().label("area")]))) == [
        ("Garde", 49452374328),
        ("Majeur", 12364036567),
        ("Orta", 111253664228)]

    # Extra: Make sure we can save a retrieved value back:
    [[geog]] = conn.execute(
        select([lake_table.c.geog], lake_table.c.name == "Garde"))
    conn.execute(lake_table.insert().values(name="test", geog=geog))
    assert (int(list(conn.execute(
        select([lake_table.c.geog.st_area()],
               lake_table.c.name == "test")))[0][0]) == 49452374328)

    # and, while we're at it, that we can insert WKTs, although we
    # normally wouldn't want to.
    from sqlalchemy_bigquery import WKT
    conn.execute(lake_table.insert().values(
        name="test2",
        geog=WKT("POLYGON((1 0,3 0,3 2,1 2,1 0))"),
    ))
    assert (int(list(conn.execute(
        select([lake_table.c.geog.st_area()],
               lake_table.c.name == "test2")))[0][0]) == 49452374328)
def test_upgrade_no_subnets(self):
    alembic_command.upgrade(self.config, '552b213c2b8c')
    results = self.connection.execute(
        select([self.ip_policy_cidrs])).fetchall()
    self.assertEqual(len(results), 0)
from sqlalchemy import create_engine, MetaData, Table
from sqlalchemy.sql import text, select
from sqlalchemy.exc import NoSuchTableError
from collections import OrderedDict

engine = create_engine(
    'oracle+cx_oracle://SIT_XM122_ORDERS1:[email protected]:1521/hbqa',
    echo=True)
metadata = MetaData(bind=engine)
cm_blacklist_direct = Table('cm_blacklist_direct', metadata, autoload=True)

ins = cm_blacklist_direct.insert().values(fundcode='PE0461', hboneno='000002')
ins01 = cm_blacklist_direct.insert()
ins02 = "insert into cm_blacklist_direct values (:fundcode, :hboneno)"
result = engine.execute(text(ins02), fundcode='PE0463', hboneno='000005')
print(result.is_insert)

s = select([cm_blacklist_direct])
result = engine.execute(s)
for row in result:
    print(row[cm_blacklist_direct.c.fundcode])
result.close()
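# text() statements also accept a list of parameter dicts for an
# executemany-style bulk insert; a minimal sketch against the same table
# (the fundcode/hboneno values here are made up):
engine.execute(text(ins02), [
    {'fundcode': 'PE0464', 'hboneno': '000006'},
    {'fundcode': 'PE0465', 'hboneno': '000007'},
])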
def related_to_metric_groups(current_skyline_app, base_name, metric_id):
    """
    Returns a dict of all the metric_groups that a metric is part of.
    """
    current_skyline_app_logger = current_skyline_app + 'Log'
    current_logger = logging.getLogger(current_skyline_app_logger)

    related_to_metric_groups_dict = {}
    related_to_metric_groups_dict['metric'] = base_name
    related_to_metric_groups_dict['metric_id'] = metric_id
    related_to_metric_groups_dict['related_to_metrics'] = {}

    # Initialise engine so the checks below cannot hit an unbound name if
    # get_engine() raises.
    engine = None
    try:
        engine, fail_msg, trace = get_engine(current_skyline_app)
        if fail_msg != 'got MySQL engine':
            current_logger.error('error :: related_to_metric_groups :: could not get a MySQL engine fail_msg - %s' % str(fail_msg))
        if trace != 'none':
            current_logger.error('error :: related_to_metric_groups :: could not get a MySQL engine trace - %s' % str(trace))
    except Exception as err:
        current_logger.error(traceback.format_exc())
        current_logger.error('error :: related_to_metric_groups :: could not get a MySQL engine - %s' % str(err))

    if engine:
        try:
            metric_group_table, fail_msg, trace = metric_group_table_meta(current_skyline_app, engine)
            if fail_msg != 'metric_group meta reflected OK':
                current_logger.error('error :: related_to_metric_groups :: could not get metric_group_table_meta fail_msg - %s' % str(fail_msg))
            if trace != 'none':
                current_logger.error('error :: related_to_metric_groups :: could not get metric_group_table_meta trace - %s' % str(trace))
        except Exception as err:
            current_logger.error(traceback.format_exc())
            current_logger.error('error :: related_to_metric_groups :: metric_group_table_meta - %s' % str(err))
        try:
            connection = engine.connect()
            if metric_id:
                stmt = select([metric_group_table]).where(
                    metric_group_table.c.related_metric_id == metric_id).order_by(
                        metric_group_table.c.avg_coefficient.desc())
            else:
                stmt = select([metric_group_table])
            results = connection.execute(stmt)
            for row in results:
                group_metric_id = row['metric_id']
                group_base_name = None
                try:
                    group_base_name = get_base_name_from_metric_id(current_skyline_app, group_metric_id)
                except Exception as err:
                    current_logger.error('error :: related_to_metric_groups :: get_base_name_from_metric_id failed to determine base_name from metric_id: %s - %s' % (
                        str(group_metric_id), str(err)))
                if group_base_name:
                    related_to_metric_groups_dict['related_to_metrics'][group_base_name] = dict(row)
            connection.close()
        except Exception as err:
            current_logger.error(traceback.format_exc())
            current_logger.error('error :: related_to_metric_groups :: failed to build metric_groups dict - %s' % str(err))

    if engine:
        engine_disposal(current_skyline_app, engine)

    # Normalise the row values: Decimals to floats, datetimes to strings,
    # and shifted_counts from bytes back to a dict.
    related = related_to_metric_groups_dict['related_to_metrics']
    for related_metric in list(related.keys()):
        for key in list(related[related_metric].keys()):
            value = related[related_metric][key]
            if 'decimal.Decimal' in str(type(value)):
                related[related_metric][key] = float(value)
            if 'datetime.datetime' in str(type(value)):
                related[related_metric][key] = str(value)
            if key == 'shifted_counts':
                try:
                    shifted_counts_str = related[related_metric][key].decode('utf-8')
                    shifted_counts = literal_eval(shifted_counts_str)
                except AttributeError:
                    shifted_counts = related[related_metric][key]
                related[related_metric][key] = shifted_counts
        # Remap the metric_id and related_metric_id for clarity
        related[related_metric]['related_to_metric_id'] = related[related_metric]['metric_id']
        related[related_metric]['metric_id'] = metric_id
        del related[related_metric]['related_metric_id']

    return related_to_metric_groups_dict
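# Example call (the app name, metric and id here are hypothetical):
#
# related = related_to_metric_groups('webapp', 'stats.sites.web.rpm', 123)
#
# related['related_to_metrics'] then maps each related base_name to its
# metric_group row, with Decimals cast to floats and datetimes to strings.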
def test_admin_tools(self):
    """Test the battle functionality."""
    from animeu.app import db
    from animeu.models import \
        User, FavouritedWaifu, WaifuPickBattle, ELORankingCalculation
    from animeu.auth.logic import hash_password
    with self.server_thread.app.app_context():
        user = User(email=AdminToolsTests.TEST_EMAIL,
                    username="******",
                    is_admin=True,
                    password_hash=hash_password(
                        AdminToolsTests.TEST_PASSWORD))
        db.session.add(user)
        db.session.commit()
        user = User.query.get(user.id)
        self.browser.get(self.url_for("auth_bp.login"))
        perform_login_expecting_success(self.browser,
                                        AdminToolsTests.TEST_EMAIL,
                                        AdminToolsTests.TEST_PASSWORD)
        self.browser.find_element_by_xpath(
            "//a[contains(., 'Admin')]").click()
        wait_for_visible(self.browser, ".admin-page")
        with self.subTest("Can seed database with battles"):
            self.move_to_admin_tab(self.browser, "Battles")
            self.perform_action_and_assert_completed()
            self.browser.refresh()
            number_of_entries = \
                self.maybe_get_number_of_entries_in_table(self.browser)
            self.assertIsNotNone(
                number_of_entries,
                "Could not find the table summary information.")
            self.assertGreaterEqual(1000, number_of_entries)
            db_entry_count = db.engine.execute(
                select([func.count(WaifuPickBattle.id)])).scalar()
            self.assertEqual(number_of_entries, db_entry_count)
        with self.subTest("Can use all the sorting controls"):
            self.assert_all_sort_controls_work_in_table()
        with self.subTest("Can generate the ELO rankings"):
            self.move_to_admin_tab(self.browser, "ELO")
            self.perform_action_and_assert_completed()
            self.browser.refresh()
            rankings_count = db.engine.execute(
                select([func.count(ELORankingCalculation.id)])).scalar()
            self.assertEqual(1, rankings_count)
        with self.subTest("Can see users table"):
            self.move_to_admin_tab(self.browser, "Users")
            self.assert_all_sort_controls_work_in_table()
        with self.subTest("Can see favourited waifus"):
            db.session.add(
                FavouritedWaifu(user_id=user.id,
                                date=datetime.now(),
                                character_name="Tim",
                                order=1))
            db.session.commit()
            self.move_to_admin_tab(self.browser, "Favourited Waifus")
            tims_name = self.browser.find_element_by_xpath(
                "//td[contains(., 'Tim')]")
            self.assertIsNotNone(
                tims_name,
                "Expected to find the favourited 'Tim' in table")
        with self.subTest("Can delete favourited character"):
            self.browser.find_element_by_css_selector(
                "button.delete-button").click()
            wait_for_visible(self.browser, "button.delete-button",
                             invert=True)
            favourited_characters_count = db.engine.execute(
                select([func.count(FavouritedWaifu.id)])).scalar()
            self.assertEqual(0, favourited_characters_count)
def addNotification(conn, uid, server, metric, rowid, device, windowsize,
                    timestamp, acknowledged, seen):
    """Add Notification

    :param conn: SQLAlchemy Connection object for executing SQL
    :type conn: sqlalchemy.engine.Connection
    :param uid: Notification uid
    :param server: Metric server
    :param metric: Metric uid
    :param rowid: Metric Data row id
    :param device: Device id (notification_settings.uid)
    :param windowsize: Window size (seconds)
    :param timestamp: Metric Data timestamp
    :param acknowledged:
    :param seen:
    :returns: Result
    :rtype: sqlalchemy.engine.ResultProxy
    """
    result = None
    with conn.begin():
        # Secure a write lock on the notification table. Other processes
        # attempting to access the notification table will be blocked until
        # the lock is released. This is to ensure that in the time between
        # the first SELECT and the followup INSERT there are no other
        # potentially conflicting INSERTs that could result in duplicated
        # notifications. Meanwhile, other processes may execute concurrent
        # inserts to the metric table.
        conn.execute("LOCK TABLES notification WRITE, metric READ;")
        try:
            # The new notification is potentially a duplicate if there exists
            # an unseen notification for the same metric and server created
            # within the requested windowsize.
            query = (select([func.count(schema.notification.c.uid)])
                     .select_from(schema.notification.outerjoin(
                         schema.metric,
                         schema.metric.c.uid == schema.notification.c.metric))
                     .where((schema.metric.c.server == server) &
                            (schema.notification.c.device == device) &
                            (schema.notification.c.seen == 0) &
                            (func.date_add(
                                schema.notification.c.timestamp,
                                text("INTERVAL :windowsize SECOND")) >
                             timestamp)))

            if conn.execute(query, windowsize=windowsize).scalar() == 0:
                # The previous query yielded no results, so the notification
                # is unique according to our constraints. Insert the new
                # notification details.
                ins = (schema.notification.insert()  # pylint: disable=E1120
                       .values(uid=uid,
                               metric=metric,
                               device=device,
                               windowsize=windowsize,
                               timestamp=timestamp,
                               acknowledged=acknowledged,
                               seen=seen,
                               rowid=rowid))
                try:
                    result = conn.execute(ins)
                except IntegrityError:
                    result = None
        finally:
            conn.execute("UNLOCK TABLES;")  # Release the table lock.

    return result
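# A minimal usage sketch (all argument values hypothetical):
#
# result = addNotification(conn,
#                          uid=str(uuid.uuid4()),
#                          server="example.com",
#                          metric=metricUid,
#                          rowid=1000,
#                          device=deviceUid,
#                          windowsize=3600,
#                          timestamp=datetime.datetime.utcnow(),
#                          acknowledged=0,
#                          seen=0)
#
# result is None when the notification was suppressed as a duplicate or
# failed the insert; otherwise it is the insert's ResultProxy.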