async def fastraid(self, ctx, tier: typing.Optional[Tier], *, time: Time()):
    """Shortcut to schedule a raid"""
    active_events = count_rows(self.conn, "Raids", ctx.guild.id)
    # host_id will not be set for private bots, which are therefore uncapped.
    if self.host_id and active_events >= self.event_limit:
        limit_msg = _(
            "Due to limited resources you may only post up to {0} concurrent raids."
        ).format(self.event_limit)
        await ctx.send(limit_msg)
        return
    raid_name = ctx.invoked_with
    if raid_name == "fastraid":
        raid_name = _("unknown raid")
    if tier is None:
        tier = await Tier.channel_converter(ctx.channel)
    # Roster handling is skipped whenever the tier string mentions 1 or 2.
    use_roster = not ('1' in tier or '2' in tier)
    raid_id = await self.raid_command(ctx, raid_name, tier, "", time,
                                      roster=use_roster)
    self.raids.append(raid_id)
    await self.bot.get_cog('CalendarCog').update_calendar(ctx.guild.id)
async def fastraid(self, ctx, *, time: Time()):
    """Shortcut to schedule a raid"""
    active_raids = count_rows(self.conn, "Raids", ctx.guild.id)
    # host_id will not be set for private bots, which are therefore uncapped.
    if self.host_id and active_raids > 4:
        await ctx.send(
            _("Due to limited resources you may only post up to 5 concurrent raids."
              ))
        return
    raid_name = _("unknown raid") if ctx.invoked_with == "fastraid" else ctx.invoked_with
    try:
        tier = Tier().converter(ctx.channel.name)
    except commands.BadArgument:
        # Channel name carries no tier hint; warn briefly and assume tier 1.
        warning = _("Channel name does not specify tier.") + "\n" + _(
            "Defaulting to tier 1.")
        await ctx.send(warning, delete_after=10)
        tier = 'T1'
    # Roster handling is skipped whenever the tier string mentions 1 or 2.
    use_roster = not ('1' in tier or '2' in tier)
    raid_id = await self.raid_command(ctx, raid_name, tier, _("All"), time,
                                      roster=use_roster)
    self.raids.append(raid_id)
def test_count_rows(self):
    """count_rows should execute the count query and return fetchone's scalar."""
    with mock.patch('database.sqlite3') as mocksql:
        connection = mocksql.connect()
        # Repeated connect().cursor() chains yield the same child mock,
        # so binding it once is equivalent to the chained form.
        cursor = mocksql.connect().cursor()
        cursor.fetchone.return_value = (256, )
        result = database.count_rows(connection, sql.count_table_rows(),
                                     'items')
        cursor.execute.assert_called_with('SELECT count(*) FROM items;', ())
        self.assertEqual(result, 256)
def tell_me_progress(number_of_headers=None):
    """Continuously render a progress bar tracking the global `progress` counter.

    Args:
        number_of_headers: total number of work items (the bar's maximum).
            When None, it is looked up via database.count_rows. The optional
            parameter is backward compatible and fixes the caller that spawns
            this function as Thread(target=tell_me_progress,
            args=(number_of_headers,)), which previously died with a
            TypeError because the function accepted no arguments.

    NOTE: loops forever; intended to run in a background/daemon thread.
    """
    global progress
    if number_of_headers is None:
        number_of_headers = database.count_rows("http_headers")
    progress_bar = ProgressBar(number_of_headers, bar_length=100)
    progress_bar.start()
    while True:
        progress_bar.update(progress)
        # Poll twice per second; `progress` is advanced by worker threads.
        time.sleep(0.5)
async def raid(self, ctx, name, tier: Tier, *, time: Time()):
    """Schedules a raid"""
    active_raids = count_rows(self.conn, "Raids", ctx.guild.id)
    # host_id will not be set for private bots, which are therefore uncapped.
    if self.host_id and active_raids > 4:
        await ctx.send(
            _("Due to limited resources you may only post up to 5 concurrent raids."
              ))
        return
    self.raids.append(await self.raid_command(ctx, name, tier, _("All"),
                                              time))
async def meetup(self, ctx, name, *, time: Time()):
    """Schedules a meetup"""
    active_events = count_rows(self.conn, "Raids", ctx.guild.id)
    # host_id will not be set for private bots, which are therefore uncapped.
    if self.host_id and active_events >= self.event_limit:
        limit_msg = _(
            "Due to limited resources you may only post up to {0} concurrent raids."
        ).format(self.event_limit)
        await ctx.send(limit_msg)
        return
    event_id = await self.raid_command(ctx, name, "", "", time)
    self.raids.append(event_id)
    await self.bot.get_cog('CalendarCog').update_calendar(ctx.guild.id)
async def raid(self, ctx, name, tier: typing.Optional[Tier], *, time: Time()):
    """Schedules a raid"""
    active_events = count_rows(self.conn, "Raids", ctx.guild.id)
    # host_id will not be set for private bots, which are therefore uncapped.
    if self.host_id and active_events >= self.event_limit:
        limit_msg = _(
            "Due to limited resources you may only post up to {0} concurrent raids."
        ).format(self.event_limit)
        await ctx.send(limit_msg)
        return
    # Fall back to the tier implied by the channel when none was given.
    raid_tier = tier if tier is not None else await Tier.channel_converter(
        ctx.channel)
    raid_id = await self.raid_command(ctx, name, raid_tier, "", time)
    self.raids.append(raid_id)
    await self.bot.get_cog('CalendarCog').update_calendar(ctx.guild.id)
def run():
    """Fetch all untagged external javascript rows and fan them out to
    worker threads, with a separate progress-reporter thread.
    """
    global total
    # Row count used both as the progress bar's maximum and for `total`.
    # NOTE(review): second argument's meaning (distinct/filtered count?)
    # depends on database.count_rows — verify against its definition.
    number_of_headers = database.count_rows('domain_has_javascripts', True)
    db, cursor = database.get_mysql_db_cursor()
    # External scripts whose hash is not yet in aau.tag_cms_history.
    cursor.execute("SELECT domain_id, url "
                   "FROM " + database.database +
                   ".domain_has_javascripts WHERE is_external = 1 AND javascript_hash NOT IN (SELECT javascript_hash FROM aau.tag_cms_history) ORDER BY javascript_hash DESC")
    rows = cursor.fetchall()
    cursor.close()
    db.close()
    # NOTE(review): verify tell_me_progress accepts one positional argument;
    # a zero-argument definition would make this thread die with TypeError.
    threading.Thread(target=tell_me_progress, args=(number_of_headers,), ).start()
    total = number_of_headers - len(rows)
    # Keep fewer than 10 worker threads busy (active_count minus this thread).
    while True:
        if threading.active_count() - 1 < 10:
            # NOTE(review): args=(rows.pop()) has no trailing comma, so the
            # popped (domain_id, url) row itself becomes the args tuple and
            # run_process is called as run_process(domain_id, url) — add a
            # comma if run_process expects the whole row. Also, rows.pop()
            # raises IndexError once the list is exhausted (the loop only
            # ends by crashing), and this busy-wait never sleeps.
            threading.Thread(target=run_process, args=(rows.pop()), ).start()
def populate_tree(some_sql, conn, some_tree, some_store):
    """Insert one tree row per item found in the database, with a price
    column converted to the detected currency type."""
    total_items = database.count_rows(conn, sql.count_table_rows(), 'items')
    # Item ids are 1-based, hence the shifted range.
    for item_number in range(1, total_items + 1):
        row = sql.execute_fetchone_sql(conn, some_sql, str(item_number),
                                       some_store)
        try:
            item_id = row[0]
            item_name = row[1]
            item_value = row[2]
            converted_value = convert_currency(item_value)
            cur_type = inspecto_gadget(converted_value)
            some_tree.insert('', 'end', item_id, text=item_name)
            img_tag(some_tree, item_id, cur_type)
            some_tree.set(item_id, 'price', converted_value[cur_type])
        # TODO: Consider better error handling. This is a silent pass. Not good.
        except TypeError:
            continue
def import_traffic(year: int, traffic_type: int):
    """Replace one year of one traffic type in the traffic_fact table.

    Pipeline: truncate the source table, load the source file into it,
    truncate and refill the staging table, enrich staging rows (station
    ids, direction codes, time columns), delete the matching year/type
    slice from traffic_fact, then copy staging into the fact table.
    Progress and errors are reported through streamlit (`st`).

    Args:
        year: calendar year of the source file to import.
        traffic_type: 1 = motorized traffic (miv), 2 = slow traffic;
            used as the key into the lookup dicts below.
    """

    def transfer_staging2fact():
        # Copy staging rows into traffic_fact, then refresh per-station
        # info and the station availability flags. Returns True on success.
        ok = True
        tools.log('Copy miv traffic records from staging to miv_traffic table')
        cmd = qry['traffic_staging_fact']
        ok = db.execute_non_query(cmd, db.conn)
        if ok:
            cmd = qry[update_info_query[traffic_type]]
            ok = db.execute_non_query(cmd, db.conn)
        if ok:
            cmd = qry['reset_station_flags']
            ok = db.execute_non_query(cmd, db.conn)
        if ok:
            # NOTE(review): only the last statement's result is kept, so a
            # failure of the first two flag updates would go unnoticed.
            cmd = qry['update_station_flags'].format('miv_flag', 1)
            ok = db.execute_non_query(cmd, db.conn)
            cmd = qry['update_station_flags'].format('velo_data_flag', 2)
            ok = db.execute_non_query(cmd, db.conn)
            cmd = qry['update_station_flags'].format('fuss_data_flag', 3)
            ok = db.execute_non_query(cmd, db.conn)
        if ok:
            st.info('Statement executed.')
        else:
            st.error('Statement failed.')
        return ok

    # cmd = qry['last_miv_observation']
    # result = get_single_value(cmd, conn, 'max_dat')
    # st.info('Most recent observation in miv_traffic: ' + result)

    ### Main
    ok = True
    # Lookup tables keyed by traffic_type (1 = miv, 2 = slow traffic).
    update_info_query = {1: 'update_miv_info', 2: 'update_slow_info'}
    source_table = {1: 'miv_traffic_source', 2: 'slow_traffic_source'}
    source_file = {
        1: cn.source_miv_file_name.format(year),
        2: cn.source_slow_file_name.format(year)
    }
    # Column lists expected in the downloaded source files.
    source_fields = {
        1: [
            'SiteCode', 'SiteName', 'DirectionName', 'LaneCode', 'LaneName',
            'Date', 'TimeFrom', 'TimeTo', 'ValuesApproved', 'ValuesEdited',
            'TrafficType', 'Total', 'MR', 'PW', 'PW+', 'Lief', 'Lief+',
            'Lief+Aufl.', 'LW', 'LW+', 'Sattelzug', 'Bus', 'andere', 'Year',
            'Month', 'Weekday', 'HourFrom'
        ],
        2: [
            'SiteCode', 'SiteName', 'DirectionName', 'LaneCode', 'LaneName',
            'Date', 'TimeFrom', 'TimeTo', 'ValuesApproved', 'ValuesEdited',
            'TrafficType', 'Total', 'Year', 'Month', 'Weekday', 'HourFrom'
        ]
    }
    source_staging_transfer_query = {
        1: 'miv_traffic_source_staging',
        2: 'slow_traffic_source_staging'
    }
    # NOTE(review): the value for key 2 is the boolean expression
    # `traffic_type > 1` (i.e. True), not the string 'traffic_type > 1';
    # interpolated into the delete_rows_where SQL below it yields
    # "True and year = ..." — presumably missing quotes. Verify.
    traffic_type_criteria = {1: 'traffic_type = 1', 2: traffic_type > 1}
    # Row count before the import, to report how many rows were added.
    row_count_start = db.count_rows("select * from traffic_fact", db.conn)
    # delete all records from the miv_traffic_source table
    if ok:
        cmd = all_qry.qry['truncate_table'].format(source_table[traffic_type])
        ok = db.execute_non_query(cmd, db.conn)
        if ok:
            st.info(f'Table {source_table[traffic_type]} was initialized.')
        else:
            st.error(
                f'Table {source_table[traffic_type]} could not be deleted.')
    if ok:
        df, ok = read_source_file(source_file[traffic_type])
    if ok:
        ok = save_db_table(source_table[traffic_type], df,
                           source_fields[traffic_type])
    # delete all rows from the staging table
    if ok:
        cmd = all_qry.qry['truncate_table'].format('traffic_staging')
        ok = db.execute_non_query(cmd, db.conn)
        if ok:
            st.info(f'Table {"traffic_staging"} was initialized.')
        else:
            st.error(f'Table {"traffic_staging"} could not be deleted.')
    # copy the source data to the staging table, some fields are removed and
    # counts are simplified, e.g. pw and pw with anhänger are summed
    # there is a new count for pw and lieferwagen and for lastwagen, lastwagen
    # with anhänger and sattelschlepper so light and heavy traffic can be
    # easily distinguished.
    if ok:
        ok = transfer_source2staging(
            source_staging_transfer_query[traffic_type])
    # get the station_id from the station table
    if ok:
        cmd = qry['traffic_update_station_id']
        ok = db.execute_non_query(cmd, db.conn)
    # append new direction names to the lookup table
    if ok:
        cmd = qry['traffic_update_direction_codes']
        ok = db.execute_non_query(cmd, db.conn)
    # update direction id field in traffic_staging table
    if ok:
        cmd = qry['traffic_update_direction_id']
        ok = db.execute_non_query(cmd, db.conn)
    # update time fields
    if ok:
        cmd = qry['update_traffic_time_columns']
        ok = db.execute_non_query(cmd, db.conn)
    # NOTE(review): this unconditionally resets ok, masking any failure above —
    # looks like leftover debugging. Verify before removing.
    ok = True
    if ok:
        # Clear the slice of traffic_fact this import will replace.
        cmd = all_qry.qry['delete_rows_where'].format(
            'traffic_fact',
            f'{traffic_type_criteria[traffic_type]} and year = {year}')
        st.write(cmd)
        ok = db.execute_non_query(cmd, db.conn)
        if ok:
            st.info(
                f'Table {"traffic_fact"} was initialized for year and traffic type.'
            )
        else:
            # NOTE(review): message names traffic_staging but the statement
            # targeted traffic_fact — likely a copy-paste slip.
            st.error(
                f'Table {"traffic_staging"} could not be initialized for year and traffic type.'
            )
    if ok:
        ok = transfer_staging2fact()
    if ok:
        row_count_end = db.count_rows("select * from traffic_fact", db.conn)
        # NOTE(review): "where" should read "were" in this user-facing string.
        st.info(
            f'{row_count_end - row_count_start} rows where successfully imported'
        )
        df = db.execute_query(qry['import_result_summary'], db.conn)
        st.write("Summary")
        st.table(df)
    else:
        st.error(
            "The import could not be completed, check log above for error messages"
        )