def log(self, type_log, item='0'):
    log = Log(client=self.id, type=type_log, item=item)
    logging.debug(f'type: {type_log} files: {item}')
    # change this time if you want to receive repeated log more often
    interval_min = datetime.now() - timedelta(minutes=INTERVAL_LOG)
    previous_log = Log.objects(
        client=self.id,
        type=type_log,
        item=str(','.join(item)),
        date__gte=interval_min)
    if not previous_log:
        new_log = Log.objects.create(
            client=self.id, type=type_log, item=str(','.join(item)))
        attrs = [self.mail, self.hostname, str(type_log), str(','.join(item))]
        logging.debug(attrs)
        launcher(attrs, remote=self.remote)
def save_log(message, created_at, user_id):
    """Save a log entry to the database."""
    logs_exists = session.query(Log).filter(
        Log.created_at == created_at).count()
    if not logs_exists:
        new_log = Log(message=message,
                      created_at=created_at,
                      user_id=int(user_id))
        session.add(new_log)
        session.commit()
        logger.debug(
            f'Log created at {created_at} by user {user_id} has been saved!')
def create_log(timestamp, challenger_hp, opponent_hp, action, bid):
    if get_battle(bid) is None:
        return None
    new_log = Log(timestamp=timestamp,
                  challenger_hp=challenger_hp,
                  opponent_hp=opponent_hp,
                  action=action,
                  bid=bid)
    db.session.add(new_log)
    db.session.commit()
    return new_log
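# --- Illustrative sketch (not from the source): a minimal Flask-SQLAlchemy model that
# create_log() above appears to assume. Column types and the in-memory SQLite URI are
# guesses for demonstration only.
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///:memory:'
db = SQLAlchemy(app)


class Log(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    timestamp = db.Column(db.DateTime, nullable=False)
    challenger_hp = db.Column(db.Integer)
    opponent_hp = db.Column(db.Integer)
    action = db.Column(db.String(64))
    bid = db.Column(db.Integer)  # battle id; likely a foreign key in the real schema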
def log():
    ''' this method shows all logs '''
    logs = Log.objects()
    print(
        tabulate(
            map(
                lambda x: [
                    str(x[0].id),
                    Client.objects(id=str(x[0].id), name__ne=None).first(),
                    x[1],
                    x[2],
                    x[3]
                ],
                logs.all().no_dereference().values_list(
                    'client', 'date', 'item', 'type')),
            ['Client ID', 'Client', 'Date', 'Item', 'Type']))
def update(tile_id):
    if request.method == 'POST':
        tile = db.session.query(Tile).filter_by(id=tile_id).first()
        if tile is not None:
            tile.color_id = (tile.color_id + 1) % 2
            db.session.add(tile)
            log = Log(
                ip_address=request.remote_addr,
                date_time=datetime.utcnow().isoformat(),
                tile_id=tile_id,
                new_color_id=tile.color_id
            )
            db.session.add(log)
            db.session.commit()
    return jsonMain()
def schedule_next_run():
    time_str = ':{:02d}'.format(random.randint(0, 59))
    # time_str = ':10'
    schedule.clear()
    Log.debug(function='reliability', message=f"Scheduled for {time_str}")
    schedule.every().hour.at(time_str).do(metric_report)


if __name__ == "__main__":
    schedule_next_run()
    while True:
        try:
            schedule.run_pending()
        except Exception:
            # Script crashed, let's restart it!
            traceback.print_exc()
            exc_type, exc_value, exc_traceback = sys.exc_info()
            Log.error('realtime',
                      '\n'.join(traceback.format_exception_only(exc_type, exc_value)))
            print('Restarting script')
            curr_time = datetime.datetime.now().time().strftime("%H:%M:%S")
            min_time = datetime.datetime.strptime(curr_time, '%H:%M:%S').time().minute
            sleep_time = (60 - min_time) * 60
            time.sleep(sleep_time)
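# --- Illustrative sketch (not from the source): the scheduling pattern used above, reduced
# to a standalone script. `job` stands in for metric_report(); assumes the third-party
# `schedule` package is installed.
import random
import time

import schedule


def job():
    # placeholder for metric_report(); reschedules itself at a new random minute
    print("collecting metrics")
    schedule_next_run()
    return schedule.CancelJob  # drop the job instance that just fired


def schedule_next_run():
    minute = ':{:02d}'.format(random.randint(0, 59))
    schedule.clear()
    schedule.every().hour.at(minute).do(job)


if __name__ == "__main__":
    schedule_next_run()
    while True:
        schedule.run_pending()
        time.sleep(1)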
def import_to_db(log_data_list):
    for dict_row in log_data_list:
        log = Log(dict_row.get('ip'),
                  dict_row.get('method'),
                  dict_row.get('code'))
        db_session.add(log)
        db_session.commit()
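# --- Illustrative sketch (not from the source): the dict shape implied by the .get() calls
# above, plus a minimal SQLAlchemy Log model and db_session so import_to_db() can run
# against an in-memory SQLite database. All names and column types here are assumptions.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()


class Log(Base):
    __tablename__ = 'logs'
    id = Column(Integer, primary_key=True)
    ip = Column(String)
    method = Column(String)
    code = Column(String)

    def __init__(self, ip, method, code):
        # positional constructor matching the Log(ip, method, code) call in import_to_db()
        self.ip = ip
        self.method = method
        self.code = code


engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)
db_session = sessionmaker(bind=engine)()

# Example input; the row shape is inferred from the .get() keys used above.
sample_rows = [{'ip': '127.0.0.1', 'method': 'GET', 'code': '200'}]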
def observer(self, text):
    obs = Log(log=text)
    db.session.add(obs)
    db.session.commit()
async def clan(self,
               context,
               clanname,
               recheck=True,
               send_message=True,
               channel_name: str = None):
    channel = (await self.get_channel(
        context, channel_name)) if channel_name else context.message.channel
    tocheck = None
    for clan in self.allclans:
        if clanname.lower() in clan[0].lower() or clanname.lower() in clan[2]:
            tocheck = clan
            break
    if recheck:
        await channel.send("Checking raidlogs, back in a bit!")
        await self.monitor_clans([tocheck])
    message = ""
    for raid in Raid.select().where(Raid.name == "dreadsylvania",
                                    Raid.end == None,
                                    Raid.clan_name == tocheck[0]):
        message = "__**STATUS UPDATE FOR {}**__ \n".format(tocheck[0].upper())
        summary = json.loads(raid.summary)
        kills = {"forest": 1000, "village": 1000, "castle": 1000}
        for line in summary:
            m = kills_pattern.match(line.replace(",", ""))
            if m:
                kills[m.group(2).lower()] -= int(m.group(1))
        message += "{}/{}/{} kills remaining\n\n".format(
            kills["forest"], kills["village"], kills["castle"])

        message += "__FOREST__ \n"
        if kills["forest"]:
            if not Log.select().where(
                    Log.raid == raid,
                    Log.action == RaidAction.DreadUnlock,
                    Log.data == "{\"location\": \"attic of the cabin\"}").exists():
                message += "**Cabin attic needs unlocking** \n"
            if Log.select().where(
                    Log.raid == raid,
                    Log.action == RaidAction.DreadUnlock,
                    Log.data == "{\"location\": \"fire watchtower\"}").exists():
                message += "Watchtower open, you can grab freddies if you like \n"
            if Log.select().where(
                    Log.raid == raid,
                    Log.action == RaidAction.DreadGotItem,
                    Log.data == "{\"item\": \"Dreadsylvanian auditor's badge\"}").exists():
                message += "~~Auditor's badge claimed~~ \n"
            else:
                message += "Auditor's badge available (Cabin -> Basement -> Lockbox) \n"
            if Log.select().where(
                    Log.raid == raid,
                    Log.action == RaidAction.DreadBanishElement,
                    Log.data == "{\"element\": \"spooky\", \"location\": \"forest\"}").exists():
                message += "~~Intricate music box parts claimed~~ \n"
            else:
                message += "Intricate music box parts available (Cabin -> Attic -> Music Box as AT (also banishes spooky from forest)) \n"
            if Log.select().where(
                    Log.raid == raid,
                    Log.action == RaidAction.DreadGotItem,
                    Log.data == "{\"item\": \"blood kiwi\"}").exists():
                message += "~~Blood kiwi claimed~~ \n"
            else:
                message += "Blood kiwi available (Tree, Root Around -> Look Up + Climb -> Stomp) \n"
            if Log.select().where(
                    Log.raid == raid,
                    Log.action == RaidAction.DreadGotItem,
                    Log.data == "{\"item\": \"chunk of moon-amber\"}").exists():
                message += "~~Moon-amber claimed~~ \n"
            else:
                message += "Moon-amber available (Tree -> Climb -> Shiny Thing (requires muscle class)) \n"
        else:
            message += "~~Forest fully cleared~~ \n"

        message += "__VILLAGE__ \n"
        if kills["village"]:
            if Log.select().where(
                    Log.raid == raid,
                    Log.action == RaidAction.DreadUnlock,
                    Log.data == "{\"location\": \"schoolhouse\"}").exists():
                message += "Schoolhouse is open, go get your pencils! \n"
            if Log.select().where(
                    Log.raid == raid,
                    Log.action == RaidAction.DreadUnlock,
                    Log.data == "{\"location\": \"master suite\"}").exists():
                message += "Master suite is open, grab some eau de mort? \n"
            if Log.select().where(
                    Log.raid == raid,
                    Log.action == RaidAction.DreadHangee).exists():
                message += "~~Hanging complete~~ \n"
            else:
                message += "Hanging available (Square, Gallows -> Stand on Trap Door + Gallows -> Pull Lever) \n"
        else:
            message += "~~Village fully cleared~~ \n"

        message += "__CASTLE__ \n"
        if kills["castle"]:
            if not Log.select().where(
                    Log.raid == raid,
                    Log.action == RaidAction.DreadUnlock,
                    Log.data == "{\"location\": \"lab\"}").exists():
                message += "**Lab needs unlocking** \n"
            if Log.select().where(
                    Log.raid == raid,
                    Log.action == RaidAction.DreadMachineFix).exists():
                machine_uses = Log.select().where(
                    Log.raid == raid,
                    Log.action == RaidAction.DreadMachineUse).count()
                left = 3 - machine_uses
                if left:
                    message += "{} skill{} available.\n".format(
                        left, "" if left == 1 else "s")
                else:
                    message += "~~All skills claimed~~ \n"
            else:
                message += "Machine needs repairing (with skull capacitor) \n"
            if Log.select().where(
                    Log.raid == raid,
                    Log.action == RaidAction.DreadGotItem,
                    Log.data == "{\"item\": \"roast beast\"}").exists():
                message += "~~Dreadful roast claimed~~ \n"
            else:
                message += "Dreadful roast available (Great Hall -> Dining Room -> Grab roast) \n"
            if Log.select().where(
                    Log.raid == raid,
                    Log.action == RaidAction.DreadGotItem,
                    Log.data == "{\"item\": \"wax banana\"}").exists():
                message += "~~Wax banana claimed~~ \n"
            else:
                message += "Wax banana available (Great Hall -> Dining Room -> Levitate (requires myst class)) \n"
            if Log.select().where(
                    Log.raid == raid,
                    Log.action == RaidAction.DreadGotItem,
                    Log.data == "{\"item\": \"stinking agaric\"}").exists():
                message += "~~Stinking agaricus claimed~~ \n"
            else:
                message += "Stinking agaricus available (Dungeons -> Guard Room -> Break off bits) \n"
        else:
            message += "~~Castle fully cleared~~ \n"

    if channel_name:
        await context.send("Sending summary to {}".format(channel.name))
    if send_message:
        await channel.send(message)
    else:
        return message
async def summary(self,
                  context,
                  recheck=True,
                  send_message=True,
                  channel_name: str = None,
                  description: str = None):
    """
    Post a summary of all the Dreadsylvania instances currently being monitored.

    :param channel_name: Channel to post the summary to. If not specified, the bot will respond to you in a PM
    :param description: Text to appear inline with the summary.
    :return:
    """
    channel = (await self.get_channel(
        context, channel_name)) if channel_name else context.message.channel
    if recheck:
        await channel.send("Checking raidlogs, back in a bit!")
        await self.monitor_clans(self.clans)
    message = "__DREAD STATUS__\n"
    if description is not None:
        message += "{}\n\n".format(description)
    for raid in Raid.select().where(
            Raid.name == "dreadsylvania", Raid.end == None,
            Raid.clan_name << [x[0] for x in self.clans]):
        skip_clan = False
        for clan in excluded_clans:
            if raid.clan_id in clan:
                skip_clan = True
        if skip_clan is True:
            print("Skipping " + raid.clan_name)
            continue
        else:
            summary = json.loads(raid.summary)
            kills = {"forest": 1000, "village": 1000, "castle": 1000}
            for line in summary:
                m = kills_pattern.match(line.replace(",", ""))
                if m:
                    kills[m.group(2).lower()] -= int(m.group(1))
            extra = None
            if Log.select().where(
                    Log.raid == raid,
                    Log.action == RaidAction.DreadMachineFix).exists():
                machine_uses = Log.select().where(
                    Log.raid == raid,
                    Log.action == RaidAction.DreadMachineUse).count()
                left = 3 - machine_uses
                extra = " ({} skill{} left)".format(
                    left, "" if left == 1 else "s")
            else:
                extra = " (needs capacitor)"
            message += "**{}**: {}/{}/{}{}\n".format(
                raid.clan_name, kills["forest"], kills["village"],
                kills["castle"], extra or "")
    message += "\n"
    if channel_name:
        await context.send("Sending summary to {}".format(channel.name))
    if send_message:
        await channel.send(message)
    else:
        return message
async def skills(self,
                 context,
                 recheck=True,
                 send_message=True,
                 since: str = "2019-06-06",
                 limit: int = None,
                 channel_name: str = None):
    channel = await self.get_channel(
        context, channel_name) if channel_name else context.channel
    if recheck:
        await channel.send("Checking raidlogs, back in a bit!")
        await self.monitor_clans(self.clans)
    since = datetime.strptime(
        since, "%Y-%m-%d") if since is not None else datetime.now() - timedelta(days=365)
    if limit is None:
        limit = len(self.clans) * 3
    elif limit == 0:
        limit = None

    Skills = Log.alias()
    skills_query = Skills.select(
        Skills.user_id,
        (fn.COUNT(Skills.id) + fn.IFNULL(PriorActivity.skills, 0)).alias("skills"))\
        .join_from(Skills, PriorActivity, JOIN.LEFT_OUTER, on=(Skills.user_id == PriorActivity.id))\
        .join_from(Skills, Raid)\
        .where(Skills.action == RaidAction.DreadMachineUse,
               Raid.start >= since,
               Raid.clan_name << [x[0] for x in self.clans])\
        .group_by(Skills.user_id)\
        .alias("sq")

    right_joined_skills_query = PriorActivity.select(
        PriorActivity.id.alias("user_id"),
        fn.IFNULL(PriorActivity.skills, skills_query.c.skills).alias("skills"))\
        .join_from(PriorActivity, skills_query, JOIN.LEFT_OUTER,
                   on=(skills_query.c.user_id == PriorActivity.id))

    skills_query = skills_query | right_joined_skills_query  # DIY FULL OUTER JOIN

    kills_query = Log.select(
        Log.user_id,
        Log.username.alias("Username"),
        (fn.SUM(Log.turns) + fn.IFNULL(PriorActivity.kills, 0)).alias("kills"))\
        .join_from(Log, PriorActivity, JOIN.LEFT_OUTER, on=(Log.user_id == PriorActivity.id))\
        .join_from(Log, Raid)\
        .where(Log.action == RaidAction.Victory,
               Raid.name == "dreadsylvania",
               Raid.start >= since,
               Raid.clan_name << [x[0] for x in self.clans])\
        .group_by(Log.user_id)

    rankings_query = Log.select(
        kills_query.c.username.alias("Username"),
        kills_query.c.kills.alias("Kills"),
        fn.IFNULL(skills_query.c.skills, 0).alias("Skills"),
        (kills_query.c.kills / (fn.IFNULL(skills_query.c.skills, 0) + 0.5)).alias("KillsPerSkill"))\
        .join_from(Log, skills_query, JOIN.LEFT_OUTER, on=(Log.user_id == skills_query.c.user_id))\
        .join_from(Log, kills_query, JOIN.LEFT_OUTER, on=(Log.user_id == kills_query.c.user_id))\
        .group_by(kills_query.c.user_id)\
        .order_by(SQL("KillsPerSkill").desc())

    rankings = [
        x for x in [r for r in rankings_query.dicts()]
        if x["Username"] and not x["Username"].lower() in excluded_list
    ]

    table = tabulate(rankings, headers="keys")
    table = table[:1900]
    message = "__SKILL RANKINGS__ \n```\n{}\n```".format(table)
    if channel_name:
        await context.send("Sending skills to {}".format(channel.name))
    if send_message:
        await channel.send(message)
    else:
        return message
class LogsDialog(wx.Dialog, SimplePanel):
    ONE_DAY = 24 * 60 * 60 - 1  # 23 hours 59 minutes 59 seconds

    def __init__(self, parent, id, controller):
        resources = controller.resources
        self.codes = controller.codes
        wx.Dialog.__init__(self, parent, title=self.codes.get('dLogs_title'),
                           style=wx.CAPTION | wx.YES_NO | wx.YES_DEFAULT)
        self._log = Log(resources.getDbManager())
        self._list = self.CreateLists(self.codes)
        searchPanel = self.CreateSearchPanel(self.codes)

        buttons = wx.BoxSizer(wx.HORIZONTAL)
        buttons.Add(wx.Button(self, wx.ID_FILE, label=self.codes.get('dLogs_export')),
                    flag=wx.RIGHT, border=10)
        buttons.Add(wx.Button(self, wx.ID_CANCEL, label=self.codes.get('dLogs_close')))

        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(searchPanel, flag=wx.ALL | wx.EXPAND, border=10)
        sizer.Add(self._list, flag=wx.EXPAND)
        sizer.Add(buttons, flag=wx.ALL, border=10)

        self.SetFocus()
        self.SetSizer(sizer)
        self.Fit()

        self.Bind(wx.EVT_BUTTON, self.OnFind, id=wx.ID_FIND)
        self.Bind(wx.EVT_BUTTON, self.OnCancel, id=wx.ID_CANCEL)
        self.Bind(wx.EVT_BUTTON, self.OnExport, id=wx.ID_FILE)
        self.Bind(wx.EVT_KEY_UP, self.OnListCharacter)

    def CreateLists(self, codes):
        list = wx.ListCtrl(self, id=wx.ID_ANY, style=wx.LC_REPORT | wx.LC_SINGLE_SEL)
        list.SetMinSize((1100, 300))
        list.InsertColumn(col=0, heading=codes.get('dLogs_ID'), format=wx.LIST_FORMAT_LEFT, width=40)
        list.InsertColumn(col=1, heading=codes.get('dLogs_time'), format=wx.LIST_FORMAT_LEFT, width=200)
        list.InsertColumn(col=2, heading=codes.get('dLogs_star_name'), format=wx.LIST_FORMAT_LEFT, width=120)
        list.InsertColumn(col=3, heading=codes.get('dLogs_star_ra'), format=wx.LIST_FORMAT_LEFT, width=80)
        list.InsertColumn(col=4, heading=codes.get('dLogs_star_dec'), format=wx.LIST_FORMAT_LEFT, width=80)
        list.InsertColumn(col=5, heading=codes.get('dLogs_text'), format=wx.LIST_FORMAT_LEFT, width=200)
        list.InsertColumn(col=6, heading=codes.get('dLogs_ra'), format=wx.LIST_FORMAT_LEFT, width=80)
        list.InsertColumn(col=7, heading=codes.get('dLogs_dec'), format=wx.LIST_FORMAT_LEFT, width=80)
        list.InsertColumn(col=8, heading=codes.get('dLogs_alt'), format=wx.LIST_FORMAT_LEFT, width=80)
        list.InsertColumn(col=9, heading=codes.get('dLogs_focus'), format=wx.LIST_FORMAT_LEFT)
        list.InsertColumn(col=10, heading=codes.get('dLogs_temp_in'), format=wx.LIST_FORMAT_LEFT)
        list.InsertColumn(col=11, heading=codes.get('dLogs_temp_out'), format=wx.LIST_FORMAT_LEFT)
        list.InsertColumn(col=12, heading=codes.get('dLogs_status'), format=wx.LIST_FORMAT_LEFT, width=300)
        return list

    def CreateSearchPanel(self, codes):
        sizer = wx.FlexGridSizer(1, 12, 5, 5)
        self.name = wx.TextCtrl(self, size=(120, -1))
        self.startDate = wx.DatePickerCtrl(self, dt=wx.DateTime().UNow(), size=(120, -1),
                                           style=wx.DP_DEFAULT | wx.DP_ALLOWNONE | wx.DP_SHOWCENTURY)
        self.endDate = wx.DatePickerCtrl(self, dt=wx.DateTime.UNow(), size=(120, -1),
                                         style=wx.DP_DEFAULT | wx.DP_ALLOWNONE | wx.DP_SHOWCENTURY)
        sizer.Add(self.CreateCaption(codes.get('dLogs_name')),
                  flag=wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
        sizer.Add(self.name)
        sizer.AddSpacer((40, -1))
        sizer.Add(self.CreateCaption(codes.get('dLogs_from')),
                  flag=wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
        sizer.Add(self.startDate)
        sizer.AddSpacer((40, -1))
        sizer.Add(self.CreateCaption(codes.get('dLogs_to')),
                  flag=wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
        sizer.Add(self.endDate)
        sizer.AddSpacer((20, -1))
        sizer.Add(wx.Button(self, wx.ID_FIND, label=codes.get('dLogs_find')),
                  proportion=2, flag=wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
        return sizer

    def FillList(self, logs):
        self._list.DeleteAllItems()
        for log in logs:
            index = self._list.InsertStringItem(sys.maxint, str(log['id']))
            self._list.SetStringItem(index, 1, str(log['time']))
            self._list.SetStringItem(index, 2, unicode(log['name']))
            self._list.SetStringItem(index, 3, str(log['sRa']))
            self._list.SetStringItem(index, 4, str(log['sDec']))
            self._list.SetStringItem(index, 5, unicode(log['msg']))
            self._list.SetStringItem(index, 6, str(log['ra']))
            self._list.SetStringItem(index, 7, str(log['dec']))
            self._list.SetStringItem(index, 8, str(log['alt']))
            self._list.SetStringItem(index, 9, str(log['focus']))
            self._list.SetStringItem(index, 10, str(log['temp_in']))
            self._list.SetStringItem(index, 11, str(log['temp_out']))
            self._list.SetStringItem(index, 12, self.parseAlarms(log))

    def parseAlarms(self, log):
        status = str(log['status'])
        if status:
            statuses = status.split(',')
        else:
            statuses = []
        list = []
        for alarm in statuses:
            list.append(self.codes.get('al' + alarm))
        alarms = ",".join(list)
        return alarms

    def getStartDay(self, dateTime):
        """ Return first second of the day """
        return self.getUnixTimeStamp(dateTime)

    def getEndDay(self, dateTime):
        """ Return last second of the day """
        return self.getUnixTimeStamp(dateTime) + self.ONE_DAY

    def getUnixTimeStamp(self, dateTime):
        day, month, year = dateTime.GetDay(), dateTime.GetMonth() + 1, dateTime.GetYear()
        date = datetime.date(year, month, day)
        return time.mktime(date.timetuple())

    def OnFind(self, event):
        event.Skip()
        self.findInLog()

    def OnListCharacter(self, event):
        event.Skip()
        if event.GetKeyCode() == wx.WXK_RETURN:
            self.findInLog()

    def getPeriod(self):
        start = self.startDate.GetValue()
        end = self.endDate.GetValue()
        startDate = self.getStartDay(start)
        endDate = self.getEndDay(end)
        return startDate, endDate

    def findInLog(self):
        name = self.name.GetValue()
        startDate, endDate = self.getPeriod()
        logs = self._log.readLog(name, startDate, endDate)
        self.FillList(logs)

    def OnCancel(self, event):
        event.Skip()
        self.EndModal(wx.ID_CANCEL)

    def OnExport(self, event):
        event.Skip()
        path = join(os.getenv('HOME'), 'Desktop')
        dialog = wx.FileDialog(self, message="File select", defaultDir=path,
                               defaultFile="temp.log",
                               style=wx.SAVE | wx.OVERWRITE_PROMPT)
        if dialog.ShowModal() == wx.ID_OK:
            # Open the file for write, write, close
            self.filename = dialog.GetFilename()
            self.dirname = dialog.GetDirectory()
            filehandle = codecs.open(os.path.join(self.dirname, self.filename), 'w', 'utf-8')
            for data in self._log._log:
                line = self.parseDict(data)
                filehandle.write(';'.join(line))
                filehandle.write('\n')
            filehandle.close()
        dialog.Destroy()

    def parseDict(self, log):
        line = []
        line.append(str(log['id']))
        line.append(str(log['time']))
        line.append(unicode(log['name']))
        line.append(str(log['sRa']))
        line.append(str(log['sDec']))
        line.append(unicode(log['msg']))
        line.append(str(log['ra']))
        line.append(str(log['dec']))
        line.append(str(log['alt']))
        line.append(str(log['focus']))
        line.append(str(log['temp_in']))
        line.append(str(log['temp_out']))
        line.append(str(log['status']))
        return line
def metric_report():
    # Get timestamp
    date_now = date.today().strftime("%Y%m%d")
    time_now = datetime.datetime.now().time().strftime("%H:%M:%S")
    utc_dt_aware = datetime.datetime.now(datetime.timezone.utc)
    Log.info(function='reliability', message="I'm awake! Running realtime metrics")

    rt_mta_bus, rt_mta_mnr, rt_mta_lirr, rt_septa_bus, rt_septa_rail, rt_cta, rt_sf, rt_wmata_bus = rt_inputs.grab_rt_feeds()
    Log.debug(function='reliability', message='Fetched realtime feeds')

    # Compile all the metrics
    timestamp = []
    region = []
    agency = []
    mode = []
    abs_delay = []
    early_delay = []
    late_delay = []
    otp = []
    fraction = []

    # New York
    if rt_mta_bus is not None:
        timestamp.append(timezone_converter(utc_dt_aware, "America/New_York"))
        region.append("nyc")
        agency.append("MTA")
        mode.append("Bus")
        abs_delay.append(avg_delay_abs(rt_mta_bus))
        early_delay.append(avg_delay_early(rt_mta_bus))
        late_delay.append(avg_delay_late(rt_mta_bus))
        otp.append(on_time_percent(rt_mta_bus))
        fraction.append(rt_fraction(rt_mta_bus))
    if rt_mta_mnr is not None:
        timestamp.append(timezone_converter(utc_dt_aware, "America/New_York"))
        region.append("nyc")
        agency.append("MNR")
        mode.append("Rail")
        abs_delay.append(avg_delay_abs(rt_mta_mnr))
        early_delay.append(avg_delay_early(rt_mta_mnr))
        late_delay.append(avg_delay_late(rt_mta_mnr))
        otp.append(on_time_percent(rt_mta_mnr))
        fraction.append(rt_fraction(rt_mta_mnr))
    if rt_mta_lirr is not None:
        timestamp.append(timezone_converter(utc_dt_aware, "America/New_York"))
        region.append("nyc")
        agency.append("LIRR")
        mode.append("Rail")
        abs_delay.append(avg_delay_abs(rt_mta_lirr))
        early_delay.append(avg_delay_early(rt_mta_lirr))
        late_delay.append(avg_delay_late(rt_mta_lirr))
        otp.append(on_time_percent(rt_mta_lirr))
        fraction.append(rt_fraction(rt_mta_lirr))

    # Philadelphia
    if rt_septa_bus is not None:
        timestamp.append(timezone_converter(utc_dt_aware, "America/New_York"))
        region.append("philadelphia")
        agency.append("SEPTA")
        mode.append("Bus")
        abs_delay.append(avg_delay_abs(rt_septa_bus))
        early_delay.append(avg_delay_early(rt_septa_bus))
        late_delay.append(avg_delay_late(rt_septa_bus))
        otp.append(on_time_percent(rt_septa_bus))
        fraction.append(rt_fraction(rt_septa_bus))
    if rt_septa_rail is not None:
        timestamp.append(timezone_converter(utc_dt_aware, "America/New_York"))
        region.append("philadelphia")
        agency.append("SEPTA")
        mode.append("Rail")
        abs_delay.append(avg_delay_abs(rt_septa_rail))
        early_delay.append(avg_delay_early(rt_septa_rail))
        late_delay.append(avg_delay_late(rt_septa_rail))
        otp.append(on_time_percent(rt_septa_rail))
        fraction.append(rt_fraction(rt_septa_rail))

    # Chicago
    if rt_cta is not None:
        timestamp.append(timezone_converter(utc_dt_aware, "America/Chicago"))
        region.append("chicago")
        agency.append("CTA")
        mode.append("Bus")
        abs_delay.append(np.nan)
        early_delay.append(np.nan)
        late_delay.append(np.nan)
        otp.append(100 - (rt_cta[rt_cta.delay == True].shape[0] / rt_cta.shape[0] * 100))
        fraction.append(np.nan)

    # Washington
    if rt_wmata_bus is not None:
        timestamp.append(timezone_converter(utc_dt_aware, "America/New_York"))
        region.append("dc")
        agency.append("WMATA")
        mode.append("Bus")
        abs_delay.append(avg_delay_abs(rt_wmata_bus))
        early_delay.append(avg_delay_early(rt_wmata_bus))
        late_delay.append(avg_delay_late(rt_wmata_bus))
        otp.append(on_time_percent(rt_wmata_bus))
        fraction.append(rt_fraction(rt_wmata_bus))

    # San Francisco
    if rt_sf is not None:
        sf_agency_list = rt_sf['Id'].unique().tolist()
        for sf_agency in sf_agency_list:
            df_temp = rt_sf[rt_sf["Id"] == sf_agency].copy()
            if "Bus" in df_temp["mode"].values:
                timestamp.append(timezone_converter(utc_dt_aware, "America/Vancouver"))
                region.append("sf")
                agency.append(df_temp["Agency"].iloc[0])
                mode.append("Bus")
                abs_delay.append(avg_delay_abs(df_temp[df_temp["mode"] == "Bus"]))
                early_delay.append(avg_delay_early(df_temp[df_temp["mode"] == "Bus"]))
                late_delay.append(avg_delay_late(df_temp[df_temp["mode"] == "Bus"]))
                otp.append(on_time_percent(df_temp[df_temp["mode"] == "Bus"]))
                fraction.append(rt_fraction(df_temp[df_temp["mode"] == "Bus"]))
            if "Rail" in df_temp["mode"].values:
                timestamp.append(timezone_converter(utc_dt_aware, "America/Vancouver"))
                region.append("sf")
                agency.append(df_temp["Agency"].iloc[0])
                mode.append("Rail")
                abs_delay.append(avg_delay_abs(df_temp[df_temp["mode"] == "Rail"]))
                early_delay.append(avg_delay_early(df_temp[df_temp["mode"] == "Rail"]))
                late_delay.append(avg_delay_late(df_temp[df_temp["mode"] == "Rail"]))
                otp.append(on_time_percent(df_temp[df_temp["mode"] == "Rail"]))
                fraction.append(rt_fraction(df_temp[df_temp["mode"] == "Rail"]))

    reliability_metrics = {'timestamp': timestamp,
                           'region': region,
                           'agency': agency,
                           'mode': mode,
                           'delay_abs': abs_delay,
                           'delay_late': late_delay,
                           'delay_early': early_delay,
                           'otp': otp,
                           'fraction': fraction}
    reliability_metrics = pd.DataFrame(reliability_metrics)

    # TODO: Do some cleaning here!
    reliability_metrics['delay_abs'] = reliability_metrics['delay_abs'].where(
        reliability_metrics['delay_abs'].astype(float) > -1800)
    reliability_metrics['delay_early'] = reliability_metrics['delay_early'].where(
        reliability_metrics['delay_early'].astype(float) < 1800)
    reliability_metrics['delay_late'] = reliability_metrics['delay_late'].where(
        reliability_metrics['delay_late'].astype(float) < 1800)
    reliability_metrics['otp'] = reliability_metrics['otp'].where(
        reliability_metrics['otp'].astype(float) > 20)

    # Write out a csv file with the metrics
    reliability_metrics.to_csv('reliability_metric.csv', mode='a', header=False, index=False)

    # Convert to none data types
    reliability_metrics = reliability_metrics.where(pd.notnull(reliability_metrics), None)
    Log.debug(function='reliability', message="Appended metrics to file")

    # Put it into the database
    for idx, row in reliability_metrics.iterrows():
        Realtime.create(
            timestamp=row['timestamp'],
            region=row['region'],
            agency=row['agency'],
            mode=row['mode'],
            delay_abs=row['delay_abs'],
            delay_early=row['delay_early'],
            delay_late=row['delay_late'],
            otp=row['otp'],
            fraction=row['fraction']
        )
    Log.info(function='reliability', message="Inserted reliability metrics")

    # Make it sleep until the next hour if it is within the same hour
    curr_time = datetime.datetime.now().time().strftime("%H:%M:%S")
    hr_time = datetime.datetime.strptime(curr_time, '%H:%M:%S').time().hour
    min_time = datetime.datetime.strptime(curr_time, '%H:%M:%S').time().minute
    if hr_time == datetime.datetime.strptime(time_now, '%H:%M:%S').time().hour:
        sleep_time = (60 - min_time) * 60
        if sleep_time < 0:
            sleep_time = 0
    else:
        sleep_time = 0
    time.sleep(sleep_time)
    schedule_next_run()
    return schedule.CancelJob
async def hod(self, context, recheck=True, channel_name: str = None):
    channel = (await self.get_channel(
        context, channel_name)) if channel_name else context.message.channel
    await channel.send("Checking raidlogs, back in a bit!")
    await self.monitor_clans(self.allclans)
    message = ""
    for raid in Raid.select().where(
            Raid.name == "dreadsylvania", Raid.end == None,
            Raid.clan_name == "The Hogs of Destiny"):
        message = "__**HOD BANISH STATUS**__ \n"
        for location in ["forest", "village", "castle"]:
            for element in [("stinky", "stench"), ("spooky", "spooky"),
                            ("sleazy", "sleaze"), ("hot", "hot"),
                            ("cold", "cold")]:
                data = "{{\"element\": \"{}\", \"location\": \"{}\"}}".format(
                    element[0], location)
                if not (location == "village" and element[1] in ["spooky", "stench"]):
                    if not Log.select().where(
                            Log.raid == raid,
                            Log.action == RaidAction.DreadBanishElement,
                            Log.data == data).exists():
                        message += "{} banish needed in {} {} \n".format(
                            str(element[1]).capitalize(),
                            str(location).capitalize(),
                            banish_locations[(location, element[1])])
        village_banishes = [
            Log.select().where(
                Log.raid == raid,
                Log.action == RaidAction.DreadBanishElement,
                Log.data == "{\"element\": \"spooky\", \"location\": \"village\"}").exists(),
            Log.select().where(
                Log.raid == raid,
                Log.action == RaidAction.DreadBanishElement,
                Log.data == "{\"element\": \"stinky\", \"location\": \"village\"}").exists(),
            Log.select().where(
                Log.raid == raid,
                Log.action == RaidAction.DreadBanishType,
                Log.data == "{\"location\": \"village\", \"type\": \"ghosts\"}").count(),
            Log.select().where(
                Log.raid == raid,
                Log.action == RaidAction.DreadBanishType,
                Log.data == "{\"location\": \"village\", \"type\": \"zombies\"}").count()
        ]
        if village_banishes[0]:
            if village_banishes[2] < 2:
                message += "{} ghost banish{} still needed. \n".format(
                    "One" if village_banishes[2] else "Two",
                    "" if village_banishes[2] else "es")
        elif village_banishes[1]:
            if village_banishes[3] < 2:
                message += "{} zombie banish{} still needed. \n".format(
                    "One" if village_banishes[3] else "Two",
                    "" if village_banishes[3] else "es")
        elif village_banishes[2]:
            message += "{} banish needed in {} {} \n".format(
                "Spooky", "Village", banish_locations[("village", "spooky")])
            if village_banishes[3] < 2:
                message += "One ghost banish still needed. \n"
        elif village_banishes[3]:
            message += "{} banish needed in {} {} \n".format(
                "Stench", "Village", banish_locations[("village", "stench")])
            if village_banishes[3] < 2:
                message += "One zombie banish still needed. \n"
        else:
            message += "Couldn't tell if this is a spooky ghost or a stench zombie instance. \n"
            message += "Either banish spooky {} or stench {} from the village. \n".format(
                banish_locations[("village", "spooky")],
                banish_locations[("village", "stench")])
        if message == "__**HOD BANISH STATUS**__ \n":
            message += "All banishes complete. \n"
            message += "(You may also want to run !clan hod)"
    if channel_name:
        await context.send("Sending summary to {}".format(channel.name))
    await channel.send(message)
class LogThread(object):
    """
    Separate thread for logging stuff.
    Logging starts in a separate thread as soon as a new instance is created or the
    _start() method is called. The timer can be stopped by calling the stop() method.
    """

    def __init__(self, resources):
        self._mutex = threading.RLock()
        logging.info('Starting logging thread')
        self._object = resources.object
        self._resources = resources
        db = self._resources.getDbManager()
        self._log = Log(db)
        self._message = Message(db)
        self._plc = self._resources.plcManager
        self._position = self._resources.plcManager.getPositionHelper()
        self._period = resources.config.getLoggingTime()
        self._altitude = None
        self._start()

    def _start(self):
        """ Starts the timer; the function re-arms itself in a loop. It should be
        interrupted by calling timer.cancel(), otherwise it keeps running like a daemon. """
        if self._plc.isSwitchedOn():
            self._doWork()
        self._timer = threading.Timer(self._period, self._start)
        self._timer.start()

    def stop(self):
        """ Thread-safe method for stopping the timer. It returns as soon as the timer
        thread finishes its work and the thread is closed. """
        with self._mutex:
            self._timer.cancel()
            self._timer.join()

    def _doWork(self):
        """ All logging work happens here. This method is called by the logging thread. """
        with self._mutex:
            if self._resources.plcManager.isConnected():
                self._log.setStarId(self._getStarId())
                self._log.setMsgId(self._getMsgId())
                self._log.setCurrentRaDec(*self._getCurrentRaDec())
                self._log.setCurrentFocus(self._getCurrentFocus())
                self._log.setTemperature(*self._getTemperature())
                self._log.setAlarmStatus(self._getAlarmStatus())
                self._log.setCurrentAltitude(self._getAltitude())
                self._log.writeToLog()

    def force(self):
        with self._mutex:
            self._timer.cancel()
            self._timer.join()
            self._start()

    def updatePeriod(self, time):
        """ Update the logging period, in seconds """
        self._period = time

    def _getStarId(self):
        """ Return the selected object id from the controller; if no object is selected, return None """
        object = self._resources.object
        return object.getId()

    def _getMsgId(self):
        """ The last stored message is used as the current message """
        id = self._message.getLastId()
        return id

    def _getCurrentRaDec(self):
        """ Current telescope positions are taken directly from the PLC """
        return self._position.getCurrentPosition()

    def _getAltitude(self):
        if self._object.selected():
            alt, az = self._object.getHorizontalPosition()
            return alt.real

    def _getCurrentFocus(self):
        """ Current telescope focus is taken directly from the PLC """
        focus = self._position.getFocus()
        return focus

    def _getTemperature(self):
        """ Current dome and telescope temperatures """
        return self._plc.getModeHelper().readTemperatures()

    def _getAlarmStatus(self):
        """ Alarm status as alarm codes separated by ',' """
        return self._plc.readAlarmStatus()
async def parse_clan_raid_logs(self, clan_details, message_stream=sys.stdout):
    clan_name, clan_id, aliases = clan_details
    kol = self.bot.kol
    clan = Clan(kol, id=clan_id)
    await clan.join()
    try:
        current = await clan.get_raids()
    except ClanPermissionsError:
        message_stream.write(
            "Skipping {} due to lack of basement permissions".format(clan_name))
        return
    try:
        previous = await clan.get_previous_raids()
    except:
        previous = []
    tasks = []
    created_raids = []
    updated_raids = []
    for data in tqdm(current + previous,
                     desc="Discovering previous raid logs in {}".format(clan_name),
                     file=message_stream,
                     unit="raid logs",
                     leave=False):
        raid = Raid.get_or_none(id=data.id)
        raids_list = updated_raids
        if raid is None:
            raid = Raid(id=data.id, name=data.name, clan_id=clan_id, clan_name=clan_name)
            raids_list = created_raids
        if data.events is None and raid.end is None:
            raid.start = data.start
            raid.end = data.end
            tasks += [asyncio.ensure_future(clan.get_raid_log(data.id))]
        if raid.is_dirty():
            raids_list.append(raid)
    Raid.bulk_create(created_raids, batch_size=50)
    Raid.bulk_update(updated_raids, fields=[Raid.start, Raid.end], batch_size=50)

    raids_data = current + [
        await t for t in tqdm(asyncio.as_completed(tasks),
                              desc="Loading previous raid logs in {}".format(clan_name),
                              unit="raid logs",
                              total=len(tasks),
                              leave=False,
                              file=message_stream,
                              ascii=False)
    ]

    with tqdm(raids_data,
              desc="Parsing raid logs in {}".format(clan_name),
              unit="raids",
              file=message_stream,
              ascii=False) as p:
        for data in p:
            raid = Raid.get_or_none(id=data.id)
            if raid is None:
                p.write("Something went wrong with raid {}".format(data.id))
                continue
            logs = []
            for category, events in data.events:
                category = category.rstrip(":")
                for event in events:
                    turns = int(event.data.pop("turns", 0))
                    event_data = json.dumps(event.data, sort_keys=True)
                    log = Log.get_or_none(Log.raid == raid,
                                          Log.category == category,
                                          Log.action == event.action,
                                          Log.username == event.username,
                                          Log.user_id == event.user_id,
                                          Log.data == event_data)
                    if log is None:
                        log = Log(
                            raid=raid,
                            category=category,
                            action=event.action,
                            username=event.username,
                            user_id=event.user_id,
                            turns=turns,
                            data=event_data,
                        )
                    elif log.turns != turns:
                        log.turns = turns
                        log.last_updated = time()
                    logs.append(log)
            with db.atomic():
                Log.delete().where(Log.raid == raid).execute()
                Log.bulk_create(logs, batch_size=50)
                raid.summary = json.dumps(data.summary)
                raid.save()