async def maybe_rotate_icons(self) -> None:
    """
    Call `rotate_icons` if the configured amount of time has passed since last rotation.

    We offset the calculated time difference into the future to avoid off-by-a-little-bit errors.
    Because there is work to be done before the timestamp is read and written, the next read will
    likely commence slightly under 24 hours after the last write.
    """
    log.debug("Checking whether it's time for icons to rotate.")

    cached_timestamp = await self.cache_information.get("last_rotation_timestamp")

    # Maiden case ~ never rotated.
    if cached_timestamp is None:
        await self.rotate_icons()
        return

    rotated_at = Arrow.utcfromtimestamp(cached_timestamp)
    elapsed = (Arrow.utcnow() - rotated_at) + timedelta(minutes=5)  # Forward offset, see docstring.

    log.trace(f"Icons last rotated at {rotated_at} (difference: {elapsed}).")

    if elapsed.days >= BrandingConfig.cycle_frequency:
        await self.rotate_icons()
async def _reload_tasks_from_redis(self) -> None:
    """Reload outstanding tasks from redis on startup, delete the task if the member has since left the server."""
    await self.bot.wait_until_guild_available()
    items = await self.task_cache.items()
    # Hoisted out of the loop: the guild is the same for every cached task, so there is
    # no need to call `self.bot.get_guild(Guild.id)` twice per iteration.
    guild = self.bot.get_guild(Guild.id)

    for key, value in items:
        member = guild.get_member(key)

        if not member:
            # Member isn't found in the cache; fall back to fetching from the API.
            try:
                member = await guild.fetch_member(key)
            except discord.errors.NotFound:
                log.debug(
                    f"Member {key} left the guild before we could schedule "
                    "the revoking of their streaming permissions."
                )
                await self.task_cache.delete(key)
                continue
            except discord.HTTPException:
                # Best-effort: skip this member rather than aborting the whole reload.
                log.exception(f"Exception while trying to retrieve member {key} from Discord.")
                continue

        revoke_time = Arrow.utcfromtimestamp(value)
        log.debug(f"Scheduling {member} ({member.id}) to have streaming permission revoked at {revoke_time}")
        self.scheduler.schedule_at(revoke_time, key, self._revoke_streaming_permission(member))
async def get_in_use_time(channel_id: int) -> t.Optional[timedelta]:
    """Return the duration `channel_id` has been in use. Return None if it's not in use."""
    log.trace(f"Calculating in use time for channel {channel_id}.")

    timestamp = await _caches.claim_times.get(channel_id)
    if not timestamp:
        # No claim time recorded for this channel, i.e. it is not in use.
        return None

    return arrow.utcnow() - Arrow.utcfromtimestamp(timestamp)
def execute_sfx_program(api_token, program, start_time, end_time, dimensions=None, resolution=60):
    """
    Execute an arbitrary SignalFlow program

    :param api_token: a valid SFX API query token (you can get this from the SignalFX dashboard)
    :param program: a valid signalflow program to execute
    :param start_time: beginning of program execution range, as an Arrow object
    :param end_time: end of program execution range, as an Arrow object
    :param dimensions: list of strings to group the returned timeseries by
    :param resolution: smallest time interval (in seconds) to evaluate the program on
        note: SignalFX has a maximum resolution of 1 minute, and only for the most recent data;
        setting a resolution higher than this (or even 1 minute for older data) will be ignored
    :returns: a list of (timestamp, data_points) tuples, where data_points is a dict of
        timeseries_name -> value
    """
    with signalfx.SignalFx().signalflow(api_token) as sfx:
        curr_time = start_time
        datapoints = []
        # Page through the requested range in chunks rather than one giant query.
        while curr_time < end_time:
            # To prevent overloading SignalFX we grab a maximum of 5 days worth of data at a time
            next_time = min(curr_time.shift(days=5), end_time)
            logger.info(f'Querying SignalFX from {curr_time} to {next_time}')
            raw_data = sfx.execute(
                program,
                # SignalFX operates on millisecond timescales
                start=curr_time.timestamp * 1000,
                stop=next_time.timestamp * 1000,
                resolution=resolution * 1000,
            )
            # Drain the stream fully first:
            # we can only call _make_ts_label after all of the entries in the
            # raw_data.stream() have been processed
            data_messages = [msg for msg in raw_data.stream() if isinstance(msg, DataMessage)]
            # Convert each message into (Arrow timestamp, {label: value}) and sort chronologically.
            new_datapoints = sorted([
                (Arrow.utcfromtimestamp(msg.logical_timestamp_ms / 1000), {
                    _make_ts_label(raw_data, key, dimensions): value
                    for key, value in msg.data.items()
                })
                for msg in data_messages
            ])
            # SignalFX sometimes gives us duplicate datapoints at the beginning of one chunk/the
            # start of the next chunk.  This doesn't play nicely with the metrics client so detect
            # and remove those here
            if datapoints and new_datapoints[0][0] == datapoints[-1][0]:
                new_datapoints = new_datapoints[1:]
            datapoints.extend(new_datapoints)
            curr_time = next_time
        return datapoints
def _transform_key(self, value): """ Truncates seconds and milliseconds from a long value of seconds from epoch :param value: An int value representing the number of seconds from epoch (timestmap) :type value: int :return: The value truncated to minutes :rtype: int """ if value is None: return None return Arrow.utcfromtimestamp(value).replace(second=0, microsecond=0).timestamp * 1000
def _transform_condition_parameter(self, source_value):
    """
    Truncates seconds and milliseconds from a long value of milliseconds from epoch

    :param source_value: An int value representing the number of milliseconds from epoch (timestamp)
    :type source_value: int
    :return: The value truncated to midnight of the same day, in milliseconds from epoch
    :rtype: int
    """
    if source_value is None:
        return None
    # Floor-divide to whole seconds, zero out the time-of-day fields, then back to milliseconds.
    return Arrow.utcfromtimestamp(source_value // 1000).replace(hour=0, minute=0, second=0, microsecond=0).timestamp * 1000
async def init_cog(self) -> None:
    """Initialise the metabase session."""
    expiry_time = await self.session_info.get("session_expiry")
    if expiry_time:
        expiry_time = Arrow.utcfromtimestamp(expiry_time)

    # The session is only usable if we have an expiry and it lies in the future.
    session_valid = expiry_time is not None and expiry_time >= arrow.utcnow()

    if not session_valid:
        # Force a refresh and end the task
        await self.refresh_session()
        return

    # Cached token is in date, so get it and schedule a refresh for later
    self.session_token = await self.session_info.get("session_token")
    self.headers["X-Metabase-Session"] = self.session_token
    self._session_scheduler.schedule_at(expiry_time, 0, self.refresh_session())
async def liststream(self, ctx: commands.Context) -> None:
    """Lists all non-staff users who have permission to stream."""
    # Holders of the video role who have no staff role at all.
    non_staff_members_with_stream = [
        member
        for member in ctx.guild.get_role(Roles.video).members
        if not any(role.id in STAFF_ROLES for role in member.roles)
    ]

    # List of tuples (UtcPosixTimestamp, str)
    # So that the list can be sorted on the UtcPosixTimestamp before the message is passed to the paginator.
    streamer_info = []
    for member in non_staff_members_with_stream:
        if revoke_time := await self.task_cache.get(member.id):
            # Member only has temporary streaming perms
            revoke_delta = Arrow.utcfromtimestamp(revoke_time).humanize()
            message = f"{member.mention} will have stream permissions revoked {revoke_delta}."
        else:
            message = f"{member.mention} has permanent streaming permissions."

        # If revoke_time is None use max timestamp to force sort to put them at the end
        streamer_info.append((revoke_time or Arrow.max.timestamp(), message))
    # NOTE(review): this excerpt ends here; streamer_info is presumably sorted and handed to a
    # paginator further down — confirm against the full file.
async def _reload_tasks_from_redis(self) -> None:
    """Reload outstanding tasks from redis on startup, delete the task if the member has since left the server."""
    await self.bot.wait_until_guild_available()
    guild = self.bot.get_guild(Guild.id)

    for user_id, timestamp in await self.task_cache.items():
        member = await get_or_fetch_member(guild, user_id)

        if not member:
            # The member is gone; their cached task is stale and can be dropped.
            log.debug("User with ID %d left the guild before their streaming permissions could be revoked.", user_id)
            await self.task_cache.delete(user_id)
            continue

        revoke_time = Arrow.utcfromtimestamp(timestamp)
        log.debug(f"Scheduling {member} ({member.id}) to have streaming permission revoked at {revoke_time}")
        self.scheduler.schedule_at(revoke_time, user_id, self._revoke_streaming_permission(member))
def arrow(date=None, tz=None):
    """
    Return an Arrow object from flexible `date` / `tz` inputs.

    :param date: a timestamp (int/float), a datetime, a timezone expression, or None.
    :param tz: an optional timezone expression applied to `date` (or to "now" when `date` is None).
    :return: an :class:`Arrow <arrow.Arrow>` object.
    """
    if date is None:
        # No date given: current time, in UTC or in the requested timezone.
        return utcnow() if tz is None else now(tz)
    else:
        if tz is None:
            # `date` may itself be a timezone expression (e.g. 'US/Pacific'); try that first.
            try:
                tz = parser.TzinfoParser.parse(date)
                return now(tz)
            except Exception:
                # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt propagate.
                pass

            if isinstance(date, (float, int)):
                return Arrow.utcfromtimestamp(date)

            return Arrow.fromdatetime(date)
        else:
            tz = parser.TzinfoParser.parse(tz)
            return Arrow.fromdatetime(date, tz)
def get(*args, **kwargs):
    '''Returns an :class:`Arrow <arrow.Arrow>` object based on flexible inputs.

    Usage::

        >>> import arrow

    **No inputs** to get current UTC time::

        >>> arrow.get()
        <Arrow [2013-05-08T05:51:43.316458+00:00]>

    **One str**, **float**, or **int**, convertible to a floating-point timestamp, to get
    that timestamp in UTC::

        >>> arrow.get(1367992474.293378)
        <Arrow [2013-05-08T05:54:34.293378+00:00]>

        >>> arrow.get(1367992474)
        <Arrow [2013-05-08T05:54:34+00:00]>

        >>> arrow.get('1367992474.293378')
        <Arrow [2013-05-08T05:54:34.293378+00:00]>

        >>> arrow.get('1367992474')
        <Arrow [2013-05-08T05:54:34+00:00]>

    **One str**, convertible to a timezone, or **tzinfo**, to get the current time in that
    timezone::

        >>> arrow.get('local')
        <Arrow [2013-05-07T22:57:11.793643-07:00]>

        >>> arrow.get('US/Pacific')
        <Arrow [2013-05-07T22:57:15.609802-07:00]>

        >>> arrow.get('-07:00')
        <Arrow [2013-05-07T22:57:22.777398-07:00]>

        >>> arrow.get(tz.tzlocal())
        <Arrow [2013-05-07T22:57:28.484717-07:00]>

    **One** naive **datetime**, to get that datetime in UTC::

        >>> arrow.get(datetime(2013, 5, 5))
        <Arrow [2013-05-05T00:00:00+00:00]>

    **One** aware **datetime**, to get that datetime::

        >>> arrow.get(datetime(2013, 5, 5, tzinfo=tz.tzlocal()))
        <Arrow [2013-05-05T00:00:00-07:00]>

    **Two** arguments, a naive or aware **datetime**, and a timezone expression (as above)::

        >>> arrow.get(datetime(2013, 5, 5), 'US/Pacific')
        <Arrow [2013-05-05T00:00:00-07:00]>

    **Two** arguments, both **str**, to parse the first according to the format of the second::

        >>> arrow.get('2013-05-05 12:30:45', 'YYYY-MM-DD HH:mm:ss')
        <Arrow [2013-05-05T12:30:45+00:00]>

    **Three or more** arguments, as for the constructor of a **datetime**::

        >>> arrow.get(2013, 5, 5, 12, 30, 45)
        <Arrow [2013-05-05T12:30:45+00:00]>
    '''
    arg_count = len(args)

    if arg_count == 0:
        return Arrow.utcnow()

    if arg_count == 1:
        arg = args[0]
        timestamp = None

        try:
            timestamp = float(arg)
        except (TypeError, ValueError):
            # Was a bare `except:`; `float()` only raises these for non-numeric input,
            # and narrowing keeps SystemExit/KeyboardInterrupt out of the swallow.
            pass

        # (int), (float), (str(int)) or (str(float)) -> from timestamp.
        if timestamp is not None:
            return Arrow.utcfromtimestamp(timestamp)

        # (datetime) -> from datetime.
        elif isinstance(arg, datetime):
            return Arrow.fromdatetime(arg)

        # (tzinfo) -> now, @ tzinfo.
        elif isinstance(arg, tzinfo):
            return Arrow.now(arg)

        # (str) -> now, @ tzinfo.
        elif isinstance(arg, str):
            _tzinfo = parser.TzinfoParser.parse(arg)
            return Arrow.now(_tzinfo)

        else:
            raise TypeError('Can\'t parse single argument type of \'{0}\''.format(type(arg)))

    elif arg_count == 2:
        arg_1, arg_2 = args[0], args[1]

        if isinstance(arg_1, datetime):
            # (datetime, tzinfo) -> fromdatetime @ tzinfo.
            if isinstance(arg_2, tzinfo):
                return Arrow.fromdatetime(arg_1, arg_2)
            # (datetime, str) -> fromdatetime @ tzinfo.
            elif isinstance(arg_2, str):
                _tzinfo = parser.TzinfoParser.parse(arg_2)
                return Arrow.fromdatetime(arg_1, _tzinfo)
            else:
                raise TypeError('Can\'t parse two arguments of types \'datetime\', \'{0}\''.format(
                    type(arg_2)))

        # (str, format) -> parsed.
        elif isinstance(arg_1, str) and isinstance(arg_2, str):
            dt = parser.DateTimeParser.parse(args[0], args[1])
            return Arrow.fromdatetime(dt)

        else:
            raise TypeError('Can\'t parse two arguments of types \'{0}\', \'{1}\''.format(
                type(arg_1), type(arg_2)))

    # 3+ args.
    else:
        return Arrow(*args, **kwargs)
async def get_closing_time(channel: discord.TextChannel, init_done: bool) -> t.Tuple[Arrow, ClosingReason]:
    """
    Return the time at which the given help `channel` should be closed along with the reason.

    `init_done` is True if the cog has finished loading and False otherwise.

    The time is calculated as follows:

    * If `init_done` is True or the cached time for the claimant's last message is unavailable,
      add the configured `idle_minutes_claimant` to the time the most recent message was sent.
    * If the help session is empty (see `is_empty`), do the above but with `deleted_idle_minutes`.
    * If either of the above is attempted but the channel is completely empty, close the channel
      immediately.
    * Otherwise, retrieve the times of the claimant's and non-claimant's last messages from the
      cache. Add the configured `idle_minutes_claimant` and `idle_minutes_others`, respectively,
      and choose the time which is furthest in the future.
    """
    log.trace(f"Getting the closing time for #{channel} ({channel.id}).")

    is_empty = await _message.is_empty(channel)
    # An empty session uses the (shorter) deletion timeout for the claimant offset.
    if is_empty:
        idle_minutes_claimant = constants.HelpChannels.deleted_idle_minutes
    else:
        idle_minutes_claimant = constants.HelpChannels.idle_minutes_claimant

    claimant_time = await _caches.claimant_last_message_times.get(channel.id)

    # The current session lacks messages, the cog is still starting, or the cache is empty.
    # In all of these cases, fall back to the channel's most recent message.
    if is_empty or not init_done or claimant_time is None:
        msg = await _message.get_last_message(channel)
        if not msg:
            log.debug(f"No idle time available; #{channel} ({channel.id}) has no messages, closing now.")
            return Arrow.min, ClosingReason.DELETED

        # Use the greatest offset to avoid the possibility of prematurely closing the channel.
        time = Arrow.fromdatetime(msg.created_at) + timedelta(minutes=idle_minutes_claimant)
        # NOTE(review): "MESSSAGE" presumably matches the ClosingReason member's spelling — do not "fix" here.
        reason = ClosingReason.DELETED if is_empty else ClosingReason.LATEST_MESSSAGE
        return time, reason

    claimant_time = Arrow.utcfromtimestamp(claimant_time)
    others_time = await _caches.non_claimant_last_message_times.get(channel.id)

    if others_time:
        others_time = Arrow.utcfromtimestamp(others_time)
    else:
        # The help session hasn't received any answers (messages from non-claimants) yet.
        # Set to min value so it isn't considered when calculating the closing time.
        others_time = Arrow.min

    # Offset the cached times by the configured values.
    others_time += timedelta(minutes=constants.HelpChannels.idle_minutes_others)
    claimant_time += timedelta(minutes=idle_minutes_claimant)

    # Use the time which is the furthest into the future.
    if claimant_time >= others_time:
        closing_time = claimant_time
        reason = ClosingReason.CLAIMANT_TIMEOUT
    else:
        closing_time = others_time
        reason = ClosingReason.OTHER_TIMEOUT

    log.trace(f"#{channel} ({channel.id}) should be closed at {closing_time} due to {reason}.")
    return closing_time, reason
parser.add_argument('-dir', '--directory', help='directory to use', action=FullPaths, type=is_dir)
parser.add_argument('-dots', help='include dot files', action='store_true')
args = parser.parse_args()

# Renamed from `dir`, which shadowed the builtin of the same name.
root_fs = OSFS(args.directory)
# NOTE(review): `dots` is not read in this excerpt; kept in case `accept` (defined elsewhere)
# reads it as a global — confirm before removing.
dots = args.dots

doc, tag, text = Doc().tagtext()

try:
    with tag('urlSet', xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"):
        # Files at the root of the tree.
        for name, stats in root_fs.listdirinfo(files_only=True):
            if accept(name):
                with tag('url'):
                    with tag('loc'):
                        text('http://www.example.com/%s' % quote(name))
                    with tag('lastmod'):
                        text(str(Arrow.utcfromtimestamp(stats['modified_time'].timestamp())))

        # Files in subdirectories.  walkdirs() yields the root first; skip it with next()
        # instead of the original manual skipFirst flag.
        dir_walker = root_fs.walkdirs()
        next(dir_walker, None)
        for sub_dir in dir_walker:
            if accept(sub_dir[1:None]):
                for name, stats in root_fs.listdirinfo(path=sub_dir, files_only=True):
                    if accept(name):
                        with tag('url'):
                            with tag('loc'):
                                text('http://www.example.com%s/%s' % (sub_dir, quote(name)))
                            with tag('lastmod'):
                                text(str(Arrow.utcfromtimestamp(stats['modified_time'].timestamp())))
finally:
    # Release the filesystem handle even if XML generation fails partway through.
    root_fs.close()