Example #1
	def test_pretty_eta(self):
		'''
		Test time_delta_to_legible_eta
		'''
		# test small deltas
		for i in range(0, 100):
			rand_delta = random.randint(0, 3600)
			time_delta_to_legible_eta(rand_delta, True)

		# test large deltas
		for i in range(0, 100):
			rand_delta = random.randint(0, 3600 * 24 * 2)
			time_delta_to_legible_eta(rand_delta, True)

		# test days only
		print(time_delta_to_legible_eta(3600 * 24 * 3, False))

		# test hours only
		print(time_delta_to_legible_eta(3600 * 12, False))

		# test minutes only
		print(time_delta_to_legible_eta(60 * 10, False))

		# test seconds only
		print(time_delta_to_legible_eta(30, False))

		# test with 0 seconds
		self.assertEqual(time_delta_to_legible_eta(0, False), 'just now')
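
The only hard assertion above is that a zero-second delta renders as 'just now'. For orientation, here is a minimal sketch of what a time_delta_to_legible_eta-style helper could look like; only the zero-delta behaviour is taken from the test, everything else (unit names, joining) is an assumption, not the project's actual implementation.

# Hypothetical sketch only: the real time_delta_to_legible_eta may format differently.
def legible_eta_sketch(time_delta: int, full_accuracy: bool) -> str:
    days, rem = divmod(int(time_delta), 24 * 3600)
    hours, rem = divmod(rem, 3600)
    minutes, seconds = divmod(rem, 60)

    parts = []
    for value, unit in ((days, 'day'), (hours, 'hour'), (minutes, 'minute'), (seconds, 'second')):
        if value:
            parts.append(f"{value} {unit}{'s' if value != 1 else ''}")

    if not parts:
        # matches the assertion in the test above
        return 'just now'

    # without full accuracy, keep only the largest unit
    return ', '.join(parts) if full_accuracy else parts[0]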
Example #2
def statistics(update, context):
    '''
	Responds to /stats command
	'''
    if rd.exists('converted-imgs'):
        imgs = int(rd.get('converted-imgs'))
    else:
        imgs = 0

    if rd.exists('chats'):
        chats = rd.get('chats')
        chats = len(chats.split(','))
    else:
        chats = 0

    sec_running = int(time.time()) - STARTUP_TIME
    runtime = time_delta_to_legible_eta(time_delta=sec_running,
                                        full_accuracy=False)

    msg = f'''📊 *Bot statistics*
	Images converted: {imgs}
	Unique chats seen: {chats}
	Bot started {runtime} ago
	'''

    context.bot.send_message(update.message.chat.id,
                             cleandoc(msg),
                             parse_mode='Markdown')
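
The /stats handler only reads the Redis keys 'converted-imgs' (an integer counter) and 'chats' (a comma-separated string). A hypothetical writer side, consistent with how the handler parses those keys, could look like the sketch below; record_conversion is an assumption for illustration, not part of the bot's actual code.

import redis

rd = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)

def record_conversion(chat_id: int):
    # hypothetical helper: bump the counter read by /stats
    rd.incr('converted-imgs')

    # 'chats' is parsed above with chats.split(','), so store it the same way
    chats = rd.get('chats') or ''
    chat_ids = set(filter(None, chats.split(',')))
    if str(chat_id) not in chat_ids:
        chat_ids.add(str(chat_id))
        rd.set('chats', ','.join(chat_ids))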
Example #3
    def schedule_call(unix_timestamp: int) -> int:
        # verify time isn't in the past
        if unix_timestamp <= int(time.time()):
            logging.warning(
                'schedule_call called with a timestamp in the past! Scheduling for t+3 seconds.'
            )
            unix_timestamp = int(time.time()) + 3

        # delta
        until_update = unix_timestamp - int(time.time())

        # convert to a datetime object
        next_update_dt = datetime.datetime.fromtimestamp(unix_timestamp)

        # schedule next API update, and we're done: next update will be scheduled after the API update
        scheduler.add_job(ll2_api_call,
                          'date',
                          run_date=next_update_dt,
                          args=[db_path, scheduler, bot_username, bot],
                          id=f'api-{unix_timestamp}')

        logging.debug(
            '🔄 Next API update in %s (%s)',
            time_delta_to_legible_eta(time_delta=until_update,
                                      full_accuracy=False), next_update_dt)

        return unix_timestamp
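
schedule_call is a thin wrapper around APScheduler's one-shot 'date' trigger. A standalone reduction of the same pattern, with a placeholder job in place of ll2_api_call:

import datetime
import time

from apscheduler.schedulers.background import BackgroundScheduler

def dummy_api_call():
    # stands in for ll2_api_call in this sketch
    print('API call fired at', datetime.datetime.now())

scheduler = BackgroundScheduler()
scheduler.start()

run_at = int(time.time()) + 3
scheduler.add_job(dummy_api_call,
                  'date',
                  run_date=datetime.datetime.fromtimestamp(run_at),
                  id=f'api-{run_at}')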
Example #4
	def test_time_delta_to_legible_eta(self):
		'''
		Test time_delta_to_legible_eta with random times
		'''

		# without full accuracy, large values
		for i in range(10):
			print(time_delta_to_legible_eta(
				time_delta=random.uniform(0, 3600 * 24 * 30),
				full_accuracy=False))

		# without full accuracy, small values
		for i in range(10):
			print(time_delta_to_legible_eta(
				time_delta=random.uniform(0, 3600 * 24),
				full_accuracy=False))

		# with full accuracy, large values
		for i in range(10):
			print(time_delta_to_legible_eta(
				time_delta=random.uniform(0, 3600 * 24 * 30),
				full_accuracy=True))

		# with full accuracy, small values
		for i in range(10):
			print(time_delta_to_legible_eta(
				time_delta=random.uniform(0, 3600 * 24),
				full_accuracy=True))

	def test_suffixed_readable_int(self):
		'''
		Test suffixed_readable_int with random integers
		'''
		for i in range(1000):
			rand_int = random.randint(0, 200)
			print(f'{rand_int} -> {suffixed_readable_int(rand_int)}')
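
suffixed_readable_int is only exercised by printing here, so its exact output format is not pinned down by the test. Judging by the name, it likely returns an ordinal string ('1st', '2nd', ...); the following sketch is an assumption of that behaviour, not the project's implementation.

def suffixed_readable_int_sketch(number: int) -> str:
    # hypothetical: format an integer as an English ordinal ("1st", "2nd", "3rd", "11th", ...)
    if number % 100 in (11, 12, 13):
        suffix = 'th'
    else:
        suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(number % 10, 'th')
    return f'{number}{suffix}'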
Example #5
def api_call_scheduler(db_path: str, scheduler: BackgroundScheduler,
                       ignore_60: bool, bot_username: str,
                       bot: 'telegram.bot.Bot') -> int:
    """
	Schedules upcoming API calls for when they'll be required.
	Calls are scheduled dynamically, based on the time until next notification.

	The function returns the timestamp for when the next API call should be run.
	Whenever an API call is performed, the next call should be scheduled.

	TODO improve checking for overlapping jobs, especially when notification checks
	are scheduled. Keep track of scheduled job IDs. LaunchBot-class in main thread?
	"""
    def schedule_call(unix_timestamp: int) -> int:
        # verify time isn't in the past
        if unix_timestamp <= int(time.time()):
            logging.warning(
                'schedule_call called with a timestamp in the past! Scheduling for t+3 seconds.'
            )
            unix_timestamp = int(time.time()) + 3

        # delta
        until_update = unix_timestamp - int(time.time())

        # convert to a datetime object
        next_update_dt = datetime.datetime.fromtimestamp(unix_timestamp)

        # schedule next API update, and we're done: next update will be scheduled after the API update
        scheduler.add_job(ll2_api_call,
                          'date',
                          run_date=next_update_dt,
                          args=[db_path, scheduler, bot_username, bot],
                          id=f'api-{unix_timestamp}')

        logging.debug(
            '🔄 Next API update in %s (%s)',
            time_delta_to_legible_eta(time_delta=until_update,
                                      full_accuracy=False), next_update_dt)

        return unix_timestamp

    def require_immediate_update(cursor: sqlite3.Cursor) -> tuple:
        '''
		Load previous time on startup to figure out if we need to update right now
		'''
        try:
            cursor.execute('SELECT last_api_update FROM stats')
        except sqlite3.OperationalError:
            return (True, None)

        last_update = cursor.fetchall()[0][0]
        if last_update in ('', None):
            return (True, None)

        if time.time() > last_update + UPDATE_PERIOD * 60 * 2:
            return (True, None)

        return (False, last_update)

    # debug print
    logging.debug('⏲ Starting api_call_scheduler...')

    # update period, in minutes TODO: remove and use raw minutes or hours
    UPDATE_PERIOD = 15

    # load the next upcoming launch from the database
    conn = sqlite3.connect(os.path.join(db_path, 'launchbot-data.db'))
    cursor = conn.cursor()

    # verify we don't need an immediate API update
    db_status = require_immediate_update(cursor)
    update_immediately, last_update = db_status[0], db_status[1]

    if update_immediately:
        logging.debug(
            '⚠️ DB outdated: scheduling next API update 5 seconds from now...')
        return schedule_call(int(time.time()) + 5)

    # if we didn't return above, no need to update immediately
    update_delta = int(time.time()) - last_update
    last_updated_str = time_delta_to_legible_eta(update_delta,
                                                 full_accuracy=False)

    logging.debug(f'🔀 DB up-to-date! Last updated {last_updated_str} ago.')

    # pull all launches with a net greater than or equal to notification window start
    select_fields = 'net_unix, launched, status_state'
    select_fields += ', notify_24h, notify_12h, notify_60min, notify_5min'
    notify_window = int(time.time()) - 60 * 5

    try:
        cursor.execute(
            f'SELECT {select_fields} FROM launches WHERE net_unix >= ?',
            (notify_window, ))
        query_return = cursor.fetchall()
    except sqlite3.OperationalError:
        query_return = set()

    conn.close()

    if len(query_return) == 0:
        logging.warning(
            '⚠️ No launches found for scheduling: running in 5 seconds...')
        os.rename(
            os.path.join(db_path, 'launchbot-data.db'),
            os.path.join(db_path,
                         f'launchbot-data-sched-error-{int(time.time())}.db'))

        return schedule_call(int(time.time()) + 5)

    # sort in-place by NET
    query_return.sort(key=lambda tup: tup[0])
    '''
	Create a list of notification send times, but also during launch to check for a postpone.
	- notification times, if not sent (60 seconds before)
	- 5 minutes after the launch is supposed to occur
	'''
    # notification times: 24 hours, 12 hours, 60 minutes, and 5 minutes before
    notif_times, time_map = set(), {
        0: 24 * 3600,
        1: 12 * 3600,
        2: 3600,
        3: 5 * 60
    }
    notif_time_map = dict()

    for launch_row in query_return:
        # don't use unverified launches for scheduling (status_state == TBD)
        launch_status = launch_row[2]
        if launch_status == 'TBD':
            continue

        # shortly after launch time for possible postpone/abort, if not launched
        if not launch_row[1] and time.time() - launch_row[0] < 60:
            notif_times.add(launch_row[0] + 5 * 60)

            check_time = launch_row[0] + 5 * 60
            if check_time not in notif_time_map.keys():
                notif_time_map[check_time] = {-1}
            else:
                notif_time_map[check_time].add(-1)

        for enum, notif_bool in enumerate(launch_row[3::]):
            # 0: 24h, 1: 12h, 2: 60m, 3: 5m
            if not notif_bool:
                # time for check: launch time - notification time - 60 (60s before)
                check_time = launch_row[0] - time_map[enum] - 60

                # if less than 60 sec until next check, pass if ignore_60 flag is set
                if check_time - int(time.time()) < 60 and ignore_60:
                    pass
                elif check_time < time.time():
                    pass
                else:
                    notif_times.add(check_time)

                    if check_time not in notif_time_map.keys():
                        notif_time_map[check_time] = {enum}
                    else:
                        notif_time_map[check_time].add(enum)

    # get time when next notification will be sent
    next_notif = min(notif_times)

    # logging for weird errors
    logging.debug(f"notif_times: {notif_times}")
    logging.debug(f"next_notif: {next_notif}")
    logging.debug(f"notif_time_map: {notif_time_map}")

    # 0: '24h', 1: '12h', 2: '60m', 3: '5m'
    next_notif_earliest_type = max(notif_time_map[next_notif])
    next_notif_type = {
        0: '24h',
        1: '12h',
        2: '60m',
        3: '5m',
        -1: 'LCHECK'
    }[next_notif_earliest_type]

    # time until next notif + send time
    until_next_notif = next_notif - int(time.time())
    next_notif_send_time = time_delta_to_legible_eta(
        time_delta=until_next_notif, full_accuracy=False)

    # debugging TODO remove
    logging.debug(
        f'next_notif_type: {next_notif_type} | until_next_notif: {until_next_notif}'
    )
    logging.debug(f'notif_time_map[next_notif]: {notif_time_map[next_notif]}')

    # convert to a datetime object for scheduling
    next_notif = datetime.datetime.fromtimestamp(next_notif)

    # do scheduling with next_notif_type and until_next_notif
    if next_notif_type == '24h':
        if until_next_notif >= 6 * 3600:
            # every 4 hours (16 * 15 min), if more than 6 hours until the notification
            upd_period_mult = 16
        else:
            # every 3 hours, if less than 6 hours until the notification
            upd_period_mult = 12
    elif next_notif_type == '12h':
        # 12-hour window (24h ... 12h): update every 3 hours
        upd_period_mult = 12
    elif next_notif_type == '60m':
        # somewhere between 60m and 12h until launch
        if until_next_notif >= 4 * 3600:
            # if more than 4 hours until notification, every 2 hours
            upd_period_mult = 8
        else:
            # if less than 4 hours, update once an hour
            upd_period_mult = 4
    elif next_notif_type == '5m':
        # ≤ 55 minutes until next notification: update every 20 minutes
        upd_period_mult = 1.35
    elif next_notif_type == 'LCHECK':
        logging.debug('LCHECK found')
        logging.warning(
            f'next_notif_type: {next_notif_type} | until_next_notif: {until_next_notif}'
        )
        upd_period_mult = 1.35
    else:
        logging.warning(
            '⚠️ Unknown scheduling type! (using upd_period_mult=4)')
        logging.warning(
            f'next_notif_type: {next_notif_type} | until_next_notif: {until_next_notif}'
        )
        upd_period_mult = 4

    # add next auto-update to notif_times
    to_next_update = int(UPDATE_PERIOD * upd_period_mult) * 60 - update_delta
    next_auto_update = int(time.time()) + to_next_update
    notif_times.add(next_auto_update)

    # pick minimum of all possible API updates
    next_api_update = min(notif_times)

    # flush redis db, push so we can expire a bunch of keys just after next update
    rd = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)
    rd.flushdb()
    logging.debug('📕 Redis db flushed!')
    rd.set('next-api-update', next_api_update)

    # if next update is same as auto-update, log as information
    if next_api_update == next_auto_update:
        logging.debug(
            '📭 Auto-updating: no notifications coming up before next API update.'
        )
    else:
        logging.debug(
            '📬 Notification coming up before next API update: not auto-updating!'
        )

    # log time next notification is sent
    logging.debug(
        f'📮 Next notification in {next_notif_send_time} ({next_notif})')

    # schedule the call
    return schedule_call(next_api_update)
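
The upd_period_mult branches only become concrete together with UPDATE_PERIOD = 15 minutes; the elapsed time since the last update (update_delta) is then subtracted from the result. A quick check of the effective auto-update cadence implied by each branch above:

UPDATE_PERIOD = 15  # minutes, as in api_call_scheduler above

cadences = {
    '24h notif, >= 6 h away': 16,   # 240 min
    '24h notif, < 6 h away': 12,    # 180 min
    '12h notif': 12,                # 180 min
    '60m notif, >= 4 h away': 8,    # 120 min
    '60m notif, < 4 h away': 4,     # 60 min
    '5m notif / LCHECK': 1.35,      # ~20 min
    'unknown type (fallback)': 4,   # 60 min
}

for label, mult in cadences.items():
    print(f'{label}: update every {UPDATE_PERIOD * mult:g} minutes')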
Example #6
    def verify_no_net_slip(launch_object: 'LaunchLibrary2Launch',
                           cursor: sqlite3.Cursor) -> (bool, tuple):
        '''
		Verify the NET of the launch hasn't slipped forward: if it has, verify
		that we haven't sent a notification: if we have, send a postpone
		notification to users.
		'''

        # load launch from db
        cursor.execute('SELECT * FROM launches WHERE unique_id = ?',
                       (launch_object.unique_id, ))
        query_return = [dict(row) for row in cursor.fetchall()]
        launch_db = query_return[0]

        # compare NETs: if they match, return
        if launch_db['net_unix'] == launch_object.net_unix:
            return (False, ())

        # NETs don't match: calculate slip (diff), verify we haven't sent any notifications
        net_diff = launch_object.net_unix - launch_db['net_unix']

        # notification states for the launch
        notification_states = {
            'notify_24h': launch_db['notify_24h'],
            'notify_12h': launch_db['notify_12h'],
            'notify_60min': launch_db['notify_60min'],
            'notify_5min': launch_db['notify_5min']
        }

        # store a copy we don't change so we can properly send postpone notifications
        old_notification_states = tuple(notification_states.values())

        # map notification "presend" time to hour multiples (i.e. 3600 * X)
        notif_pre_time_map = {
            'notify_24h': 24,
            'notify_12h': 12,
            'notify_60min': 1,
            'notify_5min': 5 / 60
        }

        # keep track of whether we reset a notification state to 0
        notification_state_reset = False
        skipped_postpones = []

        # if we have at least one sent notification, the net has slipped >5 min, and we haven't launched
        if (1 in notification_states.values() and net_diff >= 5 * 60
                and not launch_object.launched):
            # iterate over the notification states loaded from the database
            for key, status in notification_states.items():
                # reset if net_diff > the notification send period (we're outside the window again)
                # -> if goes "past" window, reset state

                # EX: 21 hours until launch: window_time_left???
                until_launch = launch_object.net_unix - int(time.time())

                # if int(status) == 1, window_end = launch time - 3600 * multiplier
                # should net_unix be old net? launch_db['net_unix']
                window_end = launch_db[
                    'net_unix'] - 3600 * notif_pre_time_map[key]
                window_diff = window_end - int(time.time()) + net_diff

                # check if postpone puts us past this window: if it does, reset state
                if int(status) == 1 and int(
                        time.time()) - net_diff < window_end:
                    postpone = {
                        'old net': launch_db['net_unix'],
                        'launch_obj.net_unix': launch_object.net_unix,
                        'time.time() - net_diff': int(time.time()) - net_diff,
                        'window_end': window_end,
                        'until_launch': until_launch,
                        'window_diff': window_diff
                    }

                    logging.debug('Notification state reset!')
                    logging.debug(f'Postpone: {postpone}')

                    notification_states[key] = 0
                    notification_state_reset = True
                else:
                    # log skipped postpones
                    postpone = {
                        'status': status,
                        'net_diff': net_diff,
                        'multipl.': notif_pre_time_map[key],
                        'time.time() + net_diff': int(time.time()) + net_diff,
                        'window_end': window_end,
                        'until_launch': until_launch,
                        'window_diff': window_diff
                    }

                    skipped_postpones.append(postpone)
                '''
				if int(status) == 1 and net_diff >= 3600 * notif_pre_time_map[key]:
					notification_states[key] = 0
					notification_state_reset = True
				else:
					# log skipped postpones
					postpone = {'status': status, 'net_diff': net_diff, 'multipl.': notif_pre_time_map[key]}
					skipped_postpones.append(postpone)
				'''

            if not notification_state_reset:
                logging.warning(
                    '⚠️ No notification states were reset: exiting...')
                logging.warning(
                    f'📜 notification_states: {notification_states}')
                logging.warning(f'📜 skipped_postpones: {skipped_postpones}')
                return (False, ())

            logging.warning('✅ A notification state was reset: continuing...')

            # generate the postpone string
            postpone_str = time_delta_to_legible_eta(time_delta=int(net_diff),
                                                     full_accuracy=False)

            # we've got the pretty eta: log
            logging.info(
                f'⏱ ETA string generated for net_diff={net_diff}: {postpone_str}'
            )

            # calculate days until next launch attempt
            eta_sec = launch_object.net_unix - time.time()
            next_attempt_eta_str = time_delta_to_legible_eta(
                time_delta=int(eta_sec), full_accuracy=False)

            # launch name: handle possible IndexError as well, even while this should never happen
            try:
                launch_name = launch_object.name.split('|')[1].strip()
            except IndexError:
                launch_name = launch_object.name.strip()

            # construct the postpone message
            postpone_msg = f'📢 *{launch_name}* has been postponed by {postpone_str}. '
            postpone_msg += f'*{launch_object.lsp_name}* is now targeting lift-off on *DATEHERE* at *LAUNCHTIMEHERE*.'
            postpone_msg += f'\n\n⏱ {next_attempt_eta_str} until next launch attempt.'

            # reconstruct
            postpone_msg = reconstruct_message_for_markdown(postpone_msg)

            # append the manually escaped footer
            postpone_msg += '\n\nℹ️ _You will be re\-notified of this launch\. '
            postpone_msg += f'For detailed info\, use \/next\@{bot_username}\. '
            postpone_msg += 'To disable\, mute this launch with the button below\._'

            # clean message
            postpone_msg = inspect.cleandoc(postpone_msg)

            # log the message
            logging.info(f'📢 postpone_msg generated:\n{postpone_msg}')

            # generate insert statement for db update
            insert_statement = '=?,'.join(notification_states.keys()) + '=?'

            # generate tuple for values: values for states + unique ID
            values_tuple = tuple(
                notification_states.values()) + (launch_object.unique_id, )

            # store updated notification states
            cursor.execute(
                f'UPDATE launches SET {insert_statement} WHERE unique_id = ?',
                values_tuple)

            # log
            logging.info(
                f'🚩 Notification states reset for launch_id={launch_object.unique_id}!'
            )
            logging.info(
                f'ℹ️ Postponed by {postpone_str}. New states: {notification_states}'
            )
            ''' return bool + a tuple we can use to send the postpone notification easily
			(launch_object, message, old_notif_states) '''
            postpone_tup = (launch_object, postpone_msg,
                            old_notification_states)

            return (True, postpone_tup)

        return (False, ())
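
The reset condition int(time.time()) - net_diff < window_end is easier to follow with concrete numbers. The illustration below uses made-up timestamps for a launch that slips 11 hours while the 12-hour notification has already been sent; all values are hypothetical.

import time

now = int(time.time())

old_net = now + 2 * 3600          # launch was 2 hours away (12h notification already sent)
new_net = old_net + 11 * 3600     # NET slips forward by 11 hours
net_diff = new_net - old_net      # 39600 seconds

# 12-hour window end, computed against the old NET as in the code above
window_end = old_net - 3600 * 12

# the condition used above to decide whether to reset the notify_12h state
resets = (now - net_diff) < window_end
print(f'net_diff={net_diff}s, window_end is {now - window_end}s in the past, reset={resets}')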
Example #7
    def verify_no_net_slip(launch_object: 'LaunchLibrary2Launch',
                           cursor: sqlite3.Cursor) -> (bool, tuple):
        '''
		Verify the NET of the launch hasn't slipped forward: if it has, verify
		that we haven't sent a notification: if we have, send a postpone
		notification to users.
		'''

        # load launch from db
        cursor.execute('SELECT * FROM launches WHERE unique_id = ?',
                       (launch_object.unique_id, ))
        query_return = [dict(row) for row in cursor.fetchall()]
        launch_db = query_return[0]

        # compare NETs: if they match, return
        if launch_db['net_unix'] == launch_object.net_unix:
            return (False, ())

        # NETs don't match: calculate slip (diff), verify we haven't sent any notifications
        net_diff = launch_object.net_unix - launch_db['net_unix']
        notification_states = {
            'notify_24h': launch_db['notify_24h'],
            'notify_12h': launch_db['notify_12h'],
            'notify_60min': launch_db['notify_60min'],
            'notify_5min': launch_db['notify_5min']
        }

        # map notification "presend" time to hour multiples (i.e. 3600 * X)
        notif_pre_time_map = {
            'notify_24h': 24,
            'notify_12h': 12,
            'notify_60min': 1,
            'notify_5min': 5 / 60
        }

        # if we have at least one sent notification, the net has slipped >5 min, and we haven't launched
        if (1 in notification_states.values() and net_diff >= 5 * 60
                and not launch_object.launched):
            # iterate over the notification states loaded from the database
            for key, status in notification_states.items():
                # reset if net_diff > the notification send period (we're outside the window again)
                if status == 1 and net_diff > 3600 * notif_pre_time_map[key]:
                    notification_states[key] = 0

            # generate the postpone string
            postpone_str = time_delta_to_legible_eta(time_delta=int(net_diff),
                                                     full_accuracy=False)

            # we've got the pretty eta: log
            logging.info(
                f'⏱ ETA string generated for net_diff={net_diff}: {postpone_str}'
            )

            # calculate days until next launch attempt
            eta_sec = launch_object.net_unix - time.time()
            next_attempt_eta_str = time_delta_to_legible_eta(
                time_delta=int(eta_sec), full_accuracy=False)

            # launch name: handle possible IndexError as well, even while this should never happen
            try:
                launch_name = launch_object.name.split('|')[1].strip()
            except IndexError:
                launch_name = launch_object.name

            # construct the postpone message
            postpone_msg = f'📢 *{launch_name}* has been postponed by {postpone_str}. '
            postpone_msg += f'*{launch_object.lsp_name}* is now targeting lift-off on *DATEHERE* at *LAUNCHTIMEHERE*.'
            postpone_msg += f'\n\n⏱ {next_attempt_eta_str} until next launch attempt.'

            # reconstruct
            postpone_msg = reconstruct_message_for_markdown(postpone_msg)

            # append the manually escaped footer
            postpone_msg += '\n\nℹ️ _You will be re\-notified of this launch\. '
            postpone_msg += f'For detailed info\, use \/next\@{bot_username}\. '
            postpone_msg += 'To disable\, mute this launch with the button below\._'

            # clean message
            postpone_msg = inspect.cleandoc(postpone_msg)

            # log the message
            logging.info(f'📢 postpone_msg generated:\n{postpone_msg}')

            # generate insert statement for db update
            insert_statement = '=?,'.join(notification_states.keys()) + '=?'

            # generate tuple for values: values for states + unique ID
            values_tuple = tuple(
                notification_states.values()) + (launch_object.unique_id, )

            # store updated notification states
            cursor.execute(
                f'UPDATE launches SET {insert_statement} WHERE unique_id = ?',
                values_tuple)

            # log
            logging.info(
                f'🚩 Notification states reset for launch_id={launch_object.unique_id}!'
            )
            logging.info(
                f'ℹ️ Postponed by {postpone_str}. New states: {notification_states}'
            )
            ''' return bool + a tuple we can use to send the postpone notification easily
			(launch_object, message) '''
            postpone_tup = (launch_object, postpone_msg)

            return (True, postpone_tup)

        return (False, ())
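
This earlier variant uses the simpler reset rule net_diff > 3600 * lead_time: a state is cleared only if the slip is larger than that notification's lead time. A quick check of which states a hypothetical 6-hour slip would clear:

net_diff = 6 * 3600  # hypothetical 6-hour slip

notif_pre_time_map = {
    'notify_24h': 24,
    'notify_12h': 12,
    'notify_60min': 1,
    'notify_5min': 5 / 60
}

for key, lead_hours in notif_pre_time_map.items():
    cleared = net_diff > 3600 * lead_hours
    print(f'{key}: {"reset" if cleared else "kept"}')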
Example #8
def api_call_scheduler(db_path: str, scheduler: BackgroundScheduler,
                       ignore_60: bool, bot_username: str,
                       bot: 'telegram.bot.Bot') -> int:
    """
	Schedules upcoming API calls for when they'll be required.
	Calls are scheduled with the following logic:
	- every 20 minutes, unless any of the following has triggered an update:
		- 30 seconds before upcoming notification sends
		- the moment a launch is due to happen (postpone notification)

	The function returns the timestamp for when the next API call should be run.
	Whenever an API call is performed, the next call should be scheduled.

	TODO improve checking for overlapping jobs, especially when notification checks
	are scheduled. Keep track of scheduled job IDs. LaunchBot-class in main thread?
	"""
    def schedule_call(unix_timestamp: int) -> int:
        # verify time isn't in the past
        if unix_timestamp <= int(time.time()):
            logging.warning(
                'schedule_call called with a timestamp in the past! Scheduling for t+3 seconds.'
            )
            unix_timestamp = int(time.time()) + 3

        # delta
        until_update = unix_timestamp - int(time.time())

        # convert to a datetime object
        next_update_dt = datetime.datetime.fromtimestamp(unix_timestamp)

        # schedule next API update, and we're done: next update will be scheduled after the API update
        scheduler.add_job(ll2_api_call,
                          'date',
                          run_date=next_update_dt,
                          args=[db_path, scheduler, bot_username, bot],
                          id=f'api-{unix_timestamp}')

        logging.info(
            '🔄 Next API update in %s (%s)',
            time_delta_to_legible_eta(time_delta=until_update,
                                      full_accuracy=False), next_update_dt)

        return unix_timestamp

    def require_immediate_update(cursor: sqlite3.Cursor) -> tuple:
        '''
		Load previous time on startup to figure out if we need to update right now
		'''
        try:
            cursor.execute('SELECT last_api_update FROM stats')
        except sqlite3.OperationalError:
            return (True, None)

        last_update = cursor.fetchall()[0][0]
        if last_update in ('', None):
            return (True, None)

        if time.time() > last_update + UPDATE_PERIOD * 60:
            return (True, None)

        return (False, last_update)

    # debug print
    logging.debug('⏲ Starting api_call_scheduler...')

    # update period, in minutes
    UPDATE_PERIOD = 15

    # load the next upcoming launch from the database
    conn = sqlite3.connect(os.path.join(db_path, 'launchbot-data.db'))
    cursor = conn.cursor()

    # verify we don't need an immediate API update
    db_status = require_immediate_update(cursor)
    update_immediately, last_update = db_status[0], db_status[1]

    if update_immediately:
        logging.info(
            '⚠️ DB outdated: scheduling next API update 5 seconds from now...')
        return schedule_call(int(time.time()) + 5)

    # if we didn't return above, no need to update immediately
    update_delta = int(time.time()) - last_update
    last_updated_str = time_delta_to_legible_eta(update_delta,
                                                 full_accuracy=False)

    logging.info(f'🔀 DB up-to-date! Last updated {last_updated_str} ago.')

    # pull all launches with a net greater than or equal to notification window start
    select_fields = 'net_unix, launched, notify_24h, notify_12h, notify_60min, notify_5min'
    notify_window = int(time.time()) - 60 * 5

    try:
        cursor.execute(
            f'SELECT {select_fields} FROM launches WHERE net_unix >= ?',
            (notify_window, ))
        query_return = cursor.fetchall()
    except sqlite3.OperationalError:
        query_return = set()

    conn.close()

    if len(query_return) == 0:
        logging.warning(
            '⚠️ No launches found for scheduling: running in 5 seconds...')
        os.rename(
            os.path.join(db_path, 'launchbot-data.db'),
            os.path.join(db_path,
                         f'launchbot-data-sched-error-{int(time.time())}.db'))

        return schedule_call(int(time.time()) + 5)

    # sort in-place by NET
    query_return.sort(key=lambda tup: tup[0])
    '''
	Create a list of notification send times, but also during launch to check for a postpone.
	- notification times, if not sent (60 seconds before)
	- 60 seconds after the launch is supposed to occur
	'''
    notif_times, time_map = set(), {
        0: 24 * 3600,
        1: 12 * 3600,
        2: 3600,
        3: 5 * 60
    }
    for launch_row in query_return:
        # shortly after launch time for possible postpone/abort, if not launched
        if not launch_row[1] and time.time() - launch_row[0] < 60:
            notif_times.add(launch_row[0] + 60)

        for enum, notif_bool in enumerate(launch_row[2::]):
            if not notif_bool:
                # time for check: launch time - notification time - 60 (60s before)
                check_time = launch_row[0] - time_map[enum] - 60

                # if less than 60 sec until next check, pass if ignore_60 flag is set
                if check_time - int(time.time()) < 60 and ignore_60:
                    pass
                elif check_time < time.time():
                    pass
                else:
                    notif_times.add(check_time)

    # get time when next notification will be sent
    next_notif = min(notif_times)
    until_next_notif = next_notif - int(time.time())
    next_notif_send_time = time_delta_to_legible_eta(
        time_delta=until_next_notif, full_accuracy=False)

    # convert to a datetime object for scheduling
    next_notif = datetime.datetime.fromtimestamp(next_notif)

    # schedule next update more loosely if next notif is far away
    if until_next_notif >= 3600 * 24:
        # if more than 24 hours until next notif, check once every 2 hours
        upd_period_mult = 8
    elif until_next_notif >= 3600 * 12:
        # if more than 12 hours until next notif, check once an hour
        upd_period_mult = 4
    elif until_next_notif >= 3600:
        # if still more than an hour (1-12 hours), check every 30 minutes
        upd_period_mult = 2
    else:
        # if less than an hour, check every 10.5 minutes
        upd_period_mult = 0.7

    # add next auto-update to notif_times
    to_next_update = UPDATE_PERIOD * upd_period_mult * 60 - update_delta
    next_auto_update = int(time.time()) + to_next_update
    notif_times.add(next_auto_update)

    # pick minimum of all possible API updates
    next_api_update = min(notif_times)

    # if next update is same as auto-update, log as information
    if next_api_update == next_auto_update:
        logging.info(
            '📭 Auto-updating: no notifications coming up before next API update.'
        )
    else:
        logging.info(
            '📬 Notification coming up before next API update: not auto-updating!'
        )

    # log time next notification is sent
    logging.info(
        f'📮 Next notification in {next_notif_send_time} ({next_notif})')

    # schedule the call
    return schedule_call(next_api_update)
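
For context, api_call_scheduler is designed to be called once at startup and then re-invoked after every API call it schedules. A hypothetical startup wiring follows; the db_path, bot_username and bot values are placeholders, not the project's actual entry point.

from apscheduler.schedulers.background import BackgroundScheduler

# hypothetical wiring; the real entry point and arguments may differ
scheduler = BackgroundScheduler()
scheduler.start()

next_call_timestamp = api_call_scheduler(db_path='data',
                                         scheduler=scheduler,
                                         ignore_60=False,
                                         bot_username='launchbot',
                                         bot=None)  # placeholder for a telegram.bot.Bot instance
print('next API update scheduled for unix time', next_call_timestamp)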