# Find reminder entries whose shows have finished airing, build the slice
# download link, email it to the requester, and then remove the reminder.
def find_requests_and_send_mail(config):
  os.chdir(os.path.dirname(os.path.realpath(__file__)))
  db = DB.connect(db_file='../db/main.db')

  now = time.time()
  what_we_be_done_with = db['c'].execute('select * from reminders where end_time < %d' % now).fetchall()

  for row in DB.map(what_we_be_done_with, 'reminders', db=db):
    row['link'] = "http://indycast.net/%s/slices/%s_%d" % (
      row['station'],
      time.strftime("%Y%m%d%H%M", time.gmtime(row['start_time'] - row['offset'] * 60)),
      (row['end_time'] - row['start_time']) / 60)

    if len(row['notes']):
      row['link'] += '/%s_on_%s' % (re.sub(r'[^\w]', '_', row['notes']).strip('_'), row['station'])

    row['link'] += '.mp3'

    email = do_template(template_file='email_reminder_template.txt', settings=row)
    res = misc.send_email(config=config, who=row['email'], subject=email['subject'], body=email['body'])

    db['c'].execute('delete from reminders where id = %d' % row['id'])
    db['conn'].commit()

  return None
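
# Illustrative sketch only (never called): this mirrors the link-building logic
# above with made-up reminder values so the slice URL format is easy to see. The
# field names match the 'reminders' row used above; the station, timestamps and
# the helper name _example_reminder_link are assumptions for illustration.
def _example_reminder_link():
  import re
  import time

  row = {
    'station': 'kxyz',         # hypothetical station prefix
    'start_time': 1461781800,  # hypothetical unix start time of the show
    'end_time': 1461785400,    # hypothetical unix end time (60 minutes later)
    'offset': 0,               # minutes of pre-roll requested
    'notes': 'Morning Show'
  }

  link = "http://indycast.net/%s/slices/%s_%d" % (
    row['station'],
    time.strftime("%Y%m%d%H%M", time.gmtime(row['start_time'] - row['offset'] * 60)),
    (row['end_time'] - row['start_time']) / 60)

  if len(row['notes']):
    link += '/%s_on_%s' % (re.sub(r'[^\w]', '_', row['notes']).strip('_'), row['station'])

  # Yields something of the form
  # http://indycast.net/kxyz/slices/YYYYmmddHHMM_60/Morning_Show_on_kxyz.mp3
  return link + '.mp3'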
def find_streams(start_list, duration_min):
  import lib.misc as misc
  # Given a start week minute this looks for streams in the storage
  # directory that match it - regardless of duration ... so it may return
  # partial show results.
  stream_list = []

  if type(start_list) is int:
    start_list = [start_list]

  # Sort nominally - since we have unix time in the name, this should come out
  # as sorted by time for us for free.
  stitch_list = []
  episode_list = []

  # So we have a start list; we are about to query our database using the start_minute
  # and end_minute fields ... to get end_minute we need to make use of our duration.
  #
  # timeline ->
  #
  #          ###################             << Region we want
  #     start_search       end_search        << Search
  #          V                 V
  #     |    |    |    |    |    |    |      << Minute
  #     a         b    b              c
  #
  # so we want
  #   (a) start_minute < start_search and end_minute >= start_search ||
  #   (b) start_minute >= start_search and end_minute <= end_search  ||
  #   (c) start_minute < end_search and end_minute >= end_search
  #
  condition_list = []

  for start in start_list:
    # The search window can wrap around the end of the broadcast week.
    end_search = (start + duration_min) % TS.MINUTES_PER_WEEK
    # print start, duration_min, end_search
    condition_list.append('start_minute < %d and end_minute >= %d' % (start, start))
    condition_list.append('start_minute >= %d and end_minute >= %d and end_minute <= %d' % (start, start, end_search))
    condition_list.append('start_minute < %d and end_minute >= %d' % (end_search, end_search))

  condition_query = "((%s))" % ') or ('.join(condition_list)

  # see https://github.com/kristopolous/DRR/issues/50 - nah this shit is buggy
  condition_query += " and start_unix < datetime(%d, 'unixepoch', 'localtime')" % (TS.sec_now() - misc.config['cascade_time'] + 3)

  full_query = "select * from streams where {} order by strftime('%Y', start_unix) * 524160 + week_number * 10080 + start_minute asc".format(condition_query)

  entry_list = DB.map(DB.run(full_query).fetchall(), 'streams')

  #logging.info(full_query)
  #logging.info(entry_list)
  # print full_query, len(entry_list)

  # We want to make sure that we break down the stream_list into days. We can't JUST look at the week
  # number since we permit feed requests for shows which may span multiple days. Since this is leaky
  # data that we don't keep via our separation of concerns, we use a little hack to figure this out.
  by_episode = []
  episode = []
  cutoff_minute = 0
  current_week = 0

  for entry in entry_list:
    # Look at the start minute; if it's past the cutoff (12 * cascade time, by default
    # 3 hours, after the episode's first entry) or the week changed, we presume this is
    # a new episode.
    if entry['start_minute'] > cutoff_minute or entry['week_number'] != current_week:
      if len(episode):
        by_episode.append(episode)
      episode = []
      cutoff_minute = entry['start_minute'] + (12 * misc.config['cascade_time']) % TS.MINUTES_PER_WEEK
      current_week = entry['week_number']

    # We know by definition that every entry in our stream_list is a valid thing we need
    # to look at. We just need to make sure we break them down by episode.
    episode.append(entry)

  if len(episode):
    by_episode.append(episode)

  #print len(by_episode), condition_query

  # Start the creation of the audio files.
  for episode in by_episode:
    # We blur the test start to a bigger window
    test_start = episode[0]['start_minute'] / (60 * 4)

    for week_start in start_list:
      # Blur the query start to the same window
      query_start = week_start / (60 * 4)

      # This shouldn't be necessary but let's do it anyway
      if abs(query_start - test_start) <= 1:
        # Under these conditions we can say that this episode
        # can be associated with this particular start time.
        # The start_minute is based on the week.
        offset_start = week_start - episode[0]['start_minute']
        fname = audio.stream_name(episode, week_start, duration_min)
        # print '--name', episode[0]['name'], fname

        # We get the name that it will be and then append that
        stream_list.append(stream_info(fname))

        # print offset_start, duration_min, episode
        episode_list.append((episode, offset_start, duration_min))
        break

  # print stream_list, "\nbreak\n", episode_list, "\nasfdasdf\n"
  return stream_list, episode_list
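
# Illustrative sketch only (never called): how a caller such as a feed or slice
# endpoint might invoke find_streams(). The show time below is an assumption;
# week minutes are offsets into the broadcast week (0 .. TS.MINUTES_PER_WEEK - 1)
# and the search window wraps around the week boundary via the modulo above.
def _example_find_streams_usage():
  # A hypothetical show starting at minute 570 of the week and running two hours.
  start_minute = 570
  duration_min = 120

  # An int start is accepted; find_streams wraps it in a list internally.
  stream_list, episode_list = find_streams(start_minute, duration_min)

  # stream_list holds stream_info() records for the stitched file names that
  # cover the window; episode_list holds (episode_entries, offset_start,
  # duration_min) tuples that the audio stitching code can consume.
  return stream_list, episode_list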