def sync_new_events(configuration, condition, don_t_run_too_long=False):
    """Publish every master row matching *condition* to the slave table.

    Each matching master row is expanded into slave dicts which are
    queued for a batched insert; the master row is then marked 'public'
    with a fresh sync date and final date. The queued slave inserts are
    flushed once all rows are processed.
    """
    rows_to_publish = fusion_tables.select(configuration['master table'], condition=condition)
    logging.info("Syncing %d new rows in %s master %s" % (len(rows_to_publish), configuration['id'], configuration['master table']))
    for master_row in rows_to_publish:
        # expand the master row into its slave dicts
        (slave_rows, final_date) = fusion_tables.master_to_slave(master_row)
        # queue the slave dicts for the batched insert
        for slave_row in slave_rows:
            fusion_tables.insert_hold(configuration['slave table'], slave_row)
        # flip the master row to the 'public' state
        fusion_tables.update_with_implicit_rowid(configuration['master table'], {
            'rowid': master_row['rowid'],
            'state': 'public',
            'sync date': datetime.today().strftime(FUSION_TABLE_DATE_TIME_FORMAT),
            'final date': final_date
        })
        running_too_long(don_t_run_too_long)
    # flush the queued slave inserts
    fusion_tables.insert_go(configuration['slave table'])
    logging.info("Done syncing new rows in %s master %s" % (configuration['id'], configuration['master table']))
def sync_outdated_events(configuration, condition, don_t_run_too_long=True):
    """Regenerate slave rows for public master rows whose sync is outdated.

    Existing slave rows are kept; only expanded slave rows whose
    'datetime slug' is not already present are queued for insertion.
    The master row's sync date and final date are then refreshed.

    Args:
        configuration: customer configuration dict with 'id',
            'master table' and 'slave table' keys.
        condition: Fusion Tables WHERE clause selecting the outdated rows.
        don_t_run_too_long: forwarded to running_too_long(); defaults to
            True to preserve the previously hard-coded behavior, while
            making the signature consistent with sync_new_events and
            sync_updated_events.

    Raises:
        RunningTooLongError: propagated from running_too_long() so the
            caller can flush pending inserts and stop.
    """
    outdated = fusion_tables.select(configuration['master table'], condition=condition)
    logging.info("Syncing %d outdated public rows in %s master %s" % (len(outdated), configuration['id'], configuration['master table']))
    for row in outdated:
        # select old slave rows; local name avoids shadowing the
        # *condition* parameter (latent bug source in the original)
        slave_condition = "'event slug' = '%s'" % row['event slug']
        slaves = fusion_tables.select(configuration['slave table'], cols=['datetime slug'], condition=slave_condition, filter_obsolete_rows=False)
        # create slave dicts
        datetime_slugs = [slave['datetime slug'] for slave in slaves]
        (new_slaves, final_date) = fusion_tables.master_to_slave(row)
        # store only the slave dicts that are not already present
        logging.info("Inserting approx. %d future rows in %s slave %s" % (len(new_slaves) - len(slaves), configuration['id'], configuration['slave table']))
        for new_slave in new_slaves:
            if new_slave['datetime slug'] not in datetime_slugs:
                fusion_tables.insert_hold(configuration['slave table'], new_slave)
        # set master event state to 'public' with fresh sync metadata
        update = {
            'rowid': row['rowid'],
            'state': 'public',
            'sync date': datetime.today().strftime(FUSION_TABLE_DATE_TIME_FORMAT),
            'final date': final_date
        }
        fusion_tables.update_with_implicit_rowid(configuration['master table'], update)
        # log after the update actually happened (was logged before it)
        logging.info("Updated sync timestamp and final date in regenerated row in %s master %s" % (configuration['id'], configuration['master table']))
        running_too_long(don_t_run_too_long=don_t_run_too_long)
    fusion_tables.insert_go(configuration['slave table'])
    logging.info("Done syncing outdated public rows in %s master %s" % (configuration['id'], configuration['master table']))
def sync_updated_events(configuration, condition, don_t_run_too_long=False):
    """Publish updated master rows and drop their superseded public copies.

    Old slave rows are intentionally not deleted: new slave rows are
    simply added with an incremented sequence number. Because the
    updated master row is a copy, the previously public original row is
    deleted before the copy itself is promoted to 'public'.
    """
    updated = fusion_tables.select(configuration['master table'], condition=condition)
    logging.info("Syncing %d updated rows in %s master %s" % (len(updated), configuration['id'], configuration['master table']))
    for row in updated:
        # expand into slave dicts and queue them (old slave rows stay)
        slaves, final_date = fusion_tables.master_to_slave(row)
        for slave in slaves:
            fusion_tables.insert_hold(configuration['slave table'], slave)
        # delete the old master row (the updated row was a copy!)
        old_condition = "'event slug' = '%s' and 'state' = 'public'" % row['event slug']
        for old_row in fusion_tables.select(configuration['master table'], cols=['rowid'], condition=old_condition):
            # should be only a single row!!
            fusion_tables.delete_with_implicit_rowid(configuration['master table'], old_row)
        # promote the updated copy to 'public'
        fusion_tables.update_with_implicit_rowid(configuration['master table'], {
            'rowid': row['rowid'],
            'state': 'public',
            'sync date': datetime.today().strftime(FUSION_TABLE_DATE_TIME_FORMAT),
            'final date': final_date
        })
        running_too_long(don_t_run_too_long)
    fusion_tables.insert_go(configuration['slave table'])
    logging.info("Done syncing updated rows in %s master %s" % (configuration['id'], configuration['master table']))
def get(self):
    """Force a full regeneration sync across all customer configurations.

    Slave rows must be deleted manually beforehand for this to have any
    effect; every 'public' master row is re-synced, oldest sync first.
    Stops early (with a leftovers message) on RunningTooLongError.
    """
    logging.info("Start syncing all by force. For this operation to have effect, you have to delete the slave rows manually first!")
    if self.request.get('id'):
        # for debugging, to limit sync to specific table
        configurations = [customer_configuration.get_configuration(self.request)]
    else:
        configurations = customer_configuration.get_configurations()
    running_too_long(initialize=True)  # initialize
    try:
        logging.info("Start syncing by force")
        real_configurations = [c for c in configurations if c['id'] != 'www']  # www is a fake configuration!
        for configuration in real_configurations:
            logging.info("Start syncing %s by force" % configuration['id'])
            # in the master table, find all events with outdated sync
            sync_outdated_events(configuration, "'state' = 'public' ORDER BY 'sync date'")
        logging.info("Done syncing by force")
        # return the web-page content
        self.response.out.write("SyncHandler by force finished")
        return
    except RunningTooLongError:
        # first release pending inserts!
        fusion_tables.insert_go(configuration['slave table'])
        # then quit
        self.response.out.write("SyncHandler by force finished with leftovers")
        return
def get(self):
    """Load random master rows into the configured master table.

    The 'count' query parameter sets how many rows to generate. A
    missing or non-numeric 'count' (webapp's request.get returns '')
    previously raised ValueError; it is now treated as 0 so the handler
    is a harmless no-op instead of crashing.
    """
    configuration = customer_configuration.get_configuration(self.request)
    try:
        count = int(self.request.get('count'))
    except ValueError:
        count = 0  # missing or invalid 'count' -> load nothing
    for _ in xrange(count):
        master = fusion_tables.random_master(configuration)
        fusion_tables.insert_hold(configuration['master table'], master)
    # flush the queued inserts in one batch
    fusion_tables.insert_go(configuration['master table'])
    # return the web-page content
    self.response.out.write("LoadHandler finished")
    return
def sync_new_events(configuration, condition, don_t_run_too_long=False):
    """Push newly submitted master rows out to the slave table.

    For every master row matched by *condition*, the derived slave rows
    are queued for a batched insert and the master row is set to the
    'public' state with an updated sync date and final date.
    """
    master_table = configuration['master table']
    slave_table = configuration['slave table']
    new = fusion_tables.select(master_table, condition=condition)
    logging.info("Syncing %d new rows in %s master %s" % (len(new), configuration['id'], master_table))
    for row in new:
        # create slave dicts
        slaves, final_date = fusion_tables.master_to_slave(row)
        # store slave dicts
        for slave in slaves:
            fusion_tables.insert_hold(slave_table, slave)
        # set master event state to 'public'
        sync_stamp = datetime.today().strftime(FUSION_TABLE_DATE_TIME_FORMAT)
        update = {
            'rowid': row['rowid'],
            'state': 'public',
            'sync date': sync_stamp,
            'final date': final_date
        }
        fusion_tables.update_with_implicit_rowid(master_table, update)
        running_too_long(don_t_run_too_long)
    # release the queued slave inserts
    fusion_tables.insert_go(slave_table)
    logging.info("Done syncing new rows in %s master %s" % (configuration['id'], master_table))
def sync_outdated_events(configuration, condition, don_t_run_too_long=True):
    """Regenerate slave rows for public master rows whose sync is outdated.

    Existing slave rows are kept; only expanded slave rows whose
    'datetime slug' is not already present are queued for insertion.
    The master row's sync date and final date are then refreshed.

    Args:
        configuration: customer configuration dict with 'id',
            'master table' and 'slave table' keys.
        condition: Fusion Tables WHERE clause selecting the outdated rows.
        don_t_run_too_long: forwarded to running_too_long(); defaults to
            True to preserve the previously hard-coded behavior, while
            making the signature consistent with sync_new_events and
            sync_updated_events.

    Raises:
        RunningTooLongError: propagated from running_too_long() so the
            caller can flush pending inserts and stop.
    """
    outdated = fusion_tables.select(configuration['master table'], condition=condition)
    logging.info("Syncing %d outdated public rows in %s master %s" % (len(outdated), configuration['id'], configuration['master table']))
    for row in outdated:
        # select old slave rows; local name avoids shadowing the
        # *condition* parameter (latent bug source in the original)
        slave_condition = "'event slug' = '%s'" % row['event slug']
        slaves = fusion_tables.select(configuration['slave table'], cols=['datetime slug'], condition=slave_condition, filter_obsolete_rows=False)
        # create slave dicts
        datetime_slugs = [slave['datetime slug'] for slave in slaves]
        (new_slaves, final_date) = fusion_tables.master_to_slave(row)
        # store only the slave dicts that are not already present
        logging.info("Inserting approx. %d future rows in %s slave %s" % (len(new_slaves) - len(slaves), configuration['id'], configuration['slave table']))
        for new_slave in new_slaves:
            if new_slave['datetime slug'] not in datetime_slugs:
                fusion_tables.insert_hold(configuration['slave table'], new_slave)
        # set master event state to 'public' with fresh sync metadata
        update = {
            'rowid': row['rowid'],
            'state': 'public',
            'sync date': datetime.today().strftime(FUSION_TABLE_DATE_TIME_FORMAT),
            'final date': final_date
        }
        fusion_tables.update_with_implicit_rowid(configuration['master table'], update)
        # log after the update actually happened (was logged before it)
        logging.info("Updated sync timestamp and final date in regenerated row in %s master %s" % (configuration['id'], configuration['master table']))
        running_too_long(don_t_run_too_long=don_t_run_too_long)
    fusion_tables.insert_go(configuration['slave table'])
    logging.info("Done syncing outdated public rows in %s master %s" % (configuration['id'], configuration['master table']))
def get(self):
    """Handler that forces a regeneration sync of every customer table.

    This only has an effect if the slave rows were deleted manually
    first: each 'public' master row (ordered by oldest sync date) is
    pushed through sync_outdated_events. On RunningTooLongError the
    pending slave inserts are released and the handler reports
    leftovers.
    """
    logging.info("Start syncing all by force. For this operation to have effect, you have to delete the slave rows manually first!")
    if self.request.get('id'):
        # for debugging, to limit sync to specific table
        configurations = [customer_configuration.get_configuration(self.request)]
    else:
        configurations = customer_configuration.get_configurations()
    running_too_long(initialize=True)  # initialize
    try:
        logging.info("Start syncing by force")
        for configuration in [c for c in configurations if c['id'] != 'www']:  # www is a fake configuration!
            logging.info("Start syncing %s by force" % configuration['id'])
            # in the master table, find all events with outdated sync
            condition = "'state' = 'public' ORDER BY 'sync date'"
            sync_outdated_events(configuration, condition)
        logging.info("Done syncing by force")
        # return the web-page content
        self.response.out.write("SyncHandler by force finished")
        return
    except RunningTooLongError:
        # first release pending inserts!
        fusion_tables.insert_go(configuration['slave table'])
        # then quit
        self.response.out.write("SyncHandler by force finished with leftovers")
        return
def sync_updated_events(configuration, condition, don_t_run_too_long=False):
    """Publish updated master rows, retiring the old public originals.

    Old slave rows are not deleted! New slave rows are just added with
    an incremented sequence number. The previously public master row is
    removed (the updated row was a copy of it) and the updated row is
    then marked 'public'.
    """
    master_table = configuration['master table']
    slave_table = configuration['slave table']
    updated_rows = fusion_tables.select(master_table, condition=condition)
    logging.info("Syncing %d updated rows in %s master %s" % (len(updated_rows), configuration['id'], master_table))
    for updated_row in updated_rows:
        # create slave dicts and queue them for the batched insert
        (slave_dicts, final_date) = fusion_tables.master_to_slave(updated_row)
        for slave_dict in slave_dicts:
            fusion_tables.insert_hold(slave_table, slave_dict)
        # delete the old master row (the updated row was a copy!)
        stale_condition = "'event slug' = '%s' and 'state' = 'public'" % updated_row['event slug']
        stale_rows = fusion_tables.select(master_table, cols=['rowid'], condition=stale_condition)
        for stale_row in stale_rows:
            # should be only a single row!!
            fusion_tables.delete_with_implicit_rowid(master_table, stale_row)
        # set master event state to 'public'
        fusion_tables.update_with_implicit_rowid(master_table, {
            'rowid': updated_row['rowid'],
            'state': 'public',
            'sync date': datetime.today().strftime(FUSION_TABLE_DATE_TIME_FORMAT),
            'final date': final_date
        })
        running_too_long(don_t_run_too_long)
    fusion_tables.insert_go(slave_table)
    logging.info("Done syncing updated rows in %s master %s" % (configuration['id'], master_table))
def get(self):
    """Run the complete sync pipeline for every real customer configuration.

    The pipeline handles, in order: new events, updated events,
    cancelled events, month-old cancellations, outdated public events,
    events whose final date passed, slave rows whose end date passed,
    and events flagged as updated after sync. On RunningTooLongError
    the pending slave inserts are flushed and the handler stops,
    reporting leftovers.
    """
    if self.request.get('id'):
        # for debugging, to limit sync to specific table
        configurations = [customer_configuration.get_configuration(self.request)]
    else:
        configurations = customer_configuration.get_configurations()
    running_too_long(initialize=True)  # initialize
    try:
        logging.info("Start syncing")
        for configuration in [c for c in configurations if c['id'] != 'www']:  # www is a fake configuration!
            logging.info("Start syncing %s" % configuration['id'])
            # compute each date cut-off once per configuration
            # (was redundantly recomputed before each use)
            today_minus_one_month = (datetime.today() - timedelta(days=30)).strftime(FUSION_TABLE_DATE_TIME_FORMAT)
            yesterday = (datetime.today() - timedelta(days=1)).strftime(FUSION_TABLE_DATE_TIME_FORMAT)
            # in the master table, find all new events
            condition = "'state' = 'new'"
            sync_new_events(configuration, condition, don_t_run_too_long=True)
            # in the master table, find all updated events
            condition = "'state' = 'updated'"
            sync_updated_events(configuration, condition, don_t_run_too_long=True)
            # in the master table, find all cancelled events
            condition = "'state' = 'cancelled'"
            sync_cancelled_events(configuration, condition)
            # in the master table, find all cancellations older than one month
            condition = "'state' = 'cancellation' and 'update date' < '%s'" % today_minus_one_month
            sync_one_month_old_cancellations(configuration, condition)
            # in the master table, find all events with outdated sync
            condition = "'state' = 'public' and 'sync date' < '%s'" % today_minus_one_month
            sync_outdated_events(configuration, condition)
            # in the master table, find all events with final date in the past (*)
            condition = "'final date' < '%s'" % yesterday
            sync_events_with_final_date_passed(configuration, condition)
            # in the slave table, find all events with end date in the past (*)
            condition = "'end' < '%s'" % yesterday
            sync_passed_events(configuration, condition)
            # in the master table, find all events flagged as updated (flag is set in submit.py)
            condition = "'update after sync' = 'true'"
            sync_old_version_of_updated_events(configuration, condition)
            # (*) yesterday, because this is running server time, and other timezones in the world
            # still need the event, while for the server it's already 'past'
        logging.info("Done syncing")
        # return the web-page content
        self.response.out.write("SyncHandler finished")
        return
    except RunningTooLongError:
        # first release pending inserts!
        fusion_tables.insert_go(configuration['slave table'])
        # then quit
        self.response.out.write("SyncHandler finished with leftovers")
        return
def get(self):
    """Run the complete sync pipeline for every real customer configuration.

    Processes, in order: new events, updated events, cancelled events,
    month-old cancellations, outdated public events, events whose final
    date passed, slave rows whose end date passed, and events flagged
    as updated after sync. On RunningTooLongError the pending slave
    inserts are flushed and the handler stops, reporting leftovers.
    """
    if self.request.get('id'):
        # for debugging, to limit sync to specific table
        configurations = [customer_configuration.get_configuration(self.request)]
    else:
        configurations = customer_configuration.get_configurations()
    running_too_long(initialize=True)  # initialize
    try:
        logging.info("Start syncing")
        for configuration in [c for c in configurations if c['id'] != 'www']:  # www is a fake configuration!
            logging.info("Start syncing %s" % configuration['id'])
            # compute each date cut-off once per configuration
            # (was redundantly recomputed before each use)
            today_minus_one_month = (datetime.today() - timedelta(days=30)).strftime(FUSION_TABLE_DATE_TIME_FORMAT)
            yesterday = (datetime.today() - timedelta(days=1)).strftime(FUSION_TABLE_DATE_TIME_FORMAT)
            # in the master table, find all new events
            condition = "'state' = 'new'"
            sync_new_events(configuration, condition, don_t_run_too_long=True)
            # in the master table, find all updated events
            condition = "'state' = 'updated'"
            sync_updated_events(configuration, condition, don_t_run_too_long=True)
            # in the master table, find all cancelled events
            condition = "'state' = 'cancelled'"
            sync_cancelled_events(configuration, condition)
            # in the master table, find all cancellations older than one month
            condition = "'state' = 'cancellation' and 'update date' < '%s'" % today_minus_one_month
            sync_one_month_old_cancellations(configuration, condition)
            # in the master table, find all events with outdated sync
            condition = "'state' = 'public' and 'sync date' < '%s'" % today_minus_one_month
            sync_outdated_events(configuration, condition)
            # in the master table, find all events with final date in the past (*)
            condition = "'final date' < '%s'" % yesterday
            sync_events_with_final_date_passed(configuration, condition)
            # in the slave table, find all events with end date in the past (*)
            condition = "'end' < '%s'" % yesterday
            sync_passed_events(configuration, condition)
            # in the master table, find all events flagged as updated (flag is set in submit.py)
            condition = "'update after sync' = 'true'"
            sync_old_version_of_updated_events(configuration, condition)
            # (*) yesterday, because this is running server time, and other timezones in the world
            # still need the event, while for the server it's already 'past'
        logging.info("Done syncing")
        # return the web-page content
        self.response.out.write("SyncHandler finished")
        return
    except RunningTooLongError:
        # first release pending inserts!
        fusion_tables.insert_go(configuration['slave table'])
        # then quit
        self.response.out.write("SyncHandler finished with leftovers")
        return