Beispiel #1
0
def sync_cancelled_events(configuration, condition):
    """Remove slave rows of cancelled events and mark their master rows.

    For every master row matching *condition*, all slave rows sharing the
    event slug are deleted, then the master row's state is set to
    'cancellation' with a fresh sync timestamp.
    """
    master_table = configuration['master table']
    slave_table = configuration['slave table']
    cancelled_rows = fusion_tables.select(master_table, condition=condition)
    logging.info("Syncing %d cancelled rows in %s master %s" %
                 (len(cancelled_rows), configuration['id'], master_table))
    for cancelled_row in cancelled_rows:
        # delete cancelled slave rows
        slug_condition = "'event slug' = '%s'" % cancelled_row['event slug']
        slave_rows = fusion_tables.select(slave_table,
                                          cols=['rowid'],
                                          condition=slug_condition,
                                          filter_obsolete_rows=False)
        logging.info("Deleting %d cancelled rows in %s slave %s" %
                     (len(slave_rows), configuration['id'], slave_table))
        delete_slaves(slave_table, slave_rows)
        # set master event state to 'cancellation'
        fusion_tables.update_with_implicit_rowid(master_table, {
            'rowid': cancelled_row['rowid'],
            'state': 'cancellation',
            'sync date': datetime.today().strftime(FUSION_TABLE_DATE_TIME_FORMAT),
        })
        running_too_long(don_t_run_too_long=True)
    logging.info("Done syncing cancelled rows in %s master %s" %
                 (configuration['id'], master_table))
Beispiel #2
0
def sync_old_version_of_updated_events(configuration, condition):
    """Delete outdated slave rows of updated events and unflag their masters.

    For each master row matching *condition*, slave rows with the same
    event slug but a lower sequence number are removed, after which the
    master row's 'update after sync' flag is set back to 'false'.
    """
    master_table = configuration['master table']
    slave_table = configuration['slave table']
    flagged_rows = fusion_tables.select(master_table, condition=condition)
    logging.info("Deleting old slave rows for %d updated events in %s master %s" %
                 (len(flagged_rows), configuration['id'], master_table))
    for flagged_row in flagged_rows:
        # find the old slave row(s): same slug, strictly lower sequence
        stale_condition = "'event slug' = '%s' AND 'sequence' < %s" % (
            flagged_row['event slug'], flagged_row['sequence'])
        stale_slaves = fusion_tables.select(slave_table,
                                            condition=stale_condition,
                                            filter_obsolete_rows=False)
        # delete the old row(s)
        logging.info("Deleting %d old event rows in %s slave %s" %
                     (len(stale_slaves), configuration['id'], slave_table))
        delete_slaves(slave_table, stale_slaves)
        logging.info("Deleted %d old rows in %s slave %s" %
                     (len(stale_slaves), configuration['id'], slave_table))
        # unflag the updated master row
        fusion_tables.update_with_implicit_rowid(master_table, {
            'rowid': flagged_row['rowid'],
            'update after sync': 'false',
        })
        logging.info("Unflagged updated row %s in %s master %s" %
                     (flagged_row['rowid'], configuration['id'], master_table))
        running_too_long(don_t_run_too_long=True)
    logging.info("Done deleting old slave rows in %s slave %s" %
                 (configuration['id'], slave_table))
Beispiel #3
0
def sync_updated_events(configuration, condition, don_t_run_too_long=False):
    """Sync updated events: queue new slave rows, delete the superseded
    'public' master copy, then mark the updated master row 'public'.

    Statement order is significant: slave inserts are only queued via
    insert_hold() and flushed in one batch by insert_go() after the loop.
    """
    updated = fusion_tables.select(configuration['master table'], condition=condition)
    logging.info("Syncing %d updated rows in %s master %s" % (len(updated), configuration['id'], configuration['master table']))
    for row in updated:
        # old rows are not deleted! New slave rows are just added with incremented sequence number
        # create slave dicts
        (slaves, final_date) = fusion_tables.master_to_slave(row)
        # store slave dicts (queued; flushed by insert_go below)
        for slave in slaves:
            fusion_tables.insert_hold(configuration['slave table'], slave)
        # delete the old master row (the updated row was a copy!)
        condition = "'event slug' = '%s' and 'state' = 'public'" % row['event slug']
        old = fusion_tables.select(configuration['master table'], cols=['rowid'], condition=condition)
        for old_row in old:  # should be only a single row!!
            fusion_tables.delete_with_implicit_rowid(configuration['master table'], old_row)
        # set master event state to 'public'
        update = {
            'rowid': row['rowid'],
            'state': 'public',
            'sync date': datetime.today().strftime(FUSION_TABLE_DATE_TIME_FORMAT),
            'final date': final_date
        }
        fusion_tables.update_with_implicit_rowid(configuration['master table'], update)
        running_too_long(don_t_run_too_long)
    # flush the held slave inserts in one batch
    fusion_tables.insert_go(configuration['slave table'])
    logging.info("Done syncing updated rows in %s master %s" % (configuration['id'], configuration['master table']))
Beispiel #4
0
def sync_new_events(configuration, condition, don_t_run_too_long=False):
    """Sync new events: queue slave rows for each matching master row,
    mark the master 'public', and flush the queued inserts at the end."""
    new = fusion_tables.select(configuration['master table'],
                               condition=condition)
    logging.info(
        "Syncing %d new rows in %s master %s" %
        (len(new), configuration['id'], configuration['master table']))
    for row in new:
        # create slave dicts
        (slaves, final_date) = fusion_tables.master_to_slave(row)
        # store slave dicts (queued; flushed by insert_go below)
        for slave in slaves:
            fusion_tables.insert_hold(configuration['slave table'], slave)
        # set master event state to 'public'
        update = {
            'rowid': row['rowid'],
            'state': 'public',
            'sync date':
            datetime.today().strftime(FUSION_TABLE_DATE_TIME_FORMAT),
            'final date': final_date
        }
        fusion_tables.update_with_implicit_rowid(configuration['master table'],
                                                 update)
        running_too_long(don_t_run_too_long)
    # flush the held slave inserts in one batch
    fusion_tables.insert_go(configuration['slave table'])
    logging.info("Done syncing new rows in %s master %s" %
                 (configuration['id'], configuration['master table']))
Beispiel #5
0
def sync_outdated_events(configuration, condition):
    """Regenerate missing future slave rows for outdated public events.

    Existing slave rows are matched on 'datetime slug'; only absent ones
    are queued for insert. The master row then gets a fresh sync date and
    final date while keeping state 'public'.
    """
    master_table = configuration['master table']
    slave_table = configuration['slave table']
    outdated_rows = fusion_tables.select(master_table, condition=condition)
    logging.info("Syncing %d outdated public rows in %s master %s" % (len(outdated_rows), configuration['id'], master_table))
    for outdated_row in outdated_rows:
        # select old slave rows for this event
        slug_condition = "'event slug' = '%s'" % outdated_row['event slug']
        existing_slaves = fusion_tables.select(slave_table, cols=['datetime slug'], condition=slug_condition, filter_obsolete_rows=False)
        existing_slugs = [existing['datetime slug'] for existing in existing_slaves]
        # regenerate slave dicts and queue only the ones not yet present
        (regenerated, final_date) = fusion_tables.master_to_slave(outdated_row)
        logging.info("Inserting approx. %d future rows in %s slave %s" % (len(regenerated) - len(existing_slaves), configuration['id'], slave_table))
        for candidate in regenerated:
            if candidate['datetime slug'] not in existing_slugs:
                fusion_tables.insert_hold(slave_table, candidate)
        # set master event state to 'public' with fresh sync/final dates
        update = {
            'rowid': outdated_row['rowid'],
            'state': 'public',
            'sync date': datetime.today().strftime(FUSION_TABLE_DATE_TIME_FORMAT),
            'final date': final_date
        }
        logging.info("Updated sync timestamp and final date in regenerated row in %s master %s" % (configuration['id'], master_table))
        fusion_tables.update_with_implicit_rowid(master_table, update)
        running_too_long(don_t_run_too_long=True)
    fusion_tables.insert_go(slave_table)
    logging.info("Done syncing outdated public rows in %s master %s" % (configuration['id'], master_table))
Beispiel #6
0
 def post(self, event_slug=None):
     """Handle an event-update POST.

     Merges the submitted JSON into the master row identified by
     *event_slug*, resolves a unique location slug, bumps the sequence
     number, syncs the slave rows, notifies by mail, and writes the event
     slug back as the response body.
     """
     configuration = customer_configuration.get_configuration(self.request)
     original_master = fusion_tables.select_first(configuration['master table'], condition="'event slug' = '%s'" % event_slug)[0]
     data = self.request.POST['data']
     master = json.loads(data)
     master['location slug'] = location_slug(master)
     # check if the new location is in use, if so, reuse its location slug
     same_location_condition = "ST_INTERSECTS('latitude', CIRCLE(LATLNG(%f,%f),2))" % (round(float(master['latitude']), 5), round(float(master['longitude']), 5))  # 3 meter
     same_location = fusion_tables.select_first(configuration['master table'], condition=same_location_condition)
     if same_location:
         logging.info("Using the location slug of an existing location [%s] instead of [%s]" % (same_location[0]['location slug'], master['location slug']))
         master['location slug'] = same_location[0]['location slug']
     else:
         base_location_slug = location_slug(master)
         logging.info("This is a new location [%s]" % base_location_slug)
         master['location slug'] = base_location_slug
         # add (1) or (2) or etc... to the location slug if it's already in use
         # (explicit counter instead of the fragile "'counter' in locals()" check)
         counter = 0
         while fusion_tables.select_first(configuration['master table'], condition="'location slug' = '%s'" % master['location slug']):
             logging.info("Adding (1), (2),... to location slug [%s] because it already existed." % master['location slug'])
             counter += 1
             master['location slug'] = base_location_slug + '-(' + str(counter) + ')'
     if master['location slug'] != original_master['location slug']:
         # otherwise the old location and event remains visible because the FT layer cannot filter them out
         logging.info("Starting task on queue for deleting old versions of moved event %s" % original_master['event slug'])
         taskqueue.add(method="GET", url='/sync/old_version_of_updated_events/%s?id=%s' % (original_master['event slug'], configuration['id']))
     # merge bookkeeping fields from the original row into the update
     master['state'] = 'updated'
     master['sequence'] = int(original_master['sequence']) + 1
     master['entry date'] = original_master['entry date']
     master['update date'] = datetime.today().strftime(FUSION_TABLE_DATE_TIME_FORMAT)
     master['update after sync'] = 'true'  # this will trigger sync_old_version_of_updated_events()
     master['renewal date'] = (datetime.today() + timedelta(days=30 * 6)).strftime(FUSION_TABLE_DATE_TIME_FORMAT)
     master['event slug'] = original_master['event slug']
     master['hashtags'] = ','.join(["#%s#" % slugify(tag) for tag in extract_hash_tags(master['description'])])
     master['rowid'] = original_master['rowid']
     fusion_tables.update_with_implicit_rowid(configuration['master table'], master)
     sync.sync_updated_events(configuration, condition="'event slug' = '%s'" % master['event slug'])
     logging.info("LIST_OF_UPDATED_ROWS [%s] [%s] %s" % (configuration['id'], master['update date'], data))
     # notify by mail about the update
     sender = 'info@%s.appspotmail.com' % (app_id)
     message = mail.EmailMessage(sender=sender, to="*****@*****.**")
     message.subject = "Event updated in MapTiming %s" % configuration['title']
     message.body = "http://%s.maptiming.com#event/%s" % (configuration['id'], master['event slug'])
     logging.info("Sending mail from %s: %s - %s" % (sender, message.subject, message.body))
     message.send()
     # return the web-page content
     self.response.out.write(master['event slug'])
     return
Beispiel #7
0
def sync_cancelled_events(configuration, condition):
    """Sync cancelled events: delete all slave rows of each matching
    master row and set the master row's state to 'cancellation'."""
    cancelled = fusion_tables.select(configuration['master table'], condition=condition)
    logging.info("Syncing %d cancelled rows in %s master %s" % (len(cancelled), configuration['id'], configuration['master table']))
    for row in cancelled:
        # delete cancelled slave rows
        condition = "'event slug' = '%s'" % row['event slug']
        slaves = fusion_tables.select(configuration['slave table'], cols=['rowid'], condition=condition, filter_obsolete_rows=False)
        logging.info("Deleting %d cancelled rows in %s slave %s" % (len(slaves), configuration['id'], configuration['slave table']))
        delete_slaves(configuration['slave table'], slaves)
        # set master event state to 'cancellation' and stamp the sync date
        update = {
            'rowid': row['rowid'],
            'state': 'cancellation',
            'sync date': datetime.today().strftime(FUSION_TABLE_DATE_TIME_FORMAT)
        }
        fusion_tables.update_with_implicit_rowid(configuration['master table'], update)
        # NOTE(review): presumably guards against task deadline overrun — see running_too_long
        running_too_long(don_t_run_too_long=True)
    logging.info("Done syncing cancelled rows in %s master %s" % (configuration['id'], configuration['master table']))
Beispiel #8
0
def sync_old_version_of_updated_events(configuration, condition):
    """Drop superseded slave rows of updated events, then clear the
    masters' 'update after sync' flag."""
    master_table = configuration['master table']
    slave_table = configuration['slave table']
    updated_masters = fusion_tables.select(master_table, condition=condition)
    logging.info("Deleting old slave rows for %d updated events in %s master %s" % (len(updated_masters), configuration['id'], master_table))
    for master_row in updated_masters:
        # old slave rows: same event slug, strictly lower sequence
        stale_condition = "'event slug' = '%s' AND 'sequence' < %s" % (master_row['event slug'], master_row['sequence'])
        stale_rows = fusion_tables.select(slave_table, condition=stale_condition, filter_obsolete_rows=False)
        logging.info("Deleting %d old event rows in %s slave %s" % (len(stale_rows), configuration['id'], slave_table))
        delete_slaves(slave_table, stale_rows)
        logging.info("Deleted %d old rows in %s slave %s" % (len(stale_rows), configuration['id'], slave_table))
        # unflag the updated master row
        unflag = {'rowid': master_row['rowid'], 'update after sync': 'false'}
        fusion_tables.update_with_implicit_rowid(master_table, unflag)
        logging.info("Unflagged updated row %s in %s master %s" % (master_row['rowid'], configuration['id'], master_table))
        running_too_long(don_t_run_too_long=True)
    logging.info("Done deleting old slave rows in %s slave %s" % (configuration['id'], slave_table))
Beispiel #9
0
def sync_new_events(configuration, condition, don_t_run_too_long=False):
    """Publish new master rows: queue slave rows for each, mark the master
    'public', and flush the queued slave inserts in one go."""
    master_table = configuration['master table']
    slave_table = configuration['slave table']
    new_rows = fusion_tables.select(master_table, condition=condition)
    logging.info("Syncing %d new rows in %s master %s" %
                 (len(new_rows), configuration['id'], master_table))
    for new_row in new_rows:
        # create and queue slave dicts for this master row
        (slave_dicts, final_date) = fusion_tables.master_to_slave(new_row)
        for slave_dict in slave_dicts:
            fusion_tables.insert_hold(slave_table, slave_dict)
        # set master event state to 'public' and stamp the sync
        fusion_tables.update_with_implicit_rowid(master_table, {
            'rowid': new_row['rowid'],
            'state': 'public',
            'sync date': datetime.today().strftime(FUSION_TABLE_DATE_TIME_FORMAT),
            'final date': final_date,
        })
        running_too_long(don_t_run_too_long)
    # flush the held slave inserts
    fusion_tables.insert_go(slave_table)
    logging.info("Done syncing new rows in %s master %s" %
                 (configuration['id'], master_table))
Beispiel #10
0
def sync_outdated_events(configuration, condition):
    """Sync outdated public events: regenerate future slave rows that are
    missing (matched on 'datetime slug'), then refresh the master row's
    sync and final dates. Queued inserts are flushed by insert_go()."""
    outdated = fusion_tables.select(configuration['master table'],
                                    condition=condition)
    logging.info(
        "Syncing %d outdated public rows in %s master %s" %
        (len(outdated), configuration['id'], configuration['master table']))
    for row in outdated:
        # select old slave rows
        condition = "'event slug' = '%s'" % row['event slug']
        slaves = fusion_tables.select(configuration['slave table'],
                                      cols=['datetime slug'],
                                      condition=condition,
                                      filter_obsolete_rows=False)
        # create slave dicts
        datetime_slugs = [slave['datetime slug'] for slave in slaves]
        (new_slaves, final_date) = fusion_tables.master_to_slave(row)
        # store slave dicts (only those not already present)
        logging.info("Inserting approx. %d future rows in %s slave %s" %
                     (len(new_slaves) - len(slaves), configuration['id'],
                      configuration['slave table']))
        for new_slave in new_slaves:
            if new_slave['datetime slug'] not in datetime_slugs:
                fusion_tables.insert_hold(configuration['slave table'],
                                          new_slave)
        # set master event state to 'public'
        update = {
            'rowid': row['rowid'],
            'state': 'public',
            'sync date':
            datetime.today().strftime(FUSION_TABLE_DATE_TIME_FORMAT),
            'final date': final_date
        }
        logging.info(
            "Updated sync timestamp and final date in regenerated row in %s master %s"
            % (configuration['id'], configuration['master table']))
        fusion_tables.update_with_implicit_rowid(configuration['master table'],
                                                 update)
        running_too_long(don_t_run_too_long=True)
    # flush the held slave inserts in one batch
    fusion_tables.insert_go(configuration['slave table'])
    logging.info("Done syncing outdated public rows in %s master %s" %
                 (configuration['id'], configuration['master table']))
Beispiel #11
0
def sync_updated_events(configuration, condition, don_t_run_too_long=False):
    """Publish updated master rows and drop their superseded 'public'
    copies; queued slave inserts are flushed at the end in one batch."""
    master_table = configuration['master table']
    slave_table = configuration['slave table']
    updated_rows = fusion_tables.select(master_table, condition=condition)
    logging.info("Syncing %d updated rows in %s master %s" %
                 (len(updated_rows), configuration['id'], master_table))
    for updated_row in updated_rows:
        # old rows are not deleted! New slave rows are just added with incremented sequence number
        (slave_dicts, final_date) = fusion_tables.master_to_slave(updated_row)
        for slave_dict in slave_dicts:
            fusion_tables.insert_hold(slave_table, slave_dict)
        # delete the old master row (the updated row was a copy!)
        public_condition = "'event slug' = '%s' and 'state' = 'public'" % updated_row['event slug']
        stale_masters = fusion_tables.select(master_table, cols=['rowid'], condition=public_condition)
        for stale_master in stale_masters:  # should be only a single row!!
            fusion_tables.delete_with_implicit_rowid(master_table, stale_master)
        # set master event state to 'public'
        fusion_tables.update_with_implicit_rowid(master_table, {
            'rowid': updated_row['rowid'],
            'state': 'public',
            'sync date': datetime.today().strftime(FUSION_TABLE_DATE_TIME_FORMAT),
            'final date': final_date,
        })
        running_too_long(don_t_run_too_long)
    fusion_tables.insert_go(slave_table)
    logging.info("Done syncing updated rows in %s master %s" %
                 (configuration['id'], master_table))
Beispiel #12
0
 def post(self, event_slug=None):
     """Handle an event-update POST.

     Merges the submitted JSON into the master row identified by
     *event_slug*, resolves a unique location slug, bumps the sequence
     number, syncs the slave rows, notifies by mail, and writes the event
     slug back as the response body.
     """
     configuration = customer_configuration.get_configuration(self.request)
     original_master = fusion_tables.select_first(
         configuration['master table'],
         condition="'event slug' = '%s'" % event_slug)[0]
     data = self.request.POST['data']
     master = json.loads(data)
     master['location slug'] = location_slug(master)
     # check if the new location is in use, if so, reuse its location slug
     same_location_condition = "ST_INTERSECTS('latitude', CIRCLE(LATLNG(%f,%f),2))" % (
         round(float(master['latitude']),
               5), round(float(master['longitude']), 5))  # 3 meter
     same_location = fusion_tables.select_first(
         configuration['master table'], condition=same_location_condition)
     if same_location:
         logging.info(
             "Using the location slug of an existing location [%s] instead of [%s]"
             % (same_location[0]['location slug'], master['location slug']))
         master['location slug'] = same_location[0]['location slug']
     else:
         base_location_slug = location_slug(master)
         logging.info("This is a new location [%s]" % base_location_slug)
         master['location slug'] = base_location_slug
         # add (1) or (2) or etc... to the location slug if it's already in use
         # (explicit counter instead of the fragile "'counter' in locals()" check)
         counter = 0
         while fusion_tables.select_first(
                 configuration['master table'],
                 condition="'location slug' = '%s'" %
                 master['location slug']):
             logging.info(
                 "Adding (1), (2),... to location slug [%s] because it already existed."
                 % master['location slug'])
             counter += 1
             master['location slug'] = base_location_slug + '-(' + str(
                 counter) + ')'
     if master['location slug'] != original_master['location slug']:
         # otherwise the old location and event remains visible because the FT layer cannot filter them out
         logging.info(
             "Starting task on queue for deleting old versions of moved event %s"
             % original_master['event slug'])
         taskqueue.add(method="GET",
                       url='/sync/old_version_of_updated_events/%s?id=%s' %
                       (original_master['event slug'], configuration['id']))
     # merge bookkeeping fields from the original row into the update
     master['state'] = 'updated'
     master['sequence'] = int(original_master['sequence']) + 1
     master['entry date'] = original_master['entry date']
     master['update date'] = datetime.today().strftime(
         FUSION_TABLE_DATE_TIME_FORMAT)
     master[
         'update after sync'] = 'true'  # this will trigger sync_old_version_of_updated_events()
     master['renewal date'] = (
         datetime.today() +
         timedelta(days=30 * 6)).strftime(FUSION_TABLE_DATE_TIME_FORMAT)
     master['event slug'] = original_master['event slug']
     master['hashtags'] = ','.join([
         "#%s#" % slugify(tag)
         for tag in extract_hash_tags(master['description'])
     ])
     master['rowid'] = original_master['rowid']
     fusion_tables.update_with_implicit_rowid(configuration['master table'],
                                              master)
     sync.sync_updated_events(configuration,
                              condition="'event slug' = '%s'" %
                              master['event slug'])
     logging.info("LIST_OF_UPDATED_ROWS [%s] [%s] %s" %
                  (configuration['id'], master['update date'], data))
     # notify by mail about the update
     sender = 'info@%s.appspotmail.com' % (app_id)
     message = mail.EmailMessage(sender=sender,
                                 to="*****@*****.**")
     message.subject = "Event updated in MapTiming %s" % configuration[
         'title']
     message.body = "http://%s.maptiming.com#event/%s" % (
         configuration['id'], master['event slug'])
     logging.info("Sending mail from %s: %s - %s" %
                  (sender, message.subject, message.body))
     message.send()
     # return the web-page content
     self.response.out.write(master['event slug'])
     return