def bulkload_table(self, table): """ @param table: input data as a list of dicts """ # before bulkloading, add the derived properties for row in table: row['kwartetsluggy'] = lib.slugify(row['kwartet']) row['kaartsluggy'] = lib.slugify(row['kaart']) row['cardurl'] = '/' + row['kwartetsluggy'] + '/' + row['kaartsluggy'] # to get the URLs of the other cards, first set them in a dict by 'nr' urls = {row['nr']: row['cardurl'] for row in table} for row in table: if row['type'] == 'kwartet': # the other cards in the set are identified based on the 'nr' attribute, based on the # remainder + 1 after division by 4 of the current card 'nr': mod = (row['nr'] - 1) % 4 + 1 bas = row['nr'] - mod # 1 -> 2,3,4 # 2 -> 1,3,4 # 3 -> 1,2,4 # 4 -> 1,2,3 row['firstcardurl'] = urls[bas + (2 if mod <= 1 else 1)] row['secondcardurl'] = urls[bas + (3 if mod <= 2 else 2)] row['thirdcardurl'] = urls[bas + (4 if mod <= 3 else 3)] Model_index.bulkload_table(self, table, 'nrc')
def bulkload_table(self, table):
    """
    Bulkload a kwartet card table after deriving the slug/url properties.

    @param table: input data as a list of dicts
    """
    # Derive the slugs and the card URL for every row up front.
    for entry in table:
        entry['kwartetsluggy'] = lib.slugify(entry['kwartet'])
        entry['kaartsluggy'] = lib.slugify(entry['kaart'])
        entry['cardurl'] = '/' + entry['kwartetsluggy'] + '/' + entry['kaartsluggy']
    # Index every card URL by its 'nr' so sibling cards can be looked up.
    urls = {entry['nr']: entry['cardurl'] for entry in table}
    for entry in table:
        if entry['type'] != 'kwartet':
            continue
        # Cards come in groups of four consecutive 'nr' values; derive this
        # card's 1-based position within its group and the group's base offset.
        position = (entry['nr'] - 1) % 4 + 1
        base = entry['nr'] - position
        # The three siblings are the remaining positions of the group, ascending.
        siblings = [base + p for p in range(1, 5) if p != position]
        entry['firstcardurl'] = urls[siblings[0]]
        entry['secondcardurl'] = urls[siblings[1]]
        entry['thirdcardurl'] = urls[siblings[2]]
    Model_index.bulkload_table(self, table, 'nrc')
def get(self, *args, **kwargs): style = self.request.get("style") # hidden feature now = self.request.get("now") # hidden feature if not now: now = '' # no fallback needed here! configuration = customer_configuration.get_configuration(self.request) # detect language and use configuration as default language = get_language(self.request, configuration) # apply commercial limit limit = customer_configuration.get_limit(self.request) template = jinja_environment.get_template('map.html') # map colors to tags colors = ['purple', 'blue', 'teal', 'lightgreen', 'amber', 'red'] tags = configuration['tags'].split(',') tag_colors = {} for i, tag in enumerate(tags): tag_colors[slugify(tag)] = colors[i % 6] tag_colors['all-tags'] = 'white' content = template.render( configuration=configuration, limit=limit if limit else 0, # e.g. "2014-07-19 09:00:00" tag_colors=tag_colors, tag_colors_json=json.dumps(tag_colors), day_of_today=date.today().day, day_of_tomorrow=(date.today() + timedelta(days=1)).day, slugify=slugify, localization=localization[language], now=now, style=style) # return the web-page content self.response.out.write(content) return
def download_images(self, images_for_download):
    """
    Download each image and upload it to Google Drive (resumable).

    @param images_for_download: dict by url of {'url':...}
    @return: dict by url of {'id':..., 'url':..., 'fileExtension':...};
             urls whose upload failed are omitted.
    The filename is arbitrarily based on the url.
    """
    d = {}
    for url in images_for_download:
        try:
            filename = slugify(url)
            # Read the whole image into memory, then close the connection.
            # (The original leaked the urllib handle; it was never closed.)
            remote = urllib.urlopen(url)
            try:
                fd = io.BytesIO(remote.read())
            finally:
                remote.close()
            media = MediaIoBaseUpload(fd, mimetype='image/png',
                                      chunksize=1024 * 1024, resumable=True)
            metadata = {
                'title': filename,
                'parents': [{'id': google_drive_missale_images_folder_id}]
            }
            request = self._drive_service.files().insert(media_body=media,
                                                         body=metadata)
            # Drive resumable upload: loop until the final response arrives.
            response = None
            while response is None:
                status, response = request.next_chunk()
                if status:
                    logging.info("Uploaded %d%%." % int(status.progress() * 100))
            logging.info("Upload Complete!")
            # avoid shadowing the builtins `id` and `file` (original did both)
            d[url] = {'id': response['id'],
                      'url': url,
                      'fileExtension': response['fileExtension']}
        except httplib.HTTPException as e:
            # best effort: log the failure and continue with remaining images
            logging.info("Upload failed: " + e.message)
    return d
def get(self, *args, **kwargs):
    """Render the customer's map page with per-tag colors and localization."""
    req = self.request
    style = req.get("style")  # hidden feature
    now = req.get("now") or ''  # hidden feature; empty string when absent
    configuration = customer_configuration.get_configuration(req)
    # language detection, falling back to the configuration's default
    language = get_language(req, configuration)
    # commercial limit, e.g. "2014-07-19 09:00:00"
    limit = customer_configuration.get_limit(req)
    template = jinja_environment.get_template('map.html')
    # assign each tag a color, cycling through the six-color palette
    palette = ['purple', 'blue', 'teal', 'lightgreen', 'amber', 'red']
    tag_colors = {slugify(tag): palette[index % 6]
                  for index, tag in enumerate(configuration['tags'].split(','))}
    tag_colors['all-tags'] = 'white'
    today = date.today()
    content = template.render(configuration=configuration,
                              limit=limit if limit else 0,
                              tag_colors=tag_colors,
                              tag_colors_json=json.dumps(tag_colors),
                              day_of_today=today.day,
                              day_of_tomorrow=(today + timedelta(days=1)).day,
                              slugify=slugify,
                              localization=localization[language],
                              now=now,
                              style=style)
    # write the rendered web-page content back to the client
    self.response.out.write(content)
    return
def put(self):
    """Save the project.

    The slug is derived from the name on the first save only and never
    changed afterwards; the textile-rendered html is refreshed every save.
    """
    if not self.slug:
        self.slug = slugify(unicode(self.name))
    # cache the rendered description so display doesn't re-run textile
    self.html = textile(unicode(self.description))
    super(Project, self).put()
def post(self):
    """Create a new Project from the posted 'name' when valid and unique."""
    # reject unauthenticated requests with a 403
    user = users.get_current_user()
    if not user:
        self.render_403()
        return
    name = self.request.get("name")
    # the name must be present and contain more than whitespace
    if not name or not name.strip():
        return
    # refuse duplicates on the exact name ...
    if Project.all().filter('name =', name).count() != 0:
        return
    # ... and on the derived slug
    if Project.all().filter('slug =', slugify(unicode(name))).count() != 0:
        return
    try:
        project = Project(
            name=name,
            user=users.get_current_user(),
        )
        project.put()
        logging.info("project added: %s" % project.name)
    except db.BadValueError as e:
        logging.error("error adding project: %s" % e)
def put(self): "Overridden save method" # we save the html here as it's faster than processing # everytime we display it self.html = textile(unicode(self.description)) # internal url is set on first save and then not changed # as the admin interface doesn't allow for changing name if not self.internal_url: slug = slugify(unicode(self.name)) self.internal_url = "/%s/%s/" % (self.project.slug, slug) # each issue has a per project unique identifier which is based # on an integer. This integer is stored in counter in the datastore # which is associated with the project if not self.identifier: counter = Counter.get_by_key_name("counter/%s" % self.project.name) if counter is None: # if it's the first issue we need to create the counter counter = Counter( key_name="counter/%s" % self.project.name, project=self.project, count=0, ) # increment the count counter.count += 1 counter.put() # save the count against the issue for use in the identifier self.identifier = counter.count # if the bug gets fixed then we store that date # if it's later marked as open we clear the date if self.fixed: self.fixed_date = datetime.now() else: self.fixed_date = None # if the bug has been fixed then send an email if self.fixed and self.email: mail.send_mail( sender="*****@*****.**", to=self.email, subject="[GitBug] Your bug has been fixed", body= """You requested to be emailed when a bug on GitBug was fixed: Issue name: %s Description: %s ------- %s ------- Thanks for using GitBug <http://gitbug.appspot.com>. A very simple issue tracker. """ % (self.name, self.description, self.fixed_description)) super(Issue, self).put()
def put(self): "Overridden save method" # we save the html here as it's faster than processing # everytime we display it self.html = textile(unicode(self.description)) # internal url is set on first save and then not changed # as the admin interface doesn't allow for changing name if not self.internal_url: slug = slugify(unicode(self.name)) self.internal_url = "/%s/%s/" % (self.project.slug, slug) # each issue has a per project unique identifier which is based # on an integer. This integer is stored in counter in the datastore # which is associated with the project if not self.identifier: counter = Counter.get_by_key_name("counter/%s" % self.project.name) if counter is None: # if it's the first issue we need to create the counter counter = Counter( key_name="counter/%s" % self.project.name, project=self.project, count=0, ) # increment the count counter.count += 1 counter.put() # save the count against the issue for use in the identifier self.identifier = counter.count # if the bug gets fixed then we store that date # if it's later marked as open we clear the date if self.fixed: self.fixed_date = datetime.now() self.priority = None else: self.fixed_date = None # if the bug has been fixed then send an email if self.fixed and self.email: mail.send_mail(sender="*****@*****.**", to=self.email, subject="[GitBug] Your bug has been fixed", body="""You requested to be emailed when a bug on GitBug was fixed: Issue name: %s Description: %s ------- %s ------- Thanks for using GitBug <http://gitbug.appspot.com>. A very simple issue tracker. """ % (self.name, self.description, self.fixed_description)) super(Issue, self).put()
def orphaned_topic_files():
    """Print the expected notebook path for every topic, appending ' ***'
    to those that have no matching .ipynb file under nbsource/."""
    slugs = [slugify(entry[0].strip()) for entry in get_topics()]
    # slugs of the notebook files that actually exist in nbsource/
    existing = [os.path.splitext(os.path.basename(path))[0]
                for path in glob(os.path.join("nbsource", "*.ipynb"))]
    for slug in slugs:
        marker = "" if slug in existing else " ***"
        print(os.path.join("project", "nbsource", slug + ".ipynb"), marker)
def random_tags(tags_comma_separated):
    """Return a comma-separated string of hashtags built from a random
    subset (roughly 30%) of the given comma-separated tags."""
    chosen = []
    for tag in tags_comma_separated.split(','):
        # each tag independently has a 3-in-10 chance of being kept
        if randint(1, 10) > 7:
            chosen.append("#%s#" % slugify(tag))
    return ','.join(chosen)
def __init__(self, title, content, time, **kwargs):
    """Build a post: the slug derives from the title, the tags from the
    content's top collocations; extra keyword args become attributes."""
    self.title = title
    self.content = content
    self.time = time
    self.slug = lib.slugify(title)
    # keep at most five two-word collocations as tags
    pairs = lib.collocations(content, threshold=2)
    self.tags = [' '.join(pair) for pair in pairs][:5]
    for name, value in kwargs.items():
        setattr(self, name, value)
def test_slugify(self):
    """slugify joins words with '-', strips symbols, and drops underscores."""
    # (text, expected slug) pairs; renamed so the builtin `input` is not
    # shadowed (the original used `input`/`output` as loop variables)
    cases = [
        ('test', 'test'),
        ('test test', 'test-test'),
        ('test&^%', 'test'),
        ('test_test', 'testtest'),
    ]
    for text, expected in cases:
        self.assertEqual(slugify(text), expected)
def put(self):
    # Recompute the slug from the title on every save.
    self.slug = slugify(unicode(self.title))
    # The internal url (a dated permalink) is fixed on the first save.
    if not self.internal_url:
        self.internal_url = "/%s/%s/" % (self.publish_date.strftime("%Y/%m/%d"), self.slug)
    # NOTE(review): new Tag entities are created on *every* save, which
    # looks like it would duplicate tags when a page is re-saved -- confirm.
    for tag in self.tags:
        obj = Tag(
            name=tag,
            title=self.title,
            url=self.internal_url,
        )
        obj.put()
    super(Page, self).put()
def topic_and_file(words, exists=True):
    """Takes a list of words returning the topics that match.
    exists: True, file must exist
    False, must not exist
    None: don't care."""
    search_words = [word.lower() for word in words]
    topics = get_topics()
    slugs = [slugify(topic[0].strip()) for topic in topics]
    for (title, indent), slug in zip(topics, slugs):
        title_words = set([word.lower() for word in title.split()])
        # a topic matches when every search word matches one of its title words
        if all(matching(word, title_words) for word in search_words):
            # NOTE(review): the test is `os.path.exists(...) != exists`, i.e. it
            # prints titles whose file existence DIFFERS from `exists`; that
            # appears inverted relative to the docstring -- confirm the intent.
            if (exists is None) or (os.path.exists(os.path.join("project", "nbsource", slug + ".ipynb")) != exists):
                # indent reproduces the outline hierarchy in the output
                print(" " * indent + title)
def find_images_for_download(self, d):
    """
    @param d: an empty dict
    @return: the dict (by url) filled with rows from the spreadsheet index
             that had an url but no id
    """
    # one timestamp shared by every row found in this pass
    sync = time.strftime('%Y-%m-%d %H:%M:%S')
    for i in self.index_illustrations:
        # skip rows that already have an id, lack an url, or are marked wasted
        if not i['id'] and i['url'] and not ('wasted' in i and i['wasted']):
            # prefer a composed caption; fall back to the sheet's caption,
            # then to a slug of the url
            caption = compose_caption(
                title=i['title'],
                artist=i['artist'],
                year=i['year'],
                location=i['location'],
                copyright=i['copyright']
            ) or i['caption'] or slugify(i['url'])
            filename = slugify(caption)  # file extension is added later on
            d[i['url']] = {
                'url': i['url'],
                'caption': caption,
                'filename': filename,
                'sync': sync
            }
            logging.debug("find_images_for_download() found %s [%s]" % (caption, i['url']))
    # BUG FIX: the docstring promises the dict is returned, but the original
    # only mutated it in place; return it so the contract holds.
    return d
def post(self, event_slug=None):
    # Update an existing event (identified by event_slug) from the posted JSON.
    configuration = customer_configuration.get_configuration(self.request)
    original_master = fusion_tables.select_first(configuration['master table'], condition="'event slug' = '%s'" % event_slug)[0]
    data = self.request.POST['data']
    master = json.loads(data)
    master['location slug'] = location_slug(master)
    # check if the new location is in use, if so, reuse it's location slug
    same_location_condition = "ST_INTERSECTS('latitude', CIRCLE(LATLNG(%f,%f),2))" % (round(float(master['latitude']), 5), round(float(master['longitude']), 5))  # 3 meter
    same_location = fusion_tables.select_first(configuration['master table'], condition=same_location_condition)
    if same_location:
        logging.info("Using the location slug of an existing location [%s] instead of [%s]" % (same_location[0]['location slug'], master['location slug']))
        master['location slug'] = same_location[0]['location slug']
    else:
        base_location_slug = location_slug(master)
        logging.info("This is a new location [%s]" % base_location_slug)
        master['location slug'] = base_location_slug
        # add (1) or (2) or etc... to the location slug if it's already in use
        while fusion_tables.select_first(configuration['master table'], condition="'location slug' = '%s'" % master['location slug']):
            logging.info("Adding (1), (2),... to location slug [%s] because it already existed." % master['location slug'])
            # NOTE(review): probing locals() to initialize the counter is fragile
            counter = 1 if 'counter' not in locals() else counter + 1
            master['location slug'] = base_location_slug + '-(' + str(counter) + ')'
    if master['location slug'] != original_master['location slug']:
        # otherwise the old location and event remains visible because the FT layer cannot filter them out
        logging.info("Starting task on queue for deleting old versions of moved event %s" % original_master['event slug'])
        taskqueue.add(method="GET", url='/sync/old_version_of_updated_events/%s?id=%s' % (original_master['event slug'], configuration['id']))
    master['state'] = 'updated'
    master['sequence'] = int(original_master['sequence']) + 1
    master['entry date'] = original_master['entry date']
    master['update date'] = datetime.today().strftime(FUSION_TABLE_DATE_TIME_FORMAT)
    master['update after sync'] = 'true'  # this will trigger sync_old_version_of_updated_events()
    master['renewal date'] = (datetime.today() + timedelta(days=30 * 6)).strftime(FUSION_TABLE_DATE_TIME_FORMAT)
    master['event slug'] = original_master['event slug']
    master['hashtags'] = ','.join(["#%s#" % slugify(tag) for tag in extract_hash_tags(master['description'])])
    master['rowid'] = original_master['rowid']
    fusion_tables.update_with_implicit_rowid(configuration['master table'], master)
    sync.sync_updated_events(configuration, condition="'event slug' = '%s'" % master['event slug'])
    logging.info("LIST_OF_UPDATED_ROWS [%s] [%s] %s" % (configuration['id'], master['update date'], data))
    sender = 'info@%s.appspotmail.com' % (app_id)
    message = mail.EmailMessage(sender=sender, to="*****@*****.**")
    message.subject = "Event updated in MapTiming %s" % configuration['title']
    message.body = "http://%s.maptiming.com#event/%s" % (configuration['id'], master['event slug'])
    logging.info("Sending mail from %s: %s - %s" % (sender, message.subject, message.body))
    message.send()
    # return the web-page content
    self.response.out.write(master['event slug'])
    return
def post(self):
    # Create a new event from the posted JSON and insert it in the master table.
    configuration = customer_configuration.get_configuration(self.request)
    data = self.request.POST['data']
    master = json.loads(data)
    # check if the location is in use, if so, reuse it's location slug
    same_location_condition = "ST_INTERSECTS('latitude', CIRCLE(LATLNG(%f,%f),2))" % (round(float(master['latitude']), 5), round(float(master['longitude']), 5))  # 3 meter
    same_location = fusion_tables.select_first(configuration['master table'], condition=same_location_condition)
    if same_location:
        logging.info("Using the location slug of an existing location [%s] instead of [%s]" % (same_location[0]['location slug'], master['location slug']))
        master['location slug'] = same_location[0]['location slug']
    else:
        base_location_slug = location_slug(master)
        logging.info("This is a new location [%s]" % base_location_slug)
        master['location slug'] = base_location_slug
        # add (1) or (2) or etc... to the location slug if it's already in use
        while fusion_tables.select_first(configuration['master table'], condition="'location slug' = '%s'" % master['location slug']):
            logging.info("Adding (1), (2),... to location slug [%s] because it already existed." % master['location slug'])
            counter = 1 if 'counter' not in locals() else counter + 1
            master['location slug'] = base_location_slug + '-(' + str(counter) + ')'
    master['state'] = 'new'
    master['sequence'] = 1
    master['entry date'] = datetime.today().strftime(FUSION_TABLE_DATE_TIME_FORMAT)
    master['update date'] = datetime.today().strftime(FUSION_TABLE_DATE_TIME_FORMAT)
    master['renewal date'] = (datetime.today() + timedelta(days=30 * 6)).strftime(FUSION_TABLE_DATE_TIME_FORMAT)
    base_event_slug = event_slug(master)
    master['event slug'] = base_event_slug
    # add (1) or (2) or etc... to the event slug if it's already in use
    while fusion_tables.select_first(configuration['master table'], condition="'event slug' = '%s'" % master['event slug']):
        # NOTE(review): 'counter' may carry over from the location-slug loop
        # above via locals(), so this numbering might not start at 1 -- confirm
        counter = 1 if 'counter' not in locals() else counter + 1
        master['event slug'] = base_event_slug + '-(' + str(counter) + ')'
    # hashtags
    master['hashtags'] = ','.join(["#%s#" % slugify(tag) for tag in extract_hash_tags(master['description'])])
    fusion_tables.insert(configuration['master table'], master)
    sync.sync_new_events(configuration, condition="'event slug' = '%s'" % master['event slug'])
    logging.info("LIST_OF_ADDED_ROWS [%s] [%s] %s" % (configuration['id'], master['update date'], data))
    sender = 'info@%s.appspotmail.com' % (app_id)
    message = mail.EmailMessage(sender=sender, to="*****@*****.**")
    message.subject = "New event added to MapTiming %s" % configuration['title']
    message.body = "http://%s.maptiming.com#event/%s" % (configuration['id'], master['event slug'])
    logging.info("Sending mail from %s: %s - %s" % (sender, message.subject, message.body))
    message.send()
    # return the web-page content
    self.response.out.write(master['event slug'])
    return
def mktopic(title):
    "Takes a list of the words of a topic title and builds the appropriate notebook file."
    os.chdir(lib.get_project_dir())
    template_env = Environment(loader=FileSystemLoader("data/templates"))
    # -t template would be nice, but this will do for now
    src_template = template_env.get_template("base.ipynb")
    now = datetime.datetime.today()
    full_title = " ".join(title)
    slug = lib.slugify(full_title)
    dst_file = os.path.join("nbsource", slug + ".ipynb")
    # If the topic exists do not overwrite it XXX [unless option set].
    if os.path.isfile(dst_file):
        sys.exit("file {} already exists".format(dst_file))
    # we are writing a source notebook
    render_context = {"slug": slug,
                      "title": full_title,
                      "date": now.date(),
                      "time": now.time(),
                      "src_file": dst_file,
                      "template": src_template.filename}
    with open(dst_file, "w") as dst_nb_file:
        dst_nb_file.write(src_template.render(render_context))
# Parse outline.txt into a tree of Snippet objects; indentation in the
# outline file encodes the hierarchy. (Relies on snippet_stack, indent_stack,
# last_snippet, slug_snippets, slug_list and root_snippet defined earlier.)
snippet_list = []
top_level_snippets = []
for line in nullstrip(open("outline.txt", "r")):
    assert len(snippet_stack), "Nothing on the stack!"
    name_of_article = line.strip().rstrip(" *")
    indent = (len(line) - len(line.lstrip()))
    if indent > indent_stack[-1]:  # deeper level than the previous line
        indent_stack.append(indent)
        snippet_stack.append(last_snippet)
    else:
        # pop back up to the matching indentation level
        while indent < indent_stack[-1]:
            indent_stack.pop()
            snippet_stack.pop()
        if indent > indent_stack[-1]:
            # BUG FIX: sys.exit() takes a single argument; the original passed
            # two, which raised TypeError instead of exiting with the message.
            sys.exit("Mismatched indentation on " + name_of_article)
    slug = slugify(name_of_article)
    snippet = Snippet(title=name_of_article, slug=slug,
                      indent_level=len(indent_stack), section=False, snippets=[])
    slug_snippets[slug] = snippet
    snippet_stack[-1].snippets.append(snippet)
    slug_list.append(slug)
    snippet_list.append(snippet)
    if snippet.indent_level == 1:
        top_level_snippets.append(snippet)
    last_snippet = snippet
assert top_level_snippets == root_snippet.snippets
# Establish jinja templating environment
template_env = Environment(loader=FileSystemLoader("data/templates"))
src_template = template_env.get_template("source_base.ipynb")
def test_slugify(self):
    """Exercise slugify on representative inputs."""
    # each pair is (raw text, expected slug)
    for raw, expected in [("test", "test"),
                          ("test test", "test-test"),
                          ("test&^%", "test"),
                          ("test_test", "testtest")]:
        self.assertEqual(slugify(raw), expected)
def master_to_slave(master):
    # returns a tuple of a list of slave dicts
    # ('list' because a recurring date will produce multiple rows)
    # and the final date as second tuple element
    # first create a dict with the copied attributes
    slave = {}
    for key in ['event slug', 'event name', 'description', 'contact', 'website',
                'registration required', 'sequence', 'location name', 'address',
                'postal code', 'latitude', 'longitude', 'location slug',
                'location details', 'tags', 'hashtags']:
        slave[key] = master[key]
    previous_start = "1970-01-01 00:00:00"  # sentinel for the first occurrence
    # then calculate the date occurrences
    if master['calendar rule']:
        # start field holds the start date for the recurrence rule
        start_date = datetime.strptime(master['start'], FUSION_TABLE_DATE_TIME_FORMAT).date()
        end_date = datetime.strptime(master['end'], FUSION_TABLE_DATE_TIME_FORMAT).date()
        days = end_date - start_date  # the event's duration as a timedelta
        today_date = datetime.today().date()
        if start_date <= today_date:
            start_date = today_date  # don't generate occurrences in the past
        data = {'year': start_date.year, 'month': start_date.month, 'day': start_date.day,
                'rrule': master['calendar rule'], 'format': DATE_FORMAT,
                'batch_size': 10, 'start': 0}
        start = datetime.strptime(master['start'], FUSION_TABLE_DATE_TIME_FORMAT).time()
        end = datetime.strptime(master['end'], FUSION_TABLE_DATE_TIME_FORMAT).time()
        today_plus_13_months_date = today_date + timedelta(days=13 * 30)  # naive, don't care
        slaves = []
        done = False
        final_date = ''
        # fetch occurrences in batches of 10 until past the 13-month horizon
        while True:
            occurrences = [o for o in calculate_occurrences(data)['occurrences'] if o['type'] != 'exdate']
            for occurrence in occurrences:
                start_date = datetime.strptime(occurrence['date'], ISO_DATE_TIME_FORMAT).date()
                if today_date <= start_date < today_plus_13_months_date:
                    # only add events within one year timeframe from now
                    new_slave = copy.deepcopy(slave)
                    new_slave['start'] = datetime.combine(start_date, start).strftime(FUSION_TABLE_DATE_TIME_FORMAT)
                    new_slave['end'] = datetime.combine(start_date + days, end).strftime(FUSION_TABLE_DATE_TIME_FORMAT)
                    new_slave['datetime slug'] = slugify(new_slave['start'])
                    # each row links back to the previous occurrence's start
                    new_slave['previous start'] = previous_start
                    previous_start = new_slave['start']
                    # string comparison works because of the fixed date format
                    if final_date < new_slave['end']:
                        final_date = new_slave['end']
                    slaves.append(new_slave)
                else:
                    done = True
                    break  # for
            if occurrences and not done:
                data['start'] += data['batch_size']
            else:
                break  # while
        return (slaves, final_date)
    else:
        # not recurring, can span multiple days
        slave['start'] = master['start']
        slave['end'] = master['end']
        slave['datetime slug'] = slugify(slave['start'])
        slave['previous start'] = previous_start
        return ([slave], slave['end'])
def post(self):
    # Create a new event from the posted JSON payload (duplicate of the
    # handler above, kept in sync).
    configuration = customer_configuration.get_configuration(self.request)
    data = self.request.POST['data']
    master = json.loads(data)
    # check if the location is in use, if so, reuse it's location slug
    same_location_condition = "ST_INTERSECTS('latitude', CIRCLE(LATLNG(%f,%f),2))" % (round(float(master['latitude']), 5), round(float(master['longitude']), 5))  # 3 meter
    same_location = fusion_tables.select_first(configuration['master table'], condition=same_location_condition)
    if same_location:
        logging.info("Using the location slug of an existing location [%s] instead of [%s]" % (same_location[0]['location slug'], master['location slug']))
        master['location slug'] = same_location[0]['location slug']
    else:
        base_location_slug = location_slug(master)
        logging.info("This is a new location [%s]" % base_location_slug)
        master['location slug'] = base_location_slug
        # add (1) or (2) or etc... to the location slug if it's already in use
        while fusion_tables.select_first(configuration['master table'], condition="'location slug' = '%s'" % master['location slug']):
            logging.info("Adding (1), (2),... to location slug [%s] because it already existed." % master['location slug'])
            counter = 1 if 'counter' not in locals() else counter + 1
            master['location slug'] = base_location_slug + '-(' + str(counter) + ')'
    master['state'] = 'new'
    master['sequence'] = 1
    master['entry date'] = datetime.today().strftime(FUSION_TABLE_DATE_TIME_FORMAT)
    master['update date'] = datetime.today().strftime(FUSION_TABLE_DATE_TIME_FORMAT)
    master['renewal date'] = (datetime.today() + timedelta(days=30 * 6)).strftime(FUSION_TABLE_DATE_TIME_FORMAT)
    base_event_slug = event_slug(master)
    master['event slug'] = base_event_slug
    # add (1) or (2) or etc... to the event slug if it's already in use
    while fusion_tables.select_first(configuration['master table'], condition="'event slug' = '%s'" % master['event slug']):
        # NOTE(review): 'counter' may carry over from the location-slug loop
        # via locals(), so numbering might not start at 1 -- confirm
        counter = 1 if 'counter' not in locals() else counter + 1
        master['event slug'] = base_event_slug + '-(' + str(counter) + ')'
    # hashtags
    master['hashtags'] = ','.join(["#%s#" % slugify(tag) for tag in extract_hash_tags(master['description'])])
    fusion_tables.insert(configuration['master table'], master)
    sync.sync_new_events(configuration, condition="'event slug' = '%s'" % master['event slug'])
    logging.info("LIST_OF_ADDED_ROWS [%s] [%s] %s" % (configuration['id'], master['update date'], data))
    sender = 'info@%s.appspotmail.com' % (app_id)
    message = mail.EmailMessage(sender=sender, to="*****@*****.**")
    message.subject = "New event added to MapTiming %s" % configuration['title']
    message.body = "http://%s.maptiming.com#event/%s" % (configuration['id'], master['event slug'])
    logging.info("Sending mail from %s: %s - %s" % (sender, message.subject, message.body))
    message.send()
    # return the web-page content
    self.response.out.write(master['event slug'])
    return
def post(self, event_slug=None):
    # Update an existing event (identified by event_slug) from the posted
    # JSON (duplicate of the handler above, kept in sync).
    configuration = customer_configuration.get_configuration(self.request)
    original_master = fusion_tables.select_first(configuration['master table'], condition="'event slug' = '%s'" % event_slug)[0]
    data = self.request.POST['data']
    master = json.loads(data)
    master['location slug'] = location_slug(master)
    # check if the new location is in use, if so, reuse it's location slug
    same_location_condition = "ST_INTERSECTS('latitude', CIRCLE(LATLNG(%f,%f),2))" % (round(float(master['latitude']), 5), round(float(master['longitude']), 5))  # 3 meter
    same_location = fusion_tables.select_first(configuration['master table'], condition=same_location_condition)
    if same_location:
        logging.info("Using the location slug of an existing location [%s] instead of [%s]" % (same_location[0]['location slug'], master['location slug']))
        master['location slug'] = same_location[0]['location slug']
    else:
        base_location_slug = location_slug(master)
        logging.info("This is a new location [%s]" % base_location_slug)
        master['location slug'] = base_location_slug
        # add (1) or (2) or etc... to the location slug if it's already in use
        while fusion_tables.select_first(configuration['master table'], condition="'location slug' = '%s'" % master['location slug']):
            logging.info("Adding (1), (2),... to location slug [%s] because it already existed." % master['location slug'])
            # NOTE(review): probing locals() to initialize the counter is fragile
            counter = 1 if 'counter' not in locals() else counter + 1
            master['location slug'] = base_location_slug + '-(' + str(counter) + ')'
    if master['location slug'] != original_master['location slug']:
        # otherwise the old location and event remains visible because the FT layer cannot filter them out
        logging.info("Starting task on queue for deleting old versions of moved event %s" % original_master['event slug'])
        taskqueue.add(method="GET", url='/sync/old_version_of_updated_events/%s?id=%s' % (original_master['event slug'], configuration['id']))
    master['state'] = 'updated'
    master['sequence'] = int(original_master['sequence']) + 1
    master['entry date'] = original_master['entry date']
    master['update date'] = datetime.today().strftime(FUSION_TABLE_DATE_TIME_FORMAT)
    master['update after sync'] = 'true'  # this will trigger sync_old_version_of_updated_events()
    master['renewal date'] = (datetime.today() + timedelta(days=30 * 6)).strftime(FUSION_TABLE_DATE_TIME_FORMAT)
    master['event slug'] = original_master['event slug']
    master['hashtags'] = ','.join(["#%s#" % slugify(tag) for tag in extract_hash_tags(master['description'])])
    master['rowid'] = original_master['rowid']
    fusion_tables.update_with_implicit_rowid(configuration['master table'], master)
    sync.sync_updated_events(configuration, condition="'event slug' = '%s'" % master['event slug'])
    logging.info("LIST_OF_UPDATED_ROWS [%s] [%s] %s" % (configuration['id'], master['update date'], data))
    sender = 'info@%s.appspotmail.com' % (app_id)
    message = mail.EmailMessage(sender=sender, to="*****@*****.**")
    message.subject = "Event updated in MapTiming %s" % configuration['title']
    message.body = "http://%s.maptiming.com#event/%s" % (configuration['id'], master['event slug'])
    logging.info("Sending mail from %s: %s - %s" % (sender, message.subject, message.body))
    message.send()
    # return the web-page content
    self.response.out.write(master['event slug'])
    return
# NOTE(review): the next two statements are the dangling tail of
# orphaned_topic_files() (its `def` line is not part of this fragment);
# they are kept verbatim.
comment = " ***" if slug not in file_slugs else ""
print(os.path.join("project", "nbsource", slug + ".ipynb"), comment)


if __name__ == "__main__":
    # possible -d option for directory?
    exists = orphaned = False
    # XXX one switch only ... use proper arg parsing
    if len(sys.argv) > 1 and sys.argv[1][0] == "-":
        if sys.argv[1] == "-u":
            exists = True
        elif sys.argv[1] == "-o":
            orphaned = True
        elif sys.argv[1] == "-a":
            exists = None
        else:
            # BUG FIX: the original did `import sys` here, but sys.argv is
            # already used above, so that local import was dead code -- removed.
            sys.exit("topics.py [-o | -u | -a] [List of words]")
        del sys.argv[1]
    os.chdir(get_project_dir())
    if orphaned:
        orphaned_topic_files()
    else:
        topic_list = slugify(" ".join(sys.argv[1:])).split("-")
        # if topic_list == [""]:  # Annoying special case?
        #     topic_list = []
        topic_and_file(topic_list, exists=exists)
def post(self):
    """Create an Issue from the posted fields, reusing the Site registered
    for the given url or creating a new one when none exists yet."""
    # get url and then decide if we have a site already or need to create one
    name = self.request.get("name")
    url = self.request.get("url")
    # EAFP: an empty query result raises IndexError on [0]
    try:
        site = Site.gql("WHERE url=:1", url)[0]
    except IndexError:
        # no site registered for this url yet, so create one
        site = Site(
            name=name,
            url=url,
            slug=slugify(name),
        )
        site.put()
    issue = Issue(
        title=self.request.get("title"),
        description=self.request.get("description"),
        site=site,
    )
    issue.put()
    # prepare the context and render the index template
    context = {
        'issue': issue,
        'sites': Site.all(),
    }
    path = os.path.join(os.path.dirname(__file__), 'templates', 'index.html')
    self.response.out.write(template.render(path, context))