def get_location(request):
    '''
    Returns a List of Location objects in JSON if no object specified.
    '''
    log.info('location_api.cors_origins_for("get") = {0}'
             .format(location_api.cors_origins_for('get')))

    # first, let's just check that we can get to the data
    locations_dir = request.registry.settings['locations_dir']
    base_len = len(locations_dir.split(sep))
    location_content = []
    location_file_dirs = []
    for (path, _dirnames, filenames) in walk(locations_dir):
        if len(path.split(sep)) == base_len + 1:
            # only look at the top-level location folders
            for f in filenames:
                if f == 'compiled.json':
                    with open(join(path, f), 'r') as fd:
                        location_content.append(ujson.load(fd))
                    location_file_dirs.append(join(path,
                                                   basename(path) + '_save'))

    slug = obj_id_from_url(request)
    if slug:
        matching = [(i, c) for i, c in enumerate(location_content)
                    if slugify.slugify_url(c['name']) == slug]
        if matching:
            session_lock = acquire_session_lock(request)
            log.info(' session lock acquired (sess:{}, thr_id: {})'
                     .format(id(session_lock), current_thread().ident))
            try:
                location_file = location_file_dirs[matching[0][0]]
                log.info('load location: {0}'.format(location_file))
                load_location_file(location_file, request)
            except Exception:
                raise cors_exception(request, HTTPInternalServerError,
                                     with_stacktrace=True)
            finally:
                session_lock.release()
                log.info(' session lock released (sess:{}, thr_id: {})'
                         .format(id(session_lock), current_thread().ident))

            return matching[0][1]
        else:
            raise cors_exception(request, HTTPNotFound)
    else:
        features = [Feature(geometry=Point(c['geometry']['coordinates']),
                            properties={'title': c['name'],
                                        'slug': slugify.slugify_url(c['name']),
                                        'content': c['steps']})
                    for c in location_content]
        return FeatureCollection(features)
def get_location(request):
    '''
    Returns a List of Location objects in JSON if no object specified.
    '''
    log.info('location_api.cors_origins_for("get") = {0}'
             .format(location_api.cors_origins_for('get')))

    # first, let's just check that we can get to the data
    locations_dir = request.registry.settings['locations_dir']
    base_len = len(locations_dir.split(sep))
    location_content = []
    location_file_dirs = []
    for (path, _dirnames, filenames) in walk(locations_dir):
        if len(path.split(sep)) == base_len + 1:
            for f in filenames:
                if f == 'compiled.json':
                    with open(join(path, f), 'r') as fd:
                        location_content.append(ujson.load(fd))
                    location_file_dirs.append(join(path,
                                                   basename(path) + '_save'))

    slug = obj_id_from_url(request)
    if slug:
        matching = [(i, c) for i, c in enumerate(location_content)
                    if slugify.slugify_url(c['name']) == slug]
        if matching:
            gnome_sema = request.registry.settings['py_gnome_semaphore']
            gnome_sema.acquire()
            try:
                location_file = location_file_dirs[matching[0][0]]
                log.info('load location: {0}'.format(location_file))
                load_location_file(location_file, request)
            except Exception:
                raise cors_exception(request, HTTPInternalServerError,
                                     with_stacktrace=True)
            finally:
                gnome_sema.release()

            return matching[0][1]
        else:
            raise cors_exception(request, HTTPNotFound)
    else:
        features = [Feature(geometry=Point(c['geometry']['coordinates']),
                            properties={'title': c['name'],
                                        'slug': slugify.slugify_url(c['name']),
                                        'content': c['steps']})
                    for c in location_content]
        return FeatureCollection(features)
def __init__(self, title, iterator=()):
    self.id = Story.id
    Story.id += 1
    self.title = title
    self.slug = slugify_url(title)
    self.pages = {}

    for stream in iterator:
        p = Page.from_stream(self, stream)
        self.pages[p.id] = p

    for p in self.pages.itervalues():
        for l in p.links:
            if l.page:
                if l.page not in self.pages:
                    raise LinkError(
                        self.title +
                        ": Cannot link page %d to %d: page doesn't exist." %
                        (p.id, l.page))
            else:
                if l.success not in self.pages or l.failure not in self.pages:
                    raise LinkError(
                        self.title +
                        ": Cannot link page %d to (%d or %d): page doesn't exist." %
                        (p.id, l.success, l.failure))

    if not self.pages:
        raise PageLoadingError(self.title + ': No page was found.')
    if 1 not in self.pages:
        raise PageLoadingError(
            self.title + ': No page with id 1 was found.')
def clean_slug(self):
    data = self.cleaned_data
    if data.get('slug'):
        return data['slug']

    if apps.is_installed('modeltranslation'):
        name = data.get('name_{}'.format(settings.LANGUAGE_CODE))
    else:
        name = data.get('name')
    return slugify_url(name, separator='_')
def __init__(self, url):
    self.fetched_url = url
    self.fetched_at = datetime.now()
    # TODO: make better use of the newspaper package
    self.article = self._articlize()

    # this needs to be killed
    self.returned_url = self.article.url
    self.html = self.article.html
    try:
        self.fulltext = re.sub(r'\s+', ' ', fulltext(self.html)).strip()
    except AttributeError:
        self.fulltext = ""

    # resp = self._fetch(self.fetched_url)
    try:
        self.doc = self.article.doc
    except ArticleException:
        self.doc = htmlparser.fromstring(self.html)

    # self.headers = resp.headers
    # self.encoding = resp.encoding
    # self.status_code = resp.status_code
    # self.history = resp.history

    # elements
    _c = self._extract_element('canonical_url')
    self.canonical_url = _c[0] if _c else self.returned_url

    self.titles = self._extract_element('title')
    self.title = self.titles[0]

    self.descriptions = self._extract_element('description')
    self.description = self.descriptions[0] if self.descriptions else ""
    self.description = WS_RX.sub(' ', self.description).strip()

    # TODO: do we need a separate extractor for authors,
    # to present different candidates?
    self.authors = self.article.authors

    self.words = re.split(r' ', self.fulltext)
    self.word_count = len(self.words)
    self.excerpt = ' '.join(
        self.words if self.word_count < EXCERPT_WORD_COUNT
        else self.words[:EXCERPT_WORD_COUNT] + ['...'])

    self.published_at = (self.article.publish_date.strftime('%Y-%m-%d')
                         if self.article.publish_date else '')

    _url = urlparse(self.returned_url)

    # stuff
    self.url_scheme = _url.scheme
    self.url_domain = re.sub(r'^www\.', '', _url.netloc)
    self.url_path = _url.path
    self.url_fragment = _url.fragment
    self.url_query_string = _url.query

    self.slug = slugify_url(' '.join(
        [self.title, self.url_domain, self.url_fragment]))
def download_map(id):
    """
    Download map as a PNG
    """
    fp = io.BytesIO()
    subsector = Subsector.query.get_or_404(id)
    map.draw_map(fp, subsector.worlds)
    fp.seek(0)
    filename = 'starmap-{}.png'.format(slugify_url(subsector.name))
    return send_file(
        fp,
        mimetype='image/png',
        as_attachment=True,
        attachment_filename=filename,
    )
def get_year_urls(year):
    links_arr = []
    page_url = BASE_URL + str(year) + '.asp'
    resp = requests.get(page_url)
    soup = BeautifulSoup(resp.text, 'lxml')
    rows = soup.select('table tbody > tr')
    for row in rows:
        datetd = row.find('td')
        if datetd:
            rawdate = datetd.text
            if 'PIC' not in rawdate and re.search(r'\w+ +\d+ *, 20\d\d', rawdate):
                date = dateparse.parse(rawdate).strftime('%Y-%m-%d')
                for cell in row.select('td'):
                    link = cell.find('a')
                    if link and 'Video' not in link.text:
                        fname = date + '_' + slugify_url(link.text) + '.pdf'
                        url = urljoin(page_url, link['href'])
                        links_arr.append((url, fname))
    return links_arr
def get_location(request):
    '''
    Returns a List of Location objects in JSON if no object specified.
    '''
    # first, let's just check that we can get to the data
    locations_dir = get_locations_dir_from_config(request)
    base_len = len(locations_dir.split(sep))
    location_content = []
    location_file_dirs = []
    for (path, dirnames, filenames) in walk(locations_dir):
        if len(path.split(sep)) == base_len + 1:
            for f in filenames:
                if f.endswith('_wizard.json'):
                    with open(join(path, f), 'r') as fd:
                        location_content.append(
                            json.load(fd, object_pairs_hook=OrderedDict))
                    location_file_dirs.append(join(path, f[:-12] + '_save'))

    slug = obj_id_from_url(request)
    if slug:
        matching = [(i, c) for i, c in enumerate(location_content)
                    if slugify.slugify_url(c['name']) == slug]
        if matching:
            gnome_sema = request.registry.settings['py_gnome_semaphore']
            gnome_sema.acquire()
            try:
                location_file = location_file_dirs[matching[0][0]]
                load_location_file(location_file, request.session)
            finally:
                gnome_sema.release()

            return matching[0][1]
        else:
            return HTTPNotFound()
    else:
        return FeatureCollection(location_content).serialize()
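# All three get_location variants above resolve a request slug the same
# way: slugify each loaded location's 'name' and compare it to the slug
# from the URL. A minimal standalone sketch of that lookup, not from the
# original corpus -- the location names here are hypothetical:
from slugify import slugify_url

location_content = [{'name': 'Long Island Sound'},
                    {'name': 'Lower Mississippi River'}]

slug = 'long-island-sound'
matching = [(i, c) for i, c in enumerate(location_content)
            if slugify_url(c['name']) == slug]
# matching -> [(0, {'name': 'Long Island Sound'})]; an empty list means
# the slug does not correspond to any known location (the 404 branch).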
def download_csv(id):
    """
    Download details as a CSV
    """
    fp = io.StringIO()
    subsector = Subsector.query.get_or_404(id)
    writer = csv.writer(fp)
    writer.writerow([
        "Name",
        "Coords",
        "UWP",
        "Starport",
        "Travel zone",
        "Size",
        "Atmosphere",
        "Temperature",
        "Hydrographics",
        "Population",
        "Government",
        "Law level",
        "Tech level",
        "Notes",
        "Gas giant?",
        "Scout base?",
        "Naval base?",
        "Research base?",
        "Pirate base?",
        "Traveler's Aid Society?",
        "Imperial consulate?",
    ])
    for world in subsector.worlds:
        writer.writerow([
            world.name,
            world.coords_desc,
            world.uwp,
            world.starport,
            world.travel_zone,
            world.size_desc,
            world.atmosphere_desc,
            world.temperature_desc,
            world.hydrographics_desc,
            world.population_desc,
            world.government_desc,
            world.law_level_desc,
            world.tech_level_desc,
            world.long_trade_classifications,
            yesno(world.is_gas_giant),
            yesno(world.is_scout_base),
            yesno(world.is_naval_base),
            yesno(world.is_research_base),
            yesno(world.is_pirate_base),
            yesno(world.is_tas),
            yesno(world.is_consulate),
        ])
    filename = 'subsector-{}.csv'.format(slugify_url(subsector.name))
    return send_file(
        io.BytesIO(fp.getvalue().encode()),
        mimetype='text/csv',
        as_attachment=True,
        attachment_filename=filename,
    )
def slug(self):
    return slugify_url(self.name, separator='_')
def save(self, *args, **kwargs):
    if not self.slug:
        self.slug = slugify_url(self.name) + '-' + str(int(time()))
    super().save(*args, **kwargs)
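# A minimal sketch (not part of the original corpus) of the
# slug-plus-timestamp pattern used in save() above: appending the Unix
# timestamp keeps slugs unique when two objects share the same name.
from time import time

from slugify import slugify_url

name = 'My First Post'  # hypothetical object name
slug = slugify_url(name) + '-' + str(int(time()))
# e.g. 'my-first-post-1700000000' (the suffix is the save-time timestamp)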
def properties(self):
    return dict(title=self.json_body['name'],
                slug=slugify.slugify_url(self.json_body['name']),
                content='???')
def create(cls, tag_title):
    tag = cls(tag_title=tag_title, tag_url=slugify_url(tag_title))
    tag.save()
    # do something with the book
    return tag
def save(self, *args, **kwargs):
    self.slug = slugify_url(self.title)
    super().save(*args, **kwargs)
def create(cls, tag_title):
    tag = cls(tag_title=tag_title, tag_url=slugify_url(tag_title))
    tag.save()
    return tag
def setName(message, card_info):
    card_info["name"] = message.text
    card_info["slug"] = slugify_url(message.text)
def save(self, *args, **kwargs):
    self.slug = slugify_url(self.content, max_length=40)
    return super(Question, self).save(*args, **kwargs)
def save(self, *args, **kwargs):
    self.slug = slugify_url(self.title, max_length=40)
    return super(Cars, self).save(*args, **kwargs)
def slugify_func(self, content):
    if content:
        # slugify_url settings: to_lower, max_length, min_length,
        # stop_words=(), separator='-'
        return slugify_url(content)
    return ''
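# A minimal sketch (not from the original corpus) of the per-call
# overrides noted in the comment above. The keyword arguments mirror the
# ones used elsewhere in this file; the defaults of the predefined
# slugify_url (lowercasing, 'a'/'an'/'the' stop words, '-' separator)
# are assumptions consistent with the test further down.
from slugify import slugify_url

slugify_url('The Über article')             # -> 'uber-article'
slugify_url('My Page Name', separator='_')  # underscore-separated slug
slugify_url('Some very long question text',
            max_length=40)                  # caps the slug at 40 chars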
def save(self, *args, **kwargs):
    self.slug = slugify_url(self.title) + '-' + str(int(time()))
    super().save(*args, **kwargs)
def slugify_func(self, content):
    if content:
        # slugify_url settings: to_lower, max_length, min_length,
        # stop_words=(), separator='-'
        return slugify_url(content)
    return ''
def save(self, *args, **kwargs):
    if not self.slug:
        self.slug = slugify_url(self.title, max_length=40)
    return super(Gallery, self).save(*args, **kwargs)
def test_slugify_url(self):
    self.assertEqual(slugify_url('The Über article'), 'uber-article')
def save(self, *args, **kwargs):
    self.title = self.body[:60]
    self.slug = slugify_url(self.title, max_length=40)
    return super(Callback, self).save(*args, **kwargs)