def load_names(filename, genitives):
    """Load approved names into male and female lists, looking up the
    genitive form of each name along the way."""
    male_names = []
    female_names = []
    with open(filename) as csv_file:
        reader = csv.DictReader(csv_file)
        for row in reader:
            # Approved names have Afgreitt == 'Sam'
            if row['Afgreitt'] == 'Sam':
                # Female names are 'ST'
                if row['Tegund'] == 'ST':
                    female_names.append({
                        'slug': slugify(row['Nafn']),
                        'name': row['Nafn'],
                        'genitive': genitives.get(row['Nafn'], row['Nafn'])
                    })
                # Male names are 'DR'
                elif row['Tegund'] == 'DR':
                    male_names.append({
                        'slug': slugify(row['Nafn']),
                        'name': row['Nafn'],
                        'genitive': genitives.get(row['Nafn'], None)
                    })
    return (female_names, male_names)
def test_slugify_umlaut(self):
    self.assertEqual(slugify('kožušček'), 'kozuscek')
    self.assertEqual(slugify('C\'est déjà l\'été.'), 'Cest-deja-lete')
    self.assertEqual(slugify('jaja---lol-méméméoo--a'), 'jaja-lol-mememeoo-a')
    self.assertEqual(slugify('Nín hǎo. Wǒ shì zhōng guó rén'),
                     'Nin-hao-Wo-shi-zhong-guo-ren')
    self.assertEqual(slugify('Programmes de publicité - Solutions d\'entreprise'),
                     'Programmes-de-publicite-Solutions-dentreprise')
def get_annotation_tags(data: dict):
    return ",".join(
        "{} {}".format(slugify(key), slugify(str(value)))
        for key, value in data.items()
    )
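# A hedged usage sketch of get_annotation_tags (not from the original
# source); exact slugs depend on the slugify implementation in scope,
# which commonly lowercases and hyphenates.
print(get_annotation_tags({"Env Name": "Prod 1", "Region": "EU West"}))
# e.g. "env-name prod-1,region eu-west"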
def update_dataset_lists_finalize(self):
    concepts = {}
    codelists = {}
    dimension_keys = []
    attribute_keys = []
    for key, value in self.dataset.concepts.items():
        key_slug = slugify(key, save_order=True)
        concepts[key_slug] = value
    for key, value in self.dataset.codelists.items():
        new_value = {}
        for k, v in value.items():
            new_value[slugify(k, save_order=True)] = v
        codelists[slugify(key, save_order=True)] = new_value
    for key in self.dataset.dimension_keys:
        dimension_keys.append(slugify(key, save_order=True))
    if self.dataset.attribute_keys:
        for key in self.dataset.attribute_keys:
            attribute_keys.append(slugify(key, save_order=True))
    self.dataset.concepts = concepts
    self.dataset.codelists = codelists
    self.dataset.dimension_keys = dimension_keys
    if self.dataset.attribute_keys:
        self.dataset.attribute_keys = attribute_keys
def save(self, *args, **kwargs):
    if not self.pk:
        slug_entry = self.name
        chk = Category.objects.filter(slug=slugify(slug_entry))
        if len(chk):
            slug_entry = slug_entry + "-" + str(len(chk))
        self.slug = slugify(slug_entry)
    super(Category, self).save(*args, **kwargs)
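# The count-based suffix above only guards against a single collision
# level (an existing "widgets-1" can still clash). A hedged alternative,
# not from the original source: probe for a free slug in a loop.
# `unique_slug` is a hypothetical helper assuming the same Category model.
def unique_slug(name):
    base = slugify(name)
    candidate = base
    n = 1
    # Probe candidate slugs until one is unused.
    while Category.objects.filter(slug=candidate).exists():
        candidate = "%s-%d" % (base, n)
        n += 1
    return candidate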
def _set_meta(self, title, published, category='', tags=''):
    """
    Build the header of the post.

    :param title: the title of the post
    :param published: the date when the data was published by the provider
    :param category: category of this data
    :param tags: the tags
    :type title: string
    :type published: string
    :type category: string
    :type tags: string
    :return: the complete header
    :rtype: string
    """
    slug_published = slugify(arrow.get(published).format('YYYY-MM-DD HH:mm'))
    slug_title = slugify(title)
    header = '\n\t\t<meta name="date" content="{}" />\n'.format(published)
    if tags:
        header += '\t\t<meta name="tags" content="{}" />\n'.format(tags)
    if category:
        header += '\t\t<meta name="category" content="{}" />\n'.format(category)
    if self.AUTHOR:
        header += '\t\t<meta name="authors" content="{}" />\n'.format(self.AUTHOR)
    header += '\t\t<meta name="slug" content="{}"/>\n'.format(
        slug_published + '-' + slug_title)
    header += '\t</head>'
    return header
def __init__(self, data=None):
    if data is None:
        return
    self.label = data.get('label')
    if data.get('name', None):
        self.name = slugify(str(data.get('name')), max_length=30, separator="_")
    else:
        self.name = slugify(str(data.get('label')), max_length=30, separator="_")
    # Check if the name is already taken
    if Dataset.by_name(self.name):
        for x in range(10):
            newname = self.name + "_" + str(x)
            if not Dataset.by_name(newname):
                self.name = newname
                break
    self.description = data.get('description')
    self.ORoperations = data.get('ORoperations', {})
    self.mapping = data.get('mapping', {})
    self.prefuncs = data.get('prefuncs', {})
    self.created_at = datetime.utcnow()
    self.dataType = data.get('dataType')
    if type(data.get('dataorg')) == int:
        self.dataorg = DataOrg.by_id(data.get('dataorg'))
    else:
        try:
            self.dataorg = data.get('dataorg')
        except Exception, e:
            print "failed to load the dataorg for dataset"
            print e
def build_docs(self, row):
    # In this case, it's because in the CSV doc there is a column for each year...
    year = row[3]
    # Clean the expense string so that it is numerical (e.g. turn a blank string into 0).
    cost = row[2].replace(',', '')
    if not cost.strip():
        cost = 0
    # Create doc.
    doc = {
        'region': {
            'name': self.get_region(),
            'slug': slugify(self.get_region(), to_lower=True)
        },
        'dataset': {
            'name': self.get_dataset(),
            'slug': slugify(self.get_dataset(), to_lower=True)
        },
        'activity': {
            'type': row[0],
            'description': row[1]
        },
        'cost': float(cost),
        'year': int(year)
    }
    # Console output to give the user feedback on the status of the import.
    print '%s - %s: %s (%s %i)' % (doc['activity']['type'],
                                   doc['activity']['description'],
                                   doc['cost'],
                                   doc['region']['name'],
                                   doc['year'])
    return [doc]
def download_ticket(self, ticket, destination_dir='.', override=False):
    route = '%s_%s' % (slugify(ticket.route_from, separator='_')[:3],
                       slugify(ticket.route_to, separator='_')[:3])
    filename = '%s/%s_%s_%s.pdf' % (
        destination_dir, ticket.date.strftime('%Y%m%d%H%M'), route, ticket.number)
    if not os.path.exists(filename) or override:
        if not os.path.exists(destination_dir):
            os.mkdir(destination_dir)
        response = self.session.get(self.PDF_URL % ticket.number[3:], stream=True)
        total = int(response.headers.get('Content-Length', 0))
        if not total:
            return
        name = os.path.split(filename)[1]
        chunk_size = 1024
        progress = tqdm(total=total, leave=True, unit_scale=chunk_size,
                        unit='B', desc='Downloading %s' % (name,))
        with open(filename, 'wb') as file_handler:
            for chunk in response.iter_content(chunk_size=chunk_size):
                if chunk:
                    file_handler.write(chunk)
                    progress.update(chunk_size)
        progress.close()
    return open(filename)
def do_slugify(string):
    # Undo common "leetspeak" digit substitutions before slugifying.
    replacements = (('4', 'a'), ('3', 'e'), ('1', 'l'),
                    ('0', 'o'), ('7', 't'), ('5', 's'))
    for old, new in replacements:
        string = string.replace(old, new)
    # The original discarded the slugify() result; assign it back.
    string = slugify(string, separator='_')
    string = string.replace('_', '')
    return string
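# Hypothetical usage of do_slugify (not from the original source); the
# expected outputs assume a slugify that lowercases and strips accents,
# which is the common default.
print(do_slugify('H4x0r 5peak!'))  # -> 'haxorspeak'
print(do_slugify('Déjà Vu'))       # -> 'dejavu'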
def crea_group_with_manager(name_user, name_group):
    # Fetch the overridden user model
    User = get_user_model()
    # Check whether the user name is already taken
    lUser = list(User.objects.all())
    for u in lUser:
        if u.name_long == name_user:
            raise Exception('That user name already exists')
    lGroup = list(GroupProfile.objects.all())
    for g in lGroup:
        if g.title == name_group or g.slug == slugify(name_group):
            raise Exception('That group name already exists')
    user = User.objects.create_user(name_user, None, name_user)
    user.save()
    group = GroupProfile()
    group.title = name_group
    group.slug = slugify(name_group)
    group.description = name_group
    group.save()
    group.join(user, role="manager")
    group.save()
    return True
def create_dbdict(self):
    print('Creating dbdict for cultivar "{}"...'.format(self.h3))
    self._dbdict['name'] = first_text(self.h3)
    self._dbdict['subtitle'] = first_text(self.subtitle_em)
    self._dbdict['botanical_names'] = first_text(self.bn_em)
    self._dbdict['description'] = tags_to_str(self.ps)
    self._dbdict['new_for'] = '2017' if self.new_for else ''
    self._dbdict['favorite'] = True if self.favorite else False
    self._dbdict['images'] = [i['src'].replace('\n', '') for i in self.images]
    self._dbdict['packets'] = self.get_packet_dicts()
    self._dbdict['veg_info'] = {}
    if self.veg_em:
        abbrs = self.veg_em.find_all('abbr')
        self._dbdict['veg_info']['open_pollinated'] = False
        for abbr in abbrs:
            abbr.extract()
            if '(OP)' in abbr.text:
                self._dbdict['veg_info']['open_pollinated'] = True
        self._dbdict['veg_info']['maturation'] = str_contents(self.veg_em)
    try:
        self._dbdict['slug'] = slugify(self.tag['id'])
    except KeyError:
        try:
            self._dbdict['slug'] = slugify(self.h3['id'])
        except KeyError:
            try:
                self._dbdict['slug'] = slugify(self.tag.img['id'])
            except KeyError:
                self._dbdict['slug'] = None
def ensure_unify_datasets_exist():
    """
    Read the unify datasets to create from the CSV file.
    1. Check if they exist.
    2. If they don't, create 'em.
    3. There is no step 3.
    4. Profit
    """
    dc.ensure_publisher('unify')
    unifyfile = DATA_DIR/'datasets.csv'
    with unifyfile.csv(header=True) as csv:
        for row in csv:
            if row.source == 'UNIFY2':
                dc.Dataset.create_or_update(
                    name=slugify(row.title).lower(),
                    title=row.title,
                    state='active',
                    private=row.public == 'N',
                    license_id='ogl',
                    url='http://data.england.nhs.uk',
                    owner_org='unify',
                    resources=[]
                )
                print slugify(row.title).lower()
    return
def template_to_path(self, track: dict) -> str:
    """Create a valid filepath based on the template

    :param track: track metadata
    :return: filepath
    """
    logging.debug(" Generating filepath/trackname..")
    path = self.template
    if self.no_slugify:
        path = path.replace("%{artist}", track['artist'])
        path = path.replace("%{album}", track['album'])
        path = path.replace("%{title}", track['title'])
    else:
        path = path.replace("%{artist}", slugify(track['artist']))
        path = path.replace("%{album}", slugify(track['album']))
        path = path.replace("%{title}", slugify(track['title']))
    if track['track'] == "None":
        path = path.replace("%{track}", "Single")
    else:
        path = path.replace("%{track}", str(track['track']).zfill(2))
    path = u"{0}/{1}.{2}".format(self.directory, path, "mp3")
    logging.debug(" filepath/trackname generated..")
    logging.debug("\n\tPath: {}".format(path))
    return path
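# A standalone rendition of the same substitution logic, for illustration
# only (not from the original source); the template string and track
# values are assumptions, and the real ones live on the class.
template = "%{artist}/%{album}/%{track} %{title}"
track = {'artist': 'Com Truise', 'album': 'Galactic Melt',
         'title': 'Brokendate', 'track': 7}
path = template
for field in ('artist', 'album', 'title'):
    path = path.replace("%{" + field + "}", slugify(track[field]))
path = path.replace("%{track}", str(track['track']).zfill(2))
print(path)  # e.g. "com-truise/galactic-melt/07 brokendate"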
def main():
    is_rewrite = 'rewrite' in sys.argv
    cards = json.load(open(CARDS_FILE_PATH))
    print("From '{}' loaded {} cards".format(CARDS_FILE_PATH, len(cards)))
    if not os.path.isdir('cards'):
        os.mkdir('cards')
    added_cards_count = 0
    for card in cards:
        cycle_number = card['cyclenumber']
        if cycle_number >= 1:  # ignore alternates and specials
            set_name = card['setname']
            set_name = '{}-{}'.format(SET_ORDER.get(set_name, ''), set_name)
            dir_name = '{:02}-{}'.format(cycle_number, slugify(set_name))
            dir_path = os.path.join('cards', dir_name)
            file_name = '{:03}-{}.yaml'.format(card['number'], slugify(card['title']))
            file_path = os.path.join(dir_path, file_name)
            if not os.path.isdir(dir_path):
                os.mkdir(dir_path)
            # Add absent cards only
            if is_rewrite or not os.path.isfile(file_path):
                card_file = open(file_path, 'w')
                card_yaml = OrderedDict()
                card_yaml['side'] = card['side']
                card_yaml['faction'] = card['faction']
                card_yaml['type'] = card['type']
                card_yaml['uniqueness'] = card['uniqueness']
                card_yaml['obvious'] = False
                card_yaml['progress'] = 0.0
                card_yaml['title'] = card['title']
                card_yaml['title_ru'] = 'нет'
                text = clean_breaks(card['text'])
                card_yaml['text'] = folded(text)
                card_yaml['text_ru'] = folded(text)
                if 'flavor' in card:
                    flavor = clean_breaks(card['flavor'])
                    card_yaml['flavor'] = folded(flavor)
                    card_yaml['flavor_ru'] = folded(flavor)
                yaml.dump(card_yaml, card_file, default_flow_style=False,
                          allow_unicode=True, indent=4, width=70)
                added_cards_count += 1
    print('Added {} cards'.format(added_cards_count))
def _output_folder(self):
    return join(
        OUTPUT_DIR,
        self.options.blueprint,
        slugify(getattr(self.header, self.options.group_by)),
        slugify(self.name)
    )
def get_slug_default(self):
    """
    Naively constructs a translated default slug from the object. For
    better results, just set the `slug_default` property on the class
    to a lazy translated string. Alternatively, override this method
    if you need to determine the default slug more programmatically.

    Example: If your model is "news article" and your source field is
    "title", this will return "news-article-without-title".
    """
    if self.slug_default:
        # The implementing class provides its own translated string; use it.
        return force_text(self.slug_default)
    object_name = self._meta.verbose_name
    # Introspect the field name
    try:
        trans_meta = self.translations.model._meta
        source_field = trans_meta.get_field(self.slug_source_field_name)
        field_name = getattr(source_field, 'verbose_name')
    except Exception:
        field_name = _('name')
    slug_default = _("{0}-without-{1}").format(
        slugify(force_text(object_name)),
        slugify(force_text(field_name)),
    )
    return slug_default
def create_organization(ckan, id_str, slug_str, name, desc=None, parent=None):
    # Field max length is 100 characters
    name_slug = slugify(slug_str.decode('utf-8')).lower()[:100]
    id_slug = slugify(id_str.decode('utf-8')).lower()[:100]
    data_dict = {'name': name_slug, 'id': id_slug, 'title': name, 'state': 'active'}
    if desc:
        data_dict['description'] = desc
    log.info('>> Creating organization {} [ID: {}]'.format(name, id_slug))
    if parent:
        # Add to hierarchy, if a parent ID is given.
        # This happens by adding a parent capacity to the groups field.
        log.info('Adding to hierarchy, under parent ID {}.'.format(parent))
        data_dict['groups'] = [{'capacity': 'public', 'name': parent}]
    try:
        # Try to create the organization, if it doesn't exist yet
        ckan.call_action('organization_create', data_dict,
                         requests_kwargs={'verify': False})
        log.info('Organization created.')
    except ValidationError:
        # If it already exists, just patch the new data
        log.info('Organization with the same ID was found in the database. '
                 'Updating instead.')
        try:
            ckan.call_action('organization_patch', data_dict,
                             requests_kwargs={'verify': False})
        except NotFound:
            log.error('Could not patch organization {}, {}, {}'.format(
                name, name_slug, id_slug))
        log.info('Organization updated.')
    except NotAuthorized:
        log.error('API NotAuthorized - please give a valid admin API key')
        sys.exit(1)
    return id_str
def lines(self, line_id=None):
    lines_obj = []
    for table in self.get_html():
        #~ group = dict(
        #~     slug = slugify(table.select('.style20')[0].string),
        #~     description = table.select('.style20')[0].string
        #~ )
        #~ links = table.find_all('a')
        group = table.select('.style20')[0].string
        for link in table.find_all('a'):
            #~ new_line = Line()
            new_line = Line(id=link['href'].replace('lineas.asp?lin=', ''))
            new_line.title = link.string
            new_line.slug = slugify(link.string)
            new_line.group = dict(
                slug=slugify(group),
                description=group
            )
            if line_id == new_line.id:
                return new_line
            lines_obj.append(new_line)
    #~ group['lines'] =
    return lines_obj
def test_fold_abbr_4(self):
    slugify = Slugify(fold_abbrs=True)
    self.assertEqual('mind-in-a-box', slugify('mind.in.a.box'))
    self.assertEqual('mind-in-a-b-c-box', slugify('mind.in.a.b.c.box'))
    self.assertEqual('a-b-c-box', slugify('.a.b.c.box'))
    self.assertEqual('abcbox', slugify('a.b.c.box'))
    self.assertEqual('abcb-ox', slugify('a.b.c.b ox'))
def scrap_user(self, user):
    root = '/{user}/albums'.format(user=user)
    for f_album in self.process_pages(root):
        folder_name = os.path.join(self.data_folder, slugify(user),
                                   slugify(f_album['name']))
        if not os.path.isdir(folder_name):
            os.makedirs(folder_name)
        self.image_pool += [(blob['images'][0]['source'], folder_name)
                            for blob in self.process_album(f_album)]
def udemy_dl(username, password, course_link, lecture_start=1, lecture_end=None,
             save_links=False, safe_file_names=False, just_list=False, dest=""):
    # pylint: disable=too-many-arguments
    """Log into udemy and do all the magic."""
    login(username, password)
    course_id = get_course_id(course_link)
    check_course_status(course_id)
    last_chapter = -1
    for data in get_data_links(course_id, lecture_start, lecture_end):
        if save_links:
            save_link(data['data_url'], dest, data['data_type'])
        else:
            try:
                directory = '{0:02d} {1!s}'.format(data['chapter_number'],
                                                   safeencode(data['chapter']))
                if safe_file_names:
                    directory = slugify(directory, lower=True, spaces=False,
                                        ok='.', only_ascii=True)
                else:
                    directory = sanitize_path(directory)
            except AttributeError:
                # Fix for an untitled opening chapter
                if safe_file_names:
                    directory = '00-opening'
                else:
                    directory = '00 Opening'
            if dest:
                directory = os.path.join(dest, directory)
            filename = '{0:03d} {1!s}'.format(data['lecture_number'],
                                              safeencode(data['lecture']))
            if safe_file_names:
                filename = slugify(filename, lower=True, spaces=False,
                                   ok='.', only_ascii=True)
            else:
                filename = sanitize_path(filename)
            if just_list:
                if last_chapter != data['chapter_number']:
                    last_chapter = data['chapter_number']
                    print('\r\n{0:02d} {1!s}\r\n=========================='.format(
                        last_chapter, safeencode(data['chapter'])))
                print('{0:03d} {1!s}'.format(data['lecture_number'],
                                             safeencode(data['lecture'])))
            else:
                data_url = data['data_url']
                data_type = data['data_type']
                attached_info = data['attached_info']
                caption_list = data['caption_list']
                get_data(directory, filename, data_url, data_type,
                         attached_info, caption_list)
    if os.path.exists(dest) and save_links:
        print('Links successfully saved to: {0!s}\n'.format(os.path.abspath(dest)))
    print('Logging out...', end=' ')
    sys.stdout.flush()
    session.get('http://www.udemy.com/user/logout')
    print('Done')
def build_docs(self, row):
    # Clean the expense string so that it is numerical (e.g. turn a blank string into 0).
    cost = row[9].replace(',', '')
    if not cost.strip():
        cost = 0
    # Create doc.
    doc = {
        'region': {
            'name': self.get_region(),
            'slug': slugify(self.get_region(), to_lower=True),
            'subregion': {
                'name': cyrtranslit.to_latin(row[0]),
                'slug': cyrtranslit.to_latin(slugify(row[0], to_lower=True)),
            }
        },
        'activity': {
            'id': int(row[1]),
            'description': cyrtranslit.to_latin(row[2])
        },
        'dataset': {
            'name': self.get_dataset(),
            'slug': slugify(self.get_dataset(), to_lower=True)
        },
        'cost': cost,
        'year': 2010
    }
    # Console output to give the user feedback on the status of the import.
    print '%s - %s: %s (%s %i)' % (doc['activity']['id'],
                                   doc['activity']['description'],
                                   doc['cost'],
                                   doc['region']['name'],
                                   doc['year'])
    return [doc]
def default_content(cls):
    ret = {}
    title = 'About text (left column)'
    slug = slugify(title, to_lower=True)
    ret[slug] = cls(title=title, slug=slug, content=(
        '<p>The aim of this app is to demonstrate that, with the '
        'help of modern JS libraries, and with some well-'
        'thought-out server-side snippets, it\'s now perfectly '
        'possible to "bake in" live in-place editing for '
        'virtually every content element in a typical '
        'brochureware site.</p>'), active=True)
    title = 'About text (right column)'
    slug = slugify(title, to_lower=True)
    ret[slug] = cls(title=title, slug=slug, content=(
        '<p>This app is not a CMS. On the contrary, think of it '
        'as a proof-of-concept alternative to a CMS. An '
        'alternative where there\'s no "admin area", there\'s '
        'no "editing mode", and there\'s no "preview '
        'button".</p>'), active=True)
    title = 'About text (below columns)'
    slug = slugify(title, to_lower=True)
    ret[slug] = cls(
        title=title, slug=slug,
        content="<p>There's only direct manipulation.</p>",
        active=True)
    return ret
def get_station_info(station_code, station_data_dict):
    station = {
        'kodi': station_code,
        'emri': station_data_dict[station_code]['name'],
        'slug': slugify(station_data_dict[station_code]['name']),
        'kordinatat': {
            'gjatesi': float(station_data_dict[station_code]['longitude']),
            'gjeresi': float(station_data_dict[station_code]['latitude'])
        },
        'gjiriLumit': {
            'emri': station_data_dict[station_code]['riverBasin'],
            'slug': slugify(station_data_dict[station_code]['riverBasin'])
        },
        'lumi': {
            'emri': station_data_dict[station_code]['river'],
            'slug': slugify(station_data_dict[station_code]['river'])
        },
        'regjioniDetit': {
            'emri': station_data_dict[station_code]['seaRegion'],
            'slug': slugify(station_data_dict[station_code]['seaRegion'])
        },
        'vendMostrimi': float(station_data_dict[station_code]['catchmentArea']),
        'dendesiaPopullates': float(station_data_dict[station_code]['populationDensity']),
        'lartesia': int(station_data_dict[station_code]['altitude'])
    }
    return station
def new():
    """Create a new post or page."""
    date = datetime.now()
    page_type = click.prompt('Create new post/page',
                             type=click.Choice(['post', 'page']),
                             default='post')
    page_attributes['title'] = click.prompt('Title', default='New ' + page_type)
    page_attributes['date'] = date.strftime(config['internal_date_format'])
    page_attributes['template'] = config[page_type + '_template']
    if page_type == 'post':
        # i.e. "2014-05-10-post-title.md"
        file_name = (date.strftime(config['post_prefix_format'])
                     + slugify(page_attributes['title']) + '.md')
    else:
        # i.e. "page-title.md"
        file_name = slugify(page_attributes['title']) + '.md'
    file_path = os.path.join(config['src_dir'], file_name)
    if os.path.isfile(file_path):
        click.echo('A file with the same name already exists.')
    else:
        with codecs.open(file_path, 'w', encoding='utf-8') as f:
            f.write(config['delimiter'])
            f.write(yaml.dump(page_attributes, default_flow_style=False))
            f.write(config['delimiter'])
            f.write('\n')
def search_page(self, response):
    trs = response.xpath('.//table/tbody/tr')
    for tr in trs:
        tds = [x.extract() for x in tr.xpath('./td/text()')]
        scheme = tds[3].strip()
        if 'Sum total' in scheme:
            continue
        amount = float(tds[4].replace('.', '').replace(',', '.'))
        recipient_name = tds[0]
        if self.NUM_ONLY_RE.match(recipient_name) is not None:
            recipient_id = u'SI-%s-%s' % (self.YEAR, recipient_name)
            recipient_name = ''
        else:
            recipient_id = u'SI-%s-%s' % (slugify(tds[1]), slugify(recipient_name))
        recipient_location = u'%s, %s' % (tds[1], tds[2])
        yield FarmSubsidyItem(
            year=self.YEAR,
            scheme=scheme,
            amount=amount,
            recipient_id=recipient_id,
            recipient_name=recipient_name,
            recipient_location=recipient_location,
            country='SI',
            currency='EUR',
        )
def prepare_folder(list):
    global type
    type = list['extractor_key']
    if "www.youtube.com/user/" in sys.argv[1]:
        type = "user"
    global title
    global title_html
    if type == "YoutubePlaylist":
        title = slugify.slugify(list['title'])
        title_html = list['title']
    else:
        title = slugify.slugify(list.get('entries')[0].get('uploader'))
        title_html = list.get('entries')[0].get('uploader')
    global scraper_dir
    scraper_dir = script_dirname + "build/" + title + "/"
    if not os.path.exists(scraper_dir):
        os.makedirs(scraper_dir)
    if not os.path.exists(scraper_dir + "CSS/"):
        shutil.copytree("templates/CSS/", scraper_dir + "CSS/")
    if not os.path.exists(scraper_dir + "JS/"):
        shutil.copytree("templates/JS/", scraper_dir + "JS/")
    get_user_pictures(list.get('entries')[0].get('uploader_id'))
    global color
    color = colorz(scraper_dir + "CSS/img/header.png", 1)[0]
    global background_color
    background_color = solarize_color(color)
def create_new():
    app.logger.debug("Form keys: " + ", ".join(request.form.keys()))
    if request.method == "GET":
        return render_template("new.html",
                               user=slugify(request.args.get("user", "")),
                               key=slugify(request.args.get("key", "")),
                               code=request.args.get("code", ""))
    else:
        user = slugify(request.form.get("user", "Anonymous"))
        try:
            key = slugify(request.form.get("key", modelo.gen_key(user)))
        except KeyError:
            app.logger.error("Too many retries to generate a key")
            abort(500)
        code = request.form.get("code", "").strip()
        if code is None or len(code) == 0:
            flash("No code to submit?")
            return redirect(url_for("create_new", user=user, key=key))
        elif modelo.is_used_key(user, key):
            flash("Select another key, that one has already been taken!")
            return redirect(url_for("create_new", user=user, key=key, code=code))
        else:
            modelo.add_pasta(user, key, code)
            return redirect(url_for("get_pasta", user=user, key=key))
def _add_family_info(self, project_id, family_id, individuals):
    """
    Add all the background info about this family.
    We try to keep this as simple as possible - just IDs.
    After this is run, variants are ready to be loaded.
    """
    if self.family_exists(project_id, family_id):
        #raise Exception("Family (%s, %s) already exists" % (project_id, family_id))
        return
    for indiv_id in individuals:
        if not self.individual_exists(project_id, indiv_id):
            self.add_individual(project_id, indiv_id)
    family_coll_name = "family_%s_%s" % (
        slugify.slugify(project_id, separator='_'),
        slugify.slugify(family_id, separator='_'))
    family = {
        'project_id': project_id,
        'family_id': family_id,
        'individuals': individuals,
        'coll_name': family_coll_name,
        'status': 'loading'
    }
    family_collection = self._db[family_coll_name]
    self._index_family_collection(family_collection)
    self._db.families.save(family, safe=True)
def create_url(string):
    """
    Intended to append three alphanumeric characters derived from
    blake2b(digest_size=3).hexdigest() in order to avoid duplicated
    articles per user. (Note: the body does not apply the digest yet.)
    """
    return slugify(string, max_length=100)
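# The docstring above describes a blake2b suffix that the body never
# applies. A hedged sketch of what that could look like (not from the
# original source; `create_url_with_digest` is a hypothetical variant):
import hashlib

def create_url_with_digest(string):
    # Append a 3-byte blake2b digest of the title so that two articles
    # with the same title still get distinct URLs.
    digest = hashlib.blake2b(string.encode('utf-8'), digest_size=3).hexdigest()
    return '{}-{}'.format(slugify(string, max_length=100), digest)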
def tsuru_label(self):
    return slugify("{}-{}".format(self.name, self.environment()))
def transform_for_postajob(job):
    """
    Cleans a job coming from My.jobs post-a-job.

    This should add any required fields, and re-format any fields that
    do not arrive in the required format.

    inputs:
    :job: A dictionary with the following fields: postajob.job.id (id),
        city, company.id (company), country, country_short, date_new,
        date_updated, description, guid, link, on_sites, state,
        state_short, reqid, title, uid, and zipcode.

    outputs:
    A solr-ready job as a dictionary
    """
    try:
        company = Company.objects.get(id=job['company'])
    except Company.DoesNotExist:
        return None
    job['date_new'] = _clean_time(job['date_new'])
    job['date_updated'] = _clean_time(job['date_updated'])
    solr_job = {'is_posted': True}
    on_sites = job.get('on_sites', '0')
    if not on_sites:
        solr_job['on_sites'] = ''
    else:
        solr_job['on_sites'] = [str(x) for x in on_sites.split(',')]
    solr_job['id'] = 'postajob.job.%s' % job['guid']
    # This has to be seo.joblisting, otherwise the jobs won't be included
    # in the search results.
    solr_job['django_ct'] = 'seo.joblisting'
    solr_job['django_id'] = 0
    solr_job['city_slug'] = slugify(job['city'])
    solr_job['country_short'] = job['country_short'].upper()
    solr_job['country_short_exact'] = job['country_short'].upper()
    solr_job['date_updated_exact'] = job['date_updated']
    solr_job['job_source_name'] = 'Post-a-Job'
    solr_job['date_updated'] = job['date_updated']
    solr_job['salted_date'] = DEJobFeed.date_salt(job['date_new'])
    solr_job['reqid'] = job['reqid']
    solr_job['company_digital_strategies_customer'] = company.digital_strategies_customer
    solr_job['guid'] = job['guid']
    solr_job['uid'] = job['id']
    solr_job['company_member'] = company.member
    solr_job['city'] = job['city']
    solr_job['date_new'] = job['date_new']
    solr_job['country_exact'] = job['country']
    solr_job['country_slug'] = slugify(job['country'])
    solr_job['company_ac'] = company.name
    solr_job['html_description'] = DEJobFeed.markdown_to_html(job['description'])
    solr_job['state'] = job['state']
    solr_job['country_ac'] = job['country']
    solr_job['city_ac'] = job['city']
    solr_job['state_short_exact'] = job['state_short']
    solr_job['title_ac'] = job['title']
    solr_job['company_canonical_microsite'] = company.canonical_microsite
    solr_job['description'] = job['description']
    solr_job['state_ac'] = job['state']
    solr_job['company'] = company.name
    solr_job['state_short'] = job['state_short']
    solr_job['title_exact'] = job['title']
    solr_job['link'] = job.get('link', '')
    solr_job['apply_info'] = job.get('apply_info', '')
    solr_job['company_enhanced'] = company.enhanced
    solr_job['state_slug'] = slugify(job['state'])
    solr_job['city_exact'] = job['city']
    solr_job['title_slug'] = slugify(job['title'])
    solr_job['state_exact'] = job['state']
    solr_job['zipcode'] = job['zipcode']
    solr_job['title'] = job['title']
    solr_job['date_new_exact'] = job['date_new']
    solr_job['country'] = job['country']
    solr_job['company_exact'] = company.name
    solr_job['company_canonical_microsite_exact'] = company.canonical_microsite
    solr_job['date_added'] = solr_job['date_added_exact'] = datetime.datetime.now()
    # Requires city, state_short, state, and country_short to be filled
    # in on solr_job to work.
    solr_job['location'] = DEJobFeed.location(solr_job)
    # Requires city, state, location, and country to be filled in on
    # solr_jobs.
    solr_job['full_loc'] = DEJobFeed.full_loc(solr_job)
    solr_job['full_loc_exact'] = solr_job['full_loc']
    solr_job['company_slab'] = DEJobFeed.co_slab(company.name)
    # Requires solr_job['country_short'], solr_job['state'], and
    # solr_job['city'] to already be filled in.
    solr_job['city_slab'] = DEJobFeed.city_slab(solr_job)
    # Requires solr_job['country_short'] and solr_job['state'] to already
    # be filled in.
    solr_job['state_slab'] = DEJobFeed.state_slab(solr_job)
    # Requires solr_job['country_short'] to already be filled in.
    solr_job['country_slab'] = DEJobFeed.country_slab(solr_job)
    # Requires solr_job['title'] to already be filled in.
    solr_job['title_slab'] = DEJobFeed.title_slab(solr_job)
    solr_job['location_exact'] = solr_job['location']
    solr_job['state_slab_exact'] = solr_job['state_slab']
    solr_job['company_slab_exact'] = solr_job['company_slab']
    solr_job['country_slab_exact'] = solr_job['country_slab']
    solr_job['city_slab_exact'] = solr_job['city_slab']
    solr_job['title_slab_exact'] = solr_job['title_slab']
    solr_job['all_locations'] = [job['zipcode'], job['city'], job['state'],
                                 job['state_short'],
                                 "%s, %s" % (job['city'], job['state']),
                                 job['country']]
    solr_job['text'] = " ".join([force_text((job.get(k)) or "None")
                                 for k in text_fields])
    return solr_job
logger.error("Unable to parse string %s as a date", date_new) raise # Determine what sites these jobs should be on on_sites = set(business_unit.site_packages.values_list('pk', flat=True)) on_sites = filter(None, on_sites) job['on_sites'] = on_sites or [0] # This has to be seo.joblisting, otherwise the jobs won't be included # in the search results. job['id'] = 'seo.joblisting.%s' % guid.replace('-', '') job['django_ct'] = 'seo.joblisting' job['django_id'] = 0 job['city_slug'] = slugify(city) job['country_short'] = country_short.upper() job['country_short_exact'] = country_short.upper() job['date_updated_exact'] = job['date_updated'] job['job_source_name'] = business_unit.title job['salted_date'] = DEJobFeed.date_salt(job['date_new']) job['buid'] = business_unit.id job['reqid'] = reqid job['company_digital_strategies_customer'] = company.digital_strategies_customer job['guid'] = guid.replace('-', '') job['uid'] = "" job['company_member'] = company.member job['city'] = city job['country'] = country job['country_exact'] = country job['country_slug'] = slugify(country)
def _send_rule(self):
    try:
        self._cfg.setSettings("promptDialog/geometry", self.saveGeometry())
        self._rule = ui_pb2.Rule(name="user.choice")
        self._rule.enabled = True
        self._rule.action = (Config.ACTION_DENY
                             if self._default_action == self.ACTION_IDX_DENY
                             else Config.ACTION_ALLOW)
        self._rule.duration = self._get_duration(self.durationCombo.currentIndex())
        what_idx = self.whatCombo.currentIndex()
        self._rule.operator.type, self._rule.operator.operand, self._rule.operator.data = \
            self._get_combo_operator(self.whatCombo, what_idx)
        if self._rule.operator.data == "":
            print("Invalid rule, discarding: ", self._rule)
            self._rule = None
            return
        rule_temp_name = self._get_rule_name(self._rule)
        self._rule.name = rule_temp_name

        # TODO: move to a method
        data = []
        if (self.checkDstIP.isChecked()
                and self.whatCombo.itemData(what_idx) != self.FIELD_DST_IP):
            _type, _operand, _data = self._get_combo_operator(
                self.whatIPCombo, self.whatIPCombo.currentIndex())
            data.append({"type": _type, "operand": _operand, "data": _data})
            rule_temp_name = slugify("%s %s" % (rule_temp_name, _data))
        if (self.checkDstPort.isChecked()
                and self.whatCombo.itemData(what_idx) != self.FIELD_DST_PORT):
            data.append({"type": Config.RULE_TYPE_SIMPLE,
                         "operand": "dest.port",
                         "data": str(self._con.dst_port)})
            rule_temp_name = slugify("%s %s" % (rule_temp_name,
                                                str(self._con.dst_port)))
        if (self.checkUserID.isChecked()
                and self.whatCombo.itemData(what_idx) != self.FIELD_USER_ID):
            data.append({"type": Config.RULE_TYPE_SIMPLE,
                         "operand": "user.id",
                         "data": str(self._con.user_id)})
            rule_temp_name = slugify("%s %s" % (rule_temp_name,
                                                str(self._con.user_id)))
        if self._is_list_rule():
            data.append({"type": self._rule.operator.type,
                         "operand": self._rule.operator.operand,
                         "data": self._rule.operator.data})
            self._rule.operator.data = json.dumps(data)
            self._rule.operator.type = Config.RULE_TYPE_LIST
            self._rule.operator.operand = Config.RULE_TYPE_LIST
        self._rule.name = rule_temp_name
        self.hide()
        if self._ischeckAdvanceded:
            self.checkAdvanced.toggle()
        self._ischeckAdvanceded = False
    except Exception as e:
        print("[pop-up] exception creating a rule:", e)
    finally:
        # Signal that the user took a decision and a new rule is available
        self._done.set()
        self.hide()
def add_page(self, contents, base_page, **options):
    """
    Adds the report info to a new DocumentPage.

    Expects JSON in the format:
    [
        {
            "title": "February 2015 OIG Report",
            "name": "Review of outstanding recommendations",
            "url": "http://www.fec.gov/fecig/documents/ReviewofOutstandingRecommendationsasofFebruary2015-FinalReport.pdf",
            "date": "02/01/2015",
            "category": "oig report"
        },
    ]
    """
    for item in contents:
        item_year = parser.parse(item['date']).year
        title = item['title']
        slug = slugify(str(item_year) + '-' + title)[:225]
        url_path = options['parent_path'] + slug + '/'
        dt_unaware = parser.parse(item['date'])
        # Make the datetime timezone-aware to get rid of warnings
        publish_date = timezone.make_aware(dt_unaware,
                                           timezone.get_current_timezone())
        size = item['size'] if 'size' in item else None
        category = self.validate_category(
            item.get('category', DEFAULT_CATEGORY),
            DEFAULT_CATEGORY, document_categories, **options)
        document_page = DocumentPage(
            depth=4,
            numchild=0,
            title=title,
            file_url=item['url'],
            file_name=item['name'],
            size=size,
            category=category,
            live=1,
            has_unpublished_changes='0',
            url_path=url_path,
            seo_title=title,
            show_in_menus=0,
            search_description=title,
            expired=0,
            owner_id=1,
            locked=0,
            latest_revision_created_at=publish_date,
            first_published_at=publish_date)
        try:
            base_page.add_child(instance=document_page)
            saved_page = DocumentPage.objects.get(id=document_page.id)
            saved_page.first_published_at = publish_date
            saved_page.created_at = publish_date
            saved_page.date = publish_date
            saved_page.save()
            if options['verbosity'] > 1:
                self.stdout.write(self.style.SUCCESS(
                    'Successfully added {0}.'.format(saved_page.id)))
        except Exception:
            self.stdout.write(self.style.WARNING(
                'Could not save page {0}'.format(document_page.title)))
def validate_slug(self, key, value) -> str:
    assert slugify(value) == value, "Incorrect slug for domain!"
    return value
def save(self, *args, **kwargs):
    self.slug = slugify(self.title)
    super(ArticlePost, self).save(*args, **kwargs)
def sync_cuisines(output_folder: str = "_cuisines"):
    typer.secho("sync-cuisines", fg="yellow")
    aliases = load_aliases()
    cuisine_aliases = aliases["cuisines"]
    data = []
    places = Path("_places").glob("*.md")
    for place in places:
        post = frontmatter.loads(place.read_text())
        cuisines = post["cuisines"]
        if cuisines and len(cuisines):
            data += cuisines
    output_folder = Path(output_folder)
    if not output_folder.exists():
        output_folder.mkdir()
    for cuisine in CUISINE_INITIAL:
        cuisine_slug = slugify(cuisine, stopwords=STOPWORDS)
        input_file = output_folder.joinpath(f"{cuisine_slug}.md")
        if input_file.exists():
            post = frontmatter.load(input_file)
        else:
            post = frontmatter.loads("")
        post["active"] = True
        post["description"] = (
            f"{cuisine} restaurants offering curbside, takeout, and "
            "delivery food in Lawrence, Kansas")
        post["name"] = cuisine
        post["sitemap"] = True
        post["slug"] = cuisine_slug
        if cuisine.endswith("s"):
            post["title"] = f"{cuisine} in Lawrence, Kansas"
        else:
            post["title"] = f"{cuisine} Restaurants in Lawrence, Kansas"
        try:
            aliases = [
                alias["aliases"]
                for alias in cuisine_aliases
                if cuisine_slug == alias["name"].lower()
            ][0]
            redirect_from = [
                f"/cuisines/{slugify(alias, stopwords=STOPWORDS)}/"
                for alias in aliases
            ]
            post["aliases"] = aliases
            post["redirect_from"] = redirect_from
        except IndexError:
            pass
        typer.echo(dict(Cuisine.validate(post.metadata)))
        input_file.write_text(frontmatter.dumps(post))
    data = set(data)
    alias_data = []
    aliases = [alias["aliases"] for alias in cuisine_aliases]
    for alias in aliases:
        alias_data += alias
    for cuisine in data:
        cuisine_slug = slugify(cuisine, stopwords=STOPWORDS)
        if cuisine.lower() not in alias_data:
            input_file = output_folder.joinpath(f"{cuisine_slug}.md")
            if input_file.exists():
                post = frontmatter.loads(input_file.read_text())
                obj = Cuisine.validate(post.metadata)
            else:
                post = frontmatter.loads("")
                post["name"] = cuisine
                post["slug"] = cuisine_slug
                obj = Cuisine.validate(post.metadata)
            typer.echo(dict(obj))
            post.metadata.update(dict(obj))
            input_file.write_text(frontmatter.dumps(post))
def slug(self):
    """Return a slug for the instance."""
    return slugify(self.name, max_length=25)
def f(
    image,
    internal_port,
    host_port,
    env=None,
    volumes=None,
    command=None,
    checker_callable=None,
    skip_exception=None,
    timeout=None,
):
    if docker_pull:
        print(f'Pulling {image} image')
        docker.pull(image)
    # Used in a devcontainer to allow volume access
    if getenv('LOCAL_WORKSPACE_FOLDER') is not None:
        volumes = [
            vol.replace(
                '/workspaces/toucan-connectors/tests/.',
                f'{getenv("LOCAL_WORKSPACE_FOLDER")}/tests',
            )
            for vol in volumes
        ]
    host_config = docker.create_host_config(
        port_bindings={internal_port: host_port}, binds=volumes)
    if volumes is not None:
        volumes = [vol.split(':')[1] for vol in volumes]
    container_name = '-'.join(['toucan', slugify(image), 'server'])
    print(f'Creating {container_name} on port {host_port}')
    container = docker.create_container(
        image=image,
        name=container_name,
        ports=[internal_port],
        detach=True,
        environment=env,
        volumes=volumes,
        command=command,
        host_config=host_config,
    )
    print(f'Starting {container_name}')
    docker.start(container=container['Id'])

    def fin():
        print(f'Stopping {container_name}')
        docker.kill(container=container['Id'])
        print(f'Killing {container_name}')
        with suppress(Exception):
            docker.remove_container(container['Id'], v=True)

    request.addfinalizer(fin)
    container['port'] = host_port
    if checker_callable is not None:
        wait_for_container(checker_callable, host_port, image,
                           skip_exception, timeout)
    return container
def sync_places(
    output_folder: str = "_places",
    sheet_app_id: str = typer.Argument(default="", envvar="LFK_GOOGLE_SHEET_APP_ID"),
    sheet_name: str = typer.Argument(default="", envvar="LFK_SHEET_NAME"),
):
    typer.secho("sync-places", fg="yellow")
    output_folder = Path(output_folder)
    if not output_folder.exists():
        output_folder.mkdir()
    cuisine_aliases = aliases_to_cuisine()
    aliases = load_aliases()
    try:
        unknown_cuisines = aliases["unknown-cuisines"][0]["aliases"]
    except Exception:
        unknown_cuisines = None
    try:
        sa = SpreadsheetApp(from_env=True)
    except AttributeError:
        print_expected_env_variables()
        raise typer.Exit()
    try:
        spreadsheet = sa.open_by_id(sheet_app_id)
    except Exception:
        typer.echo(
            f"We can't find that 'sheet_app_id'. Please double check that "
            f"'LFK_GOOGLE_SHEET_APP_ID' is set. (Currently set to: '{sheet_app_id}')"
        )
        raise typer.Exit()
    try:
        sheet = spreadsheet.get_sheet_by_name(sheet_name)
    except Exception:
        typer.echo(
            f"We can't find that 'sheet_name' aka the tab. Please double check "
            f"that 'LFK_SHEET_NAME' is set. (Currently set to: '{sheet_name}')"
        )
        raise typer.Exit()
    # Returns the sheet range that contains data values.
    data_range = sheet.get_data_range()
    table = Table(data_range, backgrounds=True)
    for item in table:
        name = item.get_field_value("name")
        address = item.get_field_value("address")
        neighborhood = item.get_field_value("neighborhood")
        slug = slugify(" ".join([name, neighborhood or address]),
                       stopwords=STOPWORDS)
        filename = f"{slug}.md"
        input_file = output_folder.joinpath(filename)
        if input_file.exists():
            post = frontmatter.load(input_file)
        else:
            post = frontmatter.loads("")
        place = {}
        place["sitemap"] = False
        place["slug"] = slug
        # Our goal is to build a Place record without having to deal with
        # annoying errors if a field doesn't exist. We will still let you
        # know which field wasn't there, though.
        if SHEETS_BOOL_FIELDS:
            for var in SHEETS_BOOL_FIELDS:
                try:
                    place[var] = string_to_boolean(item.get_field_value(var))
                except ValueError:
                    typer.echo(f"A column named '{var}' was expected, but not found.")
        if SHEETS_STRING_FIELDS:
            for var in SHEETS_STRING_FIELDS:
                try:
                    place[var] = item.get_field_value(var)
                except ValueError:
                    typer.echo(f"A column named '{var}' was expected, but not found.")
        if SHEETS_URL_FIELDS:
            for var in SHEETS_URL_FIELDS:
                try:
                    place[var] = verify_http(item.get_field_value(var))
                except ValueError:
                    typer.echo(f"A column named '{var}' was expected, but not found.")
        food_urls = []
        if "cuisine" in place and len(place["cuisine"]):
            place["cuisines"] = [
                cuisine.strip() for cuisine in place["cuisine"].split(",")
            ]
            if unknown_cuisines:
                place["cuisines"] = [
                    cuisine for cuisine in place["cuisines"]
                    if slugify(cuisine, stopwords=STOPWORDS) not in unknown_cuisines
                ]
        else:
            place["cuisines"] = None
        if place["cuisines"] and len(place["cuisines"]):
            place["cuisine_slugs"] = []
            for cuisine in place["cuisines"]:
                cuisine_slug = slugify(cuisine, stopwords=STOPWORDS)
                place["cuisine_slugs"].append(cuisine_slug)
                if (cuisine_slug in cuisine_aliases
                        and cuisine_aliases[cuisine_slug] not in place["cuisine_slugs"]):
                    place["cuisine_slugs"].append(cuisine_aliases[cuisine_slug])
        else:
            place["cuisine_slugs"] = None
        if "neighborhood" in place and len(place["neighborhood"]):
            place["neighborhood_slug"] = slugify(place["neighborhood"],
                                                 stopwords=STOPWORDS)
        if "delivery_service_websites" in place and len(place["delivery_service_websites"]):
            food_urls.append({
                "name": "order online",
                "url": place["delivery_service_websites"],
            })
        if FOOD_SERVICE_URLS:
            for var in FOOD_SERVICE_URLS:
                try:
                    value = verify_http(item.get_field_value(var))
                    if len(value):
                        food_urls.append({
                            "name": FOOD_SERVICE_DICT.get(var),
                            "url": value,
                        })
                except ValueError:
                    typer.echo(f"A column named '{var}' was expected, but not found.")
        place["food_urls"] = [food_url for food_url in food_urls if food_url]
        post.content = item.get_field_value("notes")
        post.metadata.update(place)
        typer.echo(dict(Place.validate(post.metadata)))
        input_file.write_text(frontmatter.dumps(post))
def http_get(self, url, zone=None, params=None, *args, **kwargs):
    """
    Fetch a URL, retrying with exponential backoff on HTTP errors.

    :param url: the URL to fetch
    :param zone: zone used to build the request headers
    :param params: parameters interpolated into the URL
    :return: the successful requests response
    """
    # logger.debug(f"Run fetch task {str(self)}")
    from time import sleep
    delay = 5
    last_exception = None
    for i in range(5):  # 5 retries on errors
        try:
            url = self.process_url(url, params)
            logger.debug(url)
            r = requests.get(url, headers=self.request_headers(zone),
                             *args, **kwargs)
            r.raise_for_status()
            return r
        except HTTPError as e:
            logger.error('{} Failed. Retry in {} seconds: {}'.format(
                str(self), delay, e))
            err = {
                'request_url': e.request.url,
                'request_headers': dict(e.request.headers),
                'response_headers': dict(e.response.headers),
                'response_body': e.response.text
            }
            fn = slugify(url)
            p = Path(self.config.cache_errors).joinpath(fn)
            if not p.parent.exists():
                p.parent.mkdir(parents=True, exist_ok=True)
            with p.open('w') as f:
                json.dump(err, f, default=json_serial, indent=4)
            delay *= 2  # Exponential backoff, capped at 60 seconds
            delay = delay if delay <= 60 else 60
            sleep(delay)
            last_exception = e
        except Exception as e:
            logger.error(f"Error '{type(e)}: {e}' for {self.access_object}")
            last_exception = e
    if last_exception:
        logger.error(f"{last_exception} Giving up.")
        raise last_exception
"Japanese", "Korean", "Latin American", "Mexican", "Middle-Eastern", "Pizza", "Sandwiches/Subs", "Seafood", "Spanish", "Steakhouse", "Sushi", "Thai", ] CUISINE_INITIAL_SLUGS = [ slugify(cuisine, stopwords=STOPWORDS) for cuisine in CUISINE_INITIAL ] # Don't customize these EXPECTED_ENV_VARS = [ "LFK_GOOGLE_SHEET_APP_ID", "SHEETFU_CONFIG_AUTH_PROVIDER_URL", "SHEETFU_CONFIG_AUTH_URI", "SHEETFU_CONFIG_CLIENT_CERT_URL", "SHEETFU_CONFIG_CLIENT_EMAIL", "SHEETFU_CONFIG_CLIENT_ID", "SHEETFU_CONFIG_PRIVATE_KEY", "SHEETFU_CONFIG_PRIVATE_KEY_ID", "SHEETFU_CONFIG_PROJECT_ID", "SHEETFU_CONFIG_TOKEN_URI", "SHEETFU_CONFIG_TYPE",
def get_mail_from_id(self, id):
    fullname = self.get_member(id=id)["fullName"]
    return ".".join(slugify(el.lower()) for el in fullname.split(" ")) + "@nsigma.fr"
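# Hypothetical usage (not from the original source); `client` stands in
# for whatever object exposes get_member, and an accent-stripping
# slugify is assumed.
print(client.get_mail_from_id(42))
# e.g. fullName "Jean-Noël Dupont" -> "jean-noel.dupont@nsigma.fr"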
"shandarogers_", "roanmclean" ] userVideoArrayList["fitness"] = [ "naomiboyer", "alexgeorgy", "bbycailey", "hope.cee", "jessthrowiton", "kaylaseah", "elizabethkando", "inthesejeans", "mscrisssy", "thais.talo" ] bumpEmail = userEmails.get("maternitywear") or None result = rn.UpdateRelations(bumpEmail, "demo", monthFlags, 0.51, "bump-style", 1) # 图片帖子 userFlagMap = {} for key, userArray in userImageArrayList.items(): flag = slugify(key) for user in userArray: userFlagMap[user] = flag userLimit = 19 userCount = 0 users = userFlagMap.keys() print len(users) random.shuffle(users) for user in users: flag = userFlagMap.get(user) or None if not flag: continue email = userEmails.get(user) or None
def slug(self):
    return slugify(self.name)
def save(self, *args, **kwargs):
    self.slug = slugify(self.title)
    super(Image, self).save(*args, **kwargs)
def load_persons(file_name):
    persons, parties = {}, {}
    with open(file_name, 'rb') as fh:
        data = json.load(fh)
    for d in data.get('results'):
        out = {'memberships': [], 'identifiers': [], 'links': []}
        p = d['partei']
        if p is not None and p != 'fraktionslos':
            pid = 'popit.bundestag/party/%s' % slugify(p)
            if pid not in parties:
                parties[pid] = {
                    'id': pid,
                    'classification': 'party',
                    'name': p
                }
            out['party_id'] = pid
            out['memberships'].append({
                'organization_id': pid,
                'role': 'member'
            })
        if d['slug'] is None:
            continue
        pid = 'popit.bundestag/person/%s' % d.pop('slug')
        out['id'] = pid
        out['identifiers'].append({
            'identifier': d.pop('fingerprint'),
            'scheme': 'offenesparlament.de'
        })
        name = '%s %s %s %s' % (d['titel'] or '', d['vorname'],
                                d['adelstitel'] or '', d['nachname'])
        name = re.sub(r'\s+', ' ', name).strip()
        out['name'] = name
        out['family_name'] = d.pop('nachname')
        out['first_name'] = d.pop('vorname')
        out['birth_date'] = d.pop('geburtsdatum')
        out['state'] = d.pop('land')
        ges = d.pop('geschlecht') or ''
        # Substring check matches both 'weiblich' and 'Weiblich'
        fem_b = 'eiblich' in ges
        out['gender'] = 'female' if fem_b else 'male'
        out['biography'] = d.pop('bio')
        out['links'].append({
            'url': d.pop('bio_url'),
            'note': 'Official profile'
        })
        out['links'].append({
            'url': d.pop('source_url'),
            'note': 'XML profile data'
        })
        f = d.pop('foto_url')
        if f is not None:
            out['image'] = f
        f = d.pop('mdb_id')
        if f is not None:
            out['identifiers'].append({
                'identifier': f,
                'scheme': 'bundestag.de'
            })
        #print d.keys()
        #print d['bio_url']
        print pid
        persons[pid] = out
    with open('parties.json', 'wb') as fh:
        json.dump(parties.values(), fh, indent=2)
    with open('people.json', 'wb') as fh:
        json.dump(persons.values(), fh, indent=2)
    return persons, parties
def get_modules_marked_with_tag(self, tag_key, tag_value):
    tags_distribution_data = self._get_course_module_data()
    available_tags = self.get_available_tags()
    intermediate = OrderedDict()

    def _get_tags_info(av_tags, tags):
        """
        Helper function to return information about all tags connected
        with the current item.
        """
        data = {}
        for av_tag_key in av_tags:
            if av_tag_key in tags and tags[av_tag_key]:
                data[av_tag_key] = u', '.join(tags[av_tag_key])
            else:
                data[av_tag_key] = None
        return data

    for item in tags_distribution_data.values():
        if tag_key in item['tags']:
            for item_tag_val in item['tags'][tag_key]:
                if tag_value == slugify(item_tag_val):
                    val = {
                        'id': item['id'],
                        'name': item['id'],
                        'total_submissions': item['total_submissions'],
                        'correct_submissions': item['correct_submissions'],
                        'incorrect_submissions': item['incorrect_submissions'],
                        'correct_percent': utils.math.calculate_percent(
                            item['correct_submissions'], item['total_submissions']),
                        'incorrect_percent': utils.math.calculate_percent(
                            item['incorrect_submissions'], item['total_submissions']),
                        'url': reverse(
                            'courses:performance:learning_outcomes_answers_distribution',
                            kwargs={
                                'course_id': self.course_id,
                                'tag_value': tag_value,
                                'problem_id': item['id']
                            })
                    }
                    if available_tags:
                        val.update(_get_tags_info(available_tags, item['tags']))
                    intermediate[item['id']] = val

    result = []
    index = 0
    course_structure = self._get_course_structure()
    for key, val in six.iteritems(course_structure):
        if key in intermediate:
            first_parent = course_structure[val['parent']]
            second_parent = course_structure[first_parent['parent']]
            index += 1
            intermediate[key]['index'] = index
            intermediate[key]['name'] = u', '.join([
                second_parent['display_name'],
                first_parent['display_name'],
                val['display_name']
            ])
            result.append(intermediate[key])
    return result
keyDict = defaultdict(list)
rhythmDict = defaultdict(list)
with open(tunebookDir + archiveName, 'r') as infile:
    for line in infile:
        if line.startswith(('%', '\\', 'E:')):
            continue
        #line = line.rstrip()
        if line.startswith('X:'):
            abcIndex = line.split(':')[1]
            titleFound = False
            continue
        if line.startswith('T:') and titleFound == False:
            titleFound = True
            title = line.split(':')[1]
            titleDict[slugify(title)] = title
            abcDict[slugify(title)] = ' X:' + abcIndex
            abcDict[slugify(title)] += ' ' + line
        elif line.startswith('K:'):
            keyDict[slugify(title)] = line.split(':')[1]
            abcDict[slugify(title)] += ' ' + line
        elif line.startswith('R:'):
            rhythmDict[slugify(title)] = line.split(':')[1]
            abcDict[slugify(title)] += ' ' + line
        elif abcIndex:
            abcDict[slugify(title)] += ' ' + line

for tuneName in abcDict:
    with open(tunesDir + tuneName + '-' + archiveStub + '.md', 'w') as outfile:
        outfile.write('---\ntitle: ' + titleDict[tuneName])
# Read voters
i = 0
voters = {}
with open('source/voters.tsv', 'r') as f:
    csvreader = csv.reader(f, delimiter="\t")
    for row in csvreader:
        if i == 0:
            # Skip the header row
            pass
        else:
            voter = {
                'id': row[0].strip(),
                'name': row[1].strip(),
                'short_name': row[2].strip(),
                'code': row[3].strip(),
                'friendly_name': slugify(row[2].strip())
            }
            voters[voter['code']] = voter
        i = i + 1

#print voters

# Get votes and details (comments)
i = 0
details = {}
print("mismatching codes:")
with open('source/answers.tsv', 'r') as f:
    csvreader = csv.reader(f, delimiter="\t")
    for row in csvreader:
        if i == 0:
            questions = {}  # col: id
"Parking", "Pharmacy", "Restaurant", "School", "Shopping Mall", "Stadium", "Store", "Train Station", "University", "Zoo", ] str = "" # JSON string generation loop with categories for i, pc in enumerate(places_categories): str += '''{{ 'model': 'miot.category', 'pk': {0}, 'fields': {{ 'name': '{1}', 'slug': '{2}', 'emoji_name': null, 'short_description': 'A place.', 'parent': 1 }} }},'''.format(i + place_index + 1, pc, slugify(pc)) # Print the final JSON string print(str.replace("\'", "\""))
def get(self, request):
    datatype_factory = DataTypeFactory()
    set_precision = GeoUtils().set_precision
    resourceid = request.GET.get("resourceid", None)
    nodeid = request.GET.get("nodeid", None)
    tileid = request.GET.get("tileid", None)
    nodegroups = request.GET.get("nodegroups", [])
    precision = request.GET.get("precision", None)
    field_name_length = int(request.GET.get("field_name_length", 0))
    use_uuid_names = bool(request.GET.get("use_uuid_names", False))
    include_primary_name = bool(request.GET.get("include_primary_name", False))
    include_geojson_link = bool(request.GET.get("include_geojson_link", False))
    use_display_values = bool(request.GET.get("use_display_values", False))
    geometry_type = request.GET.get("type", None)
    indent = request.GET.get("indent", None)
    limit = request.GET.get("limit", None)
    page = int(request.GET.get("page", 1))
    if limit is not None:
        limit = int(limit)
    if indent is not None:
        indent = int(indent)
    if isinstance(nodegroups, str):
        nodegroups = nodegroups.split(",")
    if hasattr(request.user, "userprofile") is not True:
        models.UserProfile.objects.create(user=request.user)
    viewable_nodegroups = request.user.userprofile.viewable_nodegroups
    nodegroups = [i for i in nodegroups if i in viewable_nodegroups]
    nodes = models.Node.objects.filter(
        datatype="geojson-feature-collection",
        nodegroup_id__in=viewable_nodegroups)
    if nodeid is not None:
        nodes = nodes.filter(nodeid=nodeid)
    nodes = nodes.order_by("sortorder")
    features = []
    i = 1
    property_tiles = models.TileModel.objects.filter(nodegroup_id__in=nodegroups)
    property_node_map = {}
    property_nodes = models.Node.objects.filter(
        nodegroup_id__in=nodegroups).order_by("sortorder")
    for node in property_nodes:
        property_node_map[str(node.nodeid)] = {"node": node}
        if node.fieldname is None or node.fieldname == "":
            property_node_map[str(node.nodeid)]["name"] = slugify(
                node.name, max_length=field_name_length, separator="_")
        else:
            property_node_map[str(node.nodeid)]["name"] = node.fieldname
    tiles = models.TileModel.objects.filter(
        nodegroup__in=[node.nodegroup for node in nodes])
    last_page = None
    if resourceid is not None:
        tiles = tiles.filter(resourceinstance_id__in=resourceid.split(","))
    if tileid is not None:
        tiles = tiles.filter(tileid=tileid)
    tiles = tiles.order_by("sortorder")
    if limit is not None:
        start = (page - 1) * limit
        end = start + limit
        tile_count = tiles.count()
        last_page = tiles.count() < end
        tiles = tiles[start:end]
    for tile in tiles:
        data = tile.data
        for node in nodes:
            try:
                for feature_index, feature in enumerate(data[str(node.pk)]["features"]):
                    if geometry_type is None or geometry_type == feature["geometry"]["type"]:
                        if len(nodegroups) > 0:
                            for pt in property_tiles.filter(
                                    resourceinstance_id=tile.resourceinstance_id).order_by("sortorder"):
                                for key in pt.data:
                                    field_name = key if use_uuid_names else property_node_map[key]["name"]
                                    if pt.data[key] is not None:
                                        if use_display_values:
                                            property_node = property_node_map[key]["node"]
                                            datatype = datatype_factory.get_instance(
                                                property_node.datatype)
                                            value = datatype.get_display_value(pt, property_node)
                                        else:
                                            value = pt.data[key]
                                        try:
                                            feature["properties"][field_name].append(value)
                                        except KeyError:
                                            feature["properties"][field_name] = value
                                        except AttributeError:
                                            feature["properties"][field_name] = [
                                                feature["properties"][field_name], value]
                        if include_primary_name:
                            feature["properties"]["primary_name"] = self.get_name(
                                tile.resourceinstance)
                        feature["properties"]["resourceinstanceid"] = tile.resourceinstance_id
                        feature["properties"]["tileid"] = tile.pk
                        if nodeid is None:
                            feature["properties"]["nodeid"] = node.pk
                        if include_geojson_link:
                            feature["properties"]["geojson"] = "%s?tileid=%s&nodeid=%s" % (
                                reverse("geojson"), tile.pk, node.pk)
                        feature["id"] = i
                        if precision is not None:
                            coordinates = set_precision(
                                feature["geometry"]["coordinates"], precision)
                            feature["geometry"]["coordinates"] = coordinates
                        i += 1
                        features.append(feature)
            except KeyError:
                pass
            except TypeError as e:
                print(e)
                print(tile.data)
    feature_collection = {"type": "FeatureCollection", "features": features}
    if last_page is not None:
        feature_collection["_page"] = page
        feature_collection["_lastPage"] = last_page
    response = JSONResponse(feature_collection, indent=indent)
    return response
def generate_slug(title, published_at):
    return slugify(title)
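# Note that `published_at` is accepted but never used above. If a
# date-prefixed slug was the intent, a hedged variant might look like
# this (hypothetical, not from the original source; the date format is
# an assumption):
def generate_slug_with_date(title, published_at):
    return '{}-{}'.format(published_at.strftime('%Y-%m-%d'), slugify(title))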
def clean_slug(self):
    if not self.cleaned_data.get('slug', None):
        if self.instance is None or not ALLOW_SLUG_CHANGE:
            self.cleaned_data['slug'] = slugify(
                SLUG_TRANSLITERATOR(self.cleaned_data['name']))
    return self.cleaned_data['slug'][:50]
def _get_slugline(f):
    f['slugline'] = slugify(f['headline'])
    return None
def write_album(album):
    tracks = spotify.album_tracks(album['id'])
    text_file = u'{0}.txt'.format(slugify(album['name'], ok='-_()[]{}'))
    print(u'Feeding {0} tracks to {1}'.format(tracks['total'], text_file))
    write_tracks(text_file, tracks)
        })
        vendor_code_key.update({'available': '1'})
        vendor_code_key.update({'stock': '1'})
        a += 1
    # 5. If the key is missing, build a new import file for the new products
    except KeyError:
        if str(raw_product.iloc[c, 2]) == "В наявності":  # "In stock"
            id_t += 1
            id_product = str(id_t)
            vendor = "Ricoh"
            category = "CATEGORY"
            type_product = "TYPE_PRODUCT"
            name = str(raw_product.iloc[c, 1])
            name = '"' + name.replace(',', '').replace('"', '') + '"'
            vendor_code = str(raw_product.iloc[c, 0])
            slug = slugify(name + '-' + vendor_code)
            price = str(raw_product.iloc[c, 4])
            price = round(float(price.replace(",", ".")) * rates, 2)
            price = str(price)
            provider = prov
            available, stock = "1", "1"
            product_file_not_in_db.writelines(
                id_product + ',' + category + ',' + name + ',' + slug + ',' +
                provider + ',' + vendor_code + ',' + vendor + ',' +
                type_product + ',' + price + ',' + stock + ',' + available + '\n')
            b += 1
    c += 1
# 6. Build the updated baden-new.json export file
def _update_slugline(s, f):
    if 'headline' in f.keys():
        f['slugline'] = slugify(f['headline'])
    return None