def upload_to(instance, filename):
    """Build the storage path for an uploaded track file.

    Produces ``tracks/<artist-slug>--<name-slug><ext>`` with the original
    file extension lower-cased.
    """
    _, extension = os.path.splitext(filename)
    artist_slug = slugify(instance.artist)
    name_slug = slugify(instance.name)
    return "tracks/%s--%s%s" % (artist_slug, name_slug, extension.lower())
def fetch_contact_field_results(org, contact_field, segment):
    """Fetch flow results for ``contact_field`` from the org's Temba API
    and cache the serialized data.

    District-level segments get the shorter run-cache timeout since they
    are refreshed more often. Failures are reported to Sentry and logged,
    never raised (best-effort background fetch).
    """
    from ureport.polls.models import CACHE_ORG_FIELD_DATA_KEY, UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME
    from ureport.polls.models import UREPORT_RUN_FETCHED_DATA_CACHE_TIME
    start = time.time()
    print("Fetching %s for %s with segment %s" % (contact_field, org.name, segment))
    cache_time = UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME
    if segment and segment.get('location', "") == "District":
        cache_time = UREPORT_RUN_FETCHED_DATA_CACHE_TIME
    try:
        segment = substitute_segment(org, segment)
        this_time = datetime.now()
        temba_client = org.get_temba_client()
        client_results = temba_client.get_results(contact_field=contact_field, segment=segment)
        results_data = temba_client_flow_results_serializer(client_results)
        print("Fetch took %ss" % (time.time() - start))
        key = CACHE_ORG_FIELD_DATA_KEY % (org.pk, slugify(unicode(contact_field)), slugify(unicode(segment)))
        cache.set(key, {'time': datetime_to_ms(this_time), 'results': results_data}, cache_time)
    except Exception:
        # Narrowed from a bare except: still best-effort, but no longer
        # swallows SystemExit/KeyboardInterrupt.
        client.captureException()
        import traceback
        traceback.print_exc()
def clean(self):
    """Ensure a slug exists and does not collide with a site Redirect.

    Builds the slug from ``title`` (falling back to ``name``), validates
    it, and raises ``ValidationError`` if a Redirect already occupies the
    page's URL on this site.
    """
    if self.slug in ("", None):
        try:
            self.slug = slugify(self.title)
        except AttributeError:
            # Model has no ``title`` field; derive the slug from ``name``.
            self.slug = slugify(self.name)
    self.validate_slug()
    if hasattr(self, 'get_absolute_url'):
        try:
            path = self.get_absolute_url()
        except Exception:
            # get_absolute_url can fail before the object is routable;
            # fall back to the bare slug for the redirect check.
            path = self.slug
        site = self.site or Site.objects.get(pk=1)
        redirect = Redirect.objects.filter(site=site, old_path=path)
        if redirect.exists():
            raise ValidationError(
                _(u"The URL already exists as a redirect")
            )
    try:
        super(Slugged, self).clean()
    except AttributeError:
        pass  # parent class does not implement the clean method
def _populate_channels(self, channels):
    """Access the rss page and catch all available channels."""
    Channel = get_model('channels', 'Channel')
    for channel_tag in channels:
        channel_name = channel_tag.find('h6').text
        channel, _ = Channel.objects.get_or_create(
            name=channel_name,
            slug=slugify(channel_name),
            show_in_menu=True,
            published=True,
            user=self._user,
        )
        # Anchors whose href does not start with '#' are sub-channels.
        anchors = channel_tag.findAll('a', {'href': re.compile('^(?!(#))')})
        for anchor in anchors:
            sub_name = anchor.text
            sub_channel, _ = Channel.objects.get_or_create(
                name=sub_name,
                slug=slugify(sub_name),
                show_in_menu=True,
                published=True,
                user=self._user,
                parent=channel,
            )
            self._populate_posts(sub_channel, anchor['href'])
def setUp(self):
    """Create default Sections and Posts."""
    self.blog = Blog.objects.first()
    self.apples = Section.objects.create(name="Apples", slug="apples")
    self.oranges = Section.objects.create(name="Oranges", slug="oranges")
    self.user = self.make_user("patrick")
    self.markup = "markdown"
    # Last STATE_CHOICES entry is the published state.
    published_state = Post.STATE_CHOICES[-1][0]

    # One published post per section.
    self.orange_title = "Orange You Wonderful"
    self.orange_slug = slugify(self.orange_title)
    self.orange_post = Post.objects.create(
        blog=self.blog,
        section=self.oranges,
        title=self.orange_title,
        slug=self.orange_slug,
        author=self.user,
        markup=self.markup,
        state=published_state,
    )

    self.apple_title = "Apple of My Eye"
    self.apple_slug = slugify(self.apple_title)
    self.apple_post = Post.objects.create(
        blog=self.blog,
        section=self.apples,
        title=self.apple_title,
        slug=self.apple_slug,
        author=self.user,
        markup=self.markup,
        state=published_state,
    )
def handle(self, show_index_id, **options):
    """Create a Redirect from each student's legacy name-based URL to
    their current page under the given show index page.

    :param show_index_id: pk of the ShowIndexPage whose students get redirects
    """
    show_index = ShowIndexPage.objects.get(id=show_index_id)
    # Compile the substitution patterns once instead of per student.
    compiled_subs = [(re.compile(pattern), repl) for pattern, repl in SUB_EXPRESSIONS]
    for student in show_index.get_students():
        # Normalise the name the same way the legacy URLs did.
        first_name = student.first_name
        last_name = student.last_name
        for pattern, repl in compiled_subs:
            first_name = pattern.sub(repl, first_name)
            last_name = pattern.sub(repl, last_name)
        from_url = slugify(first_name) + '-' + slugify(last_name)
        # Resolve the student's current URL inside the index page.
        if show_index.is_programme_page:
            to_url = show_index.reverse_subpage('student', programme=student.programme, slug=student.slug)
        else:
            to_url = show_index.reverse_subpage('student', school=student.school, programme=student.programme, slug=student.slug)
        from_url_normalised = Redirect.normalise_path(from_url)
        redirect, created = Redirect.objects.get_or_create(old_path=from_url_normalised, defaults={'redirect_link': to_url})
        if created:
            # print as a function call: valid in both py2 and py3
            print("Created redirect: " + from_url_normalised + " to: " + to_url + " for student: " + student.title + " (" + str(student.id) + ")")
def get_job_links(job, max_sites=3):
    """Render anchor tags linking to ``job`` on each site it appears on.

    Returns '' for unapproved jobs; output is capped at ``max_sites``
    links with an ellipsis marker when more exist.
    """
    if hasattr(job, 'is_approved') and not job.is_approved:
        return ''
    base_url = 'http://{domain}/{loc_slug}/{title_slug}/{guid}/job/'
    href_tag = '<a href="{url}">{domain}</a>'
    title_slug = bleach.clean(slugify(job.title))
    locations = job.locations.all()
    domains = [site.domain for site in job.on_sites()]
    links = []
    for site_domain in domains:
        for loc in locations:
            loc_slug = bleach.clean(slugify(u'{city}, {state}'.format(
                city=loc.city, state=loc.state_short)))
            url = base_url.format(domain=site_domain, loc_slug=loc_slug,
                                  title_slug=title_slug, guid=loc.guid)
            links.append(href_tag.format(url=url, domain=site_domain))
    url_html = mark_safe("<br/>".join(links[:max_sites]))
    if max_sites and len(links) > max_sites:
        url_html = mark_safe("%s <br/>..." % url_html)
    return url_html
def save(self, *args, **kwargs):
    """Populate missing slugs from the title/categories before saving.

    Each slug is generated independently and only when absent, so saving
    again never clobbers an existing slug (the original regenerated both
    whenever either was missing, and duplicated the super().save() call).
    """
    if not self.slug:
        self.slug = slugify(self.title)
    if not self.category_slug:
        self.category_slug = slugify(self.categories)
    super().save(*args, **kwargs)
def duplikat_li(request, slug, keg, kode, li):
    """Duplicate a LembarInstruksi (instruction sheet).

    Only the original assigner (``pemberi``) who is also a member of the
    activity may duplicate; everyone else is redirected with a warning.
    (Removed the original's dead ``if slug is None or kode is None: pass``
    no-op guard.)
    """
    try:
        instruksi = get_object_or_404(LembarInstruksi, pk=li)
    except Http404:
        messages.warning(request, 'Penugasan tidak ditemukan')
        return redirect('halaman_tugas_anggota', pk=keg)
    try:
        # Clearing the pk makes save() insert a copy instead of updating.
        instruksi.pk = None
        if cek_keanggotaan(request.user, keg):
            if request.user.username == instruksi.pemberi.username:
                instruksi.save()
                messages.warning(request, 'Lembar instruksi berhasil diduplikat')
                return redirect('halaman_li_anggota_rinci', slug=slugify(instruksi.nomor, allow_unicode=True), pk=li, keg=keg)
            else:
                messages.warning(request, 'Hanya pemberi tugas yang mendapatkan hak akses!')
                return redirect('halaman_tugas_anggota', pk=keg)
        else:
            messages.warning(request, 'Maaf, Anda tidak mendapatkan hak akses!')
            return redirect('halaman_tugas_anggota', pk=keg)
    except Http404:
        messages.warning(request, 'Tugas tidak ditemukan')
        return redirect('halaman_li_anggota_rinci', slug=slugify(instruksi.nomor, allow_unicode=True), pk=li, keg=keg)
def import_rt(apps, schema_editor):
    """Data migration: load RT records from the DKI Jakarta CSV.

    Rows whose Village or RW cannot be found are skipped, matching the
    original best-effort import. The bare excepts are narrowed to the
    model DoesNotExist exceptions actually expected from .get().
    """
    file = 'flood_mapper/data/List of RT DKI Jakarta - updated 2012.csv'
    RT = apps.get_model("flood_mapper", "RT")
    RW = apps.get_model("flood_mapper", "RW")
    Village = apps.get_model("flood_mapper", "Village")
    # NOTE(review): binary mode + unicode() is Python 2 csv usage; on
    # Python 3 this would need text mode — confirm target interpreter.
    with open(file, 'rb') as csvfile:
        spamreader = csv.reader(csvfile)
        for row in spamreader:
            village_name = row[0]
            rw_number = int(row[1])
            rt_number = int(row[2])
            village_name_slug = slugify(unicode(village_name))
            try:
                village = Village.objects.get(slug=village_name_slug)
            except Village.DoesNotExist:
                continue
            try:
                rw = RW.objects.get(village=village, name='RW %02.0f' % rw_number)
            except RW.DoesNotExist:
                continue
            rt = RT()
            rt.rw = rw
            rt.name = 'RT %02.0f' % rt_number
            rt.slug = slugify(rt.name)
            rt.save()
def get_test_song_dataset_params(self):
    """Pull a random row from the song list and build test dataset params.

    Returns a dict with title, author, datasetContact, dsDescription and
    an upload_file_path created from the slugified title.
    """
    fname = join('input', 'top3000-song-list.csv')
    assert isfile(fname), 'Input file not found: %s' % fname
    # Close the file instead of leaking the handle (original left it open);
    # also dropped the leftover py2 "print len(citems)" debug statement.
    with open(fname, 'rU') as fh:
        cline = random.choice(fh.readlines())
    citems = [unicode(x.strip().replace('"', '')) for x in cline.split(',')]
    position, artist, song_name, year = citems[:4]
    # title, description, contact
    title = '%s %s (%s)' % (song_name, artist, year)
    description = '%s by %s in %s' % (song_name, artist, year)
    datasetContact = '%s@%s.com' % (slugify(song_name), slugify(artist))
    upload_file_path = self.get_test_data_file_path('%s.txt' % slugify(title), description)
    return dict(title=song_name,
                author=artist,
                datasetContact=datasetContact,
                dsDescription=description,
                upload_file_path=upload_file_path,
                )
def export_files(self, request, queryset):
    """Admin action: bundle every selected submission's files into a zip.

    The archive is built in memory (BytesIO), fixing two issues in the
    original: the response streamed from a FileWrapper whose underlying
    file the ``with`` block had already closed, and a scratch zip was
    written to the working directory (the TODO asked for in-memory).
    """
    import io
    buffer = io.BytesIO()
    with ZipFile(buffer, 'w') as sols_zip:
        for obj in queryset:
            prob = slugify(obj.problem)
            ps = slugify(obj.get_problemset())
            user = str(obj.get_user())
            for psfile in obj.studentproblemfile_set.filter(
                    attempt_num=obj.attempt_num):
                head, filename = os.path.split(psfile.submitted_file.name)
                path = os.path.join(
                    vrfy.settings.MEDIA_ROOT, psfile.submitted_file.name)
                # Archive layout: <problemset>/<problem>/<user>/<file>
                sols_zip.write(
                    path,
                    "{!s}/{!s}/{!s}/{!s}".format(ps, prob, user, filename))
    response = HttpResponse(buffer.getvalue(), content_type='application/zip')
    response['Content-Disposition'] = 'attachment; filename=StudentSolutions.zip'
    return response
def create_slug(instance):
    """Build a unique product slug from the title plus shuffled words of
    the description, adding random characters and then numeric suffixes
    until no collision remains.

    NOTE(review): slugify strips unsafe characters and the ORM
    parameterises queries, which addresses the original injection TODOs.
    """
    words = instance.description[:15].split(" ")
    shuffle(words)
    slug = slugify(instance.title[:42] + " " + ' '.join(words))
    if Product.objects.filter(slug=slug).exists():
        # First collision: reshuffle and add 7 random alphanumerics.
        words = instance.description[:15].split(" ")
        shuffle(words)
        rnd_chars = [choice(string.ascii_letters + string.digits) for n in range(7)]
        shuffle(rnd_chars)
        suffix = ' '.join(words) + " " + ''.join(rnd_chars)
        slug = slugify(instance.title[:30] + " " + suffix)
    # Keep appending random numbers until the slug is unique.
    while True:
        if Product.objects.filter(slug=slug).exists():
            slug += "-" + str(randint(0, 2000000))
        else:
            break
    return slug
def main():
    """Walk the current tree and convert chapter*/...txt files into
    cltk_json documents, one JSON file per source text.

    Each output document carries the module-level ``source``/``sourceLink``
    metadata and a line-number -> line mapping under 'text'.
    """
    if not os.path.exists('cltk_json'):
        os.makedirs('cltk_json')
    for root, dirs, files in os.walk("."):
        path = root.split('/')
        for fname in files:
            if fname.startswith('chapter') or fname.endswith('txt'):
                # Title: "<parent-dir>_<basename>" in Title Case.
                title = path[-1] + '_' + fname
                title = title.split('.')[0].title()
                work = {
                    'originalTitle': title,
                    'englishTitle': title,
                    'author': "Not Available",
                    'source': source,
                    'sourceLink': sourceLink,
                    'language': 'Telugu',
                    'text': {},
                }
                # Close the source file promptly instead of leaking the handle.
                with open(os.path.join(root, fname)) as fh:
                    lines = fh.read().splitlines()
                lines = [node.strip() for node in lines if len(node.strip())]
                for i, node in enumerate(lines):
                    work['text'][i] = node
                out_name = slugify(work['source']) + '__' + slugify(work['englishTitle']) + '__' + slugify(work['language']) + '.json'
                out_name = out_name.replace(" ", "")
                with open('cltk_json/' + out_name, 'w') as f:
                    json.dump(work, f)
def get_candidate_url(self):
    """Return the canonical /candidate/<name-slug>/<id>/ URL.

    Prefers the checked candidate id over the raw id number; returns
    None when neither id is available.
    """
    candidate_id = self.candidate_id_checked or self.candidate_id_number
    if not candidate_id:
        return None
    return "/candidate/%s/%s/" % (slugify(unicode(self.candidate_name_raw())), candidate_id)
def get_context_data(self, geo_id1, geo_id2):
    """Build the comparison context for two geographies.

    Geo ids have the form '<level>-<code>'. Raises Http404 when either
    id is malformed or the geography cannot be found. (Removed the
    original's unused ``release`` local.)
    """
    page_context = {
        'geo_id1': geo_id1,
        'geo_id2': geo_id2,
    }
    version = self.request.GET.get('geo_version', self.default_geo_version)
    try:
        level, code = geo_id1.split('-', 1)
        self.geo = geo_data.get_geography(code, level, version)
        # Release year: explicit ?release=... or the geo's primary release.
        year = self.request.GET.get(
            'release', get_primary_release_year_per_geography(self.geo))
        page_context['geo1'] = geo_data.get_geography(code, level)
        page_context['geo1_slug'] = '-' + slugify(page_context['geo1'])
        page_context['geo1_release_year'] = str(year)
        level, code = geo_id2.split('-', 1)
        page_context['geo2'] = geo_data.get_geography(code, level)
        page_context['geo2_slug'] = '-' + slugify(page_context['geo2'])
        page_context['geo2_release_year'] = str(year)
        page_context['geography'] = self.geo.as_dict_deep()
        page_context['compare_primary_releases'] = get_page_releases_per_country(
            settings.HURUMAP['primary_dataset_name'], self.geo, year)
    except (ValueError, LocationNotFound):
        raise Http404
    return page_context
def get_artist_tracks_from_musicbrainz(artist):
    """
    Create Album, Track, and Solo records for artists we find in the
    MusicBrainzNGS API

    :param artist: an artist's name as a string to search for
    :return: Queryset of Solos
    """
    matches = mb.search_artists(artist)
    top_match = matches['artist-list'][0]
    tags = top_match['tag-list']
    # Only jazz artists are imported.
    if 'jazz' not in [tag['name'] for tag in tags]:
        return Solo.objects.none()
    instrument = Solo.get_instrument_from_musicbrainz_tags(tags)
    releases = mb.browse_releases(top_match['id'], includes=['recordings'])['release-list']
    for release in releases:
        album = Album.objects.create(name=release['title'],
                                     artist=artist,
                                     slug=slugify(release['title']))
        for entry in release['medium-list'][0]['track-list']:
            recording_title = entry['recording']['title']
            track = Track.objects.create(album=album,
                                         name=recording_title,
                                         track_number=entry['position'],
                                         slug=slugify(recording_title))
            Solo.objects.create(track=track,
                                artist=artist,
                                instrument=instrument,
                                slug=slugify(artist))
    return Solo.objects.filter(artist=artist)
def save(self, *args, **kwargs):
    """Keep the slug in sync with the name on every save."""
    expected = slugify(self.name)
    if self.slug != expected:
        self.slug = expected
    super(Semantic, self).save(*args, **kwargs)
def get_question_initial(self, question, data):
    """ Get the initial value that we should use in the Form

    :param Question question: The question
    :param dict data: Value from a POST request.
    :rtype: String or None """
    initial = None
    answer = self._get_preexisting_answer(question)
    if answer:
        # Initialize the field with values from the database if any
        if question.type == Question.SELECT_MULTIPLE:
            initial = []
            if answer.body == "[]":
                # Stored empty list: keep initial as [].
                pass
            elif "[" in answer.body and "]" in answer.body:
                initial = []
                # answer.body holds a repr()-style list such as "['a', 'b']".
                # Strip the brackets, split on commas, and take the text
                # between the single quotes of each element.
                # NOTE(review): fragile if a choice itself contains a
                # quote or comma — confirm upstream sanitisation.
                unformated_choices = answer.body[1:-1].strip()
                for unformated_choice in unformated_choices.split(","):
                    choice = unformated_choice.split("'")[1]
                    initial.append(slugify(choice))
            else:
                # Only one element
                initial.append(slugify(answer.body))
        else:
            initial = answer.body
    if data:
        # Initialize the field field from a POST request, if any.
        # Replace values from the database
        initial = data.get('question_%d' % question.pk)
    return initial
def create_slug(self, user, new_slug=None):
    """Return a unique profile slug for ``user``.

    Starts from the slugified username (reserved name 'edit' maps to
    'cus_edit' so it cannot shadow the edit URL) and recursively appends
    the pk of the newest clashing profile until the slug is unique.

    Keyword Arguments:
        new_slug {str} -- candidate slug from a previous recursion
            (default: {None})

    Returns:
        [str] -- unique slug
    """
    if new_slug is not None:
        slug = new_slug
    elif user.username not in ['edit']:
        slug = slugify(user.username)
    else:
        slug = slugify('cus_edit')
    clashes = User.objects.filter(profile__slug=slug).order_by('-pk')
    if clashes.exists():
        candidate = '%s-%s' % (slug, clashes.first().pk)
        return self.create_slug(user, new_slug=candidate)
    return slug
def archiveurl(context, page, *args):
    """[DEPRECATED] Returns the URL for the page that has the given slug.

    Use routablepageurl from wagtail.contrib.routable_page templatetag
    instead.

    for example:
    `{% archiveurl page author %}`
    should be:
    `{% routablepageurl page 'author' author %}`
    """
    logger.warning(
        ('DEPRECATED: cms tag archiveurl is depracated. '
         'Use routablepageurl from wagtail.contrib.routable_page '
         'templatetag instead.'))
    # Duck-typed dispatch on the first extra argument:
    # a .username attribute -> author route, a .name attribute -> tag
    # route, otherwise treat the args as date components. No args at all
    # (IndexError on args[0]) falls through to an empty route name.
    try:
        url_name = 'author'
        a_args = [slugify(args[0].username)]
    except AttributeError:
        try:
            url_name = 'tag'
            a_args = [slugify(args[0].name)]
        except AttributeError:
            url_name = 'date'
            a_args = args
    except IndexError:
        url_name = ''
        a_args = []
    return routablepageurl(context, page.specific, url_name, *a_args)
def add_places(apps, schema_editor):
    """Data migration: build a Country/Region/Area/Place hierarchy from
    each scheduler Location's city and attach the Place to the Location."""
    Country = apps.get_model('places', 'Country')
    Region = apps.get_model('places', 'Region')
    Area = apps.get_model('places', 'Area')
    Place = apps.get_model('places', 'Place')
    Location = apps.get_model('scheduler', 'Location')
    germany, _ = Country.objects.get_or_create(
        name='Deutschland', defaults=dict(slug=slugify('Deutschland')))
    for location in Location.objects.all():
        city = location.city
        city_slug = slugify(city)
        # Region -> Area -> Place all named after the city.
        region, _ = Region.objects.get_or_create(
            name=city, defaults=dict(slug=city_slug, country=germany))
        area, _ = Area.objects.get_or_create(
            name=city, defaults=dict(slug=city_slug, region=region))
        place, _ = Place.objects.get_or_create(
            name=city, defaults=dict(slug=city_slug, area=area))
        location.place = place
        location.save()
def guess_and_update_ldap_usernames(self, dn, model, commit=False):
    """Guess 'first.last' AD usernames for pages with a blank ad_username.

    Each guess is verified against LDAP; only usernames that exist are
    kept (and saved when ``commit`` is True).

    Returns a (updated_ids, not_updated_ids) pair: ids whose guess was
    accepted, and ids whose guessed username was not found in LDAP.
    """
    updated, not_updated = [], []
    for page in model.objects.filter(ad_username=''):
        guess = '%s.%s' % (slugify(page.first_name), slugify(page.last_name))
        if not self.check_ldap_username_exists(dn, guess):
            not_updated.append(page.id)
            continue
        page.ad_username = guess
        if commit:
            page.save()
        updated.append(page.id)
    return updated, not_updated
def create_first_instances(self):
    """Create the baseline fixture: a genre, an artist with an update,
    campaign and revenue report, and a second artist without a campaign."""

    def make_artist(name):
        # All fixture artists share the same coordinates/location.
        return Artist.objects.create(
            name=name,
            slug=slugify(name),
            lat=self.ARTIST_LATITUDE,
            lon=self.ARTIST_LONGITUDE,
            location=self.ARTIST_LOCATION,
        )

    self.genre = Genre.objects.create(name=self.GENRE_NAME)
    self.artist = make_artist(self.ARTIST_NAME)
    self.artist.genres.add(self.genre)
    self.update = Update.objects.create(artist=self.artist, text=self.ARTIST_UPDATE)
    self.campaign = Campaign.objects.create(
        artist=self.artist,
        amount=self.CAMPAIGN_AMOUNT,
        reason=self.CAMPAIGN_REASON,
        fans_percentage=self.CAMPAIGN_FANS_PERCENTAGE,
        end_datetime=timezone.now() + datetime.timedelta(days=14),
    )
    self.revenue_report = RevenueReport.objects.create(
        campaign=self.campaign,
        amount=self.CAMPAIGN_REVENUE_REPORT_AMOUNT,
    )
    self.artist_no_campaign = make_artist(self.ARTIST_NO_CAMPAIGN_NAME)
def video_post_save_receiver(sender, instance, created, *args, **kwargs):
    """post_save handler: give a newly created Video a unique slug.

    Uses the slugified title when it is free; when another video in the
    same category already uses it (or several do), a slug combining the
    title, category slug and video id is generated instead.
    """
    print("signal sent")
    if created:
        # Slugify the video title.
        slug_title = slugify(instance.title)
        # Fallback slug: title + category slug + video id.
        new_slug = "%s %s %s" % (instance.title, instance.category.slug, instance.id)
        try:
            # Taken by another video in this category -> use the fallback.
            obj_exists = Video.objects.get(slug=slug_title, category=instance.category)
            instance.slug = slugify(new_slug)
            instance.save()
            print("model exists, new slug generated")
        except Video.DoesNotExist:
            # Title slug is free: use it directly.
            instance.slug = slug_title
            instance.save()
            print("slug and model created")
        except Video.MultipleObjectsReturned:
            # Several videos already share the title slug -> fallback.
            instance.slug = slugify(new_slug)
            instance.save()
            print("multiple models exists, new slug generated")
        except Exception:
            # Best-effort: slug assignment must never break the save signal
            # (narrowed from a bare except).
            pass
def __str__(self):
    # Prefer this route's own description parts, falling back to the parent's.
    parts = self.description_parts or self.parent.description_parts
    if parts:
        start = slugify(parts[0])
        end = slugify(parts[-1])
        # Score how well the parts line up with this route's actual
        # endpoints, in both orientations.
        # NOTE(review): starts_at/ends_at presumably return numeric
        # scores (the == 4 comparison suggests a 2+2 maximum) — confirm.
        same_score = self.starts_at(start) + self.ends_at(end)
        reverse_score = self.starts_at(end) + self.ends_at(start)
        if same_score > reverse_score or (reverse_score == 4 and same_score == 4):
            description = ' - '.join(parts)
        elif same_score < reverse_score:
            # Parts read better in the opposite direction.
            description = ' - '.join(reversed(parts))
        else:
            # Ambiguous orientation: fall through to the fallbacks below.
            description = None
        if description:
            if self.parent.via:
                description += ' via ' + self.parent.via
            return description
    # Fall back to the service's per-direction description, then to the
    # bare direction name.
    if self.parent.service:
        return getattr(self.parent.service, self.direction + '_description')
    return self.direction.capitalize()
def save(self, *args, **kwargs):
    """Save blog post to database.

    On first save (no post_id yet) sets ``created`` and generates a
    unique slug; ``modified`` is refreshed on every save.

    Bug fix: the original truncated the title by ``len(dup)`` — the
    *number* of duplicates — instead of the digit count of the suffix,
    which over-truncated and produced a negative slice once 50+
    duplicates existed. The title is now trimmed so the final slug stays
    within 50 characters.

    Returns:
        The result of BlogPost.super().save()
    """
    if not self.post_id:
        self.created = timezone.now()
        # Check whether this title already produced a slug.
        duplicates = BlogPost.objects.filter(title=self.title)
        if duplicates:
            # Append the duplicate count; trim the title by the suffix's
            # digit count so title + '-' + suffix fits in 50 chars.
            suffix = str(len(duplicates))
            self.slug = slugify(self.title[:49 - len(suffix)] + '-' + suffix)
        else:
            self.slug = slugify(self.title[:50])
    self.modified = timezone.now()
    return super(BlogPost, self).save(*args, **kwargs)
def _add_test_methods(mcs, attrs, urlpatterns):
    # Generate one GET smoke-test method per URL pattern and install it
    # on the class attrs dict.
    # loop through every URL pattern
    for index, (func, regex, url_name) in enumerate(extract_views_from_urlpatterns(urlpatterns)):
        # Only keep views from the configured module (or its submodules).
        if func.__module__.startswith("%s." % attrs["module"]):
            pass
        elif func.__module__ == attrs["module"]:
            pass
        else:
            continue
        # Derive a printable view name: plain function, class-based view
        # instance, or a repr with the memory address stripped.
        if hasattr(func, "__name__"):
            func_name = func.__name__
        elif hasattr(func, "__class__"):
            func_name = "%s()" % func.__class__.__name__
        else:
            func_name = re.sub(r" at 0x[0-9a-f]+", "", repr(func))
        url_pattern = smart_text(simplify_regex(regex))
        # Test method name: test_<module>_<view>_<slugified url parts>.
        name = "_".join(
            ["test", func.__module__.replace(".", "_"), slugify("%s" % func_name)]
            + slugify(url_pattern.replace("/", "_") or "root").replace("_", " ").split()
        )
        # Substitute configured sample values for <placeholders> in the URL.
        url = url_pattern
        for key, value in attrs["variables"].items():
            url = url.replace("<%s>" % key, value)
        # bail out if we don't know how to visit this URL properly
        testfunc = unittest.skipIf(
            any(re.search(stop_pattern, url) for stop_pattern in [r"<.*>"]),
            "URL pattern %r contains stop pattern." % url,
        )(make_test_get_function(name, url, url_pattern))
        attrs[name] = testfunc
def letter_prefix(self):
    """Upper-cased first letter of the best available slugified name
    component (lastname, then othername, then firstname), or None when
    no component yields a non-empty slug."""
    for raw in (self.lastname, self.othername, self.firstname):
        if raw:
            candidate = slugify(raw)
            if candidate:
                return candidate.strip()[0].upper()
    return None
def org_structure(self):
    """Build the org structure list: one entry per OrgUnit, CostCentre,
    Location and SecondaryLocation, each carrying a generated email
    alias, an owner and the matching active users' email addresses.

    The four near-identical dict-building loops of the original are
    collapsed into a shared helper.
    """
    qs = DepartmentUser.objects.filter(**DepartmentUser.ACTIVE_FILTER)
    defaultowner = "*****@*****.**"

    def entry(obj, prefix, email, members_qs):
        # One structure row; members is the flat list of user emails.
        return {"id": 'db-{}_{}'.format(prefix, obj.pk),
                "name": str(obj),
                "email": email,
                "owner": getattr(obj.manager, "email", defaultowner),
                "members": [d[0] for d in members_qs.values_list("email")]}

    structure = []
    for obj in OrgUnit.objects.all():
        structure.append(entry(
            obj, 'org', slugify(obj.name),
            qs.filter(org_unit__in=obj.get_descendants(include_self=True))))
    for obj in CostCentre.objects.all():
        structure.append(entry(
            obj, 'cc', slugify(obj.name),
            qs.filter(cost_centre=obj)))
    for obj in Location.objects.all():
        structure.append(entry(
            obj, 'loc', slugify(obj.name) + "-location",
            qs.filter(org_unit__location=obj)))
    for obj in SecondaryLocation.objects.all():
        structure.append(entry(
            obj, 'locs', slugify(obj.name) + "-location",
            qs.filter(org_unit__secondary_location=obj)))

    # Qualify each alias with the domain of its first member's address.
    for row in structure:
        if row["members"]:
            row["email"] = "{}@{}".format(
                row["email"], row["members"][0].split("@", 1)[1])
    return structure
def save(self, *args, **kwargs):
    """Populate a missing slug from the title before saving."""
    self.slug = self.slug or slugify(self.title)
    super().save(*args, **kwargs)
def save(self, *args, **kwargs):
    """Slugify the name before saving, mapping the Turkish dotless 'ı'
    to 'i' so it survives slugification."""
    normalized = self.name.replace('ı', 'i')
    self.slug = slugify(normalized)
    # Call the real save() method.
    return super(Category, self).save(*args, **kwargs)
def get_absolute_url(self):
    """URL of the category-post view, keyed by this object's slug and
    slugified creation date."""
    url_kwargs = {'slug': self.slug, 'created': slugify(self.created_date)}
    return reverse('post:category-post', kwargs=url_kwargs)
def get_absolute_url(self):
    """URL of the post-detail view, keyed by this object's slug and
    slugified publishing date."""
    url_kwargs = {'slug': self.slug, 'publish': slugify(self.publishing_date)}
    return reverse('post:post-detail', kwargs=url_kwargs)
def get_user_url(self):
    """URL of the user-post view, keyed by the slugified username and
    slugified join date."""
    url_kwargs = {
        'username': slugify(self.user.username),
        'index': slugify(self.user.date_joined),
    }
    return reverse('post:user-post', kwargs=url_kwargs)
def save(self, *args, **kwargs):
    """Fill an empty slug from the key statement before saving."""
    self.slug = self.slug or slugify(self.key_statement)
    super().save(*args, **kwargs)
def create_slug_on_brand_wrapper(sender, instance, **kwargs):
    """Signal handler: assign a slug when the instance has none,
    suffixing the instance id when the plain title slug is taken."""
    if instance.slug:
        return
    base = slugify(instance.title)
    taken = BrandWrapper.objects.filter(slug=base).exists()
    instance.slug = f'{base}-{instance.id}' if taken else base
    instance.save()
def gener_slug(slug_text):
    """Unicode-aware slug with a unix-timestamp suffix for uniqueness."""
    base = slugify(slug_text, allow_unicode=True)
    timestamp = str(int(time()))
    return base + '-' + timestamp
def save(self, *args, **kwargs):
    """Re-derive the slug from the name on every save."""
    new_slug = slugify(self.name)
    self.slug = new_slug
    super().save(*args, **kwargs)
def import_config_context_schema(context_schema_data, repository_record, job_result, logger):
    """Using data from schema file, create or refresh a config context
    schema record in Nautobot, owned by the given Git repository.

    Raises RuntimeError when the file's _metadata has no name. Returns
    the schema record's name.
    """
    git_repository_content_type = ContentType.objects.get_for_model(GitRepository)
    created = False
    modified = False
    schema_metadata = context_schema_data.setdefault("_metadata", {})
    if not schema_metadata.get("name"):
        raise RuntimeError("File has no name set.")
    try:
        schema_record = ConfigContextSchema.objects.get(
            name=schema_metadata["name"],
            owner_content_type=git_repository_content_type,
            owner_object_id=repository_record.pk,
        )
    except ConfigContextSchema.DoesNotExist:
        schema_record = ConfigContextSchema(
            name=schema_metadata["name"],
            slug=slugify(schema_metadata["name"]),
            owner_content_type=git_repository_content_type,
            owner_object_id=repository_record.pk,
            data_schema=context_schema_data["data_schema"],
        )
        created = True
    # Sync mutable fields; flag 'modified' only when something changed.
    if schema_record.description != schema_metadata.get("description", ""):
        schema_record.description = schema_metadata.get("description", "")
        modified = True
    if schema_record.data_schema != context_schema_data["data_schema"]:
        schema_record.data_schema = context_schema_data["data_schema"]
        modified = True
    if created:
        schema_record.validated_save()
        job_result.log(
            "Successfully created config context schema",
            obj=schema_record,
            level_choice=LogLevelChoices.LOG_SUCCESS,
            grouping="config context schemas",
            logger=logger,
        )
    elif modified:
        schema_record.validated_save()
        job_result.log(
            "Successfully refreshed config context schema",
            obj=schema_record,
            level_choice=LogLevelChoices.LOG_SUCCESS,
            grouping="config context schemas",
            logger=logger,
        )
    else:
        job_result.log(
            "No change to config context schema",
            obj=schema_record,
            level_choice=LogLevelChoices.LOG_INFO,
            grouping="config context schemas",
            logger=logger,
        )
    # schema_record is always bound here; the original's
    # ``if schema_record else None`` guarded a case that cannot occur.
    return schema_record.name
def get_archived_file(cls, usr, url_id, mode='html', req=None, return_path=False):
    """Serve (or return a path/URL for) the archived media of a Library row.

    mode 'pdf'/'png'/'html' swaps the stored file's extension; mode
    'archive' picks the first non-document sibling file in the media
    directory and switches to streaming. HTML is rendered via
    format_html(); streamed media goes through a pickled uid->path cache;
    anything else is returned as a FileResponse (as attachment when not
    human-readable). 404 when the row or file is missing.
    """
    qset = Library.objects.filter(usr=usr, id=url_id)
    streaming_mode = False
    if not os.path.exists(settings.TMP_LOCATION):
        os.makedirs(settings.TMP_LOCATION)
    if qset:
        row = qset[0]
        media_path = row.media_path
        if mode in ['pdf', 'png', 'html'] and media_path:
            # Reuse the stored base name with the requested extension.
            fln, ext = media_path.rsplit('.', 1)
            if mode == 'pdf':
                media_path = fln + '.pdf'
            elif mode == 'png':
                media_path = fln + '.png'
            elif mode == 'html':
                media_path = fln + '.htm'
        elif mode == 'archive' and media_path:
            # Look for a sibling media file (anything that is not a
            # pdf/png/htm/html document) and stream the first non-empty one.
            mdir, _ = os.path.split(media_path)
            filelist = os.listdir(mdir)
            mlist = []
            extset = set(['pdf', 'png', 'htm', 'html'])
            for fl in filelist:
                ext = fl.rsplit('.', 1)
                if ext and ext[-1] not in extset:
                    mlist.append(os.path.join(mdir, fl))
            for mfile in mlist:
                if os.path.isfile(mfile) and os.stat(mfile).st_size:
                    media_path = mfile
                    streaming_mode = True
                    break
        if streaming_mode and req:
            # Honour the user's media_streaming preference.
            qlist = UserSettings.objects.filter(usrid=usr)
            if qlist and not qlist[0].media_streaming:
                streaming_mode = False
        if media_path and os.path.exists(media_path):
            mtype = guess_type(media_path)[0]
            if not mtype:
                mtype = 'application/octet-stream'
            # Build a download filename: append the file extension unless
            # the title already ends with the very same extension.
            ext = media_path.rsplit('.')[-1]
            if ext:
                filename = row.title + '.' + ext
                if '.' in row.title:
                    file_ext = row.title.rsplit('.', 1)[-1]
                    if ext == file_ext:
                        filename = row.title
            else:
                filename = row.title + '.bin'
            if mtype in ['text/html', 'text/htm']:
                data = cls.format_html(row, media_path)
                return HttpResponse(data)
            elif streaming_mode:
                # Load the shared uid->[path, timestamp] cache from disk.
                if os.path.isfile(cls.CACHE_FILE):
                    with open(cls.CACHE_FILE, 'rb') as fd:
                        cls.VIDEO_ID_DICT = pickle.load(fd)
                uid = str(uuid.uuid4())
                uid = uid.replace('-', '')
                while uid in cls.VIDEO_ID_DICT:
                    logger.debug("no unique ID, Generating again")
                    uid = str(uuid.uuid4())
                    uid = uid.replace('-', '')
                    time.sleep(0.01)
                cls.VIDEO_ID_DICT.update({uid: [media_path, time.time()]})
                # Newest entries first; evict the oldest past the limit.
                cls.VIDEO_ID_DICT.move_to_end(uid, last=False)
                if len(cls.VIDEO_ID_DICT) > settings.VIDEO_PUBLIC_LIST:
                    cls.VIDEO_ID_DICT.popitem()
                with open(cls.CACHE_FILE, 'wb') as fd:
                    pickle.dump(cls.VIDEO_ID_DICT, fd)
                if return_path:
                    title_slug = slugify(row.title, allow_unicode=True)
                    if settings.ROOT_URL_LOCATION:
                        root_loc = settings.ROOT_URL_LOCATION
                        if root_loc.startswith('/'):
                            root_loc = root_loc[1:]
                        return '{}/{}/getarchivedvideo/{}-{}'.format(
                            root_loc, usr.username, title_slug, uid)
                    else:
                        return '{}/getarchivedvideo/{}-{}'.format(
                            usr.username, title_slug, uid)
                else:
                    return cls.get_archived_video(req, usr.username, uid)
            else:
                # Plain file download (or inline view for readable types).
                response = FileResponse(open(media_path, 'rb'))
                mtype = 'video/webm' if mtype == 'video/x-matroska' else mtype
                response['mimetype'] = mtype
                response['content-type'] = mtype
                response['content-length'] = os.stat(media_path).st_size
                filename = filename.replace(' ', '.')
                logger.info('{} , {}'.format(filename, mtype))
                if not cls.is_human_readable(mtype) and not streaming_mode:
                    response[
                        'Content-Disposition'] = 'attachment; filename="{}"'.format(
                            quote(filename))
                return response
        else:
            # File gone: send the user back to the reader view.
            back_path = req.path_info.rsplit('/', 1)[0] + '/read'
            return render(req, 'archive_not_found.html', {'path': back_path})
    else:
        return HttpResponse(status=404)
def save(self, *args, **kwargs):
    """Set the slug from the name on first save only (no pk yet)."""
    if not self.pk:
        self.slug = slugify(self.name)
    super(Person, self).save(*args, **kwargs)
def pre_save_post_receiver(sender, instance, *args, **kwargs):
    """pre_save handler: slugify the problem code for brand-new problem
    codes; instances whose code already exists are left untouched."""
    already_known = Problem.objects.filter(
        problem_code=instance.problem_code).exists()
    if already_known:
        return
    instance.slug = slugify(instance.problem_code)
def save(self, *args, **kwargs):
    """Generate the slug from the name on the instance's first save."""
    if not self.id:
        self.slug = slugify(self.name)
    super(Category, self).save(*args, **kwargs)
def save(self, **kwargs):
    """Refresh the unicode-aware slug from the title on every save."""
    self.slug = slugify(self.title, allow_unicode=True)
    return super().save(**kwargs)
def generate_archive_media_playlist(cls, server, usr, directory):
    """Build an M3U playlist of a user's archived media in *directory*.

    For each library row that has a playable media file, a one-time public
    uid is minted and mapped to the file path in ``cls.VIDEO_ID_DICT``
    (persisted via pickle in ``cls.CACHE_FILE``); the playlist entries point
    at ``getarchivedvideo/<slug>-<uid>`` URLs. The playlist text itself is
    pickled to a temp file named by a fresh uid, and the returned value is
    the ``getarchivedplaylist`` URL path for that uid.

    NOTE(review): indentation reconstructed from a flattened source — the
    cache-file write is placed after the row loop (write-once), which matches
    the single-dump reading of the original; confirm against history.
    """
    qset = Library.objects.filter(usr=usr, directory=directory)
    pls_txt = '#EXTM3U\n'
    # Extensions that are never treated as streamable media.
    extset = set(['pdf', 'png', 'htm', 'html'])
    if not os.path.exists(settings.TMP_LOCATION):
        os.makedirs(settings.TMP_LOCATION)
    # Load the persisted uid -> [path, timestamp] map, if one exists.
    if os.path.isfile(cls.CACHE_FILE):
        with open(cls.CACHE_FILE, 'rb') as fd:
            cls.VIDEO_ID_DICT = pickle.load(fd)
    for row in qset:
        streaming_mode = False
        media_path = row.media_path
        media_element = row.media_element
        title = row.title
        if media_path and media_element:
            # Look for a sibling media file in the same directory whose
            # extension is not in the excluded set.
            mdir, _ = os.path.split(media_path)
            filelist = os.listdir(mdir)
            mlist = []
            for fl in filelist:
                ext = fl.rsplit('.', 1)
                if ext and ext[-1] not in extset:
                    mlist.append(os.path.join(mdir, fl))
            # First non-empty candidate wins and switches on streaming mode.
            for mfile in mlist:
                if os.path.isfile(mfile) and os.stat(mfile).st_size:
                    media_path = mfile
                    streaming_mode = True
                    break
        if media_path and os.path.exists(media_path):
            mtype = guess_type(media_path)[0]
            if not mtype:
                mtype = 'application/octet-stream'
            if streaming_mode:
                # Mint a collision-free uid (dashes stripped for URL use).
                uid = str(uuid.uuid4())
                uid = uid.replace('-', '')
                while uid in cls.VIDEO_ID_DICT:
                    logger.debug("no unique ID, Generating again")
                    uid = str(uuid.uuid4())
                    uid = uid.replace('-', '')
                    time.sleep(0.01)
                cls.VIDEO_ID_DICT.update({uid: [media_path, time.time()]})
                # Newest entry goes to the front; evict from the tail when
                # the public list exceeds its configured size.
                cls.VIDEO_ID_DICT.move_to_end(uid, last=False)
                if len(cls.VIDEO_ID_DICT) > settings.VIDEO_PUBLIC_LIST:
                    cls.VIDEO_ID_DICT.popitem()
                title_slug = slugify(title, allow_unicode=True)
                if settings.ROOT_URL_LOCATION:
                    root_loc = settings.ROOT_URL_LOCATION
                    if root_loc.startswith('/'):
                        root_loc = root_loc[1:]
                    return_path = '{}/{}/{}/getarchivedvideo/{}-{}'.format(
                        server, root_loc, usr.username, title_slug, uid)
                else:
                    return_path = '{}/{}/getarchivedvideo/{}-{}'.format(
                        server, usr.username, title_slug, uid)
                pls_txt = pls_txt + '#EXTINF:0, {0}\n{1}\n'.format(
                    title, return_path)
    # Persist the updated uid map for other request handlers.
    with open(cls.CACHE_FILE, 'wb') as fd:
        pickle.dump(cls.VIDEO_ID_DICT, fd)
    # Store the playlist text under a fresh uid in the temp location.
    uid = str(uuid.uuid4())
    uid = uid.replace('-', '')
    plfile = os.path.join(settings.TMP_LOCATION,
                          uid)
    if not os.path.isfile(plfile):
        with open(plfile, 'wb') as fd:
            pickle.dump(pls_txt, fd)
    pls_path = '{}/{}/getarchivedplaylist/{}/playlist/{}'.format(
        settings.ROOT_URL_LOCATION, usr.username, directory, uid)
    logger.debug(pls_path)
    return pls_path
def save(self, *args, **kwargs):
    """Save the image, auto-generating the slug from the title when empty.

    Slugifying here means users never have to type a slug manually.
    """
    has_slug = bool(self.slug)
    if not has_slug:
        self.slug = slugify(self.title)
    super(Image, self).save(*args, **kwargs)
def make_choices(choice):
    """Build a Django-style choices list from an iterable of labels.

    Each entry is lowercased; the stored value is its slug and the
    human-readable label is the lowercase text itself.

    Improvement: the manual lowercase-then-append loop is replaced with the
    idiomatic generator + comprehension pair (same output, same order).
    """
    labels = (str(raw).lower() for raw in choice)
    return [(slugify(label), label) for label in labels]
def save(self, *args, **kwargs):
    """Regenerate the slug from the (unicode) project name on every save."""
    name_text = unicode(self.name)
    self.slug = slugify(name_text)
    super(Project, self).save(*args, **kwargs)
def save(self, *args, **kwargs):
    """Persist the yoga class, refreshing its slug from the name first."""
    fresh_slug = slugify(self.name)
    self.slug = fresh_slug
    super(YogaClass, self).save(*args, **kwargs)
def popular_slug(apps, schema_editor):
    """Data-migration helper: backfill ``slug`` from ``titulo`` on every Modulo."""
    # Use the historical model so the migration stays valid over time.
    Modulo = apps.get_model('modulos', 'Modulo')
    all_modules = Modulo.objects.all()
    for record in all_modules:
        record.slug = slugify(record.titulo)
        record.save()
def save(self, *args, **kwargs):
    """Custom save that always refreshes the ``slug`` field from the title."""
    title_slug = slugify(self.title)
    self.slug = title_slug
    super(Post, self).save(*args, **kwargs)
def save(self, *args, **kwargs):
    """Save the profile, deriving a slug from the linked user when unset."""
    slug_missing = not self.slug
    if slug_missing:
        self.slug = slugify(self.user)
    super(Profile, self).save(*args, **kwargs)
def save(self, *args, **kwargs):
    """Refresh the slug and dumped-data folder, then delegate the save."""
    name_text = unicode(self.name)
    self.slug = slugify(name_text)
    # Recompute where this index's dumped data lives.
    self.data_folder = self.get_dumped_data_path()
    super(Index, self).save(*args, **kwargs)
def get_absolute_url(self):
    """Return the canonical relative URL: ``<slugified-title>/<id>``."""
    return "{}/{}".format(slugify(self.title), self.id)
def save(self, *args, **kwargs):
    """Persist the post, regenerating its ``url`` from the title each time."""
    slug_for_url = slugify(self.title)
    self.url = slug_for_url
    super(Post, self).save(*args, **kwargs)
def save(self, force_insert=False, force_update=False, using=None,
         update_fields=None):
    """Persist the post after refreshing its slug from ``post_title``.

    Bug fix: the original override assigned ``self.slug`` but never called
    the parent ``save``, so the row was never written to the database.
    All keyword arguments are now forwarded to ``Model.save`` unchanged.
    """
    self.slug = slugify(self.post_title)
    super().save(force_insert=force_insert, force_update=force_update,
                 using=using, update_fields=update_fields)
def save(self, *args, **kwargs):
    """Save the meal, filling in the slug from the name when one is needed."""
    # Only generate a slug when a name exists and no slug was provided.
    if self.name and not self.slug:
        self.slug = slugify(self.name)
    super(Meals, self).save(*args, **kwargs)
def make_colors(self):
    """Seed 'Material' and 'Color' attributes for the souvenir product type,
    attach attribute values parsed from ``products_en.json``, link them to
    products/variants, and translate attribute names into PL/UK/RU.

    The product/variant ``attributes`` field is written as an hstore-style
    string ``"<attr_id>"=>"<value_id>"``.

    NOTE(review): indentation reconstructed from a flattened source; the
    routine is an idempotent-ish upsert script (get-then-update, create on
    DoesNotExist) with deliberate statement ordering — left token-identical.
    """
    #################################################################
    ####        Product Material Attribute creating              ####
    #################################################################
    data_folder = os.path.join(PROJECT_ROOT, 'saleor', 'api_par_com', 'resources', 'json_file')
    # Anchor both attributes on the "Сувенірна продукція" (souvenir) type.
    product_type = ProductType.objects.get(name='Сувенірна продукція').id
    # Empty update dict: the "edit" path currently changes nothing —
    # the intended fields are kept here commented out.
    attr_update = {
        # "name": 'Material',
        # "slug": 'material',
        # "product_type_id": product_type,
        # "product_variant_type_id": product_type
    }
    try:
        # Update path: an attribute already bound to this product type.
        attribute = Attribute.objects.get(product_type_id=product_type)
        for key, value in attr_update.items():
            setattr(attribute, key, value)
        attribute.save()
        display_format = "\nAttribute, {}, has been edited."
        # print(display_format.format(attribute))
    except Attribute.DoesNotExist:
        # Create path: first run — make the 'Material' product attribute.
        attr_create = {
            "name": 'Material',
            "slug": 'material',
            "product_type_id": product_type,
            # "product_variant_type_id": product_type
        }
        attr_create.update(attr_update)
        attribute = Attribute(**attr_create)
        attribute.save()
        display_format = "\nAttribbute, {}, has been created."
        print(display_format.format(attribute))
    # Same upsert for the variant-level 'Color' attribute.
    attr_update = {
        # "name": 'Color',
        # "slug": 'color',
        # "product_type_id": product_type,
        # "product_variant_type_id": product_type
    }
    try:
        attribute = Attribute.objects.get(product_variant_type_id=product_type)
        for key, value in attr_update.items():
            setattr(attribute, key, value)
        attribute.save()
        display_format = "\nAttribute, {}, has been edited."
        # print(display_format.format(attribute))
    except Attribute.DoesNotExist:
        attr_create = {
            "name": 'Color',
            "slug": 'color',
            # "product_type_id": product_type,
            "product_variant_type_id": product_type
        }
        attr_create.update(attr_update)
        attribute = Attribute(**attr_create)
        attribute.save()
        display_format = "\nAttribbute, {}, has been created."
        print(display_format.format(attribute))
    ####################################################################################
    #                         Product material attributes                              #
    ####################################################################################
    with open(os.path.join(data_folder, "products_en.json"), encoding='utf-8') as data_file:
        data = json.loads(data_file.read())
        for data_object in data['products']['product']:
            # 'material_wykonania' is the (Polish-keyed) material field.
            id = data_object.get('id', None)
            material = data_object.get('material_wykonania', None)
            attr_id = Attribute.objects.get(name='Material').id
            # NOTE(review): slugify is called before the `if material` guard,
            # so a missing material is slugified as None — confirm intended.
            attr_slug = slugify(material)
            if material:
                # Upsert the AttributeValue keyed by its slug.
                attr_update = {
                    "name": material,
                    "attribute_id": attr_id,
                    "slug": attr_slug,
                    "value": material
                }
                try:
                    attribute = AttributeValue.objects.get(slug=attr_slug)
                    for key, value in attr_update.items():
                        setattr(attribute, key, value)
                    attribute.save()
                    display_format = "\nMaterial-attribute, {}, has been edited."
                    print(display_format.format(attribute))
                except AttributeValue.DoesNotExist:
                    attr_create = {
                        "name": material,
                        "attribute_id": attr_id,
                        "slug": attr_slug,
                        "value": material
                    }
                    attr_create.update(attr_update)
                    attribute = AttributeValue(**attr_create)
                    attribute.save()
                    display_format = "\nMaterial-attribute, {}, has been created."
                    print(display_format.format(attribute))
                # Link the value onto the matching Product via the
                # hstore-style '"attr_id"=>"value_id"' string.
                try:
                    products = Product.objects.get(id=id)
                    # print(type(products))
                    # for product in products:
                    attr_val_id = AttributeValue.objects.get(name=material).id
                    ai = str(attr_id)
                    avi = str(attr_val_id)
                    out = '"' + ai + '"=>"' + avi + '"'
                    prod_attr_upd = {
                        "attributes": out
                    }
                    try:
                        obj = Product.objects.get(id=products.id)
                        for key, value in prod_attr_upd.items():
                            setattr(obj, key, value)
                        obj.save()
                        display_format = "\nMaterial-attribute, {}, has been edited."
                        print(display_format.format(obj))
                    except Product.DoesNotExist:
                        prod_attr_crt = {
                            "attributes": out
                        }
                        prod_attr_crt.update(prod_attr_upd)
                        obj = Product(**prod_attr_crt)
                        obj.save()
                        display_format = "\nMaterial-attribute, {}, has been created."
                        print(display_format.format(obj))
                except Product.DoesNotExist:
                    # No product with this external id — skip silently.
                    pass
            else:
                pass
    #################################################################
    ####          Product Color Attribute creating               ####
    #################################################################
    data_folder = os.path.join(PROJECT_ROOT, 'saleor', 'api_par_com', 'resources', 'json_file')
    product_type = ProductType.objects.get(name='Сувенірна продукція').id
    attr_update = {
        # "name": 'Color',
        # "slug": 'color',
        # "product_type_id": product_type,
        # "product_variant_type_id": product_type
    }
    try:
        attribute = Attribute.objects.get(product_variant_type_id=product_type)
        for key, value in attr_update.items():
            setattr(attribute, key, value)
        attribute.save()
        display_format = "\nAttribute, {}, has been edited."
        # print(display_format.format(attribute))
    except Attribute.DoesNotExist:
        attr_create = {
            "name": 'Color',
            "slug": 'color',
            # "product_type_id": product_type,
            "product_variant_type_id": product_type
        }
        attr_create.update(attr_update)
        attribute = Attribute(**attr_create)
        attribute.save()
        display_format = "\nColor-attribute, {}, has been created."
        print(display_format.format(attribute))
    ####################################################################################
    #                          Product color attributes                                #
    ####################################################################################
    with open(os.path.join(data_folder, "products_en.json"), encoding='utf-8') as data_file:
        data = json.loads(data_file.read())
        for data_object in data['products']['product']:
            # 'kolor_podstawowy' is the (Polish-keyed) base-color field.
            id = data_object.get('id', None)
            color = data_object.get('kolor_podstawowy', None)
            attr_id = Attribute.objects.get(name='Color').id
            # NOTE(review): unlike the material pass, there is no
            # `if color:` guard here — a missing color is upserted as None.
            attr_slug = slugify(color)
            color_attr_update = {
                "name": color,
                "attribute_id": attr_id,
                "slug": attr_slug,
                "value": color
            }
            try:
                attribute = AttributeValue.objects.get(slug=attr_slug)
                for key, value in color_attr_update.items():
                    setattr(attribute, key, value)
                attribute.save()
                display_format = "\nColor-attribute, {}, has been edited."
                print(display_format.format(attribute))
            except AttributeValue.DoesNotExist:
                color_attr_create = {
                    "name": color,
                    "attribute_id": attr_id,
                    "slug": attr_slug,
                    "value": color
                }
                color_attr_create.update(color_attr_update)
                attribute = AttributeValue(**color_attr_create)
                attribute.save()
                display_format = "\nColor-attribute, {}, has been created."
                print(display_format.format(attribute))
    # Attach color values to variants. NOTE(review): `attribute` below is
    # assigned but unused, and `attr_id` is stale from the loop above; the
    # quantity=123456789 exclusion looks like a sentinel value — confirm.
    attribute = Attribute.objects.get(name="Color")
    variants = ProductVariant.objects.all().exclude(sku__contains="-", quantity=123456789)
    for variant in variants:
        attr_val_id = AttributeValue.objects.get(name=variant.name).id
        ai = str(attr_id)
        avi = str(attr_val_id)
        out = '"' + ai + '"=>"' + avi + '"'
        prod_attr_upd = {
            "attributes": out
        }
        try:
            obj = ProductVariant.objects.get(id=variant.id)
            for key, value in prod_attr_upd.items():
                setattr(obj, key, value)
            obj.save()
            display_format = "\nKey, {}, has been edited."
            print(display_format.format(obj))
        except ProductVariant.DoesNotExist:
            prod_attr_crt = {
                "attributes": out
            }
            prod_attr_crt.update(prod_attr_upd)
            obj = ProductVariant(**prod_attr_crt)
            obj.save()
            display_format = "\nKey, {}, has been created."
            print(display_format.format(obj))
    #######################################################################
    #                            Translation                              #
    #######################################################################
    translator = Translator(service_urls=[
        'translate.google.com',
        'translate.google.co.kr',
    ])
    #######################################################################
    #                       Attribute Translation                         #
    #######################################################################
    # NOTE(review): the loop variable shadows the queryset name `attr_id`.
    attr_id = Attribute.objects.all()
    for attr_id in attr_id:
        ########### Polska ##############################
        pl_name = attr_id.name
        language_code = 'pl'
        # print(pl_color)
        pl_attr_update = {
            # "name": pl_color,
            # "language_code": 'pl',
            # "product_type_id": product_type,
            # "attribute_id": attr_id.id
        }
        try:
            # Existing translation: the (empty) update dict is a no-op save.
            attribute = AttributeTranslation.objects.get(attribute_id=attr_id.id, language_code=language_code)
            for key, value in pl_attr_update.items():
                setattr(attribute, key, value)
            attribute.save()
            # display_format = "\nAttribute, {}, has been edited."
            # print(display_format.format(attribute))
        except AttributeTranslation.DoesNotExist:
            try:
                # Only hit the translate service when no translation exists.
                pl_color = translator.translate(pl_name, dest=language_code).text
                pl_attr_create = {
                    "name": pl_color,
                    "language_code": language_code,
                    # "product_type_id": product_type,
                    "attribute_id": attr_id.id
                }
                pl_attr_create.update(pl_attr_update)
                attribute = AttributeTranslation(**pl_attr_create)
                attribute.save()
                display_format = "\nAttribbute, {}, has been created."
                print(display_format.format(attribute))
            except ValueError:  # includes simplejson.decoder.JSONDecodeError
                print('Decoding JSON has failed PL', attribute.id)
        ############ Ukrainian ##########################
        uk_name = attr_id.name
        language_code = 'uk'
        # print(pl_color)
        uk_attr_update = {
            # "name": uk_color,
            # "language_code": 'uk',
            # "product_type_id": product_type,
            # "attribute_id": attr_id.id
        }
        try:
            attribute = AttributeTranslation.objects.get(attribute_id=attr_id.id, language_code=language_code)
            for key, value in uk_attr_update.items():
                setattr(attribute, key, value)
            attribute.save()
            # display_format = "\nAttribute, {}, has been edited."
            # print(display_format.format(attribute))
        except AttributeTranslation.DoesNotExist:
            try:
                uk_color = translator.translate(uk_name, dest=language_code).text
                uk_attr_create = {
                    "name": uk_color,
                    "language_code": language_code,
                    # "product_type_id": product_type,
                    "attribute_id": attr_id.id
                }
                uk_attr_create.update(uk_attr_update)
                attribute = AttributeTranslation(**uk_attr_create)
                attribute.save()
                display_format = "\nAttribbute, {}, has been created."
                print(display_format.format(attribute))
            except ValueError:  # includes simplejson.decoder.JSONDecodeError
                print('Decoding JSON has failed UK', attribute.id)
        ############ Russian ##########################
        ru_name = attr_id.name
        language_code = 'ru'
        # print(pl_color)
        ru_attr_update = {
            # "name": ru_color,
            # "language_code": 'ru',
            # "product_type_id": product_type,
            # "attribute_id": attr_id.id
        }
        try:
            attribute = AttributeTranslation.objects.get(attribute_id=attr_id.id, language_code=language_code)
            for key, value in ru_attr_update.items():
                setattr(attribute, key, value)
            attribute.save()
            # display_format = "\nAttribute, {}, has been edited."
            # print(display_format.format(attribute))
        except AttributeTranslation.DoesNotExist:
            try:
                ru_color = translator.translate(ru_name, dest=language_code).text
                ru_attr_create = {
                    "name": ru_color,
                    "language_code": language_code,
                    # "product_type_id": product_type,
                    "attribute_id": attr_id.id
                }
                ru_attr_create.update(ru_attr_update)
                attribute = AttributeTranslation(**ru_attr_create)
                attribute.save()
                display_format = "\nAttribbute, {}, has been created."
                print(display_format.format(attribute))
            except ValueError:  # includes simplejson.decoder.JSONDecodeError
                print('Decoding JSON has failed RU', attribute.id)
    #######################################################################
    #                    Attribute Value Translation                      #
    #######################################################################
    # Same three-language upsert, now for AttributeValue rows.
    attr_value_id = AttributeValue.objects.all()
    for attr_id in attr_value_id:
        ########### Polska ##############################
        pl_name = attr_id.name
        language_code = 'pl'
        # print(pl_color)
        pl_attr_update = {
            # "name": pl_color,
            # "language_code": 'pl',
            # "product_type_id": product_type,
            # "attribute_value_id": attr_id.id
        }
        try:
            attribute = AttributeValueTranslation.objects.get(attribute_value_id=attr_id.id, language_code=language_code)
            for key, value in pl_attr_update.items():
                setattr(attribute, key, value)
            attribute.save()
            # display_format = "\nAttribute, {}, has been edited."
            # print(display_format.format(attribute))
        except AttributeValueTranslation.DoesNotExist:
            try:
                pl_color = translator.translate(pl_name, dest=language_code).text
                pl_attr_create = {
                    "name": pl_color,
                    "language_code": language_code,
                    # "product_type_id": product_type,
                    "attribute_value_id": attr_id.id
                }
                pl_attr_create.update(pl_attr_update)
                attribute = AttributeValueTranslation(**pl_attr_create)
                attribute.save()
                display_format = "\nAttribbute, {}, has been created."
                print(display_format.format(attribute))
            except ValueError:  # includes simplejson.decoder.JSONDecodeError
                print('Decoding JSON has failed PL', attribute.id)
        ############ Ukrainian ##########################
        uk_name = attr_id.name
        language_code = 'uk'
        # print(pl_color)
        uk_attr_update = {
            # "name": uk_color,
            # "language_code": 'uk',
            # "product_type_id": product_type,
            # "attribute_value_id": attr_id.id
        }
        try:
            attribute = AttributeValueTranslation.objects.get(attribute_value_id=attr_id.id, language_code=language_code)
            for key, value in uk_attr_update.items():
                setattr(attribute, key, value)
            attribute.save()
            # display_format = "\nAttribute, {}, has been edited."
            # print(display_format.format(attribute))
        except AttributeValueTranslation.DoesNotExist:
            try:
                uk_color = translator.translate(uk_name, dest=language_code).text
                uk_attr_create = {
                    "name": uk_color,
                    "language_code": language_code,
                    # "product_type_id": product_type,
                    "attribute_value_id": attr_id.id
                }
                uk_attr_create.update(uk_attr_update)
                attribute = AttributeValueTranslation(**uk_attr_create)
                attribute.save()
                display_format = "\nAttribbute, {}, has been created."
                print(display_format.format(attribute))
            except ValueError:  # includes simplejson.decoder.JSONDecodeError
                print('Decoding JSON has failed UK', attribute.id)
        ############ Russian ##########################
        ru_name = attr_id.name
        language_code = 'ru'
        # print(pl_color)
        ru_attr_update = {
            # "name": ru_color,
            # "language_code": 'ru',
            # "product_type_id": product_type,
            # "attribute_value_id": attr_id.id
        }
        try:
            attribute = AttributeValueTranslation.objects.get(attribute_value_id=attr_id.id, language_code=language_code)
            for key, value in ru_attr_update.items():
                setattr(attribute, key, value)
            attribute.save()
            # display_format = "\nAttribute, {}, has been edited."
            # print(display_format.format(attribute))
        except AttributeValueTranslation.DoesNotExist:
            try:
                ru_color = translator.translate(ru_name, dest=language_code).text
                ru_attr_create = {
                    "name": ru_color,
                    "language_code": language_code,
                    # "product_type_id": product_type,
                    "attribute_value_id": attr_id.id
                }
                ru_attr_create.update(ru_attr_update)
                attribute = AttributeValueTranslation(**ru_attr_create)
                attribute.save()
                display_format = "\nAttribbute, {}, has been created."
                print(display_format.format(attribute))
            except ValueError:  # includes simplejson.decoder.JSONDecodeError
                print('Decoding JSON has failed RU', attribute.id)
def save(self, *args, **kwargs):
    """Save the post, minting a slug from author + timestamp when unset."""
    if not self.slug:
        # Author plus current time makes the slug effectively unique.
        raw = '{}_{}'.format(self.author, timezone.now())
        self.slug = slugify(raw)
    return super(Post, self).save(*args, **kwargs)