def _synonym(self, item):
    tag = self._find_tag(item['id'], item['name'])
    properTags = Tag.objects.filter(name=item['proper_name'])
    if len(properTags) > 0:
        properTag = properTags[0]
    else:
        properTag = None
    if tag is not None:
        if properTag is None:
            # got the synonym tag but not the proper tag
            # create the proper tag - so we can link it to the synonym
            properTag = Tag(name=item['proper_name'])
            properTag.save()
        if properTag is not None and properTag != tag:
            # got the synonym tag and the proper tag - just create the synonym
            try:
                ts = TagSynonym(tag=properTag, synonym_tag=tag)
                if self._options['nodryrun']:
                    ts.save()
                self.stdout.write('done')
            except IntegrityError as e:
                if str(e) == 'column synonym_tag_id is not unique':
                    self.stdout.write('-- warning --')
                    self.stdout.write('synonym already exists')
                    self.stdout.write('--------------------')
                else:
                    raise
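# For context, a minimal sketch of the `_find_tag` helper referenced above.
# This is an assumption -- the real implementation is not shown in this file:
def _find_tag(self, tag_id, name):
    # Assumed behavior: prefer a primary-key lookup, fall back to the name.
    try:
        return Tag.objects.get(pk=tag_id)
    except Tag.DoesNotExist:
        return Tag.objects.filter(name=name).first()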
def setUp(self):
    self.tags = []
    with open(os.path.join(os.path.dirname(__file__), 'test_tags.txt')) as tag_file:
        for line in tag_file:
            name, count = line.rstrip().split()
            tag = Tag(name=name)
            tag.count = int(count)
            self.tags.append(tag)
def setUp(self):
    self.tags = []
    for line in default_tags:
        name, count = line.rstrip().split()
        tag = Tag(name=name)
        tag.count = int(count)
        self.tags.append(tag)
def testTagClouds(self):
    tags = []
    with open(os.path.join(os.path.dirname(__file__), 'tags.txt')) as tag_file:
        for line in tag_file:
            name, count = line.rstrip().split()
            tag = Tag(name=name)
            tag.count = int(count)
            tags.append(tag)
    sizes = {}
    for tag in calculate_cloud(tags, steps=5):
        sizes[tag.font_size] = sizes.get(tag.font_size, 0) + 1
    # This isn't a pre-calculated test, just making sure it's consistent
    self.assertEqual({1: 48, 2: 30, 3: 19, 4: 15, 5: 10}, sizes)
    sizes = {}
    for tag in calculate_cloud(tags, steps=5, distribution=LINEAR):
        sizes[tag.font_size] = sizes.get(tag.font_size, 0) + 1
    # This isn't a pre-calculated test, just making sure it's consistent
    self.assertEqual({1: 97, 2: 12, 3: 7, 4: 2, 5: 4}, sizes)
    self.assertRaises(ValueError, calculate_cloud, tags,
                      steps=5, distribution='cheese')
def setUp(self):
    self.tags = []
    with open(os.path.join(os.path.dirname(__file__), 'tags.txt')) as tag_file:
        for line in tag_file:
            name, count = line.rstrip().split()
            tag = Tag(name=name)
            tag.count = int(count)
            self.tags.append(tag)
def list_tag(request, name, **kwargs):
    try:
        tag = Tag.objects.get(name=name)
    except Tag.DoesNotExist:
        # Fall back to an unsaved tag so the template still has a name to show.
        tag = Tag()
        tag.name = name
    queryset = SoftwareCollection.tagged.with_all(tag)
    dictionary = {'tag': tag}
    return _list(request, 'scls/list_tag.html', queryset, dictionary, **kwargs)
def list_tag(request, name, **kwargs):
    try:
        tag = Tag.objects.get(name=name)
    except Tag.DoesNotExist:
        # Fall back to an unsaved tag so the template still has a name to show.
        tag = Tag()
        tag.name = name
    queryset = SoftwareCollection.tagged.with_all(tag).filter(has_content=True)
    dictionary = {'tag': tag}
    return _list(request, 'scls/list_tag.html', queryset, dictionary, **kwargs)
def test_search_by_not_tag(self):
    client = self.client
    self.login(client)
    unused_tag = Tag(name="Unused")
    unused_tag.save()
    response = client.post('/contacts/', {
        'search_term': "My",
        "tags": [unused_tag.id],
    })
    self.assertContains(response, 'No Results found.')
def TagsView(request, template_name="news/tags_list.html"):
    if request.method == 'POST':
        for field in request.POST:
            region_field = field.split('_')
            tag_name = request.POST[field]
            if region_field[0] == 'tag' and tag_name != 'None':
                # What region are we dealing with?
                if region_field[1] == '0':
                    region = None  # 0 = All
                else:
                    region = Region.objects.get(id=region_field[1])
                # Get region tag if available
                if region is not None:
                    tag = list(Tag.objects.get_for_object(region))
                else:
                    tag = [get_redmap_tag()]
                if not tag:
                    # create tag if not exists
                    tag = Tag()
                    tag.name = tag_name
                    tag.save()
                elif tag[0].name != tag_name:
                    # update tag if different
                    tag[0].name = tag_name
                    tag[0].save()
                    # TODO: If we update a tag, we will need to update zinnia
                    # entries with updated tags because the app stores
                    # tags in a plain-text string rather than by model
                    # association
                if region is not None:
                    # attach tag to region
                    Tag.objects.update_tags(region, tag_name + ',')
        return HttpResponseRedirect(reverse('news_tags_list'))
    return render(request, template_name)
def add_tag(request, tag_id=False, project_id=False, task_id=False):
    """Provides a page where tags can be created by the user."""
    if request.method == 'POST':
        postData = deepcopy(request.POST)
        if 'public' not in request.POST:
            postData['public'] = 'off'
        form = AddTagForm(postData)
        if form.is_valid():
            if not tag_id:
                newTag = Tag()
            else:
                newTag = Tag.objects.get(id=tag_id)
            newTag.name = form.cleaned_data['name']
            newTag.description = form.cleaned_data['description']
            newTag.public = form.cleaned_data['public']
            newTag.owner = request.user
            if 'public' not in request.POST:
                newTag.public = False
            newTag.save()
            if task_id:
                task = ProjectTask.objects.get(id=task_id)
                task.tags.add(newTag)
                task.save()
            if project_id:
                project = Project.objects.get(id=project_id)
                project.tags.add(newTag)
                project.save()
            if 'returnUrl' in request.session:
                return HttpResponseRedirect(request.session['returnUrl'])
            else:
                return HttpResponseRedirect('/tags/' + str(newTag.id))
        else:
            pageData = {
                'form': form,
                'postTo': request.get_full_path(),
            }
            messages.warning(request, "Please check your inputs.")
            return render_to_response('tag_create.html',
                                      RequestContext(request, pageData))
    else:
        form = AddTagForm()
        args = {
            'form': form,
            'postTo': request.get_full_path(),
        }
        return render_to_response('tag_create.html',
                                  RequestContext(request, args))
def _tag_usage_for_queryset(model, counts=False, min_count=None,
                            extra_joins=None, extra_criteria=None, params=None):
    """
    Perform the custom SQL query for ``usage_for_model`` and
    ``usage_for_queryset``.
    """
    from tagging.models import Tag
    if params is None:
        params = []
    if min_count is not None:
        counts = True
    model_table = qn(model._meta.db_table)
    model_pk = '%s.%s' % (model_table, "object_id")
    query = """
    SELECT DISTINCT %(tag)s.id, %(tag)s.name%(count_sql)s
    FROM %(tag)s
    INNER JOIN %(tagged_item)s ON %(tag)s.id = %(tagged_item)s.tag_id
    INNER JOIN %(model)s ON %(tagged_item)s.object_id = %(model_pk)s
    %%s
    WHERE %(tagged_item)s.content_type_id = %(model)s.content_type_id
    %%s
    GROUP BY %(tag)s.id, %(tag)s.name
    %%s
    ORDER BY %(tag)s.name ASC""" % {
        'tag': qn(Tag._meta.db_table),
        'count_sql': counts and (', COUNT(%s)' % model_pk) or '',
        'tagged_item': qn(TaggedItem._meta.db_table),
        'model': model_table,
        'model_pk': model_pk,
    }
    min_count_sql = ''
    if min_count is not None:
        min_count_sql = 'HAVING COUNT(%s) >= %%s' % model_pk
        params.append(min_count)
    cursor = connection.cursor()
    cursor.execute(query % (extra_joins, extra_criteria, min_count_sql), params)
    tags = []
    for row in cursor.fetchall():
        t = Tag(*row[:2])
        if counts:
            t.count = row[2]
        tags.append(t)
    return calculate_cloud(tags, 4, LOGARITHMIC)
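# A minimal usage sketch for the helper above. `SomeThroughModel` is
# hypothetical -- the query expects a model whose table carries the
# object_id/content_type_id columns it joins on. Empty strings fill the
# extra-join and extra-criteria %s slots, yielding the unfiltered cloud.
def tag_cloud_sketch(min_count=2):
    cloud = _tag_usage_for_queryset(SomeThroughModel, counts=True,
                                    min_count=min_count,
                                    extra_joins='', extra_criteria='',
                                    params=[])
    for tag in cloud:
        print(tag.name, tag.count, tag.font_size)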
def create_tag(request, user_id):
    if request.method != 'POST':
        error_dict = {"message": "Method not allowed"}
        error_data = json.dumps(error_dict)
        # HttpResponseNotAllowed takes the list of permitted methods first;
        # the response body follows as a positional argument.
        return HttpResponseNotAllowed(['POST'], error_data,
                                      content_type="application/json")
    received_json_data = json.loads(request.body)
    form = TagForm(received_json_data)
    if form.is_valid():
        new_tag = Tag(tag_name=form.cleaned_data['tag_name'])
        new_tag.user = request.user
        new_tag.save()
        data = json.dumps(new_tag.to_dict())
        logger.info("New Tag created name: %s id: %s",
                    new_tag.tag_name, new_tag.id)
        return HttpResponse(data)
    else:
        logger.error("Invalid Tag Form. %s", form.errors)
        error_data = json.dumps(form.errors)
        return HttpResponseBadRequest(error_data)
def test_tag_title_non_ascii(self):
    entry = self.create_published_entry()
    tag_unicode = smart_text('accentué')
    entry.tags = tag_unicode
    entry.save()
    feed = TagEntries()
    tag = Tag(name=tag_unicode)
    self.assertEqual(feed.get_title(tag),
                     'Entries for the tag %s' % tag_unicode)
    self.assertEqual(feed.description(tag),
                     'The last entries tagged with %s' % tag_unicode)
def test_tag_entries(self):
    self.create_published_entry()
    feed = TagEntries()
    tag = Tag(name='tests')
    self.assertEqual(feed.get_object('request', 'tests').name, 'tests')
    self.assertEqual(len(feed.items('tests')), 1)
    self.assertEqual(feed.link(tag), '/tags/tests/')
    self.assertEqual(feed.get_title(tag),
                     'Entries for the tag %s' % tag.name)
    self.assertEqual(feed.description(tag),
                     'The latest entries for the tag %s' % tag.name)
def test_tag_gbobjects(self):
    self.create_published_gbobject()
    feed = TagGbobjects()
    tag = Tag(name='tests')
    self.assertEqual(feed.get_object('request', 'tests').name, 'tests')
    self.assertEqual(len(feed.items('tests')), 1)
    self.assertEqual(feed.link(tag), '/tags/tests/')
    self.assertEqual(feed.title(tag),
                     _('Gbobjects for the tag %s') % tag.name)
    self.assertEqual(feed.description(tag),
                     _('The latest gbobjects for the tag %s') % tag.name)
def import_tags():
    fields = ['id', 'name', 'alias']
    sql = "SELECT %s FROM jos_tegs" % (', '.join(fields))
    cursor = db.cursor()
    cursor.execute(sql)
    all_tags = cursor.fetchall()
    count = 0
    bar = progressbar.ProgressBar(maxval=len(all_tags), widgets=[
        'import tags: ',
        progressbar.SimpleProgress(),
    ]).start()
    for row in all_tags:
        Tag(pk=int(row[0]), name=str(row[1]), slug=str(row[2])).save()
        count += 1
        bar.update(count)
    bar.finish()
def prepare(self, ids):  # change to 'compile'!
    if trace:
        print('PREPARE SOUP', ids)
    if not self.head:
        self.html.insert(0, Tag(self, 'head'))
    if 'base' in ids:
        # set up base tag - get from request, at render/call-time
        ids.remove('base')
        if self.base:
            self.base['href'] = '%(base)s'
        else:
            self.head.insert(0, '<base href="%(base)s" />')
    if 'title' in ids:
        ids.remove('title')
        if self.title:
            self.title.contents = '%(title)s'
        else:
            self.head.insert(1, '<title>%(title)s</title>')
    # if 'meta' in ids:  # need to enh to deal with meta kw, meta desc, etc
    if 'header_extras' in ids:
        ids.remove('header_extras')
        self.head.append('%(header_extras)s')
        # bsoup 3.04 requires this:
        # self.head.insert(99, '%(header_extras)s')
        # print self.head
    if 'footer_extras' in ids:
        ids.remove('footer_extras')
        self.body.append('%(footer_extras)s')
    kw = dict([(id, '%(' + id + ')s') for id in ids])
    # WARNING: Don't we want to double-up the %s => %%?
    # could do it this way:
    # for k in 'doctype base title meta header_extras footer_extras'.split():
    #     if k in kw:
    #         v = kw.pop(k)
    self.replaceIds(**kw)
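# A hypothetical render-time counterpart to prepare() above: once the ids have
# been rewritten to %(...)s placeholders, the serialized soup can be filled in
# with a plain mapping via %-formatting. The variable names and values here
# are illustrative only, not part of the original code:
page = str(soup) % {
    'base': 'http://example.com/',
    'title': 'Home',
    'header_extras': '',
    'footer_extras': '',
}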
def form_valid(self, form: TagForm) -> HttpResponseRedirect:
    social_token: SocialToken = SocialToken.objects.get(
        account__user=self.request.user.id)
    api = Twitter(
        social_token.app.client_id,
        social_token.app.secret,
        social_token.token,
        social_token.token_secret,
    )
    tag = api.create_list(
        form.cleaned_data["name"],
        form.cleaned_data["description"],
        form.cleaned_data["is_private"],
    )
    Tag(list_id=tag.id,
        account=social_token.account,
        name=form.cleaned_data["name"]).save()
    return super().form_valid(form)
def test_location(self):
    tag = Tag(name="meeker")
    self.assertEqual('/tag/meeker/', self.sitemap.location(tag))
def ingest_edits(cls, json_batch):
    # Map from (toolid, uid) to Batch object
    batches = {}
    model_edits = []
    reverted_ids = []
    new_tags = defaultdict(set)
    tools = Tool.objects.all()
    for edit_json in json_batch:
        if not edit_json:
            continue
        timestamp = datetime.fromtimestamp(edit_json['timestamp'], tz=UTC)

        # First, check if this is a revert
        revert_match = cls.reverted_re.match(edit_json['comment'])
        if revert_match:
            reverted_ids.append(int(revert_match.group(1)))

        # Otherwise, try to match the edit with a tool
        match = None
        matching_tool = None
        for tool in tools:
            match = tool.match(edit_json['user'], edit_json['comment'])
            if match is not None:
                matching_tool = tool
                break
        if match is None:
            continue

        # Try to find an existing batch for that edit
        batch_key = (matching_tool.shortid, match.uid)
        batch = batches.get(batch_key)
        created = False
        if not batch:
            batch, created = Batch.objects.get_or_create(
                tool=matching_tool,
                uid=match.uid,
                defaults={
                    'user': match.user,
                    'summary': match.summary,
                    'started': timestamp,
                    'ended': timestamp,
                    'nb_edits': 0,
                })

        # Check that the batch is owned by the right user
        if batch.user != match.user:
            if created:
                batch.delete()
            continue

        batch.nb_edits += 1
        batch.ended = max(batch.ended, timestamp)
        batches[batch_key] = batch

        # Create the edit object
        model_edit = Edit.from_json(edit_json, batch)
        model_edits.append(model_edit)

        # Extract tags from the edit
        edit_tags = Tag.extract(model_edit)
        missing_tags = [tag.id for tag in edit_tags
                        if tag.id not in batch.tag_ids]
        new_tags[batch.id].update(missing_tags)

    # Create all Edit objects and update all the batch objects
    if batches:
        # Create all the edit objects
        try:
            with transaction.atomic():
                Edit.objects.bulk_create(model_edits)
        except IntegrityError:
            # Oops! Some of them existed already!
            # Let's add them one by one instead.
            for edit in model_edits:
                try:
                    Edit.objects.get(id=edit.id)
                    # this edit was already seen: we need to remove it
                    # from the associated batch count
                    batch_key = (edit.batch.tool.shortid, edit.batch.uid)
                    batch = batches.get(batch_key)
                    if batch:
                        batch.nb_edits -= 1
                except Edit.DoesNotExist:
                    edit.save()

        # update batch objects
        Batch.objects.bulk_update(list(batches.values()),
                                  update_fields=['ended', 'nb_edits'])

        # update tags for batches
        if new_tags:
            Tag.add_tags_to_batches(new_tags)

    # If we saw any "undo" edit, mark all matching edits as reverted
    if reverted_ids:
        Edit.objects.filter(newrevid__in=reverted_ids).update(reverted=True)
def ingest_edits(cls, json_batch):
    # Map from (toolid, uid) to Batch object
    batches = {}
    model_edits = []
    reverted_ids = []
    deleted_pages = {}   # map: title -> latest deletion timestamp
    restored_pages = {}  # map: title -> latest restoration timestamp
    modified_pages = defaultdict(set)  # map: batch_key -> set of touched pages
    new_tags = defaultdict(set)
    tools = Tool.objects.all()
    for edit_json in json_batch:
        if not edit_json or edit_json.get(
                'namespace') not in settings.WATCHED_NAMESPACES:
            continue
        timestamp = datetime.fromtimestamp(edit_json['timestamp'], tz=UTC)

        # First, check if this is a revert
        revert_match = cls.reverted_re.match(edit_json['comment'])
        if revert_match:
            reverted_ids.append(int(revert_match.group(1)))
        # or a deletion
        if edit_json.get('log_action') == 'delete':
            deleted_pages[edit_json['title']] = timestamp
        # or a restore
        if edit_json.get('log_action') == 'restore':
            restored_pages[edit_json['title']] = timestamp

        # Then, try to match the edit with a tool
        match = None
        matching_tool = None
        for tool in tools:
            match = tool.match(edit_json['user'], edit_json['comment'])
            if match is not None:
                matching_tool = tool
                break
        if match is None:
            continue

        # Try to find an existing batch for that edit
        batch_key = (matching_tool.shortid, match.uid)
        batch = batches.get(batch_key)
        created = False
        if not batch:
            batch, created = Batch.objects.get_or_create(
                tool=matching_tool,
                uid=match.uid,
                defaults={
                    'user': match.user[:MAX_CHARFIELD_LENGTH],
                    'summary': match.summary[:MAX_CHARFIELD_LENGTH],
                    'started': timestamp,
                    'ended': timestamp,
                    'nb_edits': 0,
                    'nb_distinct_pages': 0,
                    'nb_new_pages': 0,
                    'nb_reverted_edits': 0,
                    'total_diffsize': 0,
                })

        # Check that the batch is owned by the right user
        if batch.user != match.user:
            if created:
                batch.delete()
            continue

        batch.nb_edits += 1
        length_obj = edit_json.get('length') or {}
        batch.total_diffsize += (length_obj.get('new') or 0) - (length_obj.get('old') or 0)
        batch.ended = max(batch.ended, timestamp)
        batches[batch_key] = batch

        # Create the edit object
        model_edit = Edit.from_json(edit_json, batch)
        model_edits.append(model_edit)

        # Extract tags from the edit
        edit_tags = Tag.extract(model_edit)
        missing_tags = [tag.id for tag in edit_tags
                        if tag.id not in batch.tag_ids]
        new_tags[batch.id].update(missing_tags)

        # Take note of the modified page, for computation of the number of
        # entities edited by a batch
        modified_pages[batch_key].add(edit_json['title'])
        # And the number of new pages
        if model_edit.changetype == 'new':
            batch.nb_new_pages += 1

    # If we saw some deletions which match any creations or undeletions we know
    # of, mark them as deleted. We do this before creating the previous edits
    # in the same batch, because deletions and restorations do not come with
    # unique ids to identify the creation, deletion or restoration that they
    # undo (this is a notion that we introduce ourselves), so if a deletion and
    # the corresponding revert happen in the same batch we need to inspect the
    # order in which they happened.
    if deleted_pages:
        cls.mark_as_reverted(
            Edit.objects.filter(title__in=deleted_pages.keys(),
                                changetype__in=['new', 'restore']))
        for edit in model_edits:
            if (edit.title in deleted_pages
                    and edit.changetype in ['new', 'restore']
                    and edit.timestamp < deleted_pages.get(edit.title)):
                edit.reverted = True
                edit.batch.nb_reverted_edits += 1

    # finally if we saw some undeletions which match any deletions we know of,
    # mark them as undone
    if restored_pages:
        cls.mark_as_reverted(
            Edit.objects.filter(title__in=restored_pages.keys(),
                                changetype='delete'))
        for edit in model_edits:
            if (edit.title in restored_pages
                    and edit.changetype == 'delete'
                    and edit.timestamp < restored_pages.get(edit.title)):
                edit.reverted = True
                edit.batch.nb_reverted_edits += 1

    # Create all Edit objects and update all the batch objects
    if batches:
        # Update the number of modified pages
        for batch_key, pages in modified_pages.items():
            batch = batches.get(batch_key)
            existing_pages = set(
                batch.edits.filter(title__in=pages).values_list('title',
                                                                flat=True))
            unseen_pages = pages - existing_pages
            batch.nb_distinct_pages += len(unseen_pages)

        # Create all the edit objects
        try:
            with transaction.atomic():
                Edit.objects.bulk_create(model_edits)
        except IntegrityError:
            # Oops! Some of them existed already!
            # Let's add them one by one instead.
            for edit in model_edits:
                try:
                    Edit.objects.get(id=edit.id)
                    # this edit was already seen: we need to remove it
                    # from the associated batch counts
                    batch_key = (edit.batch.tool.shortid, edit.batch.uid)
                    batch = batches.get(batch_key)
                    if batch:
                        batch.nb_edits -= 1
                        batch.total_diffsize -= edit.newlength - edit.oldlength
                        if edit.changetype == 'new':
                            batch.nb_new_pages -= 1
                        if edit.reverted:
                            batch.nb_reverted_edits -= 1
                except Edit.DoesNotExist:
                    edit.save()

        # update batch objects
        Batch.objects.bulk_update(list(batches.values()),
                                  update_fields=['ended', 'nb_edits',
                                                 'nb_distinct_pages',
                                                 'nb_reverted_edits',
                                                 'nb_new_pages',
                                                 'total_diffsize'])

        # update tags for batches
        if new_tags:
            Tag.add_tags_to_batches(new_tags)

    # If we saw any "undo" edit, mark all matching edits as reverted.
    # We do this after creating the latest edits because it could be possible
    # that an edit from the batch we just processed was undone in the same go.
    if reverted_ids:
        cls.mark_as_reverted(Edit.objects.filter(newrevid__in=reverted_ids))
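# A hypothetical smoke test for ingest_edits, sketched from the fields the
# method actually reads (timestamp, comment, user, title, namespace, length,
# log_action). The event values and the assumption that the comment matches a
# configured Tool pattern are illustrative only:
def smoke_test_ingest():
    sample_batch = [{
        'timestamp': 1577836800,  # 2020-01-01T00:00:00Z
        'comment': 'some batch edit summary',
        'user': 'SomeBot',
        'title': 'Q42',
        'namespace': 0,
        'length': {'old': 1000, 'new': 1200},
    }]
    # Events whose comment matches no Tool are skipped, so this only creates
    # a Batch if a matching Tool exists in the database.
    Batch.ingest_edits(sample_batch)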
def tag_object_list(request, cls, tag_string='',
                    template_name='tag_object_list.html'):
    tags = []
    rss = False
    for tag_name in tag_string.split('/'):
        try:
            tag = Tag.objects.get(name__iexact=tag_name.strip())
            tags.append(tag)
        except Tag.DoesNotExist:
            if tag_name == 'rss':
                rss = True
            continue
    items = cls.objects.all()
    for tag in tags:
        items = items.filter(tags__id=tag.id)
    items = items.order_by('-id')
    try:
        page = int(request.GET.get('page', '1'))
    except ValueError:
        page = 1
    try:
        mode = request.GET['view_mode']
        if mode == 'all':
            request.session["view_mode"] = 'all'
        else:
            request.session["view_mode"] = ''
    except KeyError:
        pass
    try:
        if request.session["view_mode"] == 'all':
            mode = 'all'
        else:
            mode = ''
    except KeyError:
        request.session["view_mode"] = ''
        mode = ''
    if mode == 'all':
        display_tags = Tag.getSubsetTags(cls, tags, limit=False)
    else:
        display_tags = Tag.getSubsetTags(cls, tags)
    extra_context = {
        'display_tags': display_tags,
        'viewing_tags': tags,
        'view_mode': mode,
    }
    if rss is True:
        template_name = 'tag_object_list_rss.html'
        if len(items):
            extra_context['last_build'] = items[0].date
        else:
            extra_context['last_build'] = 0
    return object_list(request, items, extra_context=extra_context,
                       template_name=template_name, page=page,
                       paginate_by=PAGINATE_BY)
def test_tag_entries(self):
    self.create_published_entry()
    feed = TagEntries()
    self.assertEqual(feed.get_object('request', 'tests').name, 'tests')
    self.assertEqual(len(feed.items('tests')), 1)
    self.assertEqual(feed.link(Tag(name='tests')), '/tags/tests/')