def amend(self, input, comments):
    """Splice comment bodies into *input*, replacing the commented words.

    The markup in *input* is spannified so each word is wrapped in a span
    with id ``w_<n>``.  For every ``(start, end, content)`` triple in
    *comments*, the span of the first word in the range is replaced by
    *content* and the remaining word spans of the range are removed.

    Returns the amended BeautifulSoup tree.
    """
    spannifier = Spannifier()
    soup = spannifier.get_the_soup(input)
    res = spannifier.spannify_new(soup)
    # Re-parse the spannified markup so the per-word id spans are queryable.
    soup = spannifier.get_the_soup(res)
    for start, end, content in comments:
        inserted = False
        for word in range(start, end + 1):
            spantoremove = soup.find(id="w_%s" % word)
            if spantoremove is None:
                # Robustness: a stale comment range may reference a word id
                # that is absent from the markup; skip it instead of raising
                # AttributeError on None.
                continue
            if not inserted:
                # Replace the first word of the range with the comment's content.
                spantoremove.replaceWith(content)
                inserted = True
            else:
                spantoremove.extract()
    return soup
def test_spannify(self):
    """Check spannify/unspannify round-trips against string and file fixtures."""
    spannifier = Spannifier()
    # Unspannifying the soup of each input must reproduce the expected text.
    for [expectedResult, input] in string_tests:
        soup = spannifier.get_the_soup(input)
        res2 = spannifier.unspannify_new(soup)
        self.assertEqual(unicode(res2), expectedResult)
    for filename in file_tests:
        if filename.startswith("span_"):
            path = 'cm/tests/data/%s' % filename
            doc = xml.dom.minidom.parse(path)
            # Bug fix: BeautifulSoup was previously handed the *path string*
            # itself, not the file's markup, so res2 was computed from the
            # literal path.  Read the fixture and parse its contents; the
            # handle is closed via the context manager (the old file() call
            # also leaked its handle).
            with open(path) as fixture:
                soup = BeautifulSoup(fixture.read(),
                                     convertEntities=["xml", "html"])
            # Legacy spannify path is still exercised so a regression there
            # raises; only the new implementation's output is asserted.
            res = spannifier.spannify(doc)
            res2 = spannifier.spannify_new(soup)
            with open('cm/tests/data/res_%s' % filename) as expected:
                expectedResult = expected.read()
            self.assertEqual(res2, expectedResult)
def export_add_comment(request, version):
    """Render *version*'s content with inline comment markers and bodies.

    Every word of the version's content is wrapped in an id'd span, then
    ``[n>`` / ``<n]`` markers are inserted around each comment's word range
    (numbered in start-word order).  After colorizing, each comment's body
    and its discussion replies are appended below the text, numbered
    ``[n]`` and ``[n.m]`` to match the inline markers.

    Returns the full export markup as a unicode string.
    """
    # ADDING MARKERS
    spannifier = Spannifier()
    soup = spannifier.get_the_soup(version.content)
    res = spannifier.spannify_new(soup)
    spanned_soup = BeautifulSoup(res)

    version_id = int(request.POST['versionId'])
    comments = get_comments(request, -1, -1, "start_word", version_id=version_id)

    # Group comments by the word index where they start.
    comments_starts = {}
    comments_ends = {}
    for comment in comments:
        comments_starts.setdefault(comment.start_word, []).append(comment)

    # Insert "[n>" opening markers; record each comment's end word along the
    # way so the numbering assigned here is reused for the closing markers.
    comment_count = 1
    sortedstarts = sorted(comments_starts)
    for start in sortedstarts:
        openers = []
        for comment in comments_starts[start]:
            openers.append("[%s>" % comment_count)
            comments_ends.setdefault(comment.end_word, []).append(comment_count)
            comment_count += 1
        start_span = spanned_soup.find(id="w_%s" % start)
        start_span.contents[0].replaceWith(
            u"%s%s" % (''.join(openers), start_span.contents[0]))

    # Insert "<n]" closing markers after the last word of each range,
    # highest-numbered first (same order the original prepend loop produced).
    for end in comments_ends:
        closers = ''.join("<%s]" % ind
                          for ind in sorted(comments_ends[end], reverse=True))
        end_span = spanned_soup.find(id="w_%s" % end)
        end_span.contents[0].replaceWith(
            u"%s%s" % (end_span.contents[0], closers))

    export_colorize_comment(comments, spanned_soup)

    # REQUESTING COMMENTS REPLIES
    discs = get_discussion_items(request,
                                 [comment.id for comment in comments],
                                 version_id=version.id)

    # ADDING COMMENTS CONTENT: append each comment and its replies after the
    # marked-up text, in the same order the inline markers were numbered.
    output = unicode(spanned_soup)
    comment_count = 1
    can_manage_comment = user_has_perm_on_text(
        request.user, 'can_manage_comment_local_text', version.text_id)
    ss = []
    for start in sortedstarts:
        for comment in comments_starts[start]:
            ss.append("<hr />")
            ss.append(format_attach(comment, can_manage_comment, "",
                                    "[%s] " % comment_count))
            di_count = 1
            for di in discs.get(comment.id, []):
                ss.append("<br /><br />")
                ss.append(format_attach(di, can_manage_comment, " ",
                                        "[%s.%s] " % (comment_count, di_count)))
                di_count += 1
            comment_count += 1
    return output + ''.join(ss)
def compute_word_list(versionContent):
    """Parse *versionContent* and return the word list the spannifier extracts."""
    helper = Spannifier()
    parsed = helper.get_the_soup(versionContent)
    return helper.word_list(parsed)
def test_beautifulsoup_preserve_spanned_whitespaces(self):
    """Round-tripping markup through get_the_soup must keep whitespace inside spans."""
    markup = unicode("<span> </span>")
    roundtripped = unicode(Spannifier().get_the_soup(markup))
    self.assertEqual(markup, roundtripped)