def update_from_dict(self, dt, default_thumb=None):
    """Refresh this object's fields from a plain dict representation.

    Copies the simple scalar fields, derives the stripped/summary text
    from the HTML content, rebuilds the media link/meta-tag data,
    replaces the tag collection, and stamps ``updated`` with the
    current time (tzinfo stripped to keep the value naive).

    :param dt: dict of field values; absent keys become ``None``.
    :param default_thumb: fallback thumbnail forwarded to
        ``html.get_metamedia_data``.
    """
    # Simple scalar fields (dict.get already defaults to None).
    self.title = dt.get('title')
    self.published = dt.get('published')
    self.content = dt.get('content')
    self.sticky = dt.get('sticky')
    self.can_comment = dt.get('can_comment')
    self.page_id = dt.get('page_id')

    # Representations derived from the HTML content.
    self.metacontent = html.striphtml(self.content)
    self.summary = html.stripobjects(self.content)
    media_data = html.get_metamedia_data(self.content, default_thumb)
    self.links = media_data.get('link', {})
    self.metatags = media_data.get('meta', {})

    # Replace the tag set, reusing an existing Tag row when one with
    # the same slug already exists.
    self.tags.clear()
    for label in dt.get('tags', []):
        tag = Tag.get_from_id(slugify(label)) or Tag(label=label)
        self.tags.add(tag)

    # Drop tzinfo so the timestamp stays naive — presumably to match
    # the column type; confirm against the model definition.
    self.updated = rfc3339.now().replace(tzinfo=None)
def from_dict(cls, dt, default_thumb=None):
    """Build and return a new instance from a plain dict representation.

    Mirrors ``update_from_dict`` (same field copying and derived
    metadata), additionally initializing ``contributors`` and
    back-filling ``created`` from ``updated`` for brand-new rows.
    NOTE(review): takes ``cls`` — presumably decorated with
    ``@classmethod`` outside this view; confirm at the call site.

    :param dt: dict of field values; absent keys become ``None``.
    :param default_thumb: fallback thumbnail forwarded to
        ``html.get_metamedia_data``.
    :returns: the newly populated instance.
    """
    item = cls()

    # Simple scalar fields (dict.get already defaults to None).
    item.title = dt.get('title')
    item.published = dt.get('published')
    item.content = dt.get('content')
    item.sticky = dt.get('sticky')
    item.can_comment = dt.get('can_comment')
    item.page_id = dt.get('page_id')
    item.contributors = []

    # Representations derived from the HTML content.
    item.metacontent = html.striphtml(item.content)
    item.summary = html.stripobjects(item.content)
    media_data = html.get_metamedia_data(item.content, default_thumb)
    item.links = media_data.get('link', {})
    item.metatags = media_data.get('meta', {})

    # Attach tags, reusing an existing Tag row when one with the same
    # slug already exists.
    for label in dt.get('tags', []):
        tag = Tag.get_from_id(slugify(label)) or Tag(label=label)
        item.tags.add(tag)

    # Naive timestamp (tzinfo stripped); a fresh object inherits it
    # as its creation time as well.
    item.updated = rfc3339.now().replace(tzinfo=None)
    if item.created is None:
        item.created = item.updated
    return item
def test_basic(self):
    # striphtml on the HTML fixture should yield the expected
    # plain-text result. (The previous comment about a "shuffled
    # sequence" was a copy-paste leftover from the unittest docs.)
    response = striphtml(self.content)
    self.assertEqual(response, self.response)
def test_blank(self):
    # Non-string input must be tolerated: striphtml(13) returns an
    # empty unicode string instead of raising. (The previous comment
    # about a "shuffled sequence" was a copy-paste leftover.)
    response = striphtml(13)
    self.assertEqual(response, u'')
def pingback_ping(self, sourceURI, targetURI):
    """XML-RPC ``pingback.ping`` handler.

    Fetches *sourceURI*, verifies that it actually links to
    *targetURI*, extracts a snippet of text around that link, checks
    that the target is a local pingback-enabled blog article, rejects
    duplicates, and stores the pingback as a ``Comment``.

    :param sourceURI: URL of the page claiming to link to us.
    :param targetURI: URL on this server that was linked to.
    :returns: a success message string, or a ``Fault`` whose code
        (16/17/32/33/48) follows the Pingback specification.
    """
    try:
        doc = urlopen(sourceURI)
    except (HTTPError, URLError):
        return Fault(16, "The source URI does not exist.")

    # Does the source actually contain a link to the target?
    soup = BeautifulSoup(doc.read())
    mylink = soup.find("a", attrs={"href": targetURI})
    if not mylink:
        return Fault(
            17,
            "The source URI does not contain a link to the target URI, and so cannot be used as a source."
        )

    # Grab the title of the pingback source, if it has one.
    title = soup.find("title")
    if title:
        title = html.striphtml(unicode(title))
    else:
        title = "Unknown title"

    # Extract the text surrounding the incoming link, trimmed to a
    # configurable window centered on the link.
    content = unicode(mylink.findParent())
    i = content.index(unicode(mylink))
    content = html.striphtml(content)
    max_length = config.get("PINGBACK_RESPONSE_LENGTH", 200)
    if len(content) > max_length:
        # Floor division keeps the slice bounds integers under true
        # division (Python 3 / __future__ division) as well.
        start = i - max_length // 2
        if start < 0:
            start = 0
        end = i + len(unicode(mylink)) + max_length // 2
        if end > len(content):
            end = len(content)
        content = content[start:end]

    scheme, server, path, query, fragment = urlsplit(targetURI)

    # The target must point at this server (with or without a port).
    if request.headers["SERVER_NAME"] not in [server, server.split(":")[0]]:
        return Fault(
            33,
            "The specified target URI cannot be used as a target. It either doesn't exist, or it is not a pingback-enabled resource.",
        )

    route = config["routes.map"].match(path)
    # Narrowed from a bare `except:` — a failed lookup (no row or
    # multiple rows) falls through to the "does not exist" fault, but
    # KeyboardInterrupt/SystemExit are no longer swallowed.
    try:
        article = meta.Session.query(Article).filter(Article.permalink == path).one()
    except Exception:
        article = None
    if route is None or article is None:
        return Fault(32, "The specified target URI does not exist.")

    # Only the blog controller accepts pingbacks.
    if route["controller"] not in ["blog"]:
        return Fault(
            33,
            "The specified target URI cannot be used as a target. It either doesn't exist, or it is not a pingback-enabled resource.",
        )

    # Reject a pingback that was already registered from this source.
    pingbacks = (
        meta.Session.query(Comment)
        .filter(Comment.article_id == article.id)
        .filter(Comment.is_pingback == True)
        .all()
    )
    if any([x.author["uri"] == sourceURI for x in pingbacks]):
        return Fault(48, "The pingback has already been registered.")

    # Store the pingback as a comment on the target article.
    pb = Comment.from_dict(
        {"title": title.encode("utf-8"), "content": content.encode("utf-8"), "parent": article.id}
    )
    pb.author = {"uri": sourceURI, "name": "", "email": None}
    pb.save()
    return "pingback from %s to %s saved" % (sourceURI, targetURI)