Example #1
def clean_document(node):
    """Clean up the final document we return as the readable article"""
    if node is None or len(node) == 0:
        return

    LNODE.log(node, 2, "Processing doc")
    clean_list = ['object', 'h1']
    to_drop = []

    # If there is only one h2, they are probably using it as a header and
    # not a subheader, so remove it since we already have a header.
    if len(node.findall('.//h2')) == 1:
        LOG.debug('Adding H2 to list of nodes to clean.')
        clean_list.append('h2')

    for n in node.iter():
        LNODE.log(n, 2, "Cleaning iter node")
        # clean out any in-line style properties
        if 'style' in n.attrib:
            n.set('style', '')

        # remove all of the following tags
        # Clean a node of all elements of type "tag".
        # (Unless it's a youtube/vimeo video. People love movies.)
        is_embed = n.tag in ['object', 'embed']
        if n.tag in clean_list:
            allow = False

            # Allow youtube and vimeo videos through as people usually
            # want to see those.
            if is_embed:
                if ok_embedded_video(n):
                    allow = True

            if not allow:
                LNODE.log(n, 2, "Dropping Node")
                to_drop.append(n)

        if n.tag in ['h1', 'h2', 'h3', 'h4']:
            # clean headings
            # if the heading has no css weight or a high link density,
            # remove it
            if get_class_weight(n) < 0 or get_link_density(n) > .33:
                LNODE.log(n, 2, "Dropping <hX>, it's insignificant")
                to_drop.append(n)

        # clean out extra <p>
        if n.tag == 'p':
            # if the p has no children and has no content...well then down
            # with it.
            if not n.getchildren() and len(n.text_content()) < 5:
                LNODE.log(n, 2, 'Dropping extra <p>')
                to_drop.append(n)

        # finally try out the conditional cleaning of the target node
        if clean_conditionally(n):
            to_drop.append(n)

    [n.drop_tree() for n in to_drop if n.getparent() is not None]
    return node
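
A minimal usage sketch for clean_document (the markup is hypothetical; it assumes lxml is installed and that this module's helpers, LNODE, clean_conditionally, get_class_weight, and friends, are in scope):

from lxml.html import fragment_fromstring

doc = fragment_fromstring(
    '<div><p style="color:red">Enough body text here to survive the '
    'conditional cleaning rules.</p><p></p></div>')
cleaned = clean_document(doc)
# The inline style is blanked and the empty <p> is dropped.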
Example #2
    def hash_id(self):
        content = tounicode(self.node)
        hashed = md5()
        try:
            hashed.update(content.encode('utf-8', errors="replace"))
        except Exception as e:
            LOG.error("BOOM! " + str(e))

        return hashed.hexdigest()[0:8]
Example #3
    def _parse(self, html):
        """Generate an lxml document from our html."""
        html = replace_multi_br_to_paragraphs(html)
        doc = build_doc(html)
        # doc = html_cleaner.clean_html(doc)
        base_href = self.url
        if base_href:
            LOG.debug('Making links absolute')
            doc.make_links_absolute(base_href, resolve_base_href=True)
        else:
            doc.resolve_base_href()
        return doc
Example #4
    def __init__(self, html, url=None, fragment=True):
        """Create the Article we're going to use.

        :param html: The string of html we're going to parse.
        :param url: The url so we can adjust the links to still work.
        :param fragment: Should we return a <div> fragment or a full <html>
            doc?

        """
        LOG.debug('Url: ' + str(url))
        self.orig = OriginalDocument(html, url=url)
        self.fragment = fragment
Example #5
def generate_hash_id(node):
    """Generate a hash_id for the node in question.

    :param node: lxml etree node

    """
    content = tounicode(node)
    hashed = md5()
    try:
        hashed.update(content.encode('utf-8', "replace"))
    except Exception as e:
        LOG.error("BOOM! " + str(e))

    return hashed.hexdigest()[0:8]
Example #6
    def _parse(self, html):
        """Generate an lxml document from our html."""
        html = replace_multi_br_to_paragraphs(html)
        doc = build_doc(html)

        # doc = html_cleaner.clean_html(doc)
        base_href = self.url
        if base_href:
            LOG.debug('Making links absolute')
            doc.make_links_absolute(base_href, resolve_base_href=True)
        else:
            doc.resolve_base_href()
        return doc
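
For context, a standalone sketch of what the make_links_absolute call above does; this is real lxml API, and only the sample markup and base URL are made up:

from lxml.html import document_fromstring

d = document_fromstring('<html><body><a href="/x">x</a></body></html>')
d.make_links_absolute('http://example.com', resolve_base_href=True)
print(d.xpath('//a/@href')[0])  # 'http://example.com/x'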
Example #7
def build_doc(page):
    """Requires that the `page` not be None"""
    if page is None:
        LOG.error("Page content is None, can't build_doc")
        return ''
    if isinstance(page, unicode):
        page_unicode = page
    else:
        enc = get_encoding(page)
        page_unicode = page.decode(enc, 'replace')
    doc = document_fromstring(
        page_unicode.encode('utf-8', 'replace'),
        parser=utf8_parser)
    return doc
Example #8
def generate_hash_id(node):
    """Generate a hash_id for the node in question.

    :param node: lxml etree node

    """
    content = tounicode(node)
    hashed = md5()
    try:
        hashed.update(content.encode('utf-8', "replace"))
    except Exception as e:
        LOG.error("BOOM! " + str(e))

    return hashed.hexdigest()[0:8]
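
A minimal usage sketch for generate_hash_id (hypothetical markup; assumes lxml is installed and, as in this module, tounicode comes from lxml.etree and md5 from hashlib):

from lxml.html import fragment_fromstring

node = fragment_fromstring('<div><p>Some article text.</p></div>')
print(generate_hash_id(node))  # first 8 hex chars of the md5 of the markup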
Example #9
    def _handle_no_candidates(self):
        """If we fail to find a good candidate we need to find something else."""
        # Since we've not found a good candidate, help the cleanup along.
        if self.doc is not None and len(self.doc):
            # Clean up by removing the should_drop nodes we spotted.
            [n.drop_tree() for n in self._should_drop
                if n.getparent() is not None]
            doc = prep_article(self.doc)
            doc = build_base_document(doc, self.fragment)
        else:
            LOG.warning('No document to use.')
            doc = build_error_document(self.fragment)

        return doc
Example #10
    def _handle_no_candidates(self):
        """If we fail to find a good candidate we need to find something else."""
        # Since we've not found a good candidate, help the cleanup along.
        if self.doc is not None and len(self.doc):
            # Clean up by removing the should_drop nodes we spotted.
            [
                n.drop_tree() for n in self._should_drop
                if n.getparent() is not None
            ]
            doc = prep_article(self.doc)
            doc = build_base_document(doc, self.fragment)
        else:
            LOG.warning('No document to use.')
            doc = build_error_document(self.fragment)

        return doc
Example #11
def build_doc(page):
    """Requires that the `page` not be None"""
    if page is None:
        LOG.error("Page content is None, can't build_doc")
        return ""
    if isinstance(page, unicode):
        page_unicode = page
    else:
        enc = get_encoding(page)
        page_unicode = page.decode(enc, "replace")
    try:
        doc = document_fromstring(page_unicode.encode("utf-8", "replace"), parser=utf8_parser)
        return doc
    except XMLSyntaxError as exc:
        LOG.error("Failed to parse: " + str(exc))
        raise ValueError("Failed to parse document contents.")
Example #12
def build_doc(page):
    """Requires that the `page` not be None"""
    if page is None:
        LOG.error("Page content is None, can't build_doc")
        return ''
    if isinstance(page, str):
        page_unicode = page
    else:
        enc = get_encoding(page)
        page_unicode = page.decode(enc, 'replace')
    try:
        doc = document_fromstring(
            page_unicode.encode('utf-8', 'replace'),
            parser=utf8_parser)
        return doc
    except XMLSyntaxError as exc:
        LOG.error('Failed to parse: ' + str(exc))
        raise ValueError('Failed to parse document contents.')
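
A minimal usage sketch with a hypothetical byte-string input; it assumes the module-level helpers used above (get_encoding, utf8_parser) are defined as in the rest of these snippets:

raw = b'<html><body><p>caf\xc3\xa9</p></body></html>'
doc = build_doc(raw)         # bytes: detect the encoding, decode, then parse
print(doc.findtext('.//p'))  # 'café'; a str input would skip the decode step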
Example #13
def find_candidates(doc):
    """Find cadidate nodes for the readable version of the article.

    Here's we're going to remove unlikely nodes, find scores on the rest, and
    clean up and return the final best match.

    """
    scorable_node_tags = SCORABLE_TAGS
    nodes_to_score = []
    should_remove = []

    for node in doc.iter():
        if is_unlikely_node(node):
            LOG.debug("We should drop unlikely: " + str(node))
            should_remove.append(node)
            continue
        if node.tag in scorable_node_tags and node not in nodes_to_score:
            nodes_to_score.append(node)
    return score_candidates(nodes_to_score), should_remove
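
A sketch of how the returned pair is typically consumed, mirroring the _readable and _handle_no_candidates methods elsewhere in these snippets:

candidates, should_drop = find_candidates(doc)
for n in should_drop:
    if n.getparent() is not None:
        n.drop_tree()  # drop the unlikely nodes before building the article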
Example #14
def main():
    args = parse_args()

    if args.verbose:
        set_logging_level('DEBUG')

    if args.debug:
        LNODE.activate()

    target = args.path[0]
    LOG.debug("Target: " + target)

    if target.startswith('http') or target.startswith('www'):
        is_url = True
        url = target
    else:
        is_url = False
        url = None

    if is_url:
        req = urllib.urlopen(target)
        content = req.read()
        ucontent = unicode(content, 'utf-8')
    else:
        ucontent = codecs.open(target, "r", "utf-8").read()

    doc = Article(ucontent, url=url, fragment=args.fragment)
    if args.browser:
        fd, pathname = mkstemp(suffix='.html')
        out = codecs.open(pathname, 'w', 'utf-8')
        out.write(doc.readable)
        out.close()
        webbrowser.open(pathname)
    else:
        # Wrap sys.stdout into a StreamWriter to allow writing unicode.
        sys.stdout = codecs.getwriter(
                        locale.getpreferredencoding())(sys.stdout)
        sys.stdout.write(doc.readable)
Example #15
def find_candidates(doc):
    """Find cadidate nodes for the readable version of the article.

    Here's we're going to remove unlikely nodes, find scores on the rest, and
    clean up and return the final best match.

    """
    scorable_node_tags = SCORABLE_TAGS
    nodes_to_score = []
    should_remove = []

    for node in doc.iter():
        if is_unlikely_node(node):
            LOG.debug('We should drop unlikely: ' + str(node))
            should_remove.append(node)
            continue
        if node.tag == 'a' and is_bad_link(node):
            LOG.debug('We should drop bad link: ' + str(node))
            should_remove.append(node)
            continue
        if node.tag in scorable_node_tags and node not in nodes_to_score:
            nodes_to_score.append(node)
    return score_candidates(nodes_to_score), should_remove
Example #16
    def _readable(self):
        """The readable parsed article"""
        if self.candidates:
            LOG.debug('Candidates found:')
            pp = PrettyPrinter(indent=2)

            # cleanup by removing the should_drop we spotted.
            [n.drop_tree() for n in self._should_drop
                if n.getparent() is not None]

            # right now we return the highest scoring candidate content
            by_score = sorted([c for c in self.candidates.values()],
                key=attrgetter('content_score'), reverse=True)
            LOG.debug(pp.pformat(by_score))

            # since we have several candidates, check the winner's siblings
            # for extra content
            winner = by_score[0]
            LOG.debug('Selected winning node: ' + str(winner))
            updated_winner = check_siblings(winner, self.candidates)
            LOG.debug('Begin final prep of article')
            updated_winner.node = prep_article(updated_winner.node)
            if updated_winner.node is not None:
                doc = build_base_document(updated_winner.node, self.fragment)
            else:
                LOG.warning('Had candidates but failed to find a cleaned winning doc.')
                doc = self._handle_no_candidates()
        else:
            LOG.warning('No candidates found: using document.')
            LOG.debug('Begin final prep of article')
            doc = self._handle_no_candidates()

        return doc
Example #17
    def _readable(self):
        """The readable parsed article"""
        doc = self.orig.html
        # cleaning doesn't return, just wipes in place
        html_cleaner(doc)
        doc = drop_tag(doc, "noscript", "iframe")
        doc = transform_misused_divs_into_paragraphs(doc)
        candidates, should_drop = find_candidates(doc)

        if candidates:
            LOG.debug("Candidates found:")
            pp = PrettyPrinter(indent=2)

            # right now we return the highest scoring candidate content
            by_score = sorted([c for c in candidates.values()], key=attrgetter("content_score"), reverse=True)
            LOG.debug(pp.pformat(by_score))

            # since we have several candidates, check the winner's siblings
            # for extra content
            winner = by_score[0]
            LOG.debug("Selected winning node: " + str(winner))
            updated_winner = check_siblings(winner, candidates)
            LOG.debug("Begin final prep of article")
            updated_winner.node = prep_article(updated_winner.node)
            doc = build_base_document(updated_winner.node, self.fragment)
        else:
            LOG.warning("No candidates found: using document.")
            LOG.debug("Begin final prep of article")
            # Since we've not found a good candidate, help the cleanup along
            # by removing the should_drop nodes we spotted.
            [n.drop_tree() for n in should_drop]
            doc = prep_article(doc)
            doc = build_base_document(doc, self.fragment)

        return doc
Example #18
def clean_conditionally(node):
    """Remove the clean_el if it looks like bad content based on rules."""
    target_tags = ['form', 'table', 'ul', 'div', 'p']

    LNODE.log(node, 2, 'Conditionally cleaning node.')

    if node.tag not in target_tags:
        # this is not the tag you're looking for
        LNODE.log(node, 2, 'Node cleared.')
        return

    weight = get_class_weight(node)
    # Look up the content score for this node if we scored it earlier;
    # otherwise default to 0.
    content_score = 0

    if (weight + content_score < 0):
        LNODE.log(node, 2, 'Dropping conditional node')
        LNODE.log(node, 2, 'Weight + score < 0')
        return True

    if node.text_content().count(',') < 10:
        LOG.debug("There aren't 10 ,s so we're processing more")

        # If there are not very many commas, and the number of
        # non-paragraph elements is more than paragraphs or other ominous
        # signs, remove the element.
        p = len(node.findall('.//p'))
        img = len(node.findall('.//img'))
        li = len(node.findall('.//li')) - 100
        inputs = len(node.findall('.//input'))

        embed = 0
        embeds = node.findall('.//embed')
        for e in embeds:
            if ok_embedded_video(e):
                embed += 1
        link_density = get_link_density(node)
        content_length = len(node.text_content())

        remove_node = False

        if li > p and node.tag != 'ul' and node.tag != 'ol':
            LNODE.log(node, 2, 'Conditional drop: li > p and not ul/ol')
            remove_node = True
        elif inputs > p / 3.0:
            LNODE.log(node, 2, 'Conditional drop: inputs > p/3.0')
            remove_node = True
        elif content_length < 25 and (img == 0 or img > 2):
            LNODE.log(node, 2,
                'Conditional drop: len < 25 and 0/>2 images')
            remove_node = True
        elif weight < 25 and link_density > 0.2:
            LNODE.log(node, 2,
                'Conditional drop: weight small and link is dense')
            remove_node = True
        elif weight >= 25 and link_density > 0.5:
            LNODE.log(node, 2,
                'Conditional drop: weight big but link heavy')
            remove_node = True
        elif (embed == 1 and content_length < 75) or embed > 1:
            LNODE.log(node, 2,
                'Conditional drop: embed w/o much content or many embed')
            remove_node = True

        if remove_node:
            LNODE.log(node, 2, 'Node will be removed')
        else:
            LNODE.log(node, 2, 'Node cleared')
        return remove_node

    # nope, don't remove anything
    LNODE.log(node, 2, 'Node cleared (final).')
    return False
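
A quick sketch of the rules above applied to a link-heavy node (hypothetical markup; assumes clean_conditionally and its helpers from this module are importable, plus lxml):

from lxml.html import fragment_fromstring

nav = fragment_fromstring(
    '<div><a href="#">one</a> <a href="#">two</a> <a href="#">three</a></div>')
print(clean_conditionally(nav))  # likely True: tiny text, no images, no <p>s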
Example #19
    def clean_document(node):
        """Clean up the final document we return as the readable article"""
        LOG.debug("Cleaning document")
        clean_list = ["object", "h1"]

        # If there is only one h2, they are probably using it as a header and
        # not a subheader, so remove it since we already have a header.
        if len(node.findall(".//h2")) == 1:
            LOG.debug("Adding H2 to list of nodes to clean.")
            clean_list.append("h2")

        for n in node.iter():
            # clean out any in-line style properties
            if "style" in n.attrib:
                n.set("style", "")

            # remove all of the following tags
            # Clean a node of all elements of type "tag".
            # (Unless it's a youtube/vimeo video. People love movies.)
            is_embed = n.tag in ["object", "embed"]
            if n.tag in clean_list:
                allow = False

                # Allow youtube and vimeo videos through as people usually
                # want to see those.
                if is_embed:
                    if ok_embedded_video(n):
                        allow = True

                if not allow:
                    LNODE.log(n, 2, "Dropping Node")
                    n.drop_tree()
                    # go on with next loop, this guy is gone
                    continue

            if n.tag in ["h1", "h2", "h3", "h4"]:
                # clean headings
                # if the heading has no css weight or a high link density,
                # remove it
                if get_class_weight(n) < 0 or get_link_density(n) > 0.33:
                    # for some reason we get nodes here without a parent
                    if n.getparent() is not None:
                        LNODE.log(n, 2, "Dropping <hX>, it's insignificant")
                        n.drop_tree()
                        # go on with next loop, this guy is gone
                        continue

            # clean out extra <p>
            if n.tag == "p":
                # if the p has no children and has no content...well then down
                # with it.
                if not n.getchildren() and len(n.text_content()) < 5:
                    LNODE.log(n, 2, "Dropping extra <p>")
                    n.drop_tree()
                    # go on with next loop, this guy is gone
                    continue

            # finally try out the conditional cleaning of the target node
            if clean_conditionally(n):
                # Sometimes the parent is None, so we can't drop the node;
                # it isn't attached to a tree that supports dropping it.
                if n.getparent() is not None:
                    n.drop_tree()

        return node
Example #20
def replace_multi_br_to_paragraphs(html):
    """Convert multiple <br>s into paragraphs"""
    LOG.debug('Replacing multiple <br/> to <p>')
    rep = re.compile("(<br[^>]*>[ \n\r\t]*){2,}", re.I)
    return rep.sub('</p><p>', html)
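
A quick sketch of the substitution (assumes import re and the module logger LOG are in scope):

before = 'one<br/>\n<br/>two<br/>three'
after = replace_multi_br_to_paragraphs(before)
assert after == 'one</p><p>two<br/>three'  # only the run of two <br/>s changes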
Example #21
def clean_conditionally(node):
    """Remove the clean_el if it looks like bad content based on rules."""
    target_tags = ['form', 'table', 'ul', 'div', 'p']

    LNODE.log(node, 2, 'Conditionally cleaning node.')

    if node.tag not in target_tags:
        # this is not the tag you're looking for
        LNODE.log(node, 2, 'Node cleared.')
        return

    weight = get_class_weight(node)
    # Look up the content score for this node if we scored it earlier;
    # otherwise default to 0.
    content_score = 0

    if (weight + content_score < 0):
        LNODE.log(node, 2, 'Dropping conditional node')
        LNODE.log(node, 2, 'Weight + score < 0')
        return True

    if node.text_content().count(',') < 10:
        LOG.debug("There aren't 10 ,s so we're processing more")

        # If there are not very many commas, and the number of
        # non-paragraph elements is more than paragraphs or other ominous
        # signs, remove the element.
        p = len(node.findall('.//p'))
        img = len(node.findall('.//img'))
        li = len(node.findall('.//li')) - 100
        inputs = len(node.findall('.//input'))

        embed = 0
        embeds = node.findall('.//embed')
        for e in embeds:
            if ok_embedded_video(e):
                embed += 1
        link_density = get_link_density(node)
        content_length = len(node.text_content())

        remove_node = False

        if li > p and node.tag != 'ul' and node.tag != 'ol':
            LNODE.log(node, 2, 'Conditional drop: li > p and not ul/ol')
            remove_node = True
        elif inputs > p / 3.0:
            LNODE.log(node, 2, 'Conditional drop: inputs > p/3.0')
            remove_node = True
        elif content_length < 25 and (img == 0 or img > 2):
            LNODE.log(node, 2, 'Conditional drop: len < 25 and 0/>2 images')
            remove_node = True
        elif weight < 25 and link_density > 0.2:
            LNODE.log(node, 2,
                      'Conditional drop: weight small and link is dense')
            remove_node = True
        elif weight >= 25 and link_density > 0.5:
            LNODE.log(node, 2, 'Conditional drop: weight big but link heavy')
            remove_node = True
        elif (embed == 1 and content_length < 75) or embed > 1:
            LNODE.log(
                node, 2,
                'Conditional drop: embed w/o much content or many embed')
            remove_node = True

        if remove_node:
            LNODE.log(node, 2, 'Node will be removed')
        else:
            LNODE.log(node, 2, 'Node cleared')
        return remove_node

    # nope, don't remove anything
    LNODE.log(node, 2, 'Node cleared (final).')
    return False
Example #22
    def _readable(self):
        """The readable parsed article"""
        if self.candidates:
            LOG.debug('Candidates found:')
            pp = PrettyPrinter(indent=2)

            # cleanup by removing the should_drop we spotted.
            [
                n.drop_tree() for n in self._should_drop
                if n.getparent() is not None
            ]

            # right now we return the highest scoring candidate content
            by_score = sorted([c for c in self.candidates.values()],
                              key=attrgetter('content_score'),
                              reverse=True)
            LOG.debug(pp.pformat(by_score))

            # since we have several candidates, check the winner's siblings
            # for extra content
            winner = by_score[0]
            LOG.debug('Selected winning node: ' + str(winner))
            updated_winner = check_siblings(winner, self.candidates)
            LOG.debug('Begin final prep of article')
            updated_winner.node = prep_article(updated_winner.node)
            if updated_winner.node is not None:
                doc = build_base_document(updated_winner.node, self.fragment)
            else:
                LOG.warning(
                    'Had candidates but failed to find a cleaned winning doc.')
                doc = self._handle_no_candidates()
        else:
            LOG.warning('No candidates found: using document.')
            LOG.debug('Begin final prep of article')
            doc = self._handle_no_candidates()

        return doc
Example #23
    def clean_conditionally(node):
        """Remove the clean_el if it looks like bad content based on rules."""
        target_tags = ["form", "table", "ul", "div", "p"]

        if node.tag not in target_tags:
            # this is not the tag you're looking for
            return

        weight = get_class_weight(node)
        # Look up the content score for this node if we scored it earlier;
        # otherwise default to 0.
        content_score = 0

        if weight + content_score < 0:
            LNODE.log(node, 2, "Dropping conditional node")
            return True

        if node.text_content().count(",") < 10:
            LOG.debug("There aren't 10 ,s so we're processing more")

            # If there are not very many commas, and the number of
            # non-paragraph elements is more than paragraphs or other ominous
            # signs, remove the element.
            p = len(node.findall(".//p"))
            img = len(node.findall(".//img"))
            li = len(node.findall(".//li")) - 100
            inputs = len(node.findall(".//input"))

            embed = 0
            embeds = node.findall(".//embed")
            for e in embeds:
                if ok_embedded_video(e):
                    embed += 1
            link_density = get_link_density(node)
            content_length = len(node.text_content())

            remove_node = False

            if img > p:
                # this one has shown to do some extra image removals.
                # we could get around this by checking for caption info in the
                # images to try to do some scoring of good v. bad images.
                # failing example:
                # arstechnica.com/science/news/2012/05/1859s-great-auroral-stormthe-week-the-sun-touched-the-earth.ars
                LNODE.log(node, 2, "Conditional drop: img > p")
                remove_node = True
            elif li > p and node.tag != "ul" and node.tag != "ol":
                LNODE.log(node, 2, "Conditional drop: li > p and not ul/ol")
                remove_node = True
            elif inputs > p / 3.0:
                LNODE.log(node, 2, "Conditional drop: inputs > p/3.0")
                remove_node = True
            elif content_length < 25 and (img == 0 or img > 2):
                LNODE.log(node, 2, "Conditional drop: len < 25 and 0/>2 images")
                remove_node = True
            elif weight < 25 and link_density > 0.2:
                LNODE.log(node, 2, "Conditional drop: weight small and link is dense")
                remove_node = True
            elif weight >= 25 and link_density > 0.5:
                LNODE.log(node, 2, "Conditional drop: weight big but link heavy")
                remove_node = True
            elif (embed == 1 and content_length < 75) or embed > 1:
                LNODE.log(node, 2, "Conditional drop: embed without much content or many embed")
                remove_node = True
            return remove_node

        # nope, don't remove anything
        return False