def __init__(self, parent):
        """
        Initialize the renderable and load the available language list.

        :param parent: parent renderable, passed through to
                       RenderableResource.__init__.
        """
        RenderableResource.__init__(self, parent)

        #Extract source and target langs from dublincore.xul
        # (the element with id="dc_language" holds the language options).
        xul = Path(self.config.xulDir).joinpath('templates', 'dublincore.xul')
        bs = BeautifulSoup(xul.text())
        # NOTE(review): assumes the template always contains id="dc_language";
        # bs.find returns None otherwise -- confirm before relying on self.langs.
        self.langs = bs.find(id="dc_language")
        self.updateLocaleNames()
Example #2
0
def loadNodesIdevices(node, s):
    """
    Burst the idevice <div>s found in the HTML string `s` into `node`.

    Every div whose class ends in "Idevice" is converted back into the
    matching eXe idevice via burstIdevice(); unrecognized classes, and
    pages with no idevice divs at all, fall back to a Free Text idevice.
    """
    # class attribute -> idevice name understood by burstIdevice();
    # replaces the original 18-branch elif chain.
    burstable = {
        "activityIdevice": 'Activity',
        "objectivesIdevice": 'Objectives',
        "preknowledgeIdevice": 'Preknowledge',
        "readingIdevice": 'Reading Activity',
        "RssIdevice": 'RSS',
        "WikipediaIdevice": 'Wiki Article',
        "ReflectionIdevice": 'Reflection',
        "GalleryIdevice": 'Image Gallery',
        "ImageMagnifierIdevice": 'Image Magnifier',
        "AppletIdevice": 'Java Applet',
        "ExternalUrlIdevice": 'External Web Site',
        "ClozeIdevice": 'Cloze Activity',
        "FreeTextIdevice": 'Free Text',
        "CasestudyIdevice": 'Case Study',
        "MultichoiceIdevice": 'Multi-choice',
        "MultiSelectIdevice": 'Multi-select',
        "QuizTestIdevice": 'SCORM Quiz',
        "TrueFalseIdevice": 'True-False Question',
    }
    soup = BeautifulSoup(s)
    body = soup.find('body')
    if body:
        idevices = body.findAll(name='div',
                attrs={'class' : re.compile('Idevice$') })
        if len(idevices) > 0:
            for i in idevices:
                cls = i.attrMap['class']
                name = burstable.get(cls)
                if name is None:
                    log.warn("unburstable idevice " + cls +
                            "; bursting into Free Text")
                    name = 'Free Text'
                idevice = burstIdevice(name, i, node)
        else:
            # BUG FIX: the original passed the loop variable `i`, which is
            # undefined on this branch (no idevices were found) and raised
            # NameError; burst the whole body into a Free Text instead.
            log.warn("no idevices found on this node, bursting into Free Text.")
            idevice = burstIdevice('Free Text', body, node)
    else:
        log.warn("unable to read the body of this node.")
Example #3
0
    def parseAndImport(self, import_from_source=False):
        """
        Parse the XLIFF file at ``self.filename`` and import its
        translations back into ``self.package``.

        :param import_from_source: when True, take the text from each
            <source> element instead of the translated <target>.
        """
        from exe.engine.beautifulsoup import BeautifulSoup

        # Strip the CDATA wrappers before parsing.  ``with`` guarantees the
        # file handle is released even if read() raises (the original
        # leaked it on error).
        with open(self.filename) as fp:
            data = fp.read().replace(CDATA_BEGIN, "").replace(CDATA_END, "")
        bs = BeautifulSoup(data)

        for transunit in bs.findAll("trans-unit"):
            item_id = transunit.get("id", None)
            if item_id is None:
                log.info("Item id not found: %s" % item_id)
                continue

            # Resolve which package field/node this trans-unit targets.
            field = self.getFieldFromPackage(self.package, item_id)
            if field is None:
                log.info("Field not found: %s" % item_id)
                continue

            if import_from_source:
                tar = transunit.find("source")
            else:
                tar = transunit.find("target")

            if item_id.endswith("title"):
                # It's a idevice, set the title
                field.set_title(u" ".join([unicode(u) for u in tar.contents]))
                log.debug("Title set for: %s" % item_id)
            elif item_id.endswith("nodename"):
                # It's a node, set the title
                field.setTitle(u" ".join([unicode(u) for u in tar.contents]))
                log.debug("Title set for: %s" % item_id)
            else:
                # It's a field: store raw content and re-persist it.
                field.content_w_resourcePaths = u" ".join([unicode(u) for u in tar.contents])
                field.TwistedRePersist()
                log.debug("Content set for: %s" % item_id)

            self.package.isChanged = True
Example #4
0
    def parseAndImport(self, import_from_source=False):
        """
        Parse the XLIFF file at ``self.filename`` and import its
        translations back into ``self.package``.

        :param import_from_source: when True, take the text from each
            <source> element instead of the translated <target>.
        """
        from exe.engine.beautifulsoup import BeautifulSoup

        # ``with`` guarantees the file handle is released even if read()
        # raises (the original leaked it on error).
        with open(self.filename) as fp:
            bs = BeautifulSoup(fp.read())

        for transunit in bs.findAll('trans-unit'):
            item_id = transunit.get('id', None)
            if item_id is None:
                log.info('Item id not found: %s' % item_id)
                continue

            # Resolve which package field/node this trans-unit targets.
            field = self.getFieldFromPackage(self.package, item_id)
            if field is None:
                log.info('Field not found: %s' % item_id)
                continue

            if import_from_source:
                tar = transunit.find('source')
            else:
                tar = transunit.find('target')

            if item_id.endswith('title'):
                # It's a idevice, set the title
                field.set_title(u' '.join([unicode(u) for u in tar.contents]))
                log.debug('Title set for: %s' % item_id)
            elif item_id.endswith('nodename'):
                # It's a node, set the title
                field.setTitle(u' '.join([unicode(u) for u in tar.contents]))
                log.debug('Title set for: %s' % item_id)
            else:
                # It's a field: join the contents and strip CDATA wrappers
                # from the stored content (this variant keeps them in the
                # parsed soup and removes them only here).
                field.content_w_resourcePaths = u' '.join(
                    [unicode(u) for u in tar.contents]
                ).replace(CDATA_BEGIN, "").replace(CDATA_END, "")
                field.TwistedRePersist()
                log.debug('Content set for: %s' % item_id)
                # NOTE(review): unlike the sibling implementation, this
                # variant never sets self.package.isChanged -- confirm
                # whether that is intentional.
Example #5
0
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
<title>eXe</title>
<style type="text/css">
@import url(eXe_manual.css);
</style>
</head>
<body>
"""
html_epilogue = """</body></html>
"""

# Base of the wiki all collection page links are relative to.
base_url = "http://wikieducator.org/"
# NOTE(review): `url` is defined elsewhere in the original script -- it is
# expected to point at a WikiEducator collection page.
collection = urllib2.urlopen(url)
soup = BeautifulSoup(collection)

# make sure the URL is a collection, and not a "page not found" page
if not soup.find("span", {"class": "mw-headline"}):
    print "missing or malformed collection page"
    sys.exit()

collection_title = str(soup.find("span", {"class": "mw-headline"}).string).strip()

print "fetching manual..."
sys.stdout.flush()

# Each <dd> in the collection page holds a link to one manual page.
for page in soup("dd"):
    if not page.a:
        continue
    print "  ", page.a.string,
Example #6
0
 def getAppletcodeDescartes(self, filename):
     """
     xhtml string for DescartesApplet.

     Fetches `filename` (an .html/.htm URL, optionally prefixed with a
     scene number and a comma), finds the Descartes <applet> elements in
     it, and returns the markup of the applet selected by the global
     SCENE_NUM, rewritten (codebase, local files) for eXe. Returns an
     empty string when nothing usable is found, or None when the plain
     URL is unreachable.
     """
     global SCENE_NUM
     html = ""
     if not filename.endswith(".jar"):
         if filename.endswith(".html") or filename.endswith(".htm"):
             from exe.engine.beautifulsoup import BeautifulSoup, BeautifulStoneSoup
             import urllib2
             if filename.find(",") == -1:
                 # firstly verify the URL is reachable, or come back:
                 if self.verifyConn(filename) == False:
                     assert self.parentNode.package, _('Sorry, this URL is unreachable')
                     return
                 # filename is reachable, go on:
                 htmlbytes = urllib2.urlopen(filename)
             else:
                 if self.verifyConn(filename[2:]) == False:
                     # BUG FIX: the original `return html == ''` returned the
                     # boolean True/False of a comparison; return the (empty)
                     # html string itself.
                     return html
                 htmlbytes = urllib2.urlopen(filename[2:])
             content = htmlbytes.read()
             soup = BeautifulSoup(content)
             i = 0
             appletslist = []
             # Classic Descartes applets (no plugin needed):
             for ap_old in soup.findAll("applet",{"code":"Descartes.class"}):
                 for resource in reversed(self.userResources):
                     if resource._storageName != ap_old["archive"]:
                         resource.delete()
                 global DESC_PLUGIN
                 DESC_PLUGIN = 0
                 ap_old["codebase"] = "./"
                 appletslist.append(ap_old)
             # descinst.* variants require the descinst.jar plugin:
             for ap_new in soup.findAll("applet",{"code":"descinst.Descartes.class"}):
                 DESC_PLUGIN = 1
                 for resource in reversed(self.userResources):
                     if resource._storageName != 'descinst.jar':
                         resource.delete()
                 ap_new["codebase"] = "./"
                 appletslist.append(ap_new)
             for ap_supernew in soup.findAll("applet",{"code":"descinst.DescartesWeb2_0.class"}):
                 DESC_PLUGIN = 1
                 for resource in reversed(self.userResources):
                     if resource._storageName != 'descinst.jar':
                         resource.delete()
                 ap_supernew["codebase"] = "./"
                 appletslist.append(ap_supernew)
             # TO_DO sometimes applets are included in frame labels
             # (no applets found in the url).

             # if no applet was found:
             if appletslist == []:
                 # BUG FIX: the original `html == ''` was a no-op comparison,
                 # not an assignment; html is already "" here.
                 return html

             # finally: pick the applet matching the requested scene number.
             # BUG FIX: umod defaults to '' so that html is defined even when
             # SCENE_NUM exceeds the number of applets found (the original
             # raised NameError in that case).
             umod = ''
             for x in appletslist:
                 if i == SCENE_NUM - 1:
                     u = unicode(x)
                     umod = self.downloadFiles(u)
                     break
                 i = i + 1
             htmlbytes.close()
             html = umod
     # now html has the code of the applet for eXe:
     return html
Example #7
0
 def downloadFiles(self, stringapplet):
     """
     only for DescartesApplet initially; three jobs:
     1 look for image and macros files in the URL indicated by the user,
     2 modify applet code for a correct exe detection of them after this,
     3 download and store them into the exe project (absolute urls are required).
     Return the code modified.

     :param stringapplet: unicode markup of a single <applet> element.
     :return: the applet markup with image paths rewritten to local names.
     """
     from exe.engine.beautifulsoup import BeautifulSoup, BeautifulStoneSoup
     import re
     import urllib
     import urllib2
     import string
     import os
     # import urllib.request
     stringappletmod = stringapplet
     soup = BeautifulSoup(stringapplet)
     
     # ONE: image files:
     # Descartes <param> keys (in several languages) whose values name
     # image files.
     key_image = ['archivo=', 'imagem_de_fundo=', 'imagem=', 'imagen=', 'file=', 'fitxer=',
                          'artxibo=', 'image=', 'bg_image=', 'imatge=', 'immagine=', 'irudia=',
                          'irundia=', 'fichier=', 'imaxe=', 'arquivo=', 'immagine_fondo=']
     # paths to the images indicated in the applet code:
     imageslist = []
     for x in key_image:
         if string.find(stringapplet, x) != -1:
             # Match e.g. archivo='dir/pic.png' and capture the quoted path.
             expression = r"%s'([\w\./]+)'" % x
             patron = re.compile(expression)
             for tag in soup.findAll('param'):
                 result = patron.search(tag['value'])
                 if result:
                     if result.group(1) not in imageslist:
                         imageslist.append(result.group(1))
     # modify applet code:
     urlimageslist = []
     for im in imageslist: 
         # put as locals the images' path inside exe editor...
         stringappletmod = stringappletmod.replace(im,im[im.rfind("/")+1:]) 
         # from imageslist, it's neccesary to create the list of absolute paths to the image
         # files because we want to download this images and load them in the project:
         # first quit scene number
         # NOTE(review): `url` is not defined in this method -- presumably a
         # module-level global holding the applet's source URL; confirm.
         urlnoesc = url[url.find(",")+1:]
         # cut the right side of the last /:
         urlcut = urlnoesc[: urlnoesc.rfind("/")]
         # and extend with the image from the applet code:
         urlimageslist.append(urlcut+"/"+im)
     # repeated no thanks:
     urlimageslist = list(set(urlimageslist))
     # do not forget that it could be image_down and image_over versions
     # of the file in the same place, so... a new extended list:
     urlimgslistextended = []
     for pathimg in urlimageslist:     
         # we trick to urlimageslist adding files that haven't been detected really 
         if pathimg not in urlimgslistextended:
             urlimgslistextended.append(pathimg)
             if string.find(pathimg, '.png') != -1:
                 urlimgslistextended.append(pathimg.replace('.png', '_down.png'))
                 urlimgslistextended.append(pathimg.replace('.png', '_over.png'))
             if string.find(pathimg, '.jpg') != -1:
                 urlimgslistextended.append(pathimg.replace('.jpg', '_down.jpg'))
                 urlimgslistextended.append(pathimg.replace('.jpg', '_over.jpg'))
             if string.find(pathimg, '.gif') != -1:
                 urlimgslistextended.append(pathimg.replace('.gif', '_down.gif')) 
                 urlimgslistextended.append(pathimg.replace('.gif', '_over.gif'))                
     urlimgslistextended = list(set(urlimgslistextended))
     # now we can: download all you can find:
     for pathimgext in urlimgslistextended:
         # the clean name of the image file
         img = pathimgext[pathimgext.rfind("/")+1:]                
         # firstly to test the existence of the file:
         try:
             resp = urllib2.urlopen(pathimgext)
         except urllib2.URLError, e:
             if not hasattr(e, "code"):
                 raise
             resp = e            
         try:
         # download whith its original name:                
             img_down = urllib.urlretrieve(pathimgext, img)
         except:
             # NOTE(review): bare except swallows every failure, and
             # `img_down` is left unbound, so the check below raises
             # NameError on the first failed download -- confirm intent.
             print 'Unable to download file'           
         # be sure the file was found:
         if img_down[1].maintype == 'image':
             self.uploadFile(img_down[0])
         os.remove(img_down[0])
def loadNodesIdevices(node, s):
    """
    Burst the idevice <div>s found in the HTML string `s` into `node`.

    Every div whose class ends in "Idevice" is converted back into the
    matching eXe idevice via burstIdevice(); unrecognized classes, and
    pages with no idevice divs at all, fall back to a Free Text idevice.

    WARNING: none of the idevices yet re-attach their media, but they do
    attempt to re-attach images and other links.  Known gaps: Wikipedia
    images with accents, Gallery popup html, Magnifier and Applet file
    bursting are incomplete.
    """
    # class attribute -> idevice name understood by burstIdevice();
    # replaces the original 18-branch elif chain.  The first four are
    # Generic iDevices, the rest are specialized ones.
    burstable = {
        "activityIdevice": "Activity",
        "objectivesIdevice": "Objectives",
        "preknowledgeIdevice": "Preknowledge",
        "readingIdevice": "Reading Activity",
        "RssIdevice": "RSS",
        "WikipediaIdevice": "Wiki Article",
        "ReflectionIdevice": "Reflection",
        "GalleryIdevice": "Image Gallery",
        "ImageMagnifierIdevice": "Image Magnifier",
        "AppletIdevice": "Java Applet",
        "ExternalUrlIdevice": "External Web Site",
        "ClozeIdevice": "Cloze Activity",
        "FreeTextIdevice": "Free Text",
        "CasestudyIdevice": "Case Study",
        "MultichoiceIdevice": "Multi-choice",
        "MultiSelectIdevice": "Multi-select",
        "QuizTestIdevice": "SCORM Quiz",
        "TrueFalseIdevice": "True-False Question",
    }
    soup = BeautifulSoup(s)
    body = soup.find("body")

    if body:
        idevices = body.findAll(name="div", attrs={"class": re.compile("Idevice$")})
        if len(idevices) > 0:
            for i in idevices:
                cls = i.attrMap["class"]
                if cls in burstable:
                    idevice = burstIdevice(burstable[cls], i, node)
                else:
                    # NOTE: no custom idevices burst yet,
                    # nor any deprecated idevices. Just burst into a FreeText:
                    log.warn("unburstable idevice " + cls + "; bursting into Free Text")
                    idevice = burstIdevice("Free Text", i, node)

        else:
            # no idevices listed on this page,
            # just create a free-text for the entire page.
            # BUG FIX: the original passed the loop variable `i`, undefined
            # on this branch; burst the whole body instead.
            log.warn("no idevices found on this node, bursting into Free Text.")
            idevice = burstIdevice("Free Text", body, node)

    else:
        log.warn("unable to read the body of this node.")
Example #9
0
def loadNodesIdevices(node, s):
    """
    Burst the idevice <div>s found in the HTML string `s` into `node`.

    Every div whose class ends in "Idevice" is converted back into the
    matching eXe idevice via burstIdevice(); unrecognized classes, and
    pages with no idevice divs at all, fall back to a Free Text idevice.

    WARNING: none of the idevices yet re-attach their media, but they do
    attempt to re-attach images and other links.
    """
    # class attribute -> idevice name understood by burstIdevice();
    # replaces the original 22-branch elif chain.  Includes the four
    # dev* idevices added by kthamm 111028.
    burstable = {
        'activityIdevice': 'Activity',
        'objectivesIdevice': 'Objectives',
        #added kthamm 111028
        'devsummaryIdevice': 'Devsummary',
        'devpreviewIdevice': 'Devpreview',
        'devresourceIdevice': 'Devresource',
        'devdiscussionIdevice': 'Devdiscussion',
        #end added kthamm
        'preknowledgeIdevice': 'Preknowledge',
        'readingIdevice': 'Reading Activity',
        'RssIdevice': 'RSS',
        'WikipediaIdevice': 'Wiki Article',
        'ReflectionIdevice': 'Reflection',
        'GalleryIdevice': 'Image Gallery',
        'ImageMagnifierIdevice': 'Image Magnifier',
        'AppletIdevice': 'Java Applet',
        'ExternalUrlIdevice': 'External Web Site',
        'ClozeIdevice': 'Cloze Activity',
        'FreeTextIdevice': 'Free Text',
        'CasestudyIdevice': 'Case Study',
        'MultichoiceIdevice': 'Multi-choice',
        'MultiSelectIdevice': 'Multi-select',
        'QuizTestIdevice': 'SCORM Quiz',
        'TrueFalseIdevice': 'True-False Question',
    }
    soup = BeautifulSoup(s)
    body = soup.find('body')

    if body:
        idevices = body.findAll(name='div',
                attrs={'class' : re.compile('Idevice$') })
        if len(idevices) > 0:
            for i in idevices:
                cls = i.attrMap['class']
                if cls in burstable:
                    idevice = burstIdevice(burstable[cls], i, node)
                else:
                    # NOTE: no custom idevices burst yet,
                    # nor any deprecated idevices. Just burst into a FreeText:
                    log.warn("unburstable idevice " + cls +
                            "; bursting into Free Text")
                    idevice = burstIdevice('Free Text', i, node)

        else:
            # no idevices listed on this page,
            # just create a free-text for the entire page.
            # BUG FIX: the original passed the loop variable `i`, undefined
            # on this branch; burst the whole body instead.
            log.warn("no idevices found on this node, bursting into Free Text.")
            idevice = burstIdevice('Free Text', body, node)

    else:
        log.warn("unable to read the body of this node.")
Example #10
0
# Static XHTML wrapper emitted around the fetched manual pages.
html_prologue = '''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
<title>eXe</title>
<style type="text/css">
@import url(eXe_manual.css);
</style>
</head>
<body>
'''
html_epilogue = '''</body></html>
'''
# Base of the wiki all collection page links are relative to.
base_url = 'http://wikieducator.org/'
# NOTE(review): `url` is defined elsewhere in the original script -- it is
# expected to point at a WikiEducator collection page.
collection = urllib2.urlopen(url)
soup = BeautifulSoup(collection)
# A real collection page always has an mw-headline span; its absence means
# the URL was wrong or the page was not found.
if not soup.find('span', {'class': 'mw-headline'}):
    print 'missing or malformed collection page'
    sys.exit()
collection_title = str(soup.find('span', {'class': 'mw-headline'}).string).strip()
print "fetching manual..."
sys.stdout.flush()
# Each <dd> in the collection page holds a link to one manual page;
# fetch and parse each linked page in turn.
for page in soup('dd'):
    if not page.a:
        continue
    print '  ', page.a.string,
    sys.stdout.flush()
    page_url = url_join(base_url, page.a['href'])
    sys.stdout.flush()
    p1 = urllib2.urlopen(page_url)
    p1_soup = BeautifulSoup(p1)
Example #11
0
 def _computeLinks(self):
     """
     Scan every imported HTML (then CSS) resource and record Link objects
     for each reference it makes to another imported resource.

     Pass 1: parse each HTML file, fix up its encoding, and match tag
     attribute values against the relative paths of sibling resources.
     Pass 2: exhaustively re-scan raw HTML and CSS text for any remaining
     relative-path references.  Progress is reported to self.client and
     the whole scan aborts early when self.cancel is set.
     """
     self._computeRelpaths()
     htmls = self.resources['mimes']['text/html']
     total = len(htmls)
     i = 1
     for url in htmls:
         if self.cancel:
            return
         if self.client:
             self.client.call('eXe.app.getController("Toolbar").updateImportProgressWindow',_(u'Analyzing HTML file labels %d of %d: %s') % (i, total, str(url)))
         content = open(url.path).read()
         # Guess the encoding, then trust the HTML's own declaration if the
         # parsed soup exposes one.
         encoding = detect(content)['encoding']
         ucontent = unicode(content,encoding)
         soup = BeautifulSoup(ucontent,fromEncoding=encoding)
         declaredHTMLEncoding = getattr(soup, 'declaredHTMLEncoding')
         if declaredHTMLEncoding:
             ucontent = UnicodeDammit(content,[declaredHTMLEncoding]).unicode
             encoding = declaredHTMLEncoding
         else:
             pass
         url.setContent(ucontent,encoding)
         url.setSoup(soup)
         for tag in soup.findAll():
             if self.cancel:
                 return
             if not tag.attrs:
                 continue
             matches = []
             # Compare each (unquoted) attribute value against every
             # relative path known for this file's directory.
             for key, value in tag.attrs:
                 if value == "":
                     continue
                 unq_value = unquote(value)
                 unq_low_value = unquote(value.lower())
                 for l, rl in self.resources['urls'][url.parentpath].relpaths:
                     low_rl = rl.lower()
                     if rl in unq_value:
                         L = Link(self.resources['urls'][l],rl,url,tag,key,rl)
                         matches.append(L)
                     elif low_rl in unq_value:
                         L = Link(self.resources['urls'][l],rl,url,tag,key,low_rl)
                         matches.append(L)
                     elif l in unq_value:
                         L = Link(self.resources['urls'][l],rl,url,tag,key,l)
                         matches.append(L)
             # Keep only the most specific matches: drop any match whose
             # relative path is a substring of another match's path.
             matches_final = []
             for l1 in matches:
                 matches_ = [ m for m in matches if m != l1 ]
                 found = False
                 for l2 in matches_:
                     if re.search(re.escape(l1.relative),l2.relative):
                         found = True
                 if not found:
                     matches_final.append(l1)
             if matches_final:
                 for match in matches_final:
                     url.addLink( match )
                     url.addRLink( str(match.url) )
         i += 1
     # Pass 2: raw-text scan over CSS files plus the HTML files again.
     csss = self.resources['mimes']['text/css'] if 'text/css' in self.resources['mimes'].keys() else None
     csss_and_htmls = csss + htmls if csss else htmls
     total = len(csss_and_htmls)
     i = 1
     for url in csss_and_htmls:
         if self.cancel:
             return
         if url.mime == 'text/css':
             tipo = 'CSS'
         else:
             tipo = 'HTML'
         content = url.getContent()
         if not content:
             # CSS files were not loaded in pass 1; read and decode now.
             content = open(url.path).read()
             encoding = detect(content)['encoding']
             content = unicode(content,encoding)
             url.setContent(content,encoding)
         if self.client:
             self.client.call('eXe.app.getController("Toolbar").updateImportProgressWindow',_(u'Exhaustively analyzed file %s %d of %d: %s') % (tipo, i, total, str(url)))
         matches = []
         for l, rl in self.resources['urls'][url.parentpath].relpaths:
             low_rl = rl.lower()
             if rl in content:
                 L = Link(self.resources['urls'][l],rl,url,match=rl)
                 matches.append(L)
             elif low_rl in content:
                 L = Link(self.resources['urls'][l],rl,url,match=low_rl)
                 matches.append(L)
         # Same most-specific filtering as in pass 1.
         matches_final = []
         for l1 in matches:
             matches_ = [ m for m in matches if m != l1 ]
             found = False
             for l2 in matches_:
                 if re.search(re.escape(l1.relative),l2.relative):
                     found = True
             if not found:
                 matches_final.append(l1)
         if matches_final:
             for match in matches_final:
                 # Avoid registering the same relative link twice.
                 if not [ link for link in url.links if link.relative == match.relative ]:
                     url.addLink( match )
                     url.addRLink( str(match.url) )
         i += 1
Example #12
0
            page = FILE.read()
            FILE.close()
        except IOError, error:
            log.warning(unicode(error))
            self.article.content = _(u"Unable to read file: %s.") % path
            return

        page = u'<div>' + unicode(page, "utf8") + u'</div>'
        # FIXME avoid problems with numeric entities in attributes
        page = page.replace(u'&#160;', u'&nbsp;')

        # avoidParserProblems is set to False because BeautifulSoup's
        # cleanup was causing a "concatenating Null+Str" error,
        # and Wikipedia's HTML doesn't need cleaning up.
        # BeautifulSoup is faster this way too.
        soup = BeautifulSoup(page, False)
        content = soup.first('div')

        # remove the wiktionary, wikimedia commons, and categories boxes
        #  and the protected icon and the needs citations box
        if content:
            infoboxes = content.findAll('div',
                    {'class' : 'infobox sisterproject'})
            [infobox.extract() for infobox in infoboxes]
            catboxes = content.findAll('div', {'id' : 'catlinks'})
            [catbox.extract() for catbox in catboxes]
            amboxes = content.findAll('table',
                    {'class' : re.compile(r'.*\bambox\b.*')})
            [ambox.extract() for ambox in amboxes]
            protecteds = content.findAll('div', {'id' : 'protected-icon'})
            [protected.extract() for protected in protecteds]