Code Example #1
    def __init__(self, parent):
        """
        Initialize
        """
        RenderableResource.__init__(self, parent)

        #Extract source and target langs from dublincore.xul
        xul = Path(self.config.xulDir).joinpath('templates', 'dublincore.xul')
        bs = BeautifulSoup(xul.text())
        self.langs = bs.find(id="dc_language")
        self.updateLocaleNames()
Code Example #2
    def __init__(self, parent):
        """
        Initialize
        """
        RenderableResource.__init__(self, parent)

        #Extract source and target langs from dublincore.xul
        xul = self.config.webDir / 'templates' / 'dublincore.xul'
        bs = BeautifulSoup(xul.text())
        self.langs = bs.find(id="dc_language")
        self.updateLocaleNames()
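
Both constructors above do the same thing: parse markup with the project's bundled BeautifulSoup 3 and fetch a single element by id. A minimal, self-contained sketch of that lookup (the inline markup here is hypothetical; find(id=...) is the same call the constructors use):

from exe.engine.beautifulsoup import BeautifulSoup

# hypothetical stand-in for the dublincore.xul content read above
markup = '<select id="dc_language"><option value="en">English</option></select>'
bs = BeautifulSoup(markup)
langs = bs.find(id="dc_language")  # same keyword lookup as self.langs above
print langs["id"]  # -> dc_language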
Code Example #3
def loadNodesIdevices(node, s):
    soup = BeautifulSoup(s)
    body = soup.find('body')
    if body:
        idevices = body.findAll(name='div', 
                attrs={'class' : re.compile('Idevice$') })
        if len(idevices) > 0:
            for i in idevices: 
                if i.attrMap['class']=="activityIdevice":
                    idevice = burstIdevice('Activity', i, node)
                elif i.attrMap['class']=="objectivesIdevice":
                    idevice = burstIdevice('Objectives', i, node)
                elif i.attrMap['class']=="preknowledgeIdevice":
                    idevice = burstIdevice('Preknowledge', i, node)
                elif i.attrMap['class']=="readingIdevice":
                    idevice = burstIdevice('Reading Activity', i, node)
                elif i.attrMap['class']=="RssIdevice":
                    idevice = burstIdevice('RSS', i, node)
                elif i.attrMap['class']=="WikipediaIdevice":
                    idevice = burstIdevice('Wiki Article', i, node)
                elif i.attrMap['class']=="ReflectionIdevice":
                    idevice = burstIdevice('Reflection', i, node)
                elif i.attrMap['class']=="GalleryIdevice":
                    idevice = burstIdevice('Image Gallery', i, node)
                elif i.attrMap['class']=="ImageMagnifierIdevice":
                    idevice = burstIdevice('Image Magnifier', i, node)
                elif i.attrMap['class']=="AppletIdevice":
                    idevice = burstIdevice('Java Applet', i, node)
                elif i.attrMap['class']=="ExternalUrlIdevice":
                    idevice = burstIdevice('External Web Site', i, node)
                elif i.attrMap['class']=="ClozeIdevice":
                    idevice = burstIdevice('Cloze Activity', i, node)
                elif i.attrMap['class']=="FreeTextIdevice":
                    idevice = burstIdevice('Free Text', i, node)
                elif i.attrMap['class']=="CasestudyIdevice":
                    idevice = burstIdevice('Case Study', i, node)
                elif i.attrMap['class']=="MultichoiceIdevice":
                    idevice = burstIdevice('Multi-choice', i, node)
                elif i.attrMap['class']=="MultiSelectIdevice":
                    idevice = burstIdevice('Multi-select', i, node)
                elif i.attrMap['class']=="QuizTestIdevice":
                    idevice = burstIdevice('SCORM Quiz', i, node)
                elif i.attrMap['class']=="TrueFalseIdevice":
                    idevice = burstIdevice('True-False Question', i, node)
                else:
                    log.warn("unburstable idevice " + i.attrMap['class'] + 
                            "; bursting into Free Text")
                    idevice = burstIdevice('Free Text', i, node)
        else:
            log.warn("no idevices found on this node, bursting into Free Text.")
            idevice = burstIdevice('Free Text', body, node)
    else:
        log.warn("unable to read the body of this node.")
Code Example #4
File: xliffimport.py Project: RichDijk/eXe
    def parseAndImport(self, import_from_source=False):

        from exe.engine.beautifulsoup import BeautifulSoup

        fp = open(self.filename)
        bs = BeautifulSoup(fp.read().replace(CDATA_BEGIN, "").replace(CDATA_END, ""))
        fp.close()

        for transunit in bs.findAll("trans-unit"):
            item_id = transunit.get("id", None)
            if item_id is None:
                log.info("Item id not found: %s" % item_id)
                continue

            field = self.getFieldFromPackage(self.package, item_id)
            if field is None:
                log.info("Field not found: %s" % item_id)
                continue

            if import_from_source:
                tar = transunit.find("source")
            else:
                tar = transunit.find("target")

            if item_id.endswith("title"):
                # It's an iDevice, set the title
                field.set_title(u" ".join([unicode(u) for u in tar.contents]))
                log.debug("Title set for: %s" % item_id)
            elif item_id.endswith("nodename"):
                # It's a node, set the title
                field.setTitle(u" ".join([unicode(u) for u in tar.contents]))
                log.debug("Title set for: %s" % item_id)
            else:
                # It's a field
                field.content_w_resourcePaths = u" ".join([unicode(u) for u in tar.contents])
                field.TwistedRePersist()
                log.debug("Content set for: %s" % item_id)

            self.package.isChanged = True
Code Example #5
    def parseAndImport(self, import_from_source=False):

        from exe.engine.beautifulsoup import BeautifulSoup
        fp = open(self.filename)
        bs = BeautifulSoup(fp.read().replace(CDATA_BEGIN, "").replace(CDATA_END, ""))
        fp.close()
        
        for transunit in bs.findAll('trans-unit'):
            item_id = transunit.get('id', None)
            if item_id is None:
                log.info('Item id not found: %s' % item_id)
                continue

            field = self.getFieldFromPackage(self.package, item_id)
            if field is None:
                log.info('Field not found: %s' % item_id)
                continue

            if import_from_source:
                tar = transunit.find('source')
            else:
                tar = transunit.find('target')

            if item_id.endswith('title'):
                # It's an iDevice, set the title
                field.set_title(u' '.join([unicode(u) for u in tar.contents]))
                log.debug('Title set for: %s' % item_id)
            elif item_id.endswith('nodename'):
                # It's a node, set the title
                field.setTitle(u' '.join([unicode(u) for u in tar.contents]))
                log.debug('Title set for: %s' % item_id)
            else:
                # It's a field
                field.content_w_resourcePaths = u' '.join([unicode(u) for u in tar.contents])
                field.TwistedRePersist()
                log.debug('Content set for: %s' % item_id)

            self.package.isChanged = True
Code Example #6
    def parseAndImport(self, import_from_source=False):

        from exe.engine.beautifulsoup import BeautifulSoup
        fp = open(self.filename)
        bs = BeautifulSoup(fp.read())
        fp.close()
        
        for transunit in bs.findAll('trans-unit'):
            item_id = transunit.get('id', None)
            if item_id is None:
                log.info('Item id not found: %s' % item_id)
                continue

            field = self.getFieldFromPackage(self.package, item_id)
            if field is None:
                log.info('Field not found: %s' % item_id)
                continue

            if import_from_source:
                tar = transunit.find('source')
            else:
                tar = transunit.find('target')

            if item_id.endswith('title'):
                # It's an iDevice, set the title
                field.set_title(u' '.join([unicode(u) for u in tar.contents]))
                log.debug('Title set for: %s' % item_id)
            elif item_id.endswith('nodename'):
                # It's a node, set the title
                field.setTitle(u' '.join([unicode(u) for u in tar.contents]))
                log.debug('Title set for: %s' % item_id)
            else:
                # It's a field
                field.content_w_resourcePaths = u' '.join([unicode(u) for u in tar.contents])\
.replace(CDATA_BEGIN, "").replace(CDATA_END, "")
                field.TwistedRePersist()
                log.debug('Content set for: %s' % item_id)
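
All three parseAndImport variants above walk the same XLIFF structure; stripped to its core, the loop is the following sketch (same Python 2 and bundled BeautifulSoup assumptions as the excerpts, CDATA markers assumed already removed):

def iterTransUnits(xliff_text):
    # yield (item_id, target_text) pairs from an XLIFF document
    bs = BeautifulSoup(xliff_text)
    for transunit in bs.findAll('trans-unit'):
        item_id = transunit.get('id', None)
        if item_id is None:
            continue
        tar = transunit.find('target')
        if tar is None:
            continue
        yield item_id, u' '.join([unicode(u) for u in tar.contents])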
Code Example #7
File: appletidevice.py Project: RichDijk/eXe
 def getAppletcodeDescartes(self, filename):
     """
     xhtml string for DescartesApplet
     """
     global SCENE_NUM
     html = ""
     if not filename.endswith(".jar"):
         if filename.endswith(".html") or filename.endswith(".htm"):
             from exe.engine.beautifulsoup import BeautifulSoup, BeautifulStoneSoup   
             import urllib2
             if filename.find(",") == -1:    
                 # firstly verify the URL is reachable, or come back:
                 if self.verifyConn(filename) == False:
                     assert self.parentNode.package, _('Sorry, this URL is unreachable') 
                     return
                 # filename is reachable, go on:                    
                 htmlbytes = urllib2.urlopen(filename)
             else:
                 if self.verifyConn(filename[2:]) == False:
                     return html  # unreachable URL: html is still the empty string
                 htmlbytes = urllib2.urlopen(filename[2:])
             content = htmlbytes.read()
             # content = content.replace('""','"') Galo swears it won't be necessary
             soup = BeautifulSoup(content)
             i = 0
             appletslist = []
             for ap_old in soup.findAll("applet",{"code":"Descartes.class"}):
                 for resource in reversed(self.userResources):
                     if resource._storageName != ap_old["archive"]:
                         resource.delete()
                 global DESC_PLUGIN
                 DESC_PLUGIN = 0
                 ap_old["codebase"] = "./"
                 appletslist.append(ap_old)   
             for ap_new in soup.findAll("applet",{"code":"descinst.Descartes.class"}):
                 DESC_PLUGIN = 1
                 for resource in reversed(self.userResources):
                     if resource._storageName != 'descinst.jar':
                         resource.delete()
                 ap_new["codebase"] = "./"
                 appletslist.append(ap_new)
             for ap_supernew in soup.findAll("applet",{"code":"descinst.DescartesWeb2_0.class"}):
                 DESC_PLUGIN = 1
                 for resource in reversed(self.userResources):
                     if resource._storageName != 'descinst.jar':
                         resource.delete()
                 ap_supernew["codebase"] = "./"
                 appletslist.append(ap_supernew)
             # TO_DO sometimes applets are embedded in <frame> tags (no applets found in the url);
             # it could begin like this:
             #    if appletslist == []: # because no <applet> was found
             #    for ap_frame in soup.findAll("frame src"): # could be problems with that whitespace
             #        DESC_PLUGIN = 1
             #        for resource in reversed(self.userResources):
             #            if resource._storageName != 'descinst.jar':
             #                resource.delete()
             #        if ap_frame["codebase"]:
             #            ap_frame["codebase"] = "./"
             #        appletslist.append(ap_frame)                      
             
             # if no applet was found, return the empty string:
             if appletslist == []:
                 return html
             
             # finally, pick the applet that matches the requested scene:
             umod = ''
             for x in appletslist:
                 if i == SCENE_NUM - 1:
                     u = unicode(x)
                     umod = self.downloadFiles(u)
                     break
                 i = i + 1
             htmlbytes.close()
             html = umod
     # now html has the code of the applet for eXe:
     return html
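
verifyConn is a method of this iDevice whose body is not shown; the filename.find(",") test and the filename[2:] slices suggest the filename may arrive prefixed with a scene number ("2,http://..."). A minimal stand-in for the reachability check, using plain urllib2 (an assumption, not the project's actual implementation):

import urllib2

def isReachable(url, timeout=10):
    # hypothetical equivalent of self.verifyConn: True when the URL
    # answers at all, False on any network or HTTP failure
    try:
        urllib2.urlopen(url, None, timeout)
        return True
    except Exception:
        return False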
Code Example #8
 def downloadFiles(self, stringapplet):
     """
     only for DescartesApplet initially; three jobs:
     1 look for image and macros files in the URL indicated by the user,
     2 modify applet code for a correct exe detection of them after this,
     3 download and store them into the exe project (absolutes urls are required).
     Return the code modified.
     """
     from exe.engine.beautifulsoup import BeautifulSoup, BeautifulStoneSoup
     import re
     import urllib
     import urllib2
     import string
     import os
     # import urllib.request
     stringappletmod = stringapplet
     soup = BeautifulSoup(stringapplet)
     
     # ONE: image files:
     key_image = ['archivo=', 'imagem_de_fundo=', 'imagem=', 'imagen=', 'file=', 'fitxer=',
                          'artxibo=', 'image=', 'bg_image=', 'imatge=', 'immagine=', 'irudia=',
                          'irundia=', 'fichier=', 'imaxe=', 'arquivo=', 'immagine_fondo=']
     # paths to the images indicated in the applet code:
     imageslist = []
     for x in key_image:
         if string.find(stringapplet, x) != -1:
             expression = r"%s'([\w\./]+)'" % x
             patron = re.compile(expression)
             for tag in soup.findAll('param'):
                 result = patron.search(tag['value'])
                 if result:
                     if result.group(1) not in imageslist:
                         imageslist.append(result.group(1))
     # modify applet code:
     urlimageslist = []
     for im in imageslist:
         # make the image paths local inside the eXe editor...
         stringappletmod = stringappletmod.replace(im, im[im.rfind("/")+1:])
         # from imageslist we also need the absolute paths of the image files,
         # because we want to download these images and load them into the
         # project. NOTE: `url` is not defined in this excerpt; it is
         # presumably the applet page's URL, possibly carrying a scene-number
         # prefix ("N,http://..."), which is stripped first:
         urlnoesc = url[url.find(",")+1:]
         # cut at the last /:
         urlcut = urlnoesc[: urlnoesc.rfind("/")]
         # and append the image path from the applet code:
         urlimageslist.append(urlcut+"/"+im)
     # drop duplicates:
     urlimageslist = list(set(urlimageslist))
     # do not forget that it could be image_down and image_over versions
     # of the file in the same place, so... a new extended list:
     urlimgslistextended = []
     for pathimg in urlimageslist:     
         # pad urlimageslist with variant files that were not actually detected
         if pathimg not in urlimgslistextended:
             urlimgslistextended.append(pathimg)
             if string.find(pathimg, '.png') != -1:
                 urlimgslistextended.append(pathimg.replace('.png', '_down.png'))
                 urlimgslistextended.append(pathimg.replace('.png', '_over.png'))
             if string.find(pathimg, '.jpg') != -1:
                 urlimgslistextended.append(pathimg.replace('.jpg', '_down.jpg'))
                 urlimgslistextended.append(pathimg.replace('.jpg', '_over.jpg'))
             if string.find(pathimg, '.gif') != -1:
                 urlimgslistextended.append(pathimg.replace('.gif', '_down.gif')) 
                 urlimgslistextended.append(pathimg.replace('.gif', '_over.gif'))                
     urlimgslistextended = list(set(urlimgslistextended))
     # now download everything we can find:
     for pathimgext in urlimgslistextended:
         # the clean name of the image file
         img = pathimgext[pathimgext.rfind("/")+1:]                
         # first test that the file exists:
         try:
             resp = urllib2.urlopen(pathimgext)
         except urllib2.URLError, e:
             if not hasattr(e, "code"):
                 raise
             resp = e
         try:
             # download with its original name:
             img_down = urllib.urlretrieve(pathimgext, img)
         except:
             print 'Unable to download file'
             continue
         # make sure the file found is really an image:
         if img_down[1].maintype == 'image':
             self.uploadFile(img_down[0])
         os.remove(img_down[0])
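
The absolute image URLs above are rebuilt by hand with find/rfind slicing; the standard library does the same resolution. A sketch, assuming base is the applet page's URL with any scene-number prefix already stripped:

import urlparse

def absoluteImageUrl(base, im):
    # resolves im relative to the embedding page, which is what the
    # rfind("/") slicing above reconstructs manually
    return urlparse.urljoin(base, im)

# e.g. absoluteImageUrl('http://host/dir/page.html', 'images/x.png')
#      -> 'http://host/dir/images/x.png'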
Code Example #9
def loadNodesIdevices(node, s):
    soup = BeautifulSoup(s)
    body = soup.find("body")

    if body:
        idevices = body.findAll(name="div", attrs={"class": re.compile("Idevice$")})
        if len(idevices) > 0:
            for i in idevices:
                # WARNING: none of the idevices yet re-attach their media,
                # but they do attempt to re-attach images and other links.

                if i.attrMap["class"] == "activityIdevice":
                    idevice = burstIdevice("Activity", i, node)
                elif i.attrMap["class"] == "objectivesIdevice":
                    idevice = burstIdevice("Objectives", i, node)
                elif i.attrMap["class"] == "preknowledgeIdevice":
                    idevice = burstIdevice("Preknowledge", i, node)
                elif i.attrMap["class"] == "readingIdevice":
                    idevice = burstIdevice("Reading Activity", i, node)
                # the above are all Generic iDevices;
                # below are all others:
                elif i.attrMap["class"] == "RssIdevice":
                    idevice = burstIdevice("RSS", i, node)
                elif i.attrMap["class"] == "WikipediaIdevice":
                    # WARNING: Wiki problems loading images with accents, etc:
                    idevice = burstIdevice("Wiki Article", i, node)
                elif i.attrMap["class"] == "ReflectionIdevice":
                    idevice = burstIdevice("Reflection", i, node)
                elif i.attrMap["class"] == "GalleryIdevice":
                    # WARNING: Gallery problems with the popup html:
                    idevice = burstIdevice("Image Gallery", i, node)
                elif i.attrMap["class"] == "ImageMagnifierIdevice":
                    # WARNING: Magnifier missing major bursting components:
                    idevice = burstIdevice("Image Magnifier", i, node)
                elif i.attrMap["class"] == "AppletIdevice":
                    # WARNING: Applet missing file bursting components:
                    idevice = burstIdevice("Java Applet", i, node)
                elif i.attrMap["class"] == "ExternalUrlIdevice":
                    idevice = burstIdevice("External Web Site", i, node)
                elif i.attrMap["class"] == "ClozeIdevice":
                    idevice = burstIdevice("Cloze Activity", i, node)
                elif i.attrMap["class"] == "FreeTextIdevice":
                    idevice = burstIdevice("Free Text", i, node)
                elif i.attrMap["class"] == "CasestudyIdevice":
                    idevice = burstIdevice("Case Study", i, node)
                elif i.attrMap["class"] == "MultichoiceIdevice":
                    idevice = burstIdevice("Multi-choice", i, node)
                elif i.attrMap["class"] == "MultiSelectIdevice":
                    idevice = burstIdevice("Multi-select", i, node)
                elif i.attrMap["class"] == "QuizTestIdevice":
                    idevice = burstIdevice("SCORM Quiz", i, node)
                elif i.attrMap["class"] == "TrueFalseIdevice":
                    idevice = burstIdevice("True-False Question", i, node)
                else:
                    # NOTE: no custom idevices burst yet,
                    # nor any deprecated idevices. Just burst into a FreeText:
                    log.warn("unburstable idevice " + i.attrMap["class"] + "; bursting into Free Text")
                    idevice = burstIdevice("Free Text", i, node)

        else:
            # no idevices listed on this page,
            # just create a free-text for the entire page:
            log.warn("no idevices found on this node, bursting into Free Text.")
            idevice = burstIdevice("Free Text", i, node)

    else:
        log.warn("unable to read the body of this node.")
Code Example #10
File: appletidevice.py Project: RichDijk/eXe
 def downloadFiles(self, stringapplet):
     """
     only for DescartesApplet initially; three jobs:
     1 look for image and macros files in the URL indicated by the user,
     2 modify applet code for a correct exe detection of them after this,
     3 download and store them into the exe project (absolute urls are required).
     Return the code modified.
     """
     from exe.engine.beautifulsoup import BeautifulSoup, BeautifulStoneSoup
     import re
     import urllib
     import urllib2
     import string
     import os
     # import urllib.request
     stringappletmod = stringapplet
     soup = BeautifulSoup(stringapplet)
     
     # ONE: image files:
     key_image = ['archivo=', 'imagem_de_fundo=', 'imagem=', 'imagen=', 'file=', 'fitxer=',
                          'artxibo=', 'image=', 'bg_image=', 'imatge=', 'immagine=', 'irudia=',
                          'irundia=', 'fichier=', 'imaxe=', 'arquivo=', 'immagine_fondo=']
     # paths to the images indicated in the applet code:
     imageslist = []
     for x in key_image:
         if string.find(stringapplet, x) != -1:
             expression = r"%s'([\w\./]+)'" % x
             patron = re.compile(expression)
             for tag in soup.findAll('param'):
                 result = patron.search(tag['value'])
                 if result:
                     if result.group(1) not in imageslist:
                         imageslist.append(result.group(1))
     # modify applet code:
     urlimageslist = []
     for im in imageslist:
         # make the image paths local inside the eXe editor...
         stringappletmod = stringappletmod.replace(im, im[im.rfind("/")+1:])
         # from imageslist we also need the absolute paths of the image files,
         # because we want to download these images and load them into the
         # project. NOTE: `url` is not defined in this excerpt; it is
         # presumably the applet page's URL, possibly carrying a scene-number
         # prefix ("N,http://..."), which is stripped first:
         urlnoesc = url[url.find(",")+1:]
         # cut at the last /:
         urlcut = urlnoesc[: urlnoesc.rfind("/")]
         # and append the image path from the applet code:
         urlimageslist.append(urlcut+"/"+im)
     # drop duplicates:
     urlimageslist = list(set(urlimageslist))
     # do not forget that it could be image_down and image_over versions
     # of the file in the same place, so... a new extended list:
     urlimgslistextended = []
     for pathimg in urlimageslist:     
         # pad urlimageslist with variant files that were not actually detected
         if pathimg not in urlimgslistextended:
             urlimgslistextended.append(pathimg)
             if string.find(pathimg, '.png') != -1:
                 urlimgslistextended.append(pathimg.replace('.png', '_down.png'))
                 urlimgslistextended.append(pathimg.replace('.png', '_over.png'))
             if string.find(pathimg, '.jpg') != -1:
                 urlimgslistextended.append(pathimg.replace('.jpg', '_down.jpg'))
                 urlimgslistextended.append(pathimg.replace('.jpg', '_over.jpg'))
             if string.find(pathimg, '.gif') != -1:
                 urlimgslistextended.append(pathimg.replace('.gif', '_down.gif')) 
                 urlimgslistextended.append(pathimg.replace('.gif', '_over.gif'))                
     urlimgslistextended = list(set(urlimgslistextended))
     # now download everything we can find:
     for pathimgext in urlimgslistextended:
         # the clean name of the image file
         img = pathimgext[pathimgext.rfind("/")+1:]                
         # first test that the file exists:
         try:
             resp = urllib2.urlopen(pathimgext)
         except urllib2.URLError, e:
             if not hasattr(e, "code"):
                 raise
             resp = e
         try:
             # download with its original name:
             img_down = urllib.urlretrieve(pathimgext, img)
         except:
             print 'Unable to download file'
             continue
         # make sure the file found is really an image:
         if img_down[1].maintype == 'image':
             self.uploadFile(img_down[0])
         os.remove(img_down[0])
Code Example #11
File: package.py Project: kohnle-lernmodule/palama
def loadNodesIdevices(node, s):
    soup = BeautifulSoup(s)
    body = soup.find('body')

    if body:
        idevices = body.findAll(name='div', 
                attrs={'class' : re.compile('Idevice$') })
        if len(idevices) > 0:
            for i in idevices: 
                # WARNING: none of the idevices yet re-attach their media,
                # but they do attempt to re-attach images and other links.

                if i.attrMap['class']=="activityIdevice":
                    idevice = burstIdevice('Activity', i, node)
                elif i.attrMap['class']=="objectivesIdevice":
                    idevice = burstIdevice('Objectives', i, node)
                #added kthamm 111028 
                elif i.attrMap['class']=="devsummaryIdevice":
                    idevice = burstIdevice('Devsummary', i, node)
                elif i.attrMap['class']=="devpreviewIdevice":
                    idevice = burstIdevice('Devpreview', i, node)
                elif i.attrMap['class']=="devresourceIdevice":
                    idevice = burstIdevice('Devresource', i, node)
                elif i.attrMap['class']=="devdiscussionIdevice":
                    idevice = burstIdevice('Devdiscussion', i, node)
                #end added kthamm
                elif i.attrMap['class']=="preknowledgeIdevice":
                    idevice = burstIdevice('Preknowledge', i, node)
                elif i.attrMap['class']=="readingIdevice":
                    idevice = burstIdevice('Reading Activity', i, node)
                # the above are all Generic iDevices;
                # below are all others:
                elif i.attrMap['class']=="RssIdevice":
                    idevice = burstIdevice('RSS', i, node)
                elif i.attrMap['class']=="WikipediaIdevice":
                    # WARNING: Wiki problems loading images with accents, etc:
                    idevice = burstIdevice('Wiki Article', i, node)
                elif i.attrMap['class']=="ReflectionIdevice":
                    idevice = burstIdevice('Reflection', i, node)
                elif i.attrMap['class']=="GalleryIdevice":
                    # WARNING: Gallery problems with the popup html:
                    idevice = burstIdevice('Image Gallery', i, node)
                elif i.attrMap['class']=="ImageMagnifierIdevice":
                    # WARNING: Magnifier missing major bursting components:
                    idevice = burstIdevice('Image Magnifier', i, node)
                elif i.attrMap['class']=="AppletIdevice":
                    # WARNING: Applet missing file bursting components:
                    idevice = burstIdevice('Java Applet', i, node)
                elif i.attrMap['class']=="ExternalUrlIdevice":
                    idevice = burstIdevice('External Web Site', i, node)
                elif i.attrMap['class']=="ClozeIdevice":
                    idevice = burstIdevice('Cloze Activity', i, node)
                elif i.attrMap['class']=="FreeTextIdevice":
                    idevice = burstIdevice('Free Text', i, node)
                elif i.attrMap['class']=="CasestudyIdevice":
                    idevice = burstIdevice('Case Study', i, node)
                elif i.attrMap['class']=="MultichoiceIdevice":
                    idevice = burstIdevice('Multi-choice', i, node)
                elif i.attrMap['class']=="MultiSelectIdevice":
                    idevice = burstIdevice('Multi-select', i, node)
                elif i.attrMap['class']=="QuizTestIdevice":
                    idevice = burstIdevice('SCORM Quiz', i, node)
                elif i.attrMap['class']=="TrueFalseIdevice":
                    idevice = burstIdevice('True-False Question', i, node)
                else:
                    # NOTE: no custom idevices burst yet,
                    # nor any deprecated idevices. Just burst into a FreeText:
                    log.warn("unburstable idevice " + i.attrMap['class'] + 
                            "; bursting into Free Text")
                    idevice = burstIdevice('Free Text', i, node)

        else:
            # no idevices listed on this page,
            # just create a free-text for the entire page:
            log.warn("no idevices found on this node, bursting into Free Text.")
            idevice = burstIdevice('Free Text', body, node)

    else:
        log.warn("unable to read the body of this node.")
Code Example #12
def loadNodesIdevices(node, s):
    soup = BeautifulSoup(s)
    body = soup.find('body')

    if body:
        idevices = body.findAll(name='div', 
                attrs={'class' : re.compile('Idevice$') })
        if len(idevices) > 0:
            for i in idevices: 
                # WARNING: none of the idevices yet re-attach their media,
                # but they do attempt to re-attach images and other links.

                if i.attrMap['class']=="activityIdevice":
                    idevice = burstIdevice('Activity', i, node)
                elif i.attrMap['class']=="objectivesIdevice":
                    idevice = burstIdevice('Objectives', i, node)
                elif i.attrMap['class']=="preknowledgeIdevice":
                    idevice = burstIdevice('Preknowledge', i, node)
                elif i.attrMap['class']=="readingIdevice":
                    idevice = burstIdevice('Reading Activity', i, node)
                # the above are all Generic iDevices;
                # below are all others:
                elif i.attrMap['class']=="RssIdevice":
                    idevice = burstIdevice('RSS', i, node)
                elif i.attrMap['class']=="WikipediaIdevice":
                    # WARNING: Wiki problems loading images with accents, etc:
                    idevice = burstIdevice('Wiki Article', i, node)
                elif i.attrMap['class']=="ReflectionIdevice":
                    idevice = burstIdevice('Reflection', i, node)
                elif i.attrMap['class']=="GalleryIdevice":
                    # WARNING: Gallery problems with the popup html:
                    idevice = burstIdevice('Image Gallery', i, node)
                elif i.attrMap['class']=="ImageMagnifierIdevice":
                    # WARNING: Magnifier missing major bursting components:
                    idevice = burstIdevice('Image Magnifier', i, node)
                elif i.attrMap['class']=="AppletIdevice":
                    # WARNING: Applet missing file bursting components:
                    idevice = burstIdevice('Java Applet', i, node)
                elif i.attrMap['class']=="ExternalUrlIdevice":
                    idevice = burstIdevice('External Web Site', i, node)
                elif i.attrMap['class']=="ClozeIdevice":
                    idevice = burstIdevice('Cloze Activity', i, node)
                elif i.attrMap['class']=="FreeTextIdevice":
                    idevice = burstIdevice('Free Text', i, node)
                elif i.attrMap['class']=="CasestudyIdevice":
                    idevice = burstIdevice('Case Study', i, node)
                elif i.attrMap['class']=="MultichoiceIdevice":
                    idevice = burstIdevice('Multi-choice', i, node)
                elif i.attrMap['class']=="MultiSelectIdevice":
                    idevice = burstIdevice('Multi-select', i, node)
                elif i.attrMap['class']=="QuizTestIdevice":
                    idevice = burstIdevice('SCORM Quiz', i, node)
                elif i.attrMap['class']=="TrueFalseIdevice":
                    idevice = burstIdevice('True-False Question', i, node)
                else:
                    # NOTE: no custom idevices burst yet,
                    # nor any deprecated idevices. Just burst into a FreeText:
                    log.warn("unburstable idevice " + i.attrMap['class'] + 
                            "; bursting into Free Text")
                    idevice = burstIdevice('Free Text', i, node)

        else:
            # no idevices listed on this page,
            # just create a free-text for the entire page:
            log.warn("no idevices found on this node, bursting into Free Text.")
            idevice = burstIdevice('Free Text', body, node)

    else:
        log.warn("unable to read the body of this node.")
Code Example #13
File: getManual.py Project: jsanchez91/iteexe
html_prologue = '''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
<title>eXe</title>
<style type="text/css">
@import url(eXe_manual.css);
</style>
</head>
<body>
'''
html_epilogue = '''</body></html>
'''

base_url = 'http://wikieducator.org/'
collection = urllib2.urlopen(url)
soup = BeautifulSoup(collection)

# make sure the URL is a collection, and not a "page not found" page
if not soup.find('span', {'class': 'mw-headline'}):
    print 'missing or malformed collection page'
    sys.exit()

collection_title = str(soup.find('span', {
    'class': 'mw-headline'
}).string).strip()

print "fetching manual..."
sys.stdout.flush()

for page in soup('dd'):
    if not page.a:
        continue
Code Example #14
html_prologue = '''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
<title>eXe</title>
<style type="text/css">
@import url(eXe_manual.css);
</style>
</head>
<body>
'''
html_epilogue = '''</body></html>
'''
base_url = 'http://wikieducator.org/'
collection = urllib2.urlopen(url)
soup = BeautifulSoup(collection)
if not soup.find('span', {'class': 'mw-headline'}):
    print 'missing or malformed collection page'
    sys.exit()
collection_title = str(soup.find('span', {'class': 'mw-headline'}).string).strip()
print "fetching manual..."
sys.stdout.flush()
for page in soup('dd'):
    if not page.a:
        continue
    print '  ', page.a.string,
    sys.stdout.flush()
    page_url = url_join(base_url, page.a['href'])
    sys.stdout.flush()
    p1 = urllib2.urlopen(page_url)
    p1_soup = BeautifulSoup(p1)
Code Example #15
    def render(self):
        """
        Returns an XHTML string rendering this page.
        """
        old_dT = common.getExportDocType()
        common.setExportDocType('HTML5')
        dT = common.getExportDocType()
        lb = "\n"  # Line breaks
        sectionTag = "div"
        articleTag = "div"
        headerTag = "div"
        if dT == "HTML5":
            sectionTag = "section"
            articleTag = "article"
            headerTag = "header"
        html = common.docType()
        lenguaje = G.application.config.locale
        if self.node.package.lang != "":
            lenguaje = self.node.package.lang
        html += u"<html lang=\"" + lenguaje + "\" xml:lang=\"" + lenguaje + "\" xmlns=\"http://www.w3.org/1999/xhtml\">" + lb
        html += u"<head>" + lb
        html += u"<title>"
        if self.node.id == '0':
            if self.node.package.title != '':
                html += escape(self.node.package.title)
            else:
                html += escape(self.node.titleLong)
        else:
            if self.node.package.title != '':
                html += escape(self.node.titleLong) + " | " + escape(
                    self.node.package.title)
            else:
                html += escape(self.node.titleLong)
        html += u" </title>" + lb
        html += u'<meta charset="utf-8" />' + lb
        if dT != "HTML5" and self.node.package.lang != "":
            html += '<meta http-equiv="content-language" content="' + lenguaje + '" />' + lb
        if self.node.package.author != "":
            html += '<meta name="author" content="' + escape(
                self.node.package.author, True) + '" />' + lb
        html += '<meta name="generator" content="eXeLearning ' + release + ' - exelearning.net" />' + lb
        if self.node.id == '0':
            if self.node.package.description != "":
                html += '<meta name="description" content="' + escape(
                    self.node.package.description, True) + '" />' + lb
        html += u"<link rel=\"stylesheet\" type=\"text/css\" href=\"base.css\" />" + lb
        if common.hasWikipediaIdevice(self.node):
            html += u"<link rel=\"stylesheet\" type=\"text/css\" href=\"exe_wikipedia.css\" />" + lb
        if common.hasGalleryIdevice(self.node):
            html += u"<link rel=\"stylesheet\" type=\"text/css\" href=\"exe_lightbox.css\" />" + lb
        html += u"<link rel=\"stylesheet\" type=\"text/css\" href=\"content.css\" />" + lb
        if dT == "HTML5" or common.nodeHasMediaelement(self.node):
            html += u'<!--[if lt IE 9]><script type="text/javascript" src="exe_html5.js"></script><![endif]-->' + lb
        style = G.application.config.styleStore.getStyle(
            self.node.package.style)

        # jQuery
        if style.hasValidConfig:
            if style.get_jquery() == True:
                html += u'<script type="text/javascript" src="exe_jquery.js"></script>' + lb
            else:
                html += u'<script type="text/javascript" src="' + style.get_jquery(
                ) + '"></script>' + lb
        else:
            html += u'<script type="text/javascript" src="exe_jquery.js"></script>' + lb

        if common.hasGalleryIdevice(self.node):
            html += u'<script type="text/javascript" src="exe_lightbox.js"></script>' + lb
        html += common.getJavaScriptStrings() + lb
        html += u'<script type="text/javascript" src="common.js"></script>' + lb
        if common.hasMagnifier(self.node):
            html += u'<script type="text/javascript" src="mojomagnify.js"></script>' + lb
        # Some styles might have their own JavaScript files (see their config.xml file)
        if style.hasValidConfig:
            html += style.get_extra_head()
        html += u"</head>" + lb
        html += u'<body class="exe-epub3"><script type="text/javascript">document.body.className+=" js"</script>' + lb
        html += u"<div id=\"outer\">" + lb
        html += u"<" + sectionTag + " id=\"main\">" + lb
        html += u"<" + headerTag + " id=\"nodeDecoration\">"
        html += u'<h1 id=\"nodeTitle\">'
        html += escape(self.node.titleLong)
        html += u'</h1>'
        html += u"</" + headerTag + ">" + lb

        for idevice in self.node.idevices:
            if idevice.klass != 'NotaIdevice':
                e = " em_iDevice"
                if unicode(idevice.emphasis) == '0':
                    e = ""
                html += u'<' + articleTag + ' class="iDevice_wrapper %s%s" id="id%s">%s' % (
                    idevice.klass, e, idevice.id, lb)
                block = g_blockFactory.createBlock(None, idevice)
                if not block:
                    log.critical("Unable to render iDevice.")
                    raise Error("Unable to render iDevice.")
                if hasattr(idevice, "isQuiz"):
                    html += htmlentitydecode(block.renderJavascriptForWeb())
                if idevice.title != "Forum Discussion":
                    html += htmlentitydecode(
                        self.processInternalLinks(
                            block.renderView(self.node.package.style)))
                html += u'</' + articleTag + '>' + lb  # close the iDevice wrapper

        html += u"</" + sectionTag + ">" + lb  # /#main
        html += self.renderLicense()
        html += unicode(
            BeautifulSoup(self.renderFooter(),
                          convertEntities=BeautifulSoup.XHTML_ENTITIES))
        html += u"</div>" + lb  # /#outer
        if style.hasValidConfig:
            html += style.get_extra_body()
        html += u'</body></html>'
        html = html.encode('utf8')
        # JR: Strip the attributes from the equations
        aux = re.compile("exe_math_latex=\"[^\"]*\"")
        html = aux.sub("", html)
        aux = re.compile("exe_math_size=\"[^\"]*\"")
        html = aux.sub("", html)
        # JR: Fix the & in the glossary links
        html = html.replace("&concept", "&amp;concept")
        # Remove "resources/" from data="resources/ and the url param
        html = html.replace("video/quicktime\" data=\"resources/",
                            "video/quicktime\" data=\"")
        html = html.replace("application/x-mplayer2\" data=\"resources/",
                            "application/x-mplayer2\" data=\"")
        html = html.replace("audio/x-pn-realaudio-plugin\" data=\"resources/",
                            "audio/x-pn-realaudio-plugin\" data=\"")
        html = html.replace("<param name=\"url\" value=\"resources/",
                            "<param name=\"url\" value=\"")

        common.setExportDocType(old_dT)
        return html
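
For illustration, the doctype-dependent tag selection at the top of render() collapses into one tuple assignment (same names and behavior as the lines above):

isHTML5 = (common.getExportDocType() == "HTML5")
sectionTag, articleTag, headerTag = (
    ("section", "article", "header") if isHTML5 else ("div", "div", "div"))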
Code Example #16
File: getManual.py Project: kohnle-lernmodule/palama
html_prologue = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
<title>eXe</title>
<style type="text/css">
@import url(eXe_manual.css);
</style>
</head>
<body>
"""
html_epilogue = """</body></html>
"""

base_url = "http://wikieducator.org/"
collection = urllib2.urlopen(url)
soup = BeautifulSoup(collection)

# make sure the URL is a collection, and not a "page not found" page
if not soup.find("span", {"class": "mw-headline"}):
    print "missing or malformed collection page"
    sys.exit()

collection_title = str(soup.find("span", {"class": "mw-headline"}).string).strip()

print "fetching manual..."
sys.stdout.flush()

for page in soup("dd"):
    if not page.a:
        continue
    print "  ", page.a.string,
Code Example #17
File: latexidevice.py Project: go38/exeLearning
            page = FILE.read()
            FILE.close()
        except IOError, error:
            log.warning(unicode(error))
            self.article.content = _(u"Unable to read file: %s.") % path
            return

        page = u'<div>' + unicode(page, "utf8") + u'</div>'
        # FIXME avoid problems with numeric entities in attributes
        page = page.replace(u'&#160;', u'&nbsp;')

        # avoidParserProblems is set to False because BeautifulSoup's
        # cleanup was causing a "concatenating Null+Str" error,
        # and Wikipedia's HTML doesn't need cleaning up.
        # BeautifulSoup is faster this way too.
        soup = BeautifulSoup(page, False)
        content = soup.first('div')

        # remove the wiktionary, wikimedia commons, and categories boxes
        #  and the protected icon and the needs citations box
        if content:
            infoboxes = content.findAll('div',
                                        {'class': 'infobox sisterproject'})
            [infobox.extract() for infobox in infoboxes]
            catboxes = content.findAll('div', {'id': 'catlinks'})
            [catbox.extract() for catbox in catboxes]
            amboxes = content.findAll('table',
                                      {'class': re.compile(r'.*\bambox\b.*')})
            [ambox.extract() for ambox in amboxes]
            protecteds = content.findAll('div', {'id': 'protected-icon'})
            [protected.extract() for protected in protecteds]
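
The list comprehensions above run purely for their extract() side effect; written as plain loops (a sketch reusing the content variable and the BeautifulSoup 3 findAll API from this excerpt), the cleanup reads:

# side-effect-only cleanup written as plain loops
for name, attrs in (('div', {'class': 'infobox sisterproject'}),
                    ('div', {'id': 'catlinks'}),
                    ('table', {'class': re.compile(r'.*\bambox\b.*')}),
                    ('div', {'id': 'protected-icon'})):
    for tag in content.findAll(name, attrs):
        tag.extract()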
Code Example #18
 def _computeLinks(self):
     self._computeRelpaths()
     htmls = self.resources['mimes']['text/html']
     total = len(htmls)
     i = 1
     for url in htmls:
         if self.cancel:
             return
         if self.client:
             self.client.call('eXe.app.getController("Toolbar").updateImportProgressWindow',_(u'Analyzing HTML file labels %d of %d: %s') % (i, total, str(url)))
         content = open(url.path).read()
         encoding = detect(content)['encoding']
         ucontent = unicode(content,encoding)
         soup = BeautifulSoup(ucontent,fromEncoding=encoding)
         declaredHTMLEncoding = getattr(soup, 'declaredHTMLEncoding')
         if declaredHTMLEncoding:
             ucontent = UnicodeDammit(content,[declaredHTMLEncoding]).unicode
             encoding = declaredHTMLEncoding
         url.setContent(ucontent,encoding)
         url.setSoup(soup)
         for tag in soup.findAll():
             if self.cancel:
                 return
             if not tag.attrs:
                 continue
             matches = []
             for key, value in tag.attrs:
                 if value == "":
                     continue
                 unq_value = unquote(value)
                 unq_low_value = unquote(value.lower())
                 for l, rl in self.resources['urls'][url.parentpath].relpaths:
                     low_rl = rl.lower()
                     if rl in unq_value:
                         L = Link(self.resources['urls'][l],rl,url,tag,key,rl)
                         matches.append(L)
                     elif low_rl in unq_value:
                         L = Link(self.resources['urls'][l],rl,url,tag,key,low_rl)
                         matches.append(L)
                     elif l in unq_value:
                         L = Link(self.resources['urls'][l],rl,url,tag,key,l)
                         matches.append(L)
             matches_final = []
             for l1 in matches:
                 matches_ = [ m for m in matches if m != l1 ]
                 found = False
                 for l2 in matches_:
                     if re.search(re.escape(l1.relative),l2.relative):
                         found = True
                 if not found:
                     matches_final.append(l1)
             if matches_final:
                 for match in matches_final:
                     url.addLink( match )
                     url.addRLink( str(match.url) )
         i += 1
     csss = self.resources['mimes']['text/css'] if 'text/css' in self.resources['mimes'] else None
     csss_and_htmls = csss + htmls if csss else htmls
     total = len(csss_and_htmls)
     i = 1
     for url in csss_and_htmls:
         if self.cancel:
             return
         if url.mime == 'text/css':
             tipo = 'CSS'
         else:
             tipo = 'HTML'
         content = url.getContent()
         if not content:
             content = open(url.path).read()
             encoding = detect(content)['encoding']
             content = unicode(content,encoding)
             url.setContent(content,encoding)                
         if self.client:
             self.client.call('eXe.app.getController("Toolbar").updateImportProgressWindow',_(u'Exhaustively analyzed file %s %d of %d: %s') % (tipo, i, total, str(url)))
         matches = []
         for l, rl in self.resources['urls'][url.parentpath].relpaths:
             low_rl = rl.lower()
             if rl in content:
                 L = Link(self.resources['urls'][l],rl,url,match=rl)
                 matches.append(L)
             elif low_rl in content:
                 L = Link(self.resources['urls'][l],rl,url,match=low_rl)
                 matches.append(L)                    
         matches_final = []
         for l1 in matches:
             matches_ = [ m for m in matches if m != l1 ]
             found = False
             for l2 in matches_:
                 if re.search(re.escape(l1.relative),l2.relative):
                     found = True
             if not found:
                 matches_final.append(l1)
         if matches_final:
             for match in matches_final:
                 if not [ link for link in url.links if link.relative == match.relative ]:
                     url.addLink( match )
                     url.addRLink( str(match.url) )
         i += 1
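
Both passes in _computeLinks end with the same quadratic filter that keeps only the most specific candidates; since re.search(re.escape(a), b) is just a substring test, the filter can be factored out like this (a hypothetical helper, not in the original):

def mostSpecificMatches(matches):
    # keep a link only when its relative path does not occur inside
    # another candidate's relative path
    final = []
    for l1 in matches:
        others = [m for m in matches if m != l1]
        if not any(l1.relative in l2.relative for l2 in others):
            final.append(l1)
    return final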
Code Example #19
class WikipediaIdevice(Idevice):
    """
    A Wikipedia Idevice is one built from a Wikipedia article.
    """
    persistenceVersion = 9

    def __init__(self, defaultSite):
        Idevice.__init__(
            self, x_(u"Wiki Article"), x_(u"University of Auckland"),
            x_(u"""<p>The Wikipedia iDevice allows you to locate 
existing content from within Wikipedia and download this content into your eXe 
resource. The Wikipedia Article iDevice takes a snapshot copy of the article 
content. Changes in Wikipedia will not automatically update individual snapshot 
copies in eXe, a fresh copy of the article will need to be taken. Likewise, 
changes made in eXe will not be updated in Wikipedia. </p> <p>Wikipedia content 
is covered by the GNU free documentation license.</p>"""), u"", u"")
        self.emphasis = Idevice.NoEmphasis
        self.articleName = u""
        self.article = TextAreaField(x_(u"Article"))
        self.article.idevice = self
        self.images = {}
        self.site = defaultSite
        self.icon = u"inter"
        self._langInstruc = x_(u"""Select the appropriate language version 
of Wikipedia to search and enter search term.""")
        self._searchInstruc = x_("""Enter a phrase or term you wish to search 
within Wikipedia.""")
        self.ownUrl = ""
        self.systemResources += ['exe_wikipedia.css']

    # Properties
    langInstruc = lateTranslate('langInstruc')
    searchInstruc = lateTranslate('searchInstruc')

    def loadArticle(self, name):
        """
        Load the article from Wikipedia
        """
        self.articleName = name
        url = ""
        name = urllib.quote(name.replace(" ", "_").encode('utf-8'))
        try:
            url = (self.site or self.ownUrl)
            if not url.endswith('/') and name != '': url += '/'
            if '://' not in url: url = 'http://' + url
            url += name
            net = urllib.urlopen(url)
            page = net.read()
            net.close()
        except IOError, error:
            log.warning(unicode(error))
            self.article.content = _(
                u"Unable to download from %s <br/>Please check the spelling and connection and try again."
            ) % url
            self.article.content_w_resourcePaths = self.article.content
            self.article.content_wo_resourcePaths = self.article.content
            return

        page = unicode(page, "utf8")
        # FIXME avoid problems with numeric entities in attributes
        page = page.replace(u'&#160;', u'&nbsp;')
        # avoidParserProblems is set to False because BeautifulSoup's
        # cleanup was causing a "concatenating Null+Str" error,
        # and Wikipedia's HTML doesn't need cleaning up.
        # BeautifulSoup is faster this way too.
        soup = BeautifulSoup(page, False)
        content = soup.first('div', {'id': "content"})
        # Fix bug #1359: the ITE style did not respect the page width when
        # exporting to web pages if the Wikipedia iDevice was used
        if content:
            content['id'] = "wikipedia-content"

        # remove the wiktionary, wikimedia commons, and categories boxes
        #  and the protected icon and the needs citations box
        if content:
            infoboxes = content.findAll('div',
                                        {'class': 'infobox sisterproject'})
            [infobox.extract() for infobox in infoboxes]
            catboxes = content.findAll('div', {'id': 'catlinks'})
            [catbox.extract() for catbox in catboxes]
            amboxes = content.findAll('table',
                                      {'class': re.compile(r'.*\bambox\b.*')})
            [ambox.extract() for ambox in amboxes]
            protecteds = content.findAll('div', {'id': 'protected-icon'})
            [protected.extract() for protected in protecteds]
            # Extract HTML comments
            comments = content.findAll(
                text=lambda text: isinstance(text, Comment))
            [comment.extract() for comment in comments]
        else:
            content = soup.first('body')

        if not content:
            log.error("no content")
            self.article.content = _(
                u"Unable to download from %s <br/>Please check the spelling and connection and try again."
            ) % url
            # set the other elements as well
            self.article.content_w_resourcePaths = self.article.content
            self.article.content_wo_resourcePaths = self.article.content
            return

        # clear out any old images
        while self.userResources:
            self.userResources[0].delete()
        self.images = {}

        # Download the images
        bits = url.split('/')
        netloc = '%s//%s' % (bits[0], bits[2])
        path = '/'.join(bits[3:-1])
        tmpDir = TempDirPath()
        for imageTag in content.fetch('img'):
            imageSrc = unicode(imageTag['src'])
            imageName = imageSrc.split('/')[-1]
            imageName = imageName.replace('&gt;', '>')
            imageName = imageName.replace('&lt;', '<')
            imageName = imageName.replace('&quot;', '"')
            imageName = imageName.replace('&nbsp;', '')
            imageName = imageName.replace('%2C', ',')
            imageName = imageName.replace('%22', '"')
            imageName = imageName.replace('%28', '(')
            imageName = imageName.replace('%29', ')')
            imageName = imageName.replace('%C3%A5', 'å')
            # JR: decode the image name
            imageName = urllib.unquote(imageName)
            # Search if we've already got this image
            if imageName not in self.images:
                if not imageSrc.startswith("http://"):
                    if imageSrc.startswith("/"):
                        # imageSrc = netloc + imageSrc
                        imageSrc = bits[0] + imageSrc
                    else:
                        imageSrc = '%s/%s/%s' % (netloc, path, imageSrc)
                try:
                    # download with its original name... in ASCII
                    # (some repositories do not understand non-ASCII file names):
                    imageName = imageName.encode('ascii', 'ignore')
                    urllib.urlretrieve(imageSrc, tmpDir / imageName)
                    new_resource = Resource(self, tmpDir / imageName)
                except:
                    print 'Unable to download file'
                    continue
                if new_resource._storageName != imageName:
                    # looks like it was changed due to a possible conflict,
                    # so reset the imageName accordingly for the content:
                    imageName = new_resource._storageName
                self.images[imageName] = True
            imageTag['src'] = (u"resources/" + imageName)
        self.article.content = self.reformatArticle(netloc, unicode(content))
        # now that these are supporting images, any direct manipulation
        # of the content field must also store this updated information
        # into the other corresponding fields of TextAreaField:
        # (perhaps eventually a property should be made for TextAreaField
        #  such that these extra set's are not necessary, but for now, here:)
        self.article.content_w_resourcePaths = self.article.content
        self.article.content_wo_resourcePaths = self.article.content
Code Example #20
 def getAppletcodeDescartes(self, filename):
     """
     xhtml string for DescartesApplet
     """
     global SCENE_NUM
     html = ""
     if not filename.endswith(".jar"):
         if filename.endswith(".html") or filename.endswith(".htm"):
             from exe.engine.beautifulsoup import BeautifulSoup, BeautifulStoneSoup   
             import urllib2
             if filename.find(",") == -1:    
                 # firstly verify the URL is reachable, or come back:
                 if self.verifyConn(filename) == False:
                     assert self.parentNode.package, _('Sorry, this URL is unreachable') 
                     return
                 # filename is reachable, go on:                    
                 htmlbytes = urllib2.urlopen(filename)
             else:
                 if self.verifyConn(filename[2:]) == False:
                     return html  # unreachable URL: html is still the empty string
                 htmlbytes = urllib2.urlopen(filename[2:])
             content = htmlbytes.read()
             # content = content.replace('""','"') Galo swears it won't be necessary
             soup = BeautifulSoup(content)
             i = 0
             appletslist = []
             for ap_old in soup.findAll("applet",{"code":"Descartes.class"}):
                 for resource in reversed(self.userResources):
                     if resource._storageName != ap_old["archive"]:
                         resource.delete()
                 global DESC_PLUGIN
                 DESC_PLUGIN = 0
                 ap_old["codebase"] = "./"
                 appletslist.append(ap_old)   
             for ap_new in soup.findAll("applet",{"code":"descinst.Descartes.class"}):
                 DESC_PLUGIN = 1
                 for resource in reversed(self.userResources):
                     if resource._storageName != 'descinst.jar':
                         resource.delete()
                 ap_new["codebase"] = "./"
                 appletslist.append(ap_new)
             for ap_supernew in soup.findAll("applet",{"code":"descinst.DescartesWeb2_0.class"}):
                 DESC_PLUGIN = 1
                 for resource in reversed(self.userResources):
                     if resource._storageName != 'descinst.jar':
                         resource.delete()
                 ap_supernew["codebase"] = "./"
                 appletslist.append(ap_supernew)
             # TO_DO sometimes applets are embedded in <frame> tags (no applets found in the url);
             # it could begin like this:
             #    if appletslist == []: # because no <applet> was found
             #    for ap_frame in soup.findAll("frame src"): # could be problems with that whitespace
             #        DESC_PLUGIN = 1
             #        for resource in reversed(self.userResources):
             #            if resource._storageName != 'descinst.jar':
             #                resource.delete()
             #        if ap_frame["codebase"]:
             #            ap_frame["codebase"] = "./"
             #        appletslist.append(ap_frame)                      
             
             # if no applet was found, return the empty string:
             if appletslist == []:
                 return html
             
             # finally, pick the applet that matches the requested scene:
             umod = ''
             for x in appletslist:
                 if i == SCENE_NUM - 1:
                     u = unicode(x)
                     umod = self.downloadFiles(u)
                     break
                 i = i + 1
             htmlbytes.close()
             html = umod
     # now html has the code of the applet for eXe:
     return html
Code Example #21
            page = FILE.read()
            FILE.close()
        except IOError, error:
            log.warning(unicode(error))
            self.article.content = _(u"Unable to read file: %s.") % path
            return

        page = u'<div>' + unicode(page, "utf8") + u'</div>'
        # FIXME avoid problems with numeric entities in attributes
        page = page.replace(u'&#160;', u'&nbsp;')

        # avoidParserProblems is set to False because BeautifulSoup's
        # cleanup was causing a "concatenating Null+Str" error,
        # and Wikipedia's HTML doesn't need cleaning up.
        # BeautifulSoup is faster this way too.
        soup = BeautifulSoup(page, False)
        content = soup.first('div')

        # remove the wiktionary, wikimedia commons, and categories boxes
        #  and the protected icon and the needs citations box
        if content:
            infoboxes = content.findAll('div',
                    {'class' : 'infobox sisterproject'})
            [infobox.extract() for infobox in infoboxes]
            catboxes = content.findAll('div', {'id' : 'catlinks'})
            [catbox.extract() for catbox in catboxes]
            amboxes = content.findAll('table',
                    {'class' : re.compile(r'.*\bambox\b.*')})
            [ambox.extract() for ambox in amboxes]
            protecteds = content.findAll('div', {'id' : 'protected-icon'})
            [protected.extract() for protected in protecteds]