Example #1
def _path(self, node):
    # Start the path at the node's own x/y attributes, then walk its
    # children: <moveto> and <curvesto> elements plus bare text nodes
    # holding flat "x y" coordinate pairs.
    self.path = self.canvas.beginPath()
    self.path.moveTo(**utils.getAttrsAsDict(node, ['x', 'y']))
    for n in node.childNodes:
        if n.nodeType == node.ELEMENT_NODE:
            if n.localName == 'moveto':
                vals = utils.getText(n).split()
                self.path.moveTo(utils.as_pt(vals[0]), utils.as_pt(vals[1]))
            elif n.localName == 'curvesto':
                # Consume six values at a time: x1 y1 x2 y2 x3 y3.
                vals = utils.getText(n).split()
                while len(vals) > 5:
                    pos = []
                    while len(pos) < 6:
                        pos.append(utils.as_pt(vals.pop(0)))
                    self.path.curveTo(*pos)
        elif n.nodeType == node.TEXT_NODE:
            # Should adjacent TEXT_NODEs be merged first?
            data = n.data.split()
            while len(data) > 1:
                x = utils.as_pt(data.pop(0))
                y = utils.as_pt(data.pop(0))
                self.path.lineTo(x, y)
    # Close the path unless the node carries close="false".
    if (not node.hasAttribute('close')
            or utils.as_bool(node.getAttribute('close'))):
        self.path.close()
    self.canvas.drawPath(
        self.path,
        **utils.getAttrsAsDict(node, [], {'fill': 'bool', 'stroke': 'bool'}))
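A sketch of the markup this handler appears to consume; the element and attribute names are read off the parser above, but the exact schema is an assumption:

sample = """
<path x="0" y="0" close="yes" fill="yes" stroke="yes">
    50 0 50 50                                   <!-- text node: lineTo x y pairs -->
    <moveto>100 100</moveto>                     <!-- moveTo x y -->
    <curvesto>110 120 120 90 130 120</curvesto>  <!-- curveTo x1 y1 x2 y2 x3 y3 -->
</path>
"""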
Example #2
def find_episodes(params):
  import sys
  import urllib
  import xml.dom.minidom
  # common, utils and xbmcplugin come from the surrounding XBMC/Kodi plugin.
  baseurl = 'http://www.uitzendinggemist.nl'
  url = urllib.unquote(params['url'])
  module = params['module']
  page = ""
  pagecount = 1
  while pagecount<10:
    rssurl = baseurl + url + '.rss?page=' + str(pagecount)
    request = common.fetchPage({"link": rssurl, "cookie": "site_cookie_consent=yes"})
    if request["status"] != 200:
      break
    page = request["content"].encode('utf-8')
    try:
      dom = xml.dom.minidom.parseString(page)
    except Exception:
      # Retry with bare ampersands escaped; a stray "&" breaks the XML parser.
      page = page.replace("&", "&amp;")
      dom = xml.dom.minidom.parseString(page)
    if len(dom.getElementsByTagName('item'))==0:
      break
    else:
      for item in dom.getElementsByTagName('item'):
        videourl = utils.getText(item.getElementsByTagName('link')[0].childNodes)
        videourl = urllib.quote_plus(videourl)
        videourl = sys.argv[0]+"?module="+module+"&action=find_video"+"&url="+videourl
        try:
          thumb = item.getElementsByTagName('media:thumbnail')[0].attributes['url'].value
        except Exception:
          thumb = ""
        title = common.replaceHTMLCodes(utils.getText(item.getElementsByTagName('title')[0].childNodes))
        utils.addLink(title, videourl, thumb)
    pagecount += 1
  xbmcplugin.endOfDirectory(int(sys.argv[1]))
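The try/except fallback above exists because bare ampersands are a common cause of invalid XML in feeds. A self-contained illustration using minidom only (note that the blanket replace would also double-escape any entity that was already correct):

import xml.dom.minidom

bad = "<item><title>Tom & Jerry</title></item>"
try:
    dom = xml.dom.minidom.parseString(bad)
except Exception:
    # The bare "&" is not well-formed; escape it and retry.
    dom = xml.dom.minidom.parseString(bad.replace("&", "&amp;"))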
Example #4
def _lines(self, node):
    # The node's text is a flat coordinate list; take it four values at a
    # time as (x1, y1, x2, y2) segments, then draw them in one call.
    line_str = utils.getText(node).split()
    lines = []
    while len(line_str) > 3:
        lines.append([utils.as_pt(l) for l in line_str[0:4]])
        line_str = line_str[4:]
    self.canvas.lines(lines)
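The four-at-a-time chunking is easy to check in isolation; here utils.as_pt is stubbed out with float, since utils is not part of the snippet:

line_str = "0 0 100 0 100 0 100 100".split()
lines = []
while len(line_str) > 3:
    lines.append([float(v) for v in line_str[0:4]])
    line_str = line_str[4:]
print(lines)  # [[0.0, 0.0, 100.0, 0.0], [100.0, 0.0, 100.0, 100.0]]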
Example #6
def predict(filename):
    raw_text = getText(filename)
    data = getDataset(raw_text)
    model = defineModel(data)

    loadWeights(model)
    generateText(model, raw_text)
Example #7
import jinja2

def main(planet=None, lang=None, layout=None):
    templateLoader = jinja2.FileSystemLoader(searchpath='print/templates/')
    templateEnv = jinja2.Environment(loader=templateLoader)
    planets = [
        'mercury',
        'venus',
        'earth',
        'mars',
        'jupiter',
        'saturn',
        'uranus',
        'neptune',
    ]

    if planet is not None:
        data = [getJson('data/planets/'+planet+'.json')]
    else:
        data = [getJson('data/planets/'+planet+'.json')
                for planet in planets]

    if lang is None:
        lang = ['fr', 'en']
    else:
        lang = [lang]

    cssPages = getText('print/stylesheet.css')
    cssInt = getText('print/stylesheet-intercalar.css')

    for planetData in data:
        catalog = Pages(planetData)
        intercalar = Intercalar(planetData)
        cribsheet = CribSheet(planetData)
        for l in lang:
            pages = catalog.generate(templateEnv, cssPages, l, layout)
            intercalars = intercalar.generate(templateEnv, cssInt, l, layout)
            cribsheetRender = cribsheet.generate(templateEnv, cssInt, l, None)
            print('Saving ' + planetData['name']['en'] + ' - ' + l)
            if layout is None:
                pages = intercalars + pages
                saveAsPDF(pages, planetData['name']['en'] + '-' + l, folder='output/print/preview/')
                saveAsPDF(cribsheetRender, planetData['name']['en']+ '-cribsheet-' + l, folder='output/print/preview/')
            else:
                saveAsPDF(pages, planetData['name']['en'] + '-' + l)
                saveAsPDF(intercalars, planetData['name']['en'] + '-' + l, folder='output/print/intercalars/')
                saveAsPDF(cribsheetRender, planetData['name']['en']+ '-cribsheet-' + l)
            print('Done!\n\n\n')
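Hypothetical invocations, assuming the helpers used above (getJson, getText, Pages, Intercalar, CribSheet, saveAsPDF) are in scope:

main(planet='mars', lang='fr')  # only the French Mars booklet, preview output
main()                          # every planet, both languages, preview output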
Example #8
def train(filename):
    raw_text = getText(filename)
    data = getDataset(raw_text)
    model = defineModel(data)
    fitModel(model, data)
Example #9
# moby_file points at the Moby part-of-speech list (elsewhere set to
# os.path.join(root, 'mpos', 'mobyposi - Copy.i')); each line is "word*POScodes".
moby = set(line.rstrip() for line in open(moby_file, encoding='mac_roman'))

# print(len(moby))

print('Building dicts...')
# NB: moby is a set, so enumeration order is arbitrary (though consistent
# across these two passes within a single run).
word_indices = dict((line.split('*')[0], (i, line.split('*')[1]))
                    for i, line in enumerate(moby))
indices_word = dict((i, line.split('*')[0]) for i, line in enumerate(moby))

parts = set(pos for line in moby for pos in line.split('*')[1])
pos_indices = dict((pos, i) for i, pos in enumerate(parts))
indices_pos = dict((i, pos) for i, pos in enumerate(parts))

texts = [[s for s in getText(i) if isAdmissible(s, word_indices)]
         for i in range(7)]
# for text in texts:
# print(len(text))

maxlen = 128


def TrainPosModel(model, dimIn, dimOut):
    print('Generating training set...')
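For a concrete picture of the dictionaries built above, here is a made-up two-line word list (the '*' separator and the per-character POS codes follow the code; the words themselves are invented):

moby = {'dog*N', 'run*NVt'}
word_indices = dict((line.split('*')[0], (i, line.split('*')[1]))
                    for i, line in enumerate(moby))
# word_indices['run'] == (0, 'NVt') or (1, 'NVt'), depending on set order
parts = set(pos for line in moby for pos in line.split('*')[1])
# parts == {'N', 'V', 't'} -- one entry per POS code character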
Example #10
def _curves(self, node):
    # The node's text is a flat coordinate list; feed it to canvas.bezier
    # eight values at a time (four control points per cubic curve).
    line_str = utils.getText(node).split()
    while len(line_str) > 7:
        self.canvas.bezier(*[utils.as_pt(l) for l in line_str[0:8]])
        line_str = line_str[8:]
Example #11
import os

import utils  # project-local helpers (getText, getMetadata, parseMetadata)

def main(data):
    ## Loop through languages
    trPath = os.path.join(
        data["definitions"]["runtime"]["cwd"],
        data["config"]["Filesystem"]["SourcePathTranslations"],
    )
    trPath = os.path.abspath(trPath)
    languages = sorted(next(os.walk(trPath))[1])
    for languageKey in languages:
        data["translations"][languageKey] = {}
        trLanguagePath = os.path.join(
            trPath,
            languageKey,
        )
        ## Loop through artist groups
        groups = sorted(next(os.walk(trLanguagePath))[1])
        for groupKey in groups:
            data["translations"][languageKey][groupKey] = {}
            trLanguageGroupPath = os.path.join(
                trLanguagePath,
                groupKey,
            )
            ## Loop through artists
            artists = sorted(next(os.walk(trLanguageGroupPath))[1],
                             key=str.lower)
            for artistKey in artists:
                trLanguageGroupArtistPath = os.path.join(
                    trLanguageGroupPath,
                    artistKey,
                )
                data["translations"][languageKey][groupKey][artistKey] = {
                    "printable_name": artistKey,
                    "releases": [],
                }
                ## Loop through artist’s releases
                releases = sorted(next(os.walk(trLanguageGroupArtistPath))[1],
                                  key=str.lower)
                for releaseKey in releases:
                    trLanguageGroupArtistReleasePath = os.path.join(
                        trLanguageGroupArtistPath,
                        releaseKey,
                    )
                    data["translations"][languageKey][groupKey][artistKey][
                        "releases"].append({
                            "name": releaseKey,
                            "printable_name": releaseKey,
                            "recordings": [[]],
                        })
                    ## Loop through release’s recordings
                    recordings = sorted(next(
                        os.walk(trLanguageGroupArtistReleasePath))[2],
                                        key=str.lower)
                    for recordingKey in recordings:
                        trLanguageGroupArtistReleaseRecordingPath = os.path.join(
                            trLanguageGroupArtistReleasePath,
                            recordingKey,
                        )
                        with open(trLanguageGroupArtistReleaseRecordingPath,
                                  "r") as f:
                            rawContents = f.read().strip()
                        rawText = utils.getText(rawContents)
                        rawMetadata = utils.getMetadata(rawContents)
                        data["translations"][languageKey][groupKey][artistKey][
                            "releases"][-1]["recordings"][0].append({
                                "name":
                                recordingKey,
                                "printable_name":
                                recordingKey,
                                "text":
                                rawText,
                                "metadata":
                                utils.parseMetadata(rawMetadata),
                            })
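The nested structure this builds, sketched with invented keys (only the shape follows from the code above; every name is hypothetical):

# data["translations"]["en"]["rock"]["Artist"] == {
#     "printable_name": "Artist",
#     "releases": [{
#         "name": "Album",
#         "printable_name": "Album",
#         "recordings": [[{
#             "name": "track.txt",
#             "printable_name": "track.txt",
#             "text": ...,      # utils.getText(rawContents)
#             "metadata": ...,  # utils.parseMetadata(rawMetadata)
#         }]],
#     }],
# }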
Example #14
                        url = 'http://%s:%s/entity/search?q={"limit":%d,"cursor":%s}&opts=%s' % (
                            httpConfig.config["serverhost"],
                            httpConfig.config["serverport"],
                            limit,
                            json.dumps(cursor),
                            json.dumps(opts),
                        )

                        print ("doGet:   url = %s" % url)

                        r = requests.get(url)
                        statusCode = r.status_code
                        try:
                            j = utils.getJson(r)
                        except Exception:
                            # Fall back to the raw body when it is not JSON.
                            print (utils.getText(r))
                            continue

                        cursor = j["cursor"]
                        status = j["status"]

                        print ("doGet:   Got %d results" % len(j["response"]))
                        for item in j["response"]:
                            itemId = item["props"]["urn:lri:property_type:id"]
                            # XXX: should this be: if item in items?
                            if item in items:
                                print ("doGet:     DUPLICATE: %s" % itemId)
                            else:
                                items.append(item)
                        count += 1
                    print ("doGet: Got %d items: " % len(items))