Example #1
    def write(self,
              cacheId,
              content,
              memory=False,
              writeToFile=True,
              keepLock=False):
        filetool.directory(self._path)
        cacheFile = os.path.join(self._path, self.filename(cacheId))

        if writeToFile:
            try:
                if not cacheFile in self._locked_files:
                    self._locked_files.add(cacheFile)  # this is not atomic with the next one!
                    filetool.lock(cacheFile)

                fobj = open(cacheFile, 'wb')

                pickle.dump(content, fobj, 2)

                fobj.close()
                if not keepLock:
                    filetool.unlock(cacheFile)
                    self._locked_files.remove(cacheFile)  # not atomic with the previous one!

            except (IOError, EOFError, pickle.PickleError,
                    pickle.PicklingError), e:
                e.args = ("Could not store cache to %s\n" % self._path +
                          e.args[0], ) + e.args[1:]
                raise e
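
All of these snippets lean on the generator's filetool helper module, which is not shown on this page. As a rough, minimal sketch of the three calls they rely on (directory, lock, unlock), assuming a makedirs wrapper and a lock-directory based advisory lock; the real qooxdoo implementation may differ:

import os, errno, time

def directory(path):
    # mkdir -p: create path including intervening dirs, tolerating existing ones
    try:
        os.makedirs(path)
    except OSError, e:
        if e.errno != errno.EEXIST:
            raise

def lock(path, retries=100):
    # advisory lock via an atomically created companion directory;
    # a real implementation would also deal with stale locks
    for _ in range(retries):
        try:
            os.mkdir(path + ".lock")
            return
        except OSError:
            time.sleep(0.05)
    raise RuntimeError("Could not acquire lock for %s" % path)

def unlock(path):
    os.rmdir(path + ".lock")
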
Example #2
    def write(self, cacheId, content, memory=False, writeToFile=True):
        filetool.directory(self._path)
        cacheFile = os.path.join(self._path, self.filename(cacheId))

        if writeToFile:
            try:
                self._locked_files.add(cacheFile)  # this is not atomic with the next one!
                filetool.lock(cacheFile)
                fobj = open(cacheFile, 'wb')

                pickle.dump(content, fobj, 2)

                fobj.close()
                filetool.unlock(cacheFile)
                self._locked_files.remove(cacheFile)  # not atomic with the previous one!

                #if cacheId.startswith("tree-"):
                #    print "caching: %s" % cacheId

            except (IOError, EOFError, pickle.PickleError, pickle.PicklingError):
                self._console.error("Could not store cache to %s" % self._path)
                sys.exit(1)

        if memory:
            memcache[cacheId] = content
Example #3
def _handleResources(script, generator, filtered=True):

    def createResourceInfo(res, resval):
        resinfo = [ { "target": "resource", "data": { res : resval }} ]
        #filetool.save(approot+"/data/resource/" + res + ".json", json.dumpsCode(resinfo))
        return resinfo

    def copyResource(res, library):
        sourcepath = os.path.join(library._resourcePath, res)
        targetpath = approot + "/resource/" + res
        filetool.directory(os.path.dirname(targetpath))
        shutil.copy(sourcepath, targetpath)
        return

    # ----------------------------------------------------------------------
    context.console.info("Processing resources: ", False)
    approot = context.jobconf.get("provider/app-root", "./provider")
    filetool.directory(approot+"/data")
    filetool.directory(approot+"/resource")
    
    # quick copy of runLogResources, for fast results
    packages   = script.packagesSorted()
    parts      = script.parts
    variants   = script.variants

    allresources = {}
    if filtered:
        # -- the next call is fake, just to populate package.data.resources!
        _ = generator._codeGenerator.generateResourceInfoCode(script, generator._settings, context.jobconf.get("library",[]))
        for packageId, package in enumerate(packages):
            allresources.update(package.data.resources)
    else:
        # get the main library
        mainlib = [x for x in script.libraries if x.namespace == script.namespace][0]
        reslist = mainlib.getResources()
        allresources = ResourceHandler.createResourceStruct(reslist, updateOnlyExistingSprites = False)

    # get resource info
    resinfos = {}
    numResources = len(allresources)
    for num,res in enumerate(allresources):
        context.console.progress(num+1, numResources)
        # fake a classId-like resourceId ("a.b.c"), for filter matching
        resId = os.path.splitext(res)[0]
        resId = resId.replace("/", ".")
        if filtered and not passesOutputfilter(resId):
            continue
        resinfos[res] = createResourceInfo(res, allresources[res])
        # extract library name space
        if isinstance(allresources[res], types.ListType): # it's an image = [14, 14, u'png', u'qx' [, u'qx/decoration/Modern/checkradio-combined.png', 0, 0]]
            library_ns = allresources[res][3]
        else: # html page etc. = "qx"
            library_ns = allresources[res]
        if library_ns:  # library_ns == '' means embedded image -> no copying
            library    = libraries[library_ns]
            copyResource(res, library)

    filetool.save(approot+"/data/resource/resources.json", json.dumpsCode(resinfos))

    return
Example #4
    def write(self, cacheId, content, memory=False, writeToFile=True, keepLock=False):
        filetool.directory(self._path)
        cacheFile = os.path.join(self._path, self.filename(cacheId))

        if writeCond(cacheId):
            print "\nWriting %s ..." % (cacheId,),
        if writeToFile:
            try:
                if not cacheFile in self._locked_files:
                    self._locked_files.add(cacheFile)  # this is not atomic with the next one!
                    filetool.lock(cacheFile)

                fobj = open(cacheFile, 'wb')

                pickle.dump(content, fobj, 2)

                fobj.close()
                if not keepLock:
                    filetool.unlock(cacheFile)
                    self._locked_files.remove(cacheFile)  # not atomic with the previous one!

                #print "wrote cacheId: %s" % cacheId
                if writeCond(cacheId):
                    print "to disk"

            except (IOError, EOFError, pickle.PickleError, pickle.PicklingError), e:
                e.args = ("Could not store cache to %s\n" % self._path + e.args[0], ) + e.args[1:]
                raise e
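
writeCond is a debugging helper that is not part of this listing; judging from its use, it gates the trace output to a subset of cache ids. A minimal stand-in, assuming a simple prefix match (the prefix itself is made up):

TRACE_PREFIX = "tree-"  # hypothetical prefix of the cache ids to trace

def writeCond(cacheId):
    # trace only cache ids that match the prefix above
    return cacheId.startswith(TRACE_PREFIX)
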
Example #5
    def read(self, cacheId, dependsOn=None, memory=False):
        if memcache.has_key(cacheId):
            return memcache[cacheId]

        filetool.directory(self._path)
        cacheFile = os.path.join(self._path, self.filename(cacheId))

        try:
            cacheModTime = os.stat(cacheFile).st_mtime
        except OSError:
            return None

        # Out of date check
        if dependsOn:
            fileModTime = os.stat(dependsOn).st_mtime
            if fileModTime > cacheModTime:
                return None

        try:
            content = cPickle.load(open(cacheFile, 'rb'))

            if memory:
                memcache[cacheId] = content

            return content

        except (IOError, EOFError, cPickle.PickleError,
                cPickle.UnpicklingError):
            self._console.error("Could not read cache from %s" % self._path)
            return None
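
For context: memcache in these cache classes is just a module-level dict, so memory=True keeps an additional process-local copy of the content. A hypothetical round trip through this read/write pair (the constructor arguments, ids, and helper are invented for illustration):

cache = Cache("/tmp/qx-cache", console)  # constructor signature assumed
cache.write("meta-myapp", {"parts": ["boot"]}, memory=True)

content = cache.read("meta-myapp", dependsOn="config.json")
if content is None:
    # miss, or config.json is newer than the cached entry: recompute and re-cache
    content = recalculate()  # hypothetical
    cache.write("meta-myapp", content)
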
Example #6
    def read(self, cacheId, dependsOn=None, memory=False):
        if memcache.has_key(cacheId):
            return memcache[cacheId]

        filetool.directory(self._path)
        cacheFile = os.path.join(self._path, self.filename(cacheId))

        try:
            cacheModTime = os.stat(cacheFile).st_mtime
        except OSError:
            return None

        # Out of date check
        if dependsOn:
            fileModTime = os.stat(dependsOn).st_mtime
            if fileModTime > cacheModTime:
                return None

        try:
            content = cPickle.load(open(cacheFile, 'rb'))

            if memory:
                memcache[cacheId] = content

            return content

        except (IOError, EOFError, cPickle.PickleError, cPickle.UnpicklingError):
            self._console.error("Could not read cache from %s" % self._path)
            return None
Example #7
    def archive_download(self, url, cache_path, checksum):
        rc = 0
        # Download
        arcfile = os.path.join(cache_path, os.path.basename(url))
        tdir = os.path.dirname(arcfile)
        filetool.directory(tdir)
        tfp = open(arcfile, "wb")
        #(fname, urlinfo) = urllib.urlretrieve(url, arcfile)
        urlobj = urllib.urlopen(url)
        assert urlobj.getcode() == 200, "Could not download the contrib archive: %s" % url
        hashobj = self.copy_and_hash(urlobj.fp, tfp)
        assert hashobj.hexdigest()==checksum, "Checksum of archive does not validate (should be: %s): %s" % (checksum, arcfile)
        urlobj.close()
        tfp.close()

        # Extract
        if url.endswith('.zip'):
            zipf = ZipFile(arcfile, 'r')
            zipf.extractall(tdir)
            zipf.close()
        else: # .tar, .tgz, .tar.gz, .tar.bz2
            tar = tarfile.open(arcfile)
            tar.extractall(tdir)
            tar.close()

        # Eliminate archive top-dir
        _, archive_dirs, _ = os.walk(tdir).next()
        assert archive_dirs, "The downloaded archive is not in single top-dir format: %s" % arcfile
        archive_top = os.path.join(tdir, archive_dirs[0]) # just take the first dir entry
        for item in os.listdir(archive_top):
            shutil.move(os.path.join(archive_top, item), tdir)
        os.rmdir(archive_top)
        os.unlink(arcfile)

        return rc
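
copy_and_hash is referenced above but not defined on this page; presumably it streams the HTTP response into the target file while updating a digest on the way. A sketch under that assumption (the hash algorithm is a guess and only has to match whatever produced checksum):

import hashlib

def copy_and_hash(source, target, algo="sha1", chunk_size=8192):
    # copy the source file object to target, hashing the bytes in transit
    hashobj = hashlib.new(algo)
    while True:
        chunk = source.read(chunk_size)
        if not chunk:
            break
        hashobj.update(chunk)
        target.write(chunk)
    return hashobj
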
Example #8
    def read(self, cacheId, dependsOn=None, memory=False, keepLock=False):
        if dependsOn:
            dependsModTime = os.stat(dependsOn).st_mtime

        if writeCond(cacheId):
            print "\nReading %s ..." % (cacheId, ),
        # Mem cache
        if cacheId in memcache:
            memitem = memcache[cacheId]
            if not dependsOn or dependsModTime < memitem['time']:
                if writeCond(cacheId):
                    print "from memcache"
                return memitem['content'], memitem['time']

        # File cache
        filetool.directory(self._path)
        cacheFile = os.path.join(self._path, self.filename(cacheId))

        try:
            cacheModTime = os.stat(cacheFile).st_mtime
        except OSError:
            return None, None

        # out of date check
        if dependsOn and dependsModTime > cacheModTime:
            return None, cacheModTime

        try:
            if not cacheFile in self._locked_files:
                self._locked_files.add(cacheFile)
                filetool.lock(cacheFile)

            fobj = open(cacheFile, 'rb')
            #filetool.lock(fobj.fileno())

            gc.disable()
            try:
                content = pickle.loads(fobj.read().decode('zlib'))
            finally:
                gc.enable()

            #filetool.unlock(fobj.fileno())
            fobj.close()
            if not keepLock:
                filetool.unlock(cacheFile)
                self._locked_files.remove(cacheFile)

            if memory:
                memcache[cacheId] = {'content': content, 'time': time.time()}

            #print "read cacheId: %s" % cacheId
            if writeCond(cacheId):
                print "from disk"
            return content, cacheModTime

        except (IOError, EOFError, pickle.PickleError, pickle.UnpicklingError):
            self._console.warn(
                "Could not read cache object %s, recalculating..." % cacheFile)
            return None, cacheModTime
Example #9
 def _check_path(self, path):
     if not os.path.exists(path):
         filetool.directory(path)
     elif not os.path.isdir(path):
         raise RuntimeError, "The cache path is not a directory: %s" % path
     else: # it's an existing directory
         # defer read/write access to the first call of read()/write()
         pass
Example #10
    def combine(self, combined, files, horizontal):
        self._console.indent()
        montage_cmd = "montage -geometry +0+0 -gravity NorthWest -tile %s -background None %s %s"
        if horizontal:
            orientation = "x1"
        else:
            orientation = "1x"

        # combine
        config = []
        clips = []
        top = 0
        left = 0
        allfiles = []
        for file in files:
            allfiles.extend(glob.glob(file))
        #self._console.debug("Combining the following images: %r" % allfiles)
        for file in allfiles:
            if not os.path.exists(file):
                self._console.warn("Non-existing file spec, skipping: %s" %
                                   file)
                continue
            clips.append(file)
            imginfo = self._imageInfo.getImageInfo(file, file)
            width, height = imginfo['width'], imginfo['height']
            config.append({
                'file': file,
                'combined': combined,
                'left': -left,
                'top': -top,
                'width': width,
                'height': height,
                'type': imginfo['type']
            })
            if horizontal:
                left += width
            else:
                top += height

        if len(clips) == 0:
            self._console.warn("No images to combine; skipping")
        else:
            filetool.directory(os.path.dirname(combined))
            (fileDescriptor, tempPath) = tempfile.mkstemp(text=True,
                                                          dir=os.curdir)
            temp = os.fdopen(fileDescriptor, "w")
            temp.write("\n".join(clips))
            temp.close()
            cmd = montage_cmd % (orientation, "@" + os.path.basename(tempPath),
                                 combined)
            rc = os.system(cmd)
            os.unlink(tempPath)
            if rc != 0:
                raise RuntimeError, "The montage command (%s) failed with the following return code: %d" % (
                    cmd, rc)

        self._console.outdent()
        return config
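
To make the shell-out concrete: for a horizontal combine the generated command line expands roughly as follows (the temp file name is invented):

cmd = montage_cmd % ("x1", "@tmp123", "combined.png")
# -> montage -geometry +0+0 -gravity NorthWest -tile x1 -background None @tmp123 combined.png
# ImageMagick reads the list of input images from the @-prefixed file, one path per line.
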
Example #11
    def read(self, cacheId, dependsOn=None, memory=False, keepLock=False):
        if dependsOn:
            dependsModTime = os.stat(dependsOn).st_mtime

        if writeCond(cacheId):
            print "\nReading %s ..." % (cacheId,),
        # Mem cache
        if cacheId in memcache:
            memitem = memcache[cacheId]
            if not dependsOn or dependsModTime < memitem['time']:
                if writeCond(cacheId):
                    print "from memcache"
                return memitem['content'], memitem['time']

        # File cache
        filetool.directory(self._path)
        cacheFile = os.path.join(self._path, self.filename(cacheId))

        try:
            cacheModTime = os.stat(cacheFile).st_mtime
        except OSError:
            return None, None

        # out of date check
        if dependsOn and dependsModTime > cacheModTime:
            return None, cacheModTime

        try:
            if not cacheFile in self._locked_files:
                self._locked_files.add(cacheFile)
                filetool.lock(cacheFile)

            fobj = open(cacheFile, 'rb')
            #filetool.lock(fobj.fileno())

            gc.disable()
            try:
                content = pickle.loads(fobj.read().decode('zlib'))
            finally:
                gc.enable()

            #filetool.unlock(fobj.fileno())
            fobj.close()
            if not keepLock:
                filetool.unlock(cacheFile)
                self._locked_files.remove(cacheFile)

            if memory:
                memcache[cacheId] = {'content':content, 'time': time.time()}

            #print "read cacheId: %s" % cacheId
            if writeCond(cacheId):
                print "from disk"
            return content, cacheModTime

        except (IOError, EOFError, pickle.PickleError, pickle.UnpicklingError):
            self._console.warn("Could not read cache object %s, recalculating..." % cacheFile)
            return None, cacheModTime
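
Note that this variant expects the on-disk entry to be zlib-compressed (fobj.read().decode('zlib')), so the matching write presumably applies the reverse transformation. The core of such a write would look like this (a sketch, not shown in this listing):

fobj = open(cacheFile, 'wb')
fobj.write(pickle.dumps(content, 2).encode('zlib'))  # compress the pickle stream
fobj.close()
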
Example #12
    def combine(self, combined, files, horizontal):
        self._console.indent()
        montage_cmd = "montage -geometry +0+0 -gravity NorthWest -tile %s -background None %s %s"
        if horizontal:
            orientation = "x1"
        else:
            orientation = "1x"

        # combine
        config = []
        clips = []
        top = 0
        left = 0
        allfiles = []
        for file in files:
            allfiles.extend(glob.glob(file))
        # self._console.debug("Combining the following images: %r" % allfiles)
        for file in allfiles:
            if not os.path.exists(file):
                self._console.warn("Non-existing file spec, skipping: %s" % file)
                continue
            clips.append(file)
            imginfo = self._imageInfo.getImageInfo(file, file)
            width, height = imginfo["width"], imginfo["height"]
            config.append(
                {
                    "file": file,
                    "combined": combined,
                    "left": -left,
                    "top": -top,
                    "width": width,
                    "height": height,
                    "type": imginfo["type"],
                }
            )
            if horizontal:
                left += width
            else:
                top += height

        if len(clips) == 0:
            self._console.warn("No images to combine; skipping")
        else:
            filetool.directory(os.path.dirname(combined))
            (fileDescriptor, tempPath) = tempfile.mkstemp(text=True, dir=os.curdir)
            temp = os.fdopen(fileDescriptor, "w")
            temp.write("\n".join(clips))
            temp.close()
            cmd = montage_cmd % (orientation, "@" + os.path.basename(tempPath), combined)
            rc = os.system(cmd)
            os.unlink(tempPath)
            if rc != 0:
                raise RuntimeError, "The montage command (%s) failed with the following return code: %d" % (cmd, rc)

        self._console.outdent()
        return config
Example #13
 def copyResource(res, library):
     if skip_expression.search(os.path.basename(res)):
         return
     sourcepath = os.path.join(library.resourcePath, res)
     targetpath = approot + "/resource/" + res
     filetool.directory(os.path.dirname(targetpath))
     shutil.copy(sourcepath, targetpath)
     #copier = copytool.CopyTool(context.console)
     #args   = ['-x', ','.join(filetool.VERSIONCONTROL_DIR_PATTS), sourcepath, targetpath]
     #copier.parse_args(args)
     #copier.do_work()
     return
Example #14
 def copyResource(res, library):
     if skip_expression.search(os.path.basename(res)):
         return
     sourcepath = os.path.join(library.resourcePath, res)
     targetpath = approot + "/resource/" + res
     filetool.directory(os.path.dirname(targetpath))
     shutil.copy(sourcepath, targetpath)
     #copier = copytool.CopyTool(context.console)
     #args   = ['-x', ','.join(filetool.VERSIONCONTROL_DIR_PATTS), sourcepath, targetpath]
     #copier.parse_args(args)
     #copier.do_work()
     return
Example #15
def main():
    apidata = {}
    apidata['type'] = 'doctree'
    apidata['children'] = []
    apidata['attributes'] = {}
    apidata['children'].append({
        "type": "packages",
        "attributes": {},
        "children": []
    })
    filetool.directory(store_path)

    dirwalker = filetool.find(module_root, r'\.py$')

    for pyfile in dirwalker:
        #if os.stat(pyfile).st_size == 0:
        #    continue
        # get the file's api doc as json
        filejson = pyapi2json(pyfile)
        apipackage = file2package(pyfile, module_root)
        # and store it
        filetool.save(store_path + '/' + apipackage + '.json', filejson)
        # make an entry in apidata struct
        levels = apipackage.split('.')
        curr = apidata['children'][0]['children']
        for pos, level in enumerate(levels):
            if level not in (x['attributes']['name'] for x in curr
                             if 'name' in x['attributes']):
                newentry = {
                    "children": [],
                    "type": "packages" if pos % 2 else "package",
                    "attributes": {
                        "packageName": ".".join(levels[:pos]),
                        "name": level,
                        "fullName": ".".join(levels[:pos + 1])
                    }
                }
                if pos == len(levels) - 1:
                    newentry["externalRef"] = True
                    #del newentry['children']
                    #newentry["type"] = "classes"
                    pass
                curr.append(newentry)
                curr = newentry['children']
            else:
                curr = [
                    x['children'] for x in curr
                    if x['attributes']['name'] == level
                ][0]

    # store apidata
    filetool.save(store_path + '/' + "apidata.json", json.dumps(apidata))
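
pyapi2json and file2package are defined elsewhere in this script; the latter presumably maps a file path under module_root to a dotted package name. A sketch under that assumption:

import os

def file2package(pyfile, module_root):
    # e.g. <module_root>/misc/filetool.py -> "misc.filetool"
    relpath = os.path.splitext(os.path.relpath(pyfile, module_root))[0]
    return relpath.replace(os.sep, ".")
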
Example #16
 def _check_path(self, path):
     self._console.indent()
     self._console.debug("Checking path '%s'" % path)
     if not os.path.exists(path):
         self._console.debug("Creating non-existing cache directory")
         filetool.directory(path)
         self._update_checkfile()
     elif not os.path.isdir(path):
         raise RuntimeError, "The cache path is not a directory: %s" % path
     else:  # it's an existing directory
         # defer read/write access test to the first call of read()/write()
         self._console.debug("Using existing directory")
         pass
     self._console.outdent()
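
_update_checkfile is not shown here; given that it runs right after the cache directory is created, it plausibly stamps the cache so that a later run can detect an incompatible layout. One possible shape (the file name and version attribute are assumptions):

def _update_checkfile(self):
    # stamp the cache dir, e.g. with the tool version, so later runs can
    # detect and discard an incompatible cache
    fobj = open(os.path.join(self._path, ".cache_check_file"), "w")
    fobj.write(self._version)  # hypothetical version attribute
    fobj.close()
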
Example #17
 def _check_path(self, path):
     self._console.indent()
     self._console.debug("Checking path '%s'" % path)
     if not os.path.exists(path):
         self._console.debug("Creating non-existing cache directory")
         filetool.directory(path)
         self._update_checkfile()
     elif not os.path.isdir(path):
         raise RuntimeError, "The cache path is not a directory: %s" % path
     else: # it's an existing directory
         # defer read/write access test to the first call of read()/write()
         self._console.debug("Using existing directory")
         pass
     self._console.outdent()
Example #18
    def write(self, cacheId, content, memory=False, writeToFile=True):
        filetool.directory(self._path)
        cacheFile = os.path.join(self._path, self.filename(cacheId))

        if writeToFile:
            try:
                cPickle.dump(content, open(cacheFile, 'wb'), 2)
    
            except (IOError, EOFError, cPickle.PickleError, cPickle.PicklingError):
                self._console.error("Could not store cache to %s" % self._path)
                sys.exit(1)

        if memory:
            memcache[cacheId] = content
Example #19
    def combine(self, combined, files, horizontal, type="extension"):
        self._console.indent()
        if horizontal:
            orientation = "x1"
        else:
            orientation = "1x"

        # combine
        config = []
        clips = []
        top = 0
        left = 0
        allfiles = []
        for file in files:
            allfiles.extend(glob.glob(file))
        #self._console.debug("Combining the following images: %r" % allfiles)
        for file in allfiles:
            if not os.path.exists(file):
                self._console.warn("Non-existing file spec, skipping: %s" %
                                   file)
                continue
            clips.append(file)
            imginfo = Image(file).getInfoMap()
            width, height = imginfo['width'], imginfo['height']
            config.append({
                'file': file,
                'combined': combined,
                'left': -left,
                'top': -top,
                'width': width,
                'height': height,
                'type': imginfo['type']
            })
            if horizontal:
                left += width
            else:
                top += height

        if len(clips) == 0:
            self._console.warn("No images to combine; skipping")
        else:
            filetool.directory(os.path.dirname(combined))
            if type == "extension":
                self.combineImgMagick(clips, combined, orientation)
            elif type == "base64":
                self.combineBase64(config)

        self._console.outdent()
        return config
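
combineBase64 is referenced but not listed; given the config entries it receives, it presumably inlines each clip as base64 data instead of montaging the files into one image. A sketch under that assumption (the output format is invented):

import base64, json

def combineBase64(self, config):
    # map each source file to its geometry plus base64-encoded contents
    data = {}
    for clip in config:
        fobj = open(clip['file'], 'rb')
        data[clip['file']] = {
            'width': clip['width'],
            'height': clip['height'],
            'type': clip['type'],
            'encoding': 'base64',
            'data': base64.b64encode(fobj.read()),
        }
        fobj.close()
    out = open(config[0]['combined'], 'wb')
    out.write(json.dumps(data))
    out.close()
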
Example #20
def _handleResources(script, generator):

    def createResourceInfo(res, resval):
        resinfo = [ { "target": "resource", "data": { res : resval }} ]
        #filetool.save(approot+"/data/resource/" + res + ".json", json.dumpsCode(resinfo))
        return resinfo

    def copyResource(res, library):
        sourcepath = os.path.join(library['path'], library['resource'], res)
        targetpath = approot + "/resource/" + res
        filetool.directory(os.path.dirname(targetpath))
        shutil.copy(sourcepath, targetpath)
        return

    # ----------------------------------------------------------------------
    approot = context.jobconf.get("provider/app-root", "./provider")
    filetool.directory(approot+"/data")
    filetool.directory(approot+"/resource")
    
    # quick copy of runLogResources, for fast results
    packages   = script.packagesSortedSimple()
    parts      = script.parts
    variants   = script.variants

    allresources = {}
    # get resource info
    # -- the next call is fake, just to populate package.data.resources!
    _ = generator._codeGenerator.generateResourceInfoCode(script, generator._settings, context.jobconf.get("library",[]))
    for packageId, package in enumerate(packages):
        allresources.update(package.data.resources)
    
    resinfos = {}
    for res in allresources:
        # fake a classId-like resourceId ("a.b.c"), for filter matching
        resId = os.path.splitext(res)[0]
        resId = resId.replace("/", ".")
        if passesOutputfilter(resId):
            resinfos[res] = createResourceInfo(res, allresources[res])
            # extract library name space
            if isinstance(allresources[res], types.ListType): # it's an image = [14, 14, u'png', u'qx' [, u'qx/decoration/Modern/checkradio-combined.png', 0, 0]]
                library_ns = allresources[res][3]
            else: # html page etc. = "qx"
                library_ns = allresources[res]
            library    = libraries[library_ns]
            copyResource(res, library)

    filetool.save(approot+"/data/resource/resources.json", json.dumpsCode(resinfos))

    return
Example #21
    def write(self, cacheId, content, memory=False, writeToFile=True):
        filetool.directory(self._path)
        cacheFile = os.path.join(self._path, self.filename(cacheId))

        if writeToFile:
            try:
                cPickle.dump(content, open(cacheFile, 'wb'), 2)

            except (IOError, EOFError, cPickle.PickleError,
                    cPickle.PicklingError):
                self._console.error("Could not store cache to %s" % self._path)
                sys.exit(1)

        if memory:
            memcache[cacheId] = content
Example #22
 def _check_path(self, path):
     self._console.indent()
     self._console.debug("Checking path '%s'" % path)
     if not os.path.exists(path):
         self._console.debug("Creating non-existing cache directory")
         filetool.directory(path)
         self._update_checkfile()
     elif not os.path.isdir(path):
         raise RuntimeError, "The cache path is not a directory: %s" % path
     else: # it's an existing directory
         # defer read/write access test to the first call of read()/write()
         self._console.debug("Using existing directory")
         pass
     if len(os.listdir(path)) < CACHE_THRESHOLD: # not even minimal framework classes cached
         self._console.info("Populating the cache, this may take some time")
     self._console.outdent()
Example #23
 def _check_path(self, path):
     self._console.indent()
     self._console.debug("Checking path '%s'" % path)
     if not os.path.exists(path):
         self._console.debug("Creating non-existing cache directory")
         filetool.directory(path)
         self._update_checkfile()
     elif not os.path.isdir(path):
         raise RuntimeError, "The cache path is not a directory: %s" % path
     else: # it's an existing directory
         # defer read/write access test to the first call of read()/write()
         self._console.debug("Using existing directory")
         pass
     if len(os.listdir(path)) < CACHE_THRESHOLD: # not even minimal framework classes cached
         self._console.info("Populating the cache, this may take some time")
     self._console.outdent()
Example #24
    def read(self, cacheId, dependsOn=None, memory=False, keepLock=False):
        if dependsOn:
            dependsModTime = os.stat(dependsOn).st_mtime

        # Mem cache
        if cacheId in memcache:
            memitem = memcache[cacheId]
            if not dependsOn or dependsModTime < memitem['time']:
                return memitem['content'], memitem['time']

        # File cache
        filetool.directory(self._path)
        cacheFile = os.path.join(self._path, self.filename(cacheId))

        try:
            cacheModTime = os.stat(cacheFile).st_mtime
        except OSError:
            return None, None

        # out of date check
        if dependsOn and dependsModTime > cacheModTime:
            return None, cacheModTime

        try:
            if not cacheFile in self._locked_files:
                self._locked_files.add(cacheFile)
                filetool.lock(cacheFile)

            fobj = open(cacheFile, 'rb')
            #filetool.lock(fobj.fileno())

            content = pickle.load(fobj)

            #filetool.unlock(fobj.fileno())
            fobj.close()
            if not keepLock:
                filetool.unlock(cacheFile)
                self._locked_files.remove(cacheFile)

            if memory:
                memcache[cacheId] = {'content': content, 'time': time.time()}

            return content, cacheModTime

        except (IOError, EOFError, pickle.PickleError, pickle.UnpicklingError):
            self._console.error("Could not read cache from %s" % self._path)
            return None, cacheModTime
Example #25
def main():
    apidata = {}
    apidata['type'] = 'doctree'
    apidata['children'] = []
    apidata['attributes'] = {}
    apidata['children'].append({
      "type":"packages","attributes":{},"children":[]  
    })
    filetool.directory(store_path)

    dirwalker = filetool.find(module_root, r'\.py$')

    for pyfile in dirwalker:
        #if os.stat(pyfile).st_size == 0:
        #    continue
        # get the file's api doc as json
        filejson = pyapi2json(pyfile)
        apipackage = file2package(pyfile, module_root)
        # and store it
        filetool.save(store_path+'/'+apipackage+'.json', filejson)
        # make an entry in apidata struct
        levels = apipackage.split('.')
        curr = apidata['children'][0]['children']
        for pos,level in enumerate(levels):
            if level not in (x['attributes']['name'] for x in curr if 'name' in x['attributes']):
                newentry = {
                    "children" : [],
                    "type" : "packages" if pos % 2 else "package",
                    "attributes" : {
                        "packageName" : ".".join(levels[:pos]),
                        "name" : level,
                        "fullName" : ".".join(levels[:pos+1])
                    }
                }
                if pos==len(levels)-1:
                    newentry["externalRef"] = True
                    #del newentry['children']
                    #newentry["type"] = "classes"
                    pass
                curr.append(newentry)
                curr = newentry['children']
            else:
                curr = [x['children'] for x in curr if x['attributes']['name']==level][0]
        

    # store apidata
    filetool.save(store_path+'/'+"apidata.json", json.dumps(apidata))
Example #26
    def read(self, cacheId, dependsOn=None, memory=False, keepLock=False):
        if dependsOn:
            dependsModTime = os.stat(dependsOn).st_mtime

        # Mem cache
        if cacheId in memcache:
            memitem = memcache[cacheId]
            if not dependsOn or dependsModTime < memitem['time']:
                return memitem['content'], memitem['time']

        # File cache
        filetool.directory(self._path)
        cacheFile = os.path.join(self._path, self.filename(cacheId))

        try:
            cacheModTime = os.stat(cacheFile).st_mtime
        except OSError:
            return None, None

        # out of date check
        if dependsOn and dependsModTime > cacheModTime:
            return None, cacheModTime

        try:
            if not cacheFile in self._locked_files:
                self._locked_files.add(cacheFile)
                filetool.lock(cacheFile)

            fobj = open(cacheFile, 'rb')
            #filetool.lock(fobj.fileno())

            content = pickle.load(fobj)

            #filetool.unlock(fobj.fileno())
            fobj.close()
            if not keepLock:
                filetool.unlock(cacheFile)
                self._locked_files.remove(cacheFile)

            if memory:
                memcache[cacheId] = {'content':content, 'time': time.time()}

            return content, cacheModTime

        except (IOError, EOFError, pickle.PickleError, pickle.UnpicklingError):
            self._console.error("Could not read cache from %s" % self._path)
            return None, cacheModTime
Example #27
def _handleCode(script, generator):

    approot = context.jobconf.get("provider/app-root", "./provider")
    filetool.directory(approot + "/code")

    for clazz in script.classesObj:
        # register library (for _handleResources)
        if clazz.library['namespace'] not in libraries:
            libraries[clazz.library['namespace']] = clazz.library

        if passesOutputfilter(clazz.id, ):
            classAId   = clazz.id.replace(".","/") + ".js"
            sourcepath = os.path.join(clazz.library['path'], clazz.library['class'], classAId) # TODO: this should be a class method
            targetpath = approot + "/code/" + classAId
            filetool.directory(os.path.dirname(targetpath))
            shutil.copy(sourcepath, targetpath)
    return
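
passesOutputfilter is module-level state shared by these provider handlers; it presumably matches class and resource ids against include/exclude patterns from the job configuration. A minimal stand-in using fnmatch-style globs (the patterns and defaults are assumptions):

import fnmatch

def passesOutputfilter(entryId, includes=("*",), excludes=()):
    # keep the id if it matches an include pattern and no exclude pattern
    if not any(fnmatch.fnmatch(entryId, patt) for patt in includes):
        return False
    return not any(fnmatch.fnmatch(entryId, patt) for patt in excludes)
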
Example #28
    def write(self,
              cacheId,
              content,
              memory=False,
              writeToFile=True,
              keepLock=False):
        filetool.directory(self._path)
        cacheFile = os.path.join(self._path, self.filename(cacheId))

        if writeCond(cacheId):
            print "\nWriting %s ..." % (cacheId, ),
        if writeToFile:
            try:
                if not cacheFile in self._locked_files:
                    self._locked_files.add(cacheFile)  # this is not atomic with the next one!
                    filetool.lock(cacheFile)

                fobj = open(cacheFile, 'wb')

                pickle.dump(content, fobj, 2)

                fobj.close()
                if not keepLock:
                    filetool.unlock(cacheFile)
                    self._locked_files.remove(cacheFile)  # not atomic with the previous one!

                #print "wrote cacheId: %s" % cacheId
                if writeCond(cacheId):
                    print "to disk"

            except (IOError, EOFError, pickle.PickleError,
                    pickle.PicklingError), e:
                try:
                    os.unlink(cacheFile)  # try to remove the cache file; pickle might leave incomplete files
                except:
                    e.args = ("Cache file might be crippled.\n" % self._path +
                              e.args[0], ) + e.args[1:]
                e.args = ("Could not store cache to %s.\n" % self._path +
                          e.args[0], ) + e.args[1:]
                raise e
Example #29
    def write(self, cacheId, content, memory=False, writeToFile=True):
        filetool.directory(self._path)
        cacheFile = os.path.join(self._path, self.filename(cacheId))

        if writeToFile:
            try:
                self._locked_files.add(cacheFile)  # this is not atomic with the next one!
                filetool.lock(cacheFile)
                fobj = open(cacheFile, 'wb')

                pickle.dump(content, fobj, 2)

                fobj.close()
                filetool.unlock(cacheFile)
                self._locked_files.remove(cacheFile)  # not atomic with the previous one!

            except (IOError, EOFError, pickle.PickleError, pickle.PicklingError), e:
                e.args = ("Could not store cache to %s\n" % self._path + e.args[0], ) + e.args[1:]
                raise e
Example #30
    def combine(self, combined, files, horizontal, type="extension"):
        self._console.indent()
        if horizontal:
            orientation = "x1"
        else:
            orientation = "1x"

        # combine
        config = []
        clips = []
        top = 0
        left = 0
        allfiles = []
        for file in files:
            allfiles.extend(glob.glob(file))
        #self._console.debug("Combining the following images: %r" % allfiles)
        for file in allfiles:
            if not os.path.exists(file):
                self._console.warn("Non-existing file spec, skipping: %s" % file)
                continue
            clips.append(file)
            imginfo = Image(file).getInfoMap()
            width, height = imginfo['width'], imginfo['height']
            config.append({'file':file, 'combined':combined, 'left': -left,
                           'top': -top, 'width':width, 'height':height, 'type':imginfo['type']})
            if horizontal:
                left += width
            else:
                top += height

        if len(clips) == 0:
            self._console.warn("No images to combine; skipping")
        else:
            filetool.directory(os.path.dirname(combined))
            if type == "extension":
                self.combineImgMagick(clips, combined, orientation)
            elif type == "base64":
                self.combineBase64(config)

        self._console.outdent()
        return config
Example #31
    def read(self, cacheId, dependsOn=None, memory=False):
        if memcache.has_key(cacheId):
            return memcache[cacheId]

        filetool.directory(self._path)
        cacheFile = os.path.join(self._path, self.filename(cacheId))

        try:
            cacheModTime = os.stat(cacheFile).st_mtime
        except OSError:
            return None

        # Out of date check
        if dependsOn:
            fileModTime = os.stat(dependsOn).st_mtime
            if fileModTime > cacheModTime:
                return None

        try:
            self._locked_files.add(cacheFile)
            filetool.lock(cacheFile)

            fobj = open(cacheFile, 'rb')
            #filetool.lock(fobj.fileno())

            content = pickle.load(fobj)

            #filetool.unlock(fobj.fileno())
            fobj.close()
            filetool.unlock(cacheFile)
            self._locked_files.remove(cacheFile)

            if memory:
                memcache[cacheId] = content

            return content

        except (IOError, EOFError, pickle.PickleError, pickle.UnpicklingError):
            self._console.error("Could not read cache from %s" % self._path)
            return None
Example #32
    def archive_download(self, url, cache_path, checksum):
        rc = 0
        # Download
        arcfile = os.path.join(cache_path, os.path.basename(url))
        tdir = os.path.dirname(arcfile)
        filetool.directory(tdir)
        tfp = open(arcfile, "wb")
        #(fname, urlinfo) = urllib.urlretrieve(url, arcfile)
        urlobj = urllib.urlopen(url)
        assert urlobj.getcode() == 200, "Could not download the contrib archive: %s" % url
        hashobj = self.copy_and_hash(urlobj.fp, tfp)
        assert hashobj.hexdigest() == checksum, \
            "Checksum of archive does not validate (should be: %s): %s" % (checksum, arcfile)
        urlobj.close()
        tfp.close()

        # Extract
        if url.endswith('.zip'):
            zipf = ZipFile(arcfile, 'r')
            zipf.extractall(tdir)
            zipf.close()
        else:  # .tar, .tgz, .tar.gz, .tar.bz2
            tar = tarfile.open(arcfile)
            tar.extractall(tdir)
            tar.close()

        # Eliminate archive top-dir
        _, archive_dirs, _ = os.walk(tdir).next()
        assert archive_dirs, "The downloaded archive is not in single top-dir format: %s" % arcfile
        archive_top = os.path.join(tdir, archive_dirs[0])  # just take the first dir entry
        for item in os.listdir(archive_top):
            shutil.move(os.path.join(archive_top, item), tdir)
        os.rmdir(archive_top)
        os.unlink(arcfile)

        return rc
Example #33
def indexAll(options):
    if options.cacheDirectory != None:
        filetool.directory(options.cacheDirectory)

    print "  * Indexing class paths... "

    fileDb = {}
    moduleDb = {}
    listIndex = 0

    for classPath in options.classPath:
        print "    - Indexing: %s" % classPath
        counter = indexClassPath(classPath, listIndex, options, fileDb, moduleDb)
        print "      - %s classes were found" % counter
        listIndex += 1

    # Resolving auto-deps
    resolveAutoDeps(fileDb, options)

    if options.cacheDirectory != None:
        storeEntryCache(fileDb, options)

    return fileDb, moduleDb
Example #34
def _handleCode(script, generator):

    approot = context.jobconf.get("provider/app-root", "./provider")
    builds = context.jobconf.get("provider/compile", ["source"])

    for buildtype in builds:
        context.console.info("Processing %s version of classes:\t" % buildtype,
                             False)
        if buildtype == "source":
            targetdir = approot + "/code"
            filetool.directory(targetdir)
        elif buildtype == "build":
            targetdir = approot + "/code-build"
            filetool.directory(targetdir)
            optimize = context.jobconf.get(
                "compile-options/code/optimize",
                ["variables", "basecalls", "strings"])
            variantsettings = context.jobconf.get("variants", {})
            variantSets = util.computeCombinations(variantsettings)
        else:
            raise ConfigurationError("Unknown provider compile type '%s'" %
                                     buildtype)

        numClasses = len(script.classesObj)
        for num, clazz in enumerate(script.classesObj):
            context.console.progress(num + 1, numClasses)
            # register library (for _handleResources)
            if clazz.library.namespace not in libraries:
                libraries[clazz.library.namespace] = clazz.library

            if passesOutputfilter(clazz.id, ):
                classAId = clazz.id.replace(".", "/") + ".js"
                targetpath = targetdir + "/" + classAId
                filetool.directory(os.path.dirname(targetpath))
                if buildtype == "source":
                    shutil.copy(clazz.path, targetpath)
                elif buildtype == "build":
                    compOptions = CompileOptions(
                        optimize, variantSets[0]
                    )  # only support for a single variant set!
                    code = clazz.getCode(compOptions)
                    filetool.save(targetpath, code)

    return
Example #35
def _handleCode(script, generator):

    approot = context.jobconf.get("provider/app-root", "./provider")
    builds  = context.jobconf.get("provider/compile",  ["source"])

    for buildtype in builds:
        context.console.info("Processing %s version of classes:\t" % buildtype, False)
        if buildtype == "source":
            targetdir = approot + "/code"
            filetool.directory(targetdir)
        elif buildtype == "build":
            targetdir = approot + "/code-build"
            filetool.directory(targetdir)
            optimize = context.jobconf.get("compile-options/code/optimize", ["variables","basecalls","strings"])
            variantsettings = context.jobconf.get("variants", {})
            variantSets = util.computeCombinations(variantsettings)
        else:
            raise ConfigurationError("Unknown provider compile type '%s'" % buildtype)

        numClasses = len(script.classesObj)
        for num, clazz in enumerate(script.classesObj):
            context.console.progress(num+1, numClasses)
            # register library (for _handleResources)
            if clazz.library.namespace not in libraries:
                libraries[clazz.library.namespace] = clazz.library

            if passesOutputfilter(clazz.id, ):
                classAId   = clazz.id.replace(".","/") + ".js"
                targetpath = targetdir + "/" + classAId
                filetool.directory(os.path.dirname(targetpath))
                if buildtype == "source":
                    shutil.copy(clazz.path, targetpath)
                elif buildtype == "build":
                    compOptions = CompileOptions(optimize, variantSets[0]) # only support for a single variant set!
                    code = clazz.getCode(compOptions)
                    filetool.save(targetpath, code)

    return
Example #36
 def saveFile(self, path, cont):
     filetool.directory(os.path.dirname(path))
     if not path.endswith(self.urlPathSep):
         fo = open(path, 'wb')
         fo.write(cont)
         fo.close()
Example #37
 def saveFile(self, path, cont):
     filetool.directory(os.path.dirname(path))
     fo = open(path, 'wb')
     fo.write(cont)
     fo.close()
Example #38
 def copyResource(res, library):
     sourcepath = os.path.join(library._resourcePath, res)
     targetpath = approot + "/resource/" + res
     filetool.directory(os.path.dirname(targetpath))
     shutil.copy(sourcepath, targetpath)
     return
Example #39
 def saveFile(self, path, cont):
     filetool.directory(os.path.dirname(path))
     if not path.endswith(self.urlPathSep):
         fo = open(path, 'wb')
         fo.write(cont)
         fo.close()
Example #40
#
#  http://qooxdoo.org
#
#  Copyright:
#    2006-2013 1&1 Internet AG, Germany, http://www.1und1.de
#
#  License:
#    MIT: https://opensource.org/licenses/MIT
#    See the LICENSE file in the project's top-level directory for details.
#
#  Authors:
#    * Thomas Herchenroeder (thron7)
#
################################################################################

##
# NAME
#  mkdir.py -- a cross-platform mkdir
#
# SYNTAX
#  python mkdir.py <path>  -- create the path directory, potentially with intervening dirs
#
##

import os, sys
import qxenviron

from misc import filetool

filetool.directory(sys.argv[1])
Example #41
 def saveFile(self, path, cont):
     filetool.directory(os.path.dirname(path))
     fo = open(path, 'wb')
     fo.write(cont)
     fo.close()