Code example #1
def needsUpdate(target, dependencies):
    """
    Determine if the target is older than any of its dependencies.

    @param target: A filename for the target.
    @param dependencies: A sequence of dependency filenames.
    """
    if not path.exists(target):
        return True

    targetTime = path.getmtime(target)

    for dependency in dependencies:
        if type(dependency) in (str, unicode):
            filenames = [dependency]

        elif type(dependency) == types.ModuleType:
            filenames = _getModuleDependencies(dependency)

        else:
            raise TypeError, "Unknown dependency type %s" % (type(dependency))

        for filename in filenames:
            if path.getmtime(filename) > targetTime:
                return True
    else:
        return False
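
A minimal usage sketch of the same stale-target check, reduced to plain filenames (the paths below are hypothetical and only illustrate the call; this is not part of the original module):

from os import path

def needs_rebuild(target, sources):
    # Rebuild if the target is missing or any source file is newer than it.
    if not path.exists(target):
        return True
    target_time = path.getmtime(target)
    return any(path.getmtime(source) > target_time for source in sources)

if needs_rebuild('build/output.html', ['src/page.md', 'src/layout.html']):
    print('target is stale, rebuild it')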
Code example #2
File: jinja.py  Project: TurboGears/tg2
    def get_source(self, environment, template):
        # Check if dottedname
        if not template.endswith(self.template_extension):
            # Get the actual filename from dotted finder
            finder = self.dotted_finder
            template = finder.get_dotted_filename(template_name=template,
                                                  template_extension=self.template_extension)
        else:
            return FileSystemLoader.get_source(self, environment, template)

        # Check if the template exists
        if not exists(template):
            raise TemplateNotFound(template)

        # Get modification time
        mtime = getmtime(template)

        # Read the source
        fd = open(template, 'rb')
        try:
            source = fd.read().decode('utf-8')
        finally:
            fd.close()

        return source, template, lambda: mtime == getmtime(template)
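
get_source here follows the standard Jinja2 loader contract: it returns a (source, filename, uptodate) triple, and the uptodate callable lets Jinja2 decide, from the captured mtime, whether a cached compiled template can still be reused. A minimal sketch of a custom loader built on the same contract (the SimpleFileLoader name and the 'templates' root are illustrative, not part of TurboGears):

from os.path import exists, getmtime, join
from jinja2 import BaseLoader, TemplateNotFound

class SimpleFileLoader(BaseLoader):
    def __init__(self, root='templates'):
        self.root = root

    def get_source(self, environment, template):
        filename = join(self.root, template)
        if not exists(filename):
            raise TemplateNotFound(template)
        mtime = getmtime(filename)
        with open(filename, 'rb') as fd:
            source = fd.read().decode('utf-8')
        # Jinja2 calls the third element later; the compiled template is
        # considered up to date as long as the file's mtime has not changed.
        return source, filename, lambda: mtime == getmtime(filename)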
Code example #3
File: version.py  Project: RamonHPSilveira/urbansim
def main():
    sumoSrc = join(dirname(__file__), '..', '..', 'src')
    # determine output file
    if len(sys.argv) > 1:
        versionDir = sys.argv[1]
    else:
        versionDir = sumoSrc
    versionFile = join(versionDir, 'version.h')

    # determine svn dir
    if len(sys.argv) > 2:
        svnDir = sys.argv[2]
    else:
        svnDir = find_svnDir(sumoSrc)
    if svnDir == None or not exists(svnDir):
        print "unknown revision - svn dir '%s' not found" % svnDir
        if not exists(versionFile):
            create_version_file(versionFile, UNKNOWN_REVISION, "<None>")
    else:
        # determine svn file
        svnFile = find_svnFile(svnDir)
        if svnFile == None:
            print "unknown revision - no svn file found in %s" % svnDir
            if not exists(versionFile):
                create_version_file(versionFile, UNKNOWN_REVISION, "<None>")
        if not exists(versionFile) or getmtime(versionFile) < getmtime(svnFile):
            # svnFile is newer. lets update the revision number
            try:
                svnRevision = int(re.search(
                    'Revision: (\d*)\n',
                    Popen(['svn', 'info', sumoSrc], stdout=PIPE).communicate()[0]).group(1))
            except:
                svnRevision = parseRevision(svnFile)
            create_version_file(versionFile, svnRevision, svnFile)
Code example #4
File: smart_cache.py  Project: larsyencken/cjktools
def needs_update(target, dependencies):
    """
    Determine if the target is older than any of its dependencies.

    :param target:
        A filename for the target.
    :param dependencies:
        A sequence of dependency filenames.
    """
    if not path.exists(target):
        return True

    target_time = path.getmtime(target)

    for dependency in dependencies:
        if isinstance(dependency, string_types):
            filenames = [dependency]
        elif isinstance(dependency, types.ModuleType):
            filenames = _get_module_dependencies(dependency)
        else:
            raise TypeError("Unknown dependency type %s" % (type(dependency)))

        for filename in filenames:
            if path.getmtime(filename) > target_time:
                return True
    else:
        return False
Code example #5
File: renderer.py  Project: B-Rich/wakari-app-viewer
def nb_renderer(full_path):
    directory, base = split(full_path)
    cache_file = join(directory, '.%s.html' % base)
    if not current_app.config.get('DEBUG'):
        try:
            if isfile(cache_file) and getmtime(full_path) < getmtime(cache_file):
                current_app.logger.debug('Using Cache File %s' % cache_file)
                return raw_renderer(cache_file)
        except:
            current_app.logger.warn('There was an error reading from the cache file %s' % cache_file)

    ex = HTMLExporter(extra_loaders=[current_app.jinja_env.loader],
                      template_file='wakari_notebook.html')

    ex.environment.globals.update(current_app.jinja_env.globals)
    current_app.update_template_context(ex.environment.globals)
    ex.environment.globals.update(dirname=dirname(request.view_args['path']))

    output, _ = ex.from_filename(full_path)


    try:
        with open(cache_file, 'w') as fd:
            current_app.logger.debug('Writing Cache File %s' % cache_file)
            fd.write(output.encode(errors='replace'))
    except (OSError, IOError):
        current_app.logger.warn('There was an error writing to the cache file %s' % cache_file)
        try:
            if isfile(cache_file): os.unlink(cache_file)
        except OSError:
            current_app.logger.warn('There was an error removing the cache file %s' % cache_file)
            pass

    return output
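
The read side of this cache is deliberately forgiving: any problem with the cache file simply falls through to a fresh render. A stripped-down sketch of that read-through pattern, with hypothetical render() and read_cache() callables standing in for the exporter and raw_renderer above:

from os.path import getmtime, isfile

def read_through_cache(source, cache_file, render, read_cache):
    try:
        if isfile(cache_file) and getmtime(source) < getmtime(cache_file):
            return read_cache(cache_file)  # cache is newer than the source
    except OSError:
        pass  # unreadable or vanished cache: fall back to rendering
    return render(source)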
Code example #6
File: __init__.py  Project: beecycles/retriever
def MODULE_LIST(force_compile=False):
    """Load scripts from scripts directory and return list of modules."""
    modules = []
    
    for search_path in [search_path for search_path in SCRIPT_SEARCH_PATHS if exists(search_path)]:
        to_compile = [file for file in os.listdir(search_path)
                      if file[-7:] == ".script" and file[0] != "_"
                      and ((not isfile(join(search_path, file[:-7] + '.py'))) or 
                           (isfile(join(search_path, file[:-7] + '.py')) and
                            (getmtime(join(search_path, file[:-7] + '.py')) < 
                             getmtime(join(search_path, file))))
                            or force_compile)
                          ]
        for script in to_compile:
            script_name = '.'.join(script.split('.')[:-1])
            compile_script(join(search_path, script_name))
    
        files = [file for file in os.listdir(search_path)
                 if file[-3:] == ".py" and file[0] != "_"
                 and '#retriever' in open(join(search_path, file), 'r').readline().lower()]
    
        for script in files:
            script_name = '.'.join(script.split('.')[:-1])
            file, pathname, desc = imp.find_module(script_name, [search_path])
            try:
                new_module = imp.load_module(script_name, file, pathname, desc)
                new_module.SCRIPT.download
                modules.append(new_module)
            except:
                sys.stderr.write("Failed to load script: %s (%s)" % (script_name, search_path))
    
    return modules
Code example #7
File: commands.py  Project: MeirKriheli/acrylamid
def autocompile(ws, conf, env, **options):
    """Subcommand: autocompile -- automatically re-compiles when something in
    content-dir has changed and parallel serving files."""

    CONF_PY = './conf.py'

    mtime = -1
    cmtime = getmtime(CONF_PY)

    while True:
        ntime = max(
            max(getmtime(e) for e in readers.filelist(conf['content_dir']) if utils.istext(e)),
            max(getmtime(p) for p in readers.filelist(conf['layout_dir'])))
        if mtime != ntime:
            try:
                compile(conf, env, **options)
            except AcrylamidException as e:
                log.fatal(e.args[0])
                pass
            event.reset()
            mtime = ntime

        if cmtime != getmtime(CONF_PY):
            log.info(' * Restarting due to change in %s' % (CONF_PY))
            # Kill the webserver
            ws.shutdown()
            # Force compilation since no template was changed
            argv = sys.argv if options['force'] else sys.argv[:] + ["--force"]
            # Restart acrylamid
            os.execvp(sys.argv[0], argv)

        time.sleep(1)
Code example #8
File: rst.py  Project: Lemma1/MINAMI
    def get_outdated_docs(self):
        """
        Return an iterable of input files that are outdated.
        """
        # This method is taken from TextBuilder.get_outdated_docs()
        # with minor changes to support :confval:`rst_file_transform`.
        for docname in self.env.found_docs:
            if docname not in self.env.all_docs:
                yield docname
                continue
            sourcename = path.join(self.env.srcdir, docname +
                                   self.file_suffix)
            targetname = path.join(self.outdir, self.file_transform(docname))
            print (sourcename, targetname)

            try:
                targetmtime = path.getmtime(targetname)
            except Exception:
                targetmtime = 0
            try:
                srcmtime = path.getmtime(sourcename)
                if srcmtime > targetmtime:
                    yield docname
            except EnvironmentError:
                # source doesn't exist anymore
                pass
Code example #9
File: core.py  Project: slushecl/dev
    def datread(self):
        """\
Reads in data from .dat or .npy file paths and if .dat is newer than .npy saves
it in .npy format for faster reading in future."""
        for vals in self.files:
            if vals['type'] == 'dat' and vals['dir'] == 'pull':
                # self.datfileloc = vals['dir']
                pth = vals['path']
                pthnpy = pth.replace('.dat', '.npy', 1)
                try:
                    print("""\
DAT File:\t{0}
DAT: time last modified:\t{2}
NPY File:\t{1}
NPY: time last modified:\t{3}""".format(pth, pthnpy, \
                                        time.ctime(path.getmtime(pth)), \
                                        time.ctime(path.getmtime(pthnpy))))
                    if path.getmtime(pth) <= path.getmtime(pthnpy):
                        print('DATFILE: is older than npy... Continue.\n')
                    else:
                        print('DATFILE: is newer than npy... Remaking.\n')
                        os.remove(pthnpy)
                except OSError:
                    pass
                if vals['dir'] == 'pull':
                    self.datcheck = True
                    self.path = pth
                    if path.exists(pthnpy):
                        self.dat = np.load(pthnpy)
                    else:
                        self.dat = np.loadtxt(pth)
                        np.save(pthnpy, self.dat)
Code example #10
File: diskfile.py  Project: Dieterbe/swift
def get_hashes(partition_dir, recalculate=None, do_listdir=False,
               reclaim_age=ONE_WEEK):
    """
    Get a list of hashes for the suffix dir.  do_listdir causes it to mistrust
    the hash cache for suffix existence at the (unexpectedly high) cost of a
    listdir.  reclaim_age is just passed on to hash_suffix.

    :param partition_dir: absolute path of partition to get hashes for
    :param recalculate: list of suffixes which should be recalculated when got
    :param do_listdir: force existence check for all hashes in the partition
    :param reclaim_age: age at which to remove tombstones

    :returns: tuple of (number of suffix dirs hashed, dictionary of hashes)
    """

    hashed = 0
    hashes_file = join(partition_dir, HASH_FILE)
    modified = False
    force_rewrite = False
    hashes = {}
    mtime = -1

    if recalculate is None:
        recalculate = []

    try:
        with open(hashes_file, 'rb') as fp:
            hashes = pickle.load(fp)
        mtime = getmtime(hashes_file)
    except Exception:
        do_listdir = True
        force_rewrite = True
    if do_listdir:
        for suff in os.listdir(partition_dir):
            if len(suff) == 3:
                hashes.setdefault(suff, None)
        modified = True
    hashes.update((hash_, None) for hash_ in recalculate)
    for suffix, hash_ in hashes.items():
        if not hash_:
            suffix_dir = join(partition_dir, suffix)
            try:
                hashes[suffix] = hash_suffix(suffix_dir, reclaim_age)
                hashed += 1
            except PathNotDir:
                del hashes[suffix]
            except OSError:
                logging.exception(_('Error hashing suffix'))
            modified = True
    if modified:
        with lock_path(partition_dir):
            if force_rewrite or not exists(hashes_file) or \
                    getmtime(hashes_file) == mtime:
                write_pickle(
                    hashes, hashes_file, partition_dir, PICKLE_PROTOCOL)
                return hashed, hashes
        return get_hashes(partition_dir, recalculate, do_listdir,
                          reclaim_age)
    else:
        return hashed, hashes
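
Note how get_hashes records the mtime of hashes_file before doing any hashing and, while holding the lock, only rewrites the pickle if that mtime is unchanged; otherwise it recurses so the newer on-disk data gets merged in. A stripped-down sketch of that check-then-write idea, with a hypothetical compute() callable and without the locking:

import pickle
from os.path import exists, getmtime

def rewrite_if_unchanged(cache_path, compute):
    # Remember the cache's mtime before the (possibly slow) computation.
    mtime_before = getmtime(cache_path) if exists(cache_path) else -1
    data = compute()
    # Only write if nobody touched the cache while we were computing;
    # a real implementation would hold a lock around this check, as above.
    if not exists(cache_path) or getmtime(cache_path) == mtime_before:
        with open(cache_path, 'wb') as fp:
            pickle.dump(data, fp)
        return True
    return False  # our view is stale; the caller should re-read and retry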
Code example #11
File: pgn.py  Project: bboutkov/pychess
def init_chess_db(self):
    """ Create/open polyglot .bin file with extra win/loss/draw stats
        using chess_db parser from https://github.com/mcostalba/chess_db
    """
    if chess_db_path is not None and self.path and self.size > 0:
        try:
            if self.progressbar is not None:
                self.progressbar.set_text("Creating .bin index file...")
            self.chess_db = Parser(engine=(chess_db_path, ))
            self.chess_db.open(self.path)
            bin_path = os.path.splitext(self.path)[0] + '.bin'
            if not os.path.isfile(bin_path):
                log.debug("No valid games found in %s" % self.path)
                self.chess_db = None
            elif getmtime(self.path) > getmtime(bin_path):
                self.chess_db.make()
        except OSError as err:
            self.chess_db = None
            log.warning("Failed to start chess_db parser. OSError %s %s" % (err.errno, err.strerror))
        except pexpect.TIMEOUT:
            self.chess_db = None
            log.warning("chess_db parser failed (pexpect.TIMEOUT)")
        except pexpect.EOF:
            self.chess_db = None
            log.warning("chess_db parser failed (pexpect.EOF)")
Code example #12
def check_mm_files(HOMEDIR):
    '''Convert any Freemind mindmap whose HTML file is older than it.
    NOTE: If the syllabus.md hasn't been updated it won't reflect the changes'''
    
    INCLUDE_PATHS = ['syllabus', 'readings', 'concepts']

    files = locate('*.mm', HOMEDIR)
    for mm_fn in files:
        if any([included in mm_fn for included in INCLUDE_PATHS]):
            fn = splitext(mm_fn)[0]
            html_fn = fn + '.html'
            if exists(html_fn):
                if getmtime(mm_fn) > getmtime(html_fn):
                    info('updating_mm %s' %fn)
                    call(['xsltproc', '-o', html_fn, 
                        HOME+'/bin/mmtoxhtml.xsl', mm_fn])
                    call(['tidy', '-asxhtml', '-utf8', 
                          '-w', '0', '-m', html_fn])
                    p3 = Popen(['tail', '-n', '+2', html_fn], 
                        stdout=PIPE)
                    p4 = Popen(['tidy', '-asxhtml', '-utf8', '-w', '0', 
                                '-o', html_fn],
                         stdin=p3.stdout)
                    # if exists, update the syllabus.md that uses the MM's HTML
                    if 'readings' in mm_fn:
                        md_syllabus_fn = fn.replace('readings', 
                            'syllabus') + '.md'
                        if exists(md_syllabus_fn):
                            update_markdown(fn, md_syllabus_fn)
Code example #13
File: sync_disk.py  Project: Concord82/Sync_Servers
def comp (source_patch, target_patch):
    from os import path, walk
    from filecmp import cmp
    # output message describing the differences that were found
    message = ''
    path_f = []
    tree = walk(source_patch)
    for d, dirs, files in tree:
        for f in files:
            patch = path.join(d,f) # build the file path
            path_f.append(patch)      # add the path to the list
        # iterate over the file paths collected in the list
        for patch in path_f:
            # compare the files and, when they differ, collect information about them
            # check that the target file exists
            if not path.exists(patch.replace(source_patch, target_patch)):
                message = message + 'Missing target file: '+ patch.replace(source_patch, target_patch)
            # compare the sizes of the source and target files
            elif path.getsize(patch.replace(source_patch, target_patch)) != path.getsize(patch):
                message = message + file_info(patch, patch.replace(source_patch, target_patch))
            # compare the last modification times
            elif path.getmtime(patch.replace(source_patch, target_patch)) != path.getmtime(patch):
                message = message + file_info(patch, patch.replace(source_patch, target_patch))
            # compare the file contents
            elif not cmp(patch.replace(source_patch, target_patch), patch):
                message = message + file_info(patch, patch.replace(source_patch, target_patch))
        return message
Code example #14
File: _thumbnail.py  Project: miracle2k/feedplatform
    def wrapped(image, new_width, new_height, *args, **kwargs):
        save_to = kwargs.pop('save_to', None)
        force = kwargs.pop('force', False)

        # TODO: Instead of passing the image object to the save_to()
        # call, we could simply pass the source filename. This would
        # allow us to move this code further below so that we only
        # open the image file once the timestamp comparison determined
        # that we actually have to.
        if isinstance(image, basestring):
            source_filename = image
            image = Image.open(image)
        else:
            source_filename = None
            force = True  # no filename => detection disabled

        thumb_filename = None
        if save_to:
            thumb_filename = save_to(image, new_width, new_height) \
                if callable(save_to) \
                else save_to

        if save_to and not force:
            if path.exists(thumb_filename):
                if path.getmtime(source_filename) <= path.getmtime(thumb_filename):
                    return image

        result = f(image, new_width, new_height, *args, **kwargs)

        if result and save_to:
            result.save(thumb_filename, image.format)
        return result
Code example #15
File: BearTest.py  Project: arush0311/coala
    def test_download_cached_file(self):
        section = Section('default')
        uut = Bear(section, {})

        mock_url = 'https://test.com'
        mock_text = """<html>
            <p> lorem ipsum dolor</p>
        </html>"""
        filename = 'test.html'
        file_location = join(uut.data_dir, filename)

        with requests_mock.Mocker() as reqmock:
            reqmock.get(mock_url, text=mock_text)
            self.assertFalse(isfile(file_location))
            expected_filename = file_location
            result_filename = uut.download_cached_file(mock_url, filename)
            self.assertTrue(isfile(join(file_location)))
            self.assertEqual(result_filename, expected_filename)
            expected_time = getmtime(file_location)
            sleep(0.5)

            result_filename = uut.download_cached_file(mock_url, filename)
            self.assertEqual(result_filename, expected_filename)
            result_time = getmtime(file_location)
            self.assertEqual(result_time, expected_time)
Code example #16
File: players.py  Project: Ryozuki/ddnet-scripts
def reloadData():
  global types, players, maps, totalPoints, pointsRanks, weeklyPointsRanks, monthlyPointsRanks, teamrankRanks, rankRanks, serverRanks, last
  now = datetime.now()
  if not last or last < getmtime(playersFile):
    with open(playersFile, 'rb') as inp:
      del types
      del players
      del maps
      del totalPoints
      del pointsRanks
      del weeklyPointsRanks
      del monthlyPointsRanks
      del teamrankRanks
      del rankRanks
      del serverRanks
      unpacker = msgpack.Unpacker(inp)
      types = unpacker.unpack()
      maps = unpacker.unpack()
      totalPoints = unpacker.unpack()
      pointsRanks = unpacker.unpack()
      weeklyPointsRanks = unpacker.unpack()
      monthlyPointsRanks = unpacker.unpack()
      teamrankRanks = unpacker.unpack()
      rankRanks = unpacker.unpack()
      serverRanks = unpacker.unpack()
      players = unpacker.unpack()
      last = getmtime(playersFile)
    gc.collect()
Code example #17
File: BearTest.py  Project: Anmolbansal1/coala
    def test_download_cached_file(self):
        mock_url = 'https://test.com'
        mock_text = """<html>
            <p> lorem impsum dolor</p>
        </html>"""
        filename = self.filename
        file_location = self.file_location

        with freeze_time('2017-01-01') as frozen_datetime:
            with requests_mock.Mocker() as reqmock:

                reqmock.get(mock_url, text=mock_text)
                self.assertFalse(isfile(file_location))
                expected_filename = file_location
                result_filename = self.uut.download_cached_file(mock_url,
                                                                filename)
                self.assertTrue(isfile(join(file_location)))
                self.assertEqual(result_filename, expected_filename)
                expected_time = getmtime(file_location)

                frozen_datetime.tick(delta=datetime.timedelta(seconds=0.5))
                result_filename = self.uut.download_cached_file(mock_url,
                                                                filename)
                self.assertEqual(result_filename, expected_filename)
                result_time = getmtime(file_location)
                self.assertEqual(result_time, expected_time)
Code example #18
File: install.py  Project: wuchen1106/gaudi
def update(src,dest,old_dest = None, syml = False, logdir = realpath(".")):
    realdest = normpath(join(logdir, dest))
    dest_path = split(realdest)[0]
    realsrc = normpath(join(dest_path,src))
    # The modification time is compared only with the precision of the second
    # to avoid a bug in Python 2.5 + Win32 (Fixed in Python 2.5.1).
    # See:
    #   http://bugs.python.org/issue1671965
    #   http://bugs.python.org/issue1565150
    if (not exists(realdest)) or (int(getmtime(realsrc)) > int(getmtime(realdest))):
        if not isdir(dest_path):
            print "Create dir '%s'"%(dest_path)
            makedirs(dest_path)
        # the destination file is missing or older than the source
        if syml and sys.platform != "win32" :
            if exists(realdest):
                remove(realdest,logdir)
            print "Create Link to '%s' in '%s'"%(src,dest_path)
            os.symlink(src,realdest)
        else:
            print "Copy '%s' -> '%s'"%(src, realdest)
            if exists(realdest):
                # If the destination path exists it is better to remove it before
                # doing the copy (shutil.copystat fails if the destination file
                # is not owned by the current user).
                os.remove(realdest)
            shutil.copy2(realsrc, realdest) # do the copy (cp -p src dest)
Code example #19
File: xterm256.py  Project: Heldroe/fabulous
def compile_speedup():
    """Tries to compile/link the C version of this module

    Like it really makes a huge difference.  With a little bit of luck
    this should *just work* for you.

    You need:

    - Python >= 2.5 for ctypes library
    - gcc (``sudo apt-get install gcc``)

    """
    import os
    import ctypes
    from os.path import join, dirname, getmtime, exists, expanduser
    # library = join(dirname(__file__), '_xterm256.so')
    library = expanduser('~/.xterm256.so')
    sauce = join(dirname(__file__), '_xterm256.c')
    if not exists(library) or getmtime(sauce) > getmtime(library):
        build = "gcc -fPIC -shared -o %s %s" % (library, sauce)
        if (os.system(build + " >/dev/null 2>&1") != 0):
            raise OSError("GCC error")
    xterm256_c = ctypes.cdll.LoadLibrary(library)
    xterm256_c.init()
    def xterm_to_rgb(xcolor):
        res = xterm256_c.xterm_to_rgb_i(xcolor)
        return ((res >> 16) & 0xFF, (res >> 8) & 0xFF, res & 0xFF)
    return (xterm256_c.rgb_to_xterm, xterm_to_rgb)
Code example #20
    def recreate_tox(self, environments):
        """Recreate tox-environments.

        :param environments: Which environments to recreate.

        :type environments: list

        :rtype: list

        """
        req_txt = self.get_filename_setup('requirements.txt')
        req_dev_txt = self.get_filename_setup('requirements-dev.txt')

        req_mtimes = list()
        env_mtimes = list()

        if path.exists(req_txt):
            req_mtimes.append(path.getmtime(req_txt))
        if path.exists(req_dev_txt):
            req_mtimes.append(path.getmtime(req_dev_txt))

        for environment in environments:
            env_path = self.get_filename_setup('.tox/' + environment)
            if path.exists(env_path):
                env_mtimes.append(path.getmtime(env_path))

        if len(env_mtimes) and max(req_mtimes) > min(env_mtimes):
            run('tox', '--recreate', '--notest')
Code example #21
File: node.py  Project: Ed-von-Schleck/cbob
    def mark_dirty(self, dirty_source_nodes, dirty_header_nodes):
        try:
            object_mtime = getmtime(self.object_path)
        except OSError:
            object_mtime = 0

        # Shortcut if the source has no dependencies (rare, I presume)
        if not self.dependencies and self.mtime > object_mtime:
            dirty_source_nodes.append((self.path, self.object_path, None))
            return

        # Node has dependencies:
        try:
            gch_mtime = getmtime(self.gch_path)
        except OSError:
            gch_mtime = 0

        header_max_mtime = 0
        while self.dependencies and header_max_mtime <= object_mtime:
            node = self.dependencies.pop()
            header_max_mtime = max(header_max_mtime, node.get_max_mtime(object_mtime))

        all_max_mtime = max(header_max_mtime, self.mtime)
        if all_max_mtime > object_mtime:
            dirty_source_nodes.append((self.path, self.object_path, self.h_path))
            if header_max_mtime > gch_mtime:
                dirty_header_nodes.append((self.h_path, self.gch_path, None))
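
mark_dirty compares the newest header mtime and the source's own mtime against the object file (and the precompiled header) to decide what needs rebuilding; missing outputs are treated as mtime 0 so they always lose the comparison. The same idea as a small standalone helper (the names are illustrative, not from cbob):

from os.path import getmtime

def mtime_or_zero(path):
    # A missing output counts as infinitely old, so it is always rebuilt.
    try:
        return getmtime(path)
    except OSError:
        return 0

def is_dirty(source, object_path, headers=()):
    newest_input = max([getmtime(source)] + [getmtime(h) for h in headers])
    return newest_input > mtime_or_zero(object_path)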
Code example #22
File: texmate.py  Project: DOFfactory/latex.tmbundle
    def get_cached_data():
        """Get current data and update cache."""
        cache_read = False
        typesetting_data = {}

        try:
            with open(cache_filename, "rb") as storage:
                typesetting_data = load(storage)
                cache_read = True

            cache_data_outdated = getmtime(file_path) < getmtime(cache_filename) > getmtime(filepath)

            # Write new cache data if the current data does not contain
            # the necessary up to date information - This might be the case if
            # only `texparser` has written to the cache file
            if "engine" not in typesetting_data or cache_data_outdated:
                raise Exception()

        except:
            # Get data and save it in the cache
            packages = find_tex_packages(filename, ignore_warnings)
            engine = construct_engine_command(typesetting_directives, tm_engine, packages)
            synctex = not (bool(call("{} --help | grep -q synctex".format(engine), shell=True)))
            typesetting_data.update({"engine": engine, "packages": packages, "synctex": synctex})
            if not cache_read:
                typesetting_data["files_with_guttermarks"] = {filename}

        try:
            with open(cache_filename, "wb") as storage:
                dump(typesetting_data, storage)
        except:
            print('<p class="warning"> Could not write cache file!</p>')

        return typesetting_data
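
The staleness test above leans on Python's chained comparisons: getmtime(file_path) < getmtime(cache_filename) > getmtime(filepath) is true exactly when the cache file's mtime is greater than both of the other mtimes. The same condition written out as an explicit helper (a sketch; the names are illustrative):

from os.path import getmtime

def cache_newer_than(cache_file, *source_files):
    # Equivalent to the chained comparison: the cache must be strictly
    # newer than every file it is compared against.
    cache_time = getmtime(cache_file)
    return all(getmtime(source) < cache_time for source in source_files)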
Code example #23
File: loader.py  Project: Erwyn/pa-poc2
    def get_source(self, environment, template):
        """Try and get the template from the given pseudo-pathname.
        
        It uses, like the Jinja2 documentation example, the getmtime
        function to know whether a template file had been changed
        or not.
        
        """
        if "/" not in template:
            path = template.split(".")
            # The first part of the path must be a bundle name
            # The other parts are the hierarchy of directory after 'views'
            bundle = path[0]
            sub_hierarchy = "/".join(path[1:])
            path = "bundles/" + bundle + "/views/" + sub_hierarchy + ".jj2"
        else:
            path = template

        print(path)
        if not exists(path):
            raise TemplateNotFound(template)

        mtime = getmtime(path)
        with open(path, "r") as file:
            source = file.read()

        return source, path, lambda: mtime == getmtime(path)
Code example #24
File: previewcache.py  Project: jtoledo1974/raw2jpeg
def get_preview(origpath, thumbnail=False, return_orientation=False):
    # TODO when the rest is working go back to using crcs as the filename
    p_type = 'thumbnails' if thumbnail else 'previews'
    preview = join(
        PREVIEWDIR, p_type, dirname(origpath)[1:], basename(origpath)+'.jpg')

    try:
        origmtime = getmtime(origpath)
        prevmtime = getmtime(preview)
        if prevmtime >= origmtime:
            if not return_orientation:
                return preview
            else:
                return (preview, orientations.get(preview))
    except:
        pass  # The preview is not yet built

    if blacklist.match(origpath, origmtime=origmtime):
        raise PreviewError

    try:
        (preview, orientation) = build_preview(origpath, preview, thumbnail)
    except:
        blacklist.add(origpath)
        raise PreviewError

    orientations.set(preview, orientation)

    if not return_orientation:
        return preview
    else:
        return (preview, orientation)
Code example #25
File: auxiliary.py  Project: rosenbrockc/fortpy
def _should_recompile(auxdir, parser, modules, compiler):
    """Determines whether the fpy_auxiliary module should be rewritten and recompiled.
    """
    from os import path
    from shutil import copy
    
    recompile = False
    for modulename in modules:
        module = parser.modules[modulename]
        auxpath = path.join(auxdir, path.split(module.filepath)[1])
        if not path.isfile(auxpath):
            copy(module.filepath, auxdir)
            recompile = True
        else:
            fmtime = path.getmtime(module.filepath)
            xmtime = path.getmtime(auxpath)
            if xmtime < fmtime:
                recompile = True
                copy(module.filepath, auxdir)

    #Also check the version numbers of the template fpy_auxiliary.f90 and the one present
    #in our directory (if it exists).
    fpyaux = path.join(auxdir, "fpy_auxiliary.f90")
    if path.isfile(fpyaux):
        from fortpy.testing.compilers import template_version, get_fortpy_version
        tversion = template_version(compiler, "fpy_auxiliary.f90")
        xversion = get_fortpy_version(compiler, fpyaux)
        recompile = recompile or (xversion != tversion)
    else:
        recompile = True

    return recompile
Code example #26
def is_actual_problem(problem_id):
    # This is a very paranoid function
    global _cached_problem_ids
    assert _id_rx.match(problem_id), problem_id
    
    def sanity_check(ids):
        assert len(ids) == 1420 + 200 + 200
        assert all(_id_rx.match(id) for id in ids)
        
    if _cached_problem_ids is None:
        if not os_path.exists(MYPROBLEMS_FILE):
            # regenerate problems file
            update_myproblems_file()
            
        if (not os_path.exists(CACHED_PROBLEM_IDS_FILE) or
            os_path.getmtime(CACHED_PROBLEM_IDS_FILE) < os_path.getmtime(MYPROBLEMS_FILE)):
            # regenerate CACHED_PROBLEM_IDS_FILE
            problems = load_problems()
            ids = frozenset(problem.id for problem in problems)
            sanity_check(ids)
            with open(CACHED_PROBLEM_IDS_FILE, 'wb') as f:
                pickle.dump(ids, f)
        else:
            with open(CACHED_PROBLEM_IDS_FILE, 'rb') as f:
                ids = pickle.load(f)
            sanity_check(ids)
        _cached_problem_ids = ids
        
    return problem_id in _cached_problem_ids
Code example #27
def createDict(path, root={}):
    pathList = listdir(path)
    for i, item in enumerate(pathList):
        file_path = path_join(path, item)
        if item not in ignore_dir and exists(file_path):
            if isdir(file_path):
                if not root.get(item, False):
                    root[item] = {"type": "dir", "files": {}}
                createDict(file_path, root[item]["files"])
            else:
                if not root.get(item, False):
                    log("new file " + file_path)
                    root[item] = {"type": "file",
                                  "file_size": getsize(file_path),
                                  "mtime": getmtime(file_path), 
                                  "ctime": getctime(file_path),
                                  "md5": md5(file_path),
                                  "sha256": sha256(file_path)}
                else:
                    if root[item]["mtime"] != getmtime(file_path):
                        log("rehashing " + file_path)
                        root[item] = {"type": "file",
                                      "file_size": getsize(file_path),
                                      "mtime": getmtime(file_path), 
                                      "ctime": getctime(file_path),
                                      "md5": md5(file_path),
                                      "sha256": sha256(file_path)}
                        
                                    
    return root
Code example #28
File: test_functional.py  Project: mrfuxi/testrunner
    def test_update_conf(self, default_config, show_notification):
        conf_time_1 = path.getmtime(self.tmp.conf.join("config.py"))
        out_file = self.tmp_output.join("out.log")
        command_args = [
            "-c", self.config_file,
            "-r", "bash -c 'echo a | tee -a {}'".format(out_file),
            "-d", unicode(self.tmp.src),
        ]
        events = [
            (self.tmp.conf, "config.py", "# some new data"),
            (self.tmp.conf, "config.py", "# some new data"),
        ]

        self._copy_default_config(default_config)
        default_config.RUNNER_DELAY = -1

        wm = WatchManager()
        config = Config(watch_manager=wm, command_args=command_args)
        handler = FileChangeHandler(config=config)
        notifier = Notifier(wm, handler, timeout=1000)

        notifier.loop(callback=partial(self._event_generator, events))

        # There are some stupid race conditions (possibly due to the callbacks)
        # Sleep time allows to execute all needed code
        sleep(0.2)

        conf_time_2 = path.getmtime(self.tmp.conf.join("config.py"))

        self.assertNotEqual(conf_time_1, conf_time_2)
        self.assertTrue(path.exists(out_file))
        self.assertEqual(show_notification.call_count, 2)
Code example #29
File: mako.py  Project: maphew/acrylamid
        def resolve(uri):
            """Check whether any referenced template has changed -- recursively."""

            self.used.add(uri)

            if uri in self.resolved:
                return self.resolved[uri]

            filename = posixpath.normpath(posixpath.join(self.directories[0], uri))
            p = self.modulename_callable(filename, uri)
            modified = getmtime(filename) > getmtime(p) if isfile(p) else True

            if modified:
                self.resolved[uri] = True
                return True

            with io.open(filename) as fp:
                source = fp.read()

            for match in self.inherits.finditer(source):
                if resolve(match.group(1)):
                    return True

            for match in self.includes.finditer(source):
                if resolve(match.group(1)):
                    return True

            return False
Code example #30
File: statusfile.py  Project: AKIo0O/node
def ReadStatusFile(path, variables):
  # As long as the old-format .status files are authoritative, just
  # create the converted version on demand and cache it to speed up
  # subsequent runs.
  if path.endswith(".status"):
    newpath = path + "2"
    if not exists(newpath) or getmtime(newpath) < getmtime(path):
      print "Converting status file."
      converted = old_statusfile.ConvertNotation(path).GetOutput()
      with open(newpath, 'w') as f:
        f.write(converted)
    path = newpath

  with open(path) as f:
    global KEYWORDS
    contents = eval(f.read(), KEYWORDS)

  rules = {}
  wildcards = {}
  variables.update(VARIABLES)
  for section in contents:
    assert type(section) == list
    assert len(section) == 2
    if not eval(section[0], variables): continue
    section = section[1]
    assert type(section) == dict
    for rule in section:
      assert type(rule) == str
      if rule[-1] == '*':
        _ParseOutcomeList(rule, section[rule], wildcards, variables)
      else:
        _ParseOutcomeList(rule, section[rule], rules, variables)
  return rules, wildcards
Code example #31
def render_drawio(
    self: SphinxTranslator,
    node: DrawIONode,
    in_filename: str,
    default_output_format: str,
) -> str:
    """Render drawio file into an output image file."""

    page_index = str(node["config"].get("page-index", 0))
    output_format = node["config"].get("format") or default_output_format
    scale = str(node["config"].get("scale", self.config.drawio_default_scale))
    transparent = node["config"].get("transparency",
                                     self.config.drawio_default_transparency)
    no_sandbox = self.config.drawio_no_sandbox

    # Any directive options which would change the output file would go here
    unique_values = (
        # This ensures that the same file hash is generated no matter the build directory
        # Mainly useful for pytest, as it creates a new build directory every time
        node["filename"].replace(self.builder.srcdir, ""),
        page_index,
        scale,
        output_format,
        *[
            str(node["config"].get(option))
            for option in DrawIO.optional_uniques
        ],
    )
    hash_key = "\n".join(unique_values)
    sha_key = sha1(hash_key.encode()).hexdigest()
    filename = "drawio-{}.{}".format(sha_key, default_output_format)
    file_path = posixpath.join(self.builder.imgpath, filename)
    out_file_path = os.path.join(self.builder.outdir, self.builder.imagedir,
                                 filename)

    if os.path.isfile(
            out_file_path) and getmtime(in_filename) < getmtime(out_file_path):
        return file_path

    ensuredir(os.path.dirname(out_file_path))

    if self.builder.config.drawio_binary_path:
        binary_path = self.builder.config.drawio_binary_path
    elif platform.system() == "Windows":
        binary_path = r"C:\Program Files\draw.io\draw.io.exe"
    else:
        binary_path = "/opt/draw.io/drawio"

    scale_args = ["--scale", scale]
    if output_format == "pdf" and float(scale) == 1.0:
        # https://github.com/jgraph/drawio-desktop/issues/344 workaround
        scale_args.clear()

    extra_args = []
    for option in DrawIO.optional_uniques:
        if option in node["config"]:
            value = node["config"][option]
            extra_args.append("--{}".format(option))
            extra_args.append(str(value))

    if transparent:
        extra_args.append("--transparent")

    drawio_args = [
        binary_path,
        "--export",
        "--crop",
        "--page-index",
        page_index,
        *scale_args,
        *extra_args,
        "--format",
        output_format,
        "--output",
        out_file_path,
        in_filename,
    ]

    if no_sandbox:
        # This may be needed for docker support, and it has to be the last argument to work.
        drawio_args.append("--no-sandbox")

    doc_name = node.get("doc_name", "index")
    cwd = os.path.dirname(os.path.join(self.builder.srcdir, doc_name))

    new_env = os.environ.copy()
    if self.config._display:
        new_env["DISPLAY"] = ":{}".format(self.config._display)

    try:
        ret = subprocess.run(
            drawio_args,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
            cwd=cwd,
            check=True,
            env=new_env,
        )
        if not os.path.isfile(out_file_path):
            raise DrawIOError("draw.io did not produce an output file:"
                              "\n[stderr]\n{}\n[stdout]\n{}".format(
                                  ret.stderr, ret.stdout))
        return file_path
    except OSError as exc:
        raise DrawIOError("draw.io ({}) exited with error:\n{}".format(
            " ".join(drawio_args), exc))
    except subprocess.CalledProcessError as exc:
        raise DrawIOError("draw.io ({}) exited with error:\n[stderr]\n{}"
                          "\n[stdout]\n{}".format(" ".join(drawio_args),
                                                  exc.stderr, exc.stdout))
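
Two things keep re-renders cheap here: the output filename is derived from a SHA-1 over every option that affects the result, and rendering is skipped when the existing output is newer than the .drawio source. A compact sketch of the same cache-keying idea (filenames and options are illustrative, not the extension's real API):

import os
from hashlib import sha1
from os.path import getmtime, isfile

def cached_output_path(out_dir, in_filename, **options):
    # Every option that changes the rendered image must be part of the key,
    # otherwise a stale file with the same name would be reused.
    key = "\n".join([in_filename] + ["%s=%s" % kv for kv in sorted(options.items())])
    return os.path.join(out_dir, "drawio-%s.png" % sha1(key.encode()).hexdigest())

def needs_render(in_filename, out_path):
    return not isfile(out_path) or getmtime(in_filename) >= getmtime(out_path)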
Code example #32
File: Queries.py  Project: PlopFriction/gonewilder
    def get_zip(user, include_videos=False, album=None):
        from os import path, mkdir, walk, remove, sep as ossep
        from zipfile import ZipFile, ZIP_STORED
        db = DB()

        # Verify the user exists
        if not path.exists(path.join('content', user)):
            return {'error': 'user dir "%s" not found' % user}
        source = path.join('content', user)
        if album != None:
            if not path.exists(path.join(source, album)):
                return {'error': 'album dir "%s" not found' % album}
            source = path.join(source, album)
        if db.count('users', 'username like ?', [user]) == 0:
            return {'error': 'user "%s" not in db' % user}
        if not path.exists('zips'): mkdir('zips')

        zip_path = path.join('zips', user)
        if album != None: zip_path = '%s-%s' % (zip_path, album)
        if not include_videos:
            zip_path = '%s-novids' % zip_path
        zip_path = '%s.zip' % zip_path

        # Check for existing zip
        if path.exists(zip_path):
            zip_time = path.getmtime(zip_path)
            source_time = db.select_one(
                'max(created)', 'posts',
                'userid in (select id from users where username = ?)', [user])
            if album == None:
                q = 'user = ? and album is null'
                v = [user]
            else:
                q = 'user = ? and album = ?'
                v = [user, album]
            if zip_time > source_time and db.count('zips', q, v) > 0:
                # Zip is fresher than source album, don't need to re-zip
                (images, videos, audios) = db.select('images, videos, audios',
                                                     'zips', q, v)[0]
                return {
                    'zip': zip_path,
                    'size': path.getsize(zip_path),
                    'images': images,
                    'videos': videos,
                    'audios': audios
                }
            else:
                remove(zip_path)  # Delete the stale zip

        # Create new zip
        zipped_file_ids = []
        images = videos = audios = 0
        z = ZipFile(zip_path, "w", ZIP_STORED)
        for root, dirs, files in walk(source):
            if root.endswith('/thumbs'): continue
            for fn in files:
                if not '.' in fn: continue  # We need a file extension
                # Check for duplicates
                file_id = fn[fn.rfind('-') + 1:]
                if file_id in zipped_file_ids: continue
                zipped_file_ids.append(file_id)
                # Count images/videos/audios
                ext = fn[fn.rfind('.') + 1:].lower()
                if ext in ['mp4', 'flv', 'wmv']:
                    if not include_videos: continue
                    videos += 1
                elif ext in ['jpg', 'jpeg', 'png', 'gif']:
                    images += 1
                elif ext in ['wma', 'm4v', 'mp3', 'wav']:
                    audios += 1
                absfn = path.join(root, fn)  # content/user/
                source_minus_one = source[:source.rfind(ossep)]
                zipfn = absfn[len(source_minus_one):]
                z.write(absfn, zipfn)
        z.close()

        if images == 0 and videos == 0 and audios == 0:
            remove(zip_path)
            return {
                'error': 'no images, videos, or audio files could be zipped'
            }

        zip_size = path.getsize(zip_path)
        # Update DB
        db.delete('zips', 'zippath = ?', [zip_path])
        db.insert('zips',
                  (zip_path, user, album, images, videos, audios, zip_size))
        db.commit()
        return {
            'zip': zip_path,
            'size': zip_size,
            'images': images,
            'videos': videos,
            'audios': audios
        }
Code example #33
        if onwindows: artifact = artifact.replace("/", "\\")

        srcdeps = dependencies(source, incpaths)

        if showDependencies:
            print ""
            for dep in srcdeps:
                print "    needs " + dep
            stdout.write("    ")
            stdout.flush()

        doCompile = recompile or not path.exists(artifact)
        if not doCompile:
            for dep in srcdeps:
                if path.exists(dep):
                    if str(path.getmtime(artifact)) < str(path.getmtime(dep)):
                        doCompile = True
                        break
                else:
                    print "ERROR: dependency " + dep + " could not be found!"
                    exit(1)

        if doCompile:
            stdout.write("compiling > ")
            stdout.flush()

            if path.exists(artifact):
                os.remove(artifact)

            tus = ""
            for dep in srcdeps:
Code example #34
File: ListCases.py  Project: kaiserpeng/PyFoam
    def run(self):
        dirs = self.parser.getArgs()

        if len(dirs) == 0:
            dirs = [path.curdir]

        cData = []
        totalDiskusage = 0
        useSolverInData = False

        self.hasState = False

        customData = []
        for i, c in enumerate(self.opts.customData):
            lst = c.split("=")
            if len(lst) == 2:
                name, spec = lst
                name += "_"  # Make sure that there is no collision with standard-names
            elif len(lst) == 1:
                name, spec = "Custom%d" % (i + 1), c
            else:
                self.error("Custom specification", c,
                           "does not fit the pattern 'name=subs1::subs2::..'")
            customData.append((name, spec.split("::")))

        if len(customData) > 0 and not self.opts.solverNameForCustom:
            self.warning(
                "Parameter '--solver-name-for-custom-data' should be set if '--custom-data' is used"
            )
            useSolverInData = True

        for d in dirs:
            for n in listdir(d):
                cName = path.join(d, n)
                if path.isdir(cName):
                    try:
                        sol = SolutionDirectory(cName,
                                                archive=None,
                                                paraviewLink=False)
                        if sol.isValid():
                            if self.opts.progress:
                                print_("Processing", cName)

                            data = {}

                            data["mtime"] = stat(cName)[ST_MTIME]
                            times = sol.getTimes()
                            try:
                                data["first"] = times[0]
                            except IndexError:
                                data["first"] = "None"
                            try:
                                data["last"] = times[-1]
                            except IndexError:
                                data["last"] = "None"
                            data["nrSteps"] = len(times)
                            data["procs"] = sol.nrProcs()
                            data["pFirst"] = -1
                            data["pLast"] = -1
                            data["nrParallel"] = -1
                            if self.opts.parallel:
                                pTimes = sol.getParallelTimes()
                                data["nrParallel"] = len(pTimes)
                                if len(pTimes) > 0:
                                    data["pFirst"] = pTimes[0]
                                    data["pLast"] = pTimes[-1]
                            data["name"] = cName
                            data["diskusage"] = -1
                            if self.opts.diskusage:
                                data["diskusage"] = diskUsage(cName)
                                totalDiskusage += data["diskusage"]
                            if self.opts.parallel:
                                for f in listdir(cName):
                                    if re.compile("processor[0-9]+").match(f):
                                        data["mtime"] = max(
                                            stat(path.join(cName,
                                                           f))[ST_MTIME],
                                            data["mtime"])

                            if self.opts.state:
                                try:
                                    data["nowTime"] = float(
                                        self.readState(sol, "CurrentTime"))
                                except ValueError:
                                    data["nowTime"] = None

                                try:
                                    data["lastOutput"] = time.mktime(
                                        time.strptime(
                                            self.readState(
                                                sol, "LastOutputSeen")))
                                except ValueError:
                                    data["lastOutput"] = "nix"

                                data["state"] = self.readState(sol, "TheState")

                            if self.opts.state or self.opts.estimateEndTime:
                                try:
                                    data["startedAt"] = time.mktime(
                                        time.strptime(
                                            self.readState(sol, "StartedAt")))
                                except ValueError:
                                    data["startedAt"] = "nix"

                            if self.opts.startEndTime or self.opts.estimateEndTime:
                                try:
                                    ctrlDict = ParsedParameterFile(
                                        sol.controlDict(),
                                        doMacroExpansion=True)
                                except PyFoamParserError:
                                    # Didn't work with Macro expansion. Let's try without
                                    try:
                                        ctrlDict = ParsedParameterFile(
                                            sol.controlDict())
                                    except PyFoamParserError:
                                        ctrlDict = None
                                if ctrlDict:
                                    data["startTime"] = ctrlDict["startTime"]
                                    data["endTime"] = ctrlDict["endTime"]
                                else:
                                    data["startTime"] = None
                                    data["endTime"] = None

                            if self.opts.estimateEndTime:
                                data["endTimeEstimate"] = None
                                if self.readState(sol,
                                                  "TheState") == "Running":
                                    gone = time.time() - data["startedAt"]
                                    try:
                                        current = float(
                                            self.readState(sol, "CurrentTime"))
                                        frac = (current - data["startTime"]
                                                ) / (data["endTime"] -
                                                     data["startTime"])
                                    except ValueError:
                                        frac = 0
                                    if frac > 0:
                                        data["endTimeEstimate"] = data[
                                            "startedAt"] + gone / frac

                            if len(customData) > 0:
                                fn = None
                                pickleFile = None
                                if useSolverInData:
                                    data["solver"] = "none found"
                                    # try to find the oldest pickled file
                                    for f in [
                                            "pickledData",
                                            "pickledUnfinishedData",
                                            "pickledStartData"
                                    ]:
                                        dirAndTime = []
                                        for g in glob(
                                                path.join(cName,
                                                          "*.analyzed")):
                                            pName = path.join(g, f)
                                            base = path.basename(g)
                                            if base.find("PyFoamRunner.") == 0:
                                                solverName = base[
                                                    len("PyFoamRunner."
                                                        ):-len(".analyzed")]
                                            else:
                                                solverName = None
                                            if path.exists(pName):
                                                dirAndTime.append(
                                                    (path.getmtime(pName),
                                                     solverName, pName))
                                        dirAndTime.sort(key=lambda x: x[0])
                                        if len(dirAndTime) > 0:
                                            data["solver"] = dirAndTime[-1][1]
                                            pickleFile = dirAndTime[-1][2]
                                            break

                                    solverName = data["solver"]
                                else:
                                    solverName = self.opts.solverNameForCustom

                                if pickleFile:
                                    fn = pickleFile
                                else:
                                    for f in [
                                            "pickledData",
                                            "pickledUnfinishedData",
                                            "pickledStartData"
                                    ]:
                                        fp = path.join(
                                            cName, "PyFoamRunner." +
                                            solverName + ".analyzed", f)
                                        if path.exists(fp):
                                            fn = fp
                                            break
                                if fn:
                                    raw = pickle.Unpickler(open(fn)).load()
                                    for n, spec in customData:
                                        dt = raw
                                        for k in spec:
                                            try:
                                                dt = dt[k]
                                            except KeyError:
                                                dt = "No key '" + k + "'"
                                                break
                                            if isinstance(dt, string_types):
                                                break
                                        data[n] = dt
                                else:
                                    for n, spec in customData:
                                        data[n] = "no file"

                            cData.append(data)
                    except OSError:
                        print_(cName, "is unreadable")

        if self.opts.progress:
            print_("Sorting data")

        cData.sort(key=lambda x: x[self.opts.sort], reverse=self.opts.reverse)

        if len(cData) == 0:
            print_("No cases found")
            return

        if self.opts.dump:
            print_(cData)
            return

        lens = {}
        for k in list(cData[0].keys()):
            lens[k] = len(k)
        for c in cData:
            for k in ["mtime", "lastOutput", "startedAt", "endTimeEstimate"]:
                try:
                    if c[k] is not None:
                        if self.opts.relativeTime:
                            c[k] = datetime.timedelta(
                                seconds=int(time.time() - c[k]))
                        else:
                            c[k] = time.asctime(time.localtime(c[k]))
                except KeyError:
                    pass
                except TypeError:
                    c[k] = None

            try:
                c["diskusage"] = humanReadableSize(c["diskusage"])
            except KeyError:
                pass

            for k, v in iteritems(c):
                lens[k] = max(lens[k], len(str(v)))

        format = ""
        spec = ["mtime", " | ", "first", " - ", "last", " (", "nrSteps", ") "]
        if self.opts.parallel:
            spec += [
                "| ", "procs", " : ", "pFirst", " - ", "pLast", " (",
                "nrParallel", ") | "
            ]
        if self.opts.diskusage:
            spec += ["diskusage", " | "]
        if self.hasState:
            spec += ["nowTime", " s ", "state", " | "]
            if self.opts.advancedState:
                spec += ["lastOutput", " | ", "startedAt", " | "]
        if self.opts.estimateEndTime:
            if not self.opts.advancedState:
                spec += ["startedAt", " | "]
            spec += ["endTimeEstimate", " | "]
        if self.opts.startEndTime:
            spec += ["startTime", " | ", "endTime", " | "]

        if useSolverInData:
            spec += ["solver", " | "]
        for n, s in customData:
            spec += [n, " | "]

        spec += ["name"]

        for i, l in enumerate(spec):
            if l not in list(cData[0].keys()):
                format += l
            else:
                if i < len(spec) - 1:
                    format += "%%(%s)%ds" % (l, lens[l])
                else:
                    format += "%%(%s)s" % (l)

        if self.opts.progress:
            print_("Printing\n\n")

        header = format % dict(
            list(zip(list(cData[0].keys()), list(cData[0].keys()))))
        print_(header)
        print_("-" * len(header))

        for d in cData:
            for k in list(d.keys()):
                d[k] = str(d[k])
            print_(format % d)

        if self.opts.diskusage:
            print_("Total disk-usage:", humanReadableSize(totalDiskusage))
コード例 #35
ファイル: __init__.py プロジェクト: akhildp/newDoc
    def read_doc(self, docname, app=None):
        """Parse a file and add/update inventory entries for the doctree."""

        self.temp_data['docname'] = docname
        # defaults to the global default, but can be re-set in a document
        self.temp_data['default_domain'] = \
            self.domains.get(self.config.primary_domain)

        self.settings['input_encoding'] = self.config.source_encoding
        self.settings['trim_footnote_reference_space'] = \
            self.config.trim_footnote_reference_space
        self.settings['gettext_compact'] = self.config.gettext_compact

        docutilsconf = path.join(self.srcdir, 'docutils.conf')
        # read docutils.conf from source dir, not from current dir
        OptionParser.standard_config_files[1] = docutilsconf
        if path.isfile(docutilsconf):
            self.note_dependency(docutilsconf)

        with sphinx_domains(self):
            if self.config.default_role:
                role_fn, messages = roles.role(self.config.default_role, english,
                                               0, dummy_reporter)
                if role_fn:
                    roles._roles[''] = role_fn
                else:
                    self.warn(docname, 'default role %s not found' %
                              self.config.default_role)

            codecs.register_error('sphinx', self.warn_and_replace)

            # publish manually
            reader = SphinxStandaloneReader(self.app, parsers=self.config.source_parsers)
            pub = Publisher(reader=reader,
                            writer=SphinxDummyWriter(),
                            destination_class=NullOutput)
            pub.set_components(None, 'restructuredtext', None)
            pub.process_programmatic_settings(None, self.settings, None)
            src_path = self.doc2path(docname)
            source = SphinxFileInput(app, self, source=None, source_path=src_path,
                                     encoding=self.config.source_encoding)
            pub.source = source
            pub.settings._source = src_path
            pub.set_destination(None, None)
            pub.publish()
            doctree = pub.document

        # post-processing
        self.process_dependencies(docname, doctree)
        self.process_images(docname, doctree)
        self.process_downloads(docname, doctree)
        self.process_metadata(docname, doctree)
        self.create_title_from(docname, doctree)
        for manager in itervalues(self.managers):
            manager.process_doc(docname, doctree)
        for domain in itervalues(self.domains):
            domain.process_doc(self, docname, doctree)

        # allow extension-specific post-processing
        if app:
            app.emit('doctree-read', doctree)

        # store time of reading, for outdated files detection
        # (Some filesystems have coarse timestamp resolution;
        # therefore time.time() can be older than filesystem's timestamp.
        # For example, FAT32 has 2sec timestamp resolution.)
        self.all_docs[docname] = max(
            time.time(), path.getmtime(self.doc2path(docname)))

        if self.versioning_condition:
            old_doctree = None
            if self.versioning_compare:
                # get old doctree
                try:
                    with open(self.doc2path(docname,
                                            self.doctreedir, '.doctree'), 'rb') as f:
                        old_doctree = pickle.load(f)
                except EnvironmentError:
                    pass

            # add uids for versioning
            if not self.versioning_compare or old_doctree is None:
                list(add_uids(doctree, self.versioning_condition))
            else:
                list(merge_doctrees(
                    old_doctree, doctree, self.versioning_condition))

        # make it picklable
        doctree.reporter = None
        doctree.transformer = None
        doctree.settings.warning_stream = None
        doctree.settings.env = None
        doctree.settings.record_dependencies = None

        # cleanup
        self.temp_data.clear()
        self.ref_context.clear()
        roles._roles.pop('', None)  # if a document has set a local default role

        # save the parsed doctree
        doctree_filename = self.doc2path(docname, self.doctreedir,
                                         '.doctree')
        ensuredir(path.dirname(doctree_filename))
        with open(doctree_filename, 'wb') as f:
            pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
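
The closing comment explains why the stored timestamp is max(time.time(), getmtime(...)): on filesystems with coarse mtime resolution (FAT32 rounds to 2 seconds) the file's mtime can lag behind time.time(), and storing only the smaller value could make a freshly read document look outdated. A minimal sketch of the outdated check that this stored value later feeds (all_docs and doc2path mirror the attributes used above; this is an illustration, not Sphinx's actual implementation):

from os import path

def is_outdated(docname, all_docs, doc2path):
    # all_docs maps docname -> timestamp stored at read time (see above);
    # doc2path maps docname -> source filename.
    if docname not in all_docs:
        return True
    return path.getmtime(doc2path(docname)) > all_docs[docname]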
コード例 #36
def setupDom(setup=None, plugin=None):
    # Constants for checkItems()
    ROOT_ALLOWED = ("setup", )  # Tags allowed in top level of setupxml entry.
    ELEMENT_ALLOWED = ("item", "if"
                       )  # Tags allowed in top level of setup entry.
    IF_ALLOWED = ("item", "if", "elif", "else")  # Tags allowed inside <if />.
    AFTER_ELSE_ALLOWED = ("item", "if"
                          )  # Tags allowed after <elif /> or <else />.
    CHILDREN_ALLOWED = (
        "setup",
        "if",
    )  # Tags that may have children.
    TEXT_ALLOWED = ("item",
                    )  # Tags that may have non-whitespace text (or tail).
    KEY_ATTRIBUTES = {  # Tags that have a reference key mandatory attribute.
        "setup": "key",
        "item": "text"
    }
    MANDATORY_ATTRIBUTES = {  # Tags that have a list of mandatory attributes.
        "setup": ("key", "title"),
        "item": ("text", )
    }

    def checkItems(parentNode,
                   key,
                   allowed=ROOT_ALLOWED,
                   mandatory=MANDATORY_ATTRIBUTES,
                   reference=KEY_ATTRIBUTES):
        keyText = " in '%s'" % key if key else ""
        for element in parentNode:
            if element.tag not in allowed:
                print(
                    "[Setup] Error: Tag '%s' not permitted%s!  (Permitted: '%s')"
                    % (element.tag, keyText, ", ".join(allowed)))
                continue
            if mandatory and element.tag in mandatory:
                valid = True
                for attrib in mandatory[element.tag]:
                    if element.get(attrib) is None:
                        print(
                            "[Setup] Error: Tag '%s'%s does not contain the mandatory '%s' attribute!"
                            % (element.tag, keyText, attrib))
                        valid = False
                if not valid:
                    continue
            if element.tag not in TEXT_ALLOWED:
                if element.text and not element.text.isspace():
                    print("[Setup] Tag '%s'%s contains text '%s'." %
                          (element.tag, keyText, element.text.strip()))
                if element.tail and not element.tail.isspace():
                    print("[Setup] Tag '%s'%s has trailing text '%s'." %
                          (element.tag, keyText, element.tail.strip()))
            if element.tag not in CHILDREN_ALLOWED and len(element):
                itemKey = ""
                if element.tag in reference:
                    itemKey = " (%s)" % element.get(reference[element.tag])
                print(
                    "[Setup] Tag '%s'%s%s contains children where none expected."
                    % (element.tag, itemKey, keyText))
            if element.tag in CHILDREN_ALLOWED:
                if element.tag in reference:
                    key = element.get(reference[element.tag])
                checkItems(element, key, allowed=IF_ALLOWED)
            elif element.tag == "else":
                allowed = AFTER_ELSE_ALLOWED  # Another else and elif not permitted after else.
            elif element.tag == "elif":
                pass

    setupFileDom = fromstring("<setupxml></setupxml>")
    setupFile = resolveFilename(SCOPE_PLUGINS, pathJoin(
        plugin, "setup.xml")) if plugin else resolveFilename(
            SCOPE_SKIN, "setup.xml")
    global domSetups, setupModTimes
    try:
        modTime = getmtime(setupFile)
    except (IOError, OSError) as err:
        print(
            "[Setup] Error: Unable to get '%s' modified time - Error (%d): %s!"
            % (setupFile, err.errno, err.strerror))
        if setupFile in domSetups:
            del domSetups[setupFile]
        if setupFile in setupModTimes:
            del setupModTimes[setupFile]
        return setupFileDom
    cached = setupFile in domSetups and setupFile in setupModTimes and setupModTimes[
        setupFile] == modTime
    print("[Setup] XML%s setup file '%s', using element '%s'%s." %
          (" cached" if cached else "", setupFile, setup,
           " from plugin '%s'" % plugin if plugin else ""))
    if cached:
        return domSetups[setupFile]
    try:
        if setupFile in domSetups:
            del domSetups[setupFile]
        if setupFile in setupModTimes:
            del setupModTimes[setupFile]
        with open(
                setupFile, "r"
        ) as fd:  # This open gets around a possible file handle leak in Python's XML parser.
            try:
                fileDom = parse(fd).getroot()
                checkItems(fileDom, None)
                setupFileDom = fileDom
                domSetups[setupFile] = setupFileDom
                setupModTimes[setupFile] = modTime
                for setup in setupFileDom.findall("setup"):
                    key = setup.get("key")
                    if key:  # If there is no key then this element is useless and can be skipped!
                        title = setup.get("title", "").encode(
                            "UTF-8", errors="ignore") if PY2 else setup.get(
                                "title", "")
                        if title == "":
                            print(
                                "[Setup] Error: Setup key '%s' title is missing or blank!"
                                % key)
                            title = "** Setup error: '%s' title is missing or blank!" % key
                        # print("[Setup] DEBUG: XML setup load: key='%s', title='%s'." % (key, setup.get("title", "").encode("UTF-8", errors="ignore")))
            except ParseError as err:
                fd.seek(0)
                content = fd.readlines()
                line, column = err.position
                print("[Setup] XML Parse Error: '%s' in '%s'!" %
                      (err, setupFile))
                data = content[line - 1].replace("\t", " ").rstrip()
                print("[Setup] XML Parse Error: '%s'" % data)
                print("[Setup] XML Parse Error: '%s^%s'" %
                      ("-" * column, " " * (len(data) - column - 1)))
            except Exception as err:
                print(
                    "[Setup] Error: Unable to parse setup data in '%s' - '%s'!"
                    % (setupFile, err))
    except (IOError, OSError) as err:
        if err.errno == errno.ENOENT:  # No such file or directory.
            print("[Setup] Warning: Setup file '%s' does not exist!" %
                  setupFile)
        else:
            print("[Setup] Error %d: Opening setup file '%s'! (%s)" %
                  (err.errno, setupFile, err.strerror))
    except Exception as err:
        print(
            "[Setup] Error: Unexpected error opening setup file '%s'! (%s)"
            % (setupFile, err))
    return setupFileDom
コード例 #37
    def _reload(self, force=False):
        self._rtime = time() + self.reload_time
        if force or self.has_changed():
            ring_data = RingData.load(self.serialized_path)

            try:
                self._validation_hook(ring_data)
            except RingLoadError:
                if force:
                    raise
                else:
                    # In runtime reload at working server, it's ok to use old
                    # ring data if the new ring data is invalid.
                    return

            self._mtime = getmtime(self.serialized_path)
            self._devs = ring_data.devs
            # NOTE(akscram): Replication parameters like replication_ip
            #                and replication_port are required for
            #                replication process. An old replication
            #                ring doesn't contain this parameters into
            #                device. Old-style pickled rings won't have
            #                region information.
            for dev in self._devs:
                if dev:
                    dev.setdefault('region', 1)
                    if 'ip' in dev:
                        dev.setdefault('replication_ip', dev['ip'])
                    if 'port' in dev:
                        dev.setdefault('replication_port', dev['port'])

            self._replica2part2dev_id = ring_data._replica2part2dev_id
            self._part_shift = ring_data._part_shift
            self._rebuild_tier_data()

            # Do this now, when we know the data has changed, rather than
            # doing it on every call to get_more_nodes().
            #
            # Since this is to speed up the finding of handoffs, we only
            # consider devices with at least one partition assigned. This
            # way, a region, zone, or server with no partitions assigned
            # does not count toward our totals, thereby keeping the early
            # bailouts in get_more_nodes() working.
            dev_ids_with_parts = set()
            for part2dev_id in self._replica2part2dev_id:
                for dev_id in part2dev_id:
                    dev_ids_with_parts.add(dev_id)

            regions = set()
            zones = set()
            ips = set()
            self._num_devs = 0
            for dev in self._devs:
                if dev and dev['id'] in dev_ids_with_parts:
                    regions.add(dev['region'])
                    zones.add((dev['region'], dev['zone']))
                    ips.add((dev['region'], dev['zone'], dev['ip']))
                    self._num_devs += 1
            self._num_regions = len(regions)
            self._num_zones = len(zones)
            self._num_ips = len(ips)
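
has_changed() is not part of this excerpt; presumably it compares the ring file's current modification time against the _mtime cached by the last load. A minimal sketch under that assumption:

from os.path import getmtime

def has_changed(self):
    # True if the serialized ring on disk was modified since _reload()
    # recorded self._mtime.
    try:
        mtime = getmtime(self.serialized_path)
    except OSError:
        return False
    return mtime != self._mtime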
コード例 #38
 def get_db_mtime(self):
     # get database file last modification time
     try:
         return path.getmtime(settings['database'])
     except Exception:
         return 0
コード例 #39
ファイル: ctx.py プロジェクト: WebCampZg/conference-web
def css_last_modified(request):
    css_last_modified = getmtime(
        abspath(join(dirname(__file__), 'dist/styles/style.css')))
    return {'css_last_modified': css_last_modified}
コード例 #40
ファイル: __init__.py プロジェクト: prihoda/pyfaidx
    def __init__(self,
                 filename,
                 default_seq=None,
                 key_function=lambda x: x,
                 as_raw=False,
                 strict_bounds=False,
                 read_ahead=None,
                 mutable=False,
                 split_char=None,
                 duplicate_action="stop",
                 filt_function=lambda x: True,
                 one_based_attributes=True,
                 read_long_names=False,
                 sequence_always_upper=False,
                 rebuild=True,
                 build_index=True):
        """
        filename: name of fasta file
        key_function: optional callback function which should return a unique
          key for the self.index dictionary when given rname.
        as_raw: optional parameter to specify whether to return sequences as a
          Sequence() object or as a raw string.
          Default: False (i.e. return a Sequence() object).
        """
        self.filename = filename

        if filename.lower().endswith('.bgz') or filename.lower().endswith(
                '.gz'):
            # Only try to import Bio if we actually need the bgzf reader.
            try:
                from Bio import bgzf
                from Bio import __version__ as bgzf_version
                from distutils.version import LooseVersion
                if LooseVersion(bgzf_version) < LooseVersion('1.73'):
                    raise ImportError
            except ImportError:
                raise ImportError(
                    "BioPython >= 1.73 must be installed to read block gzip files.")
            else:
                self._fasta_opener = bgzf.open
                self._bgzf = True
        elif filename.lower().endswith('.bz2') or filename.lower().endswith(
                '.zip'):
            raise UnsupportedCompressionFormat(
                "Compressed FASTA is only supported in BGZF format. Use "
                "bgzip to compresss your FASTA.")
        else:
            self._fasta_opener = open
            self._bgzf = False

        try:
            self.file = self._fasta_opener(filename, 'r+b'
                                           if mutable else 'rb')
        except (ValueError, IOError) as e:
            if str(e).find('BGZF') > -1:
                raise UnsupportedCompressionFormat(
                    "Compressed FASTA is only supported in BGZF format. Use "
                    "the samtools bgzip utility (instead of gzip) to "
                    "compress your FASTA.")
            else:
                raise FastaNotFoundError(
                    "Cannot read FASTA file %s" % filename)

        self.indexname = filename + '.fai'
        self.read_long_names = read_long_names
        self.key_function = key_function
        try:
            key_fn_test = self.key_function(
                "TestingReturnType of_key_function")
            if not isinstance(key_fn_test, string_types):
                raise KeyFunctionError(
                    "key_function argument should return a string, not {0}".
                    format(type(key_fn_test)))
        except Exception as e:
            pass
        self.filt_function = filt_function
        assert duplicate_action in ("stop", "first", "last", "longest",
                                    "shortest", "drop")
        self.duplicate_action = duplicate_action
        self.as_raw = as_raw
        self.default_seq = default_seq
        if self._bgzf and self.default_seq is not None:
            raise FetchError(
                "The default_seq argument is not supported with using BGZF compression. Please decompress your FASTA file and try again."
            )
        if self._bgzf:
            self.strict_bounds = True
        else:
            self.strict_bounds = strict_bounds
        self.split_char = split_char
        self.one_based_attributes = one_based_attributes
        self.sequence_always_upper = sequence_always_upper
        self.index = OrderedDict()
        self.lock = Lock()
        self.buffer = dict((('seq', None), ('name', None), ('start', None),
                            ('end', None)))
        if not read_ahead or isinstance(read_ahead, integer_types):
            self.read_ahead = read_ahead
        elif not isinstance(read_ahead, integer_types):
            raise ValueError("read_ahead value must be int, not {0}".format(
                type(read_ahead)))

        self.mutable = mutable
        with self.lock:  # lock around index generation so only one thread calls method
            try:
                if os.path.exists(self.indexname) and getmtime(
                        self.indexname) >= getmtime(self.filename):
                    self.read_fai()
                elif os.path.exists(self.indexname) and getmtime(
                        self.indexname) < getmtime(
                            self.filename) and not rebuild:
                    self.read_fai()
                    warnings.warn(
                        "Index file {0} is older than FASTA file {1}.".format(
                            self.indexname, self.filename), RuntimeWarning)
                elif build_index:
                    self.build_index()
                    self.read_fai()
                else:
                    self.read_fai()

            except FastaIndexingError:
                os.remove(self.indexname)
                self.file.close()
                raise
            except Exception:
                # Handle potential exceptions other than 'FastaIndexingError'
                self.file.close()
                raise
コード例 #41
ファイル: main.py プロジェクト: zreos/wx_code
If an error occurs while the configuration is being modified, the previous configuration is used (except on the first load: if the first load fails, the program exits).
The MongoDB connection uses authentication; if yours has no username and password, adjust it as needed.
"""




# InfluxDB configuration
client = InfluxDBClient(host='localhost', port=8086)
# create the database
client.create_database('Spider')
client.switch_database('Spider')
# configuration file name
config_name = 'settings.conf'
WATCHED_FILES = [config_name]
WATCHED_FILES_MTIMES = [(f, getmtime(f)) for f in WATCHED_FILES]
_count_dict = {}
_size_dict = {}


def parse_config(file_name):
    try:
        cf = ConfigParser()
        cf.read(file_name)
        host = cf.get('Mongo_Uri', 'host')
        user = cf.get('Mongo_Uri', 'user')
        passwd = cf.get('Mongo_Uri', 'passwd')
        uri = 'mongodb://%s:%s@%s' % (user, passwd, host)
        interval = cf.getint('time', 'interval')
        dbs_and_cos = ast.literal_eval(cf.get('db', 'db_co_dict'))
    except:
コード例 #42
ファイル: lfs.py プロジェクト: mmather02/itools
 def get_mtime(self, path):
     path = self._resolve_path(path)
     mtime = getmtime(path)
     return datetime.fromtimestamp(mtime)
コード例 #43
def _get_modified_time(path):
    try:
        mtime = getmtime(path)
    except OSError:
        mtime = 0
    return datetime.datetime.fromtimestamp(mtime)
コード例 #44
def file_info(path):
    return {
        'size': getsize(path),
        'md5': md5_file(path),
        'mtime': getmtime(path)
    }
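
file_info bundles size, MD5 and modification time into one record. A typical use is to compare a stored record against the file's current state, doing the cheap mtime/size checks before the expensive hash; md5_file below is a stand-in for the helper the snippet above assumes:

import hashlib
from os.path import getsize, getmtime

def md5_file(path, chunk_size=1 << 20):
    # Stand-in helper: MD5 hex digest of a file, read in chunks so large
    # files are not loaded into memory at once.
    h = hashlib.md5()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

def has_changed(path, old_info):
    # If mtime and size are both unchanged, assume the content is too;
    # otherwise confirm with the hash.
    if old_info['mtime'] == getmtime(path) and old_info['size'] == getsize(path):
        return False
    return old_info['md5'] != md5_file(path)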
コード例 #45
    def clean_cargo_cache(self, force=False, show_size=False, keep=None, custom_path=False):
        def get_size(path):
            if os.path.isfile(path):
                return os.path.getsize(path) / (1024 * 1024.0)
            total_size = 0
            for dirpath, dirnames, filenames in os.walk(path):
                for f in filenames:
                    fp = os.path.join(dirpath, f)
                    total_size += os.path.getsize(fp)
            return total_size / (1024 * 1024.0)

        removing_anything = False
        packages = {
            'crates': {},
            'git': {},
        }
        import toml
        if os.environ.get("CARGO_HOME", "") and custom_path:
            cargo_dir = os.environ.get("CARGO_HOME")
        else:
            cargo_dir = path.join(self.context.topdir, ".cargo")
        if not os.path.isdir(cargo_dir):
            return
        cargo_file = open(path.join(self.context.topdir, "Cargo.lock"))
        content = toml.load(cargo_file)

        for package in content.get("package", []):
            source = package.get("source", "")
            version = package["version"]
            if source == u"registry+https://github.com/rust-lang/crates.io-index":
                crate_name = "{}-{}".format(package["name"], version)
                if not packages["crates"].get(crate_name, False):
                    packages["crates"][package["name"]] = {
                        "current": [],
                        "exist": [],
                    }
                packages["crates"][package["name"]]["current"].append(crate_name)
            elif source.startswith("git+"):
                name = source.split("#")[0].split("/")[-1].replace(".git", "")
                branch = ""
                crate_name = "{}-{}".format(package["name"], source.split("#")[1])
                crate_branch = name.split("?")
                if len(crate_branch) > 1:
                    branch = crate_branch[1].replace("branch=", "")
                    name = crate_branch[0]

                if not packages["git"].get(name, False):
                    packages["git"][name] = {
                        "current": [],
                        "exist": [],
                    }
                packages["git"][name]["current"].append(source.split("#")[1][:7])
                if branch:
                    packages["git"][name]["current"].append(branch)

        crates_dir = path.join(cargo_dir, "registry")
        crates_cache_dir = ""
        crates_src_dir = ""
        if os.path.isdir(path.join(crates_dir, "cache")):
            for p in os.listdir(path.join(crates_dir, "cache")):
                crates_cache_dir = path.join(crates_dir, "cache", p)
                crates_src_dir = path.join(crates_dir, "src", p)

        git_dir = path.join(cargo_dir, "git")
        git_db_dir = path.join(git_dir, "db")
        git_checkout_dir = path.join(git_dir, "checkouts")
        git_db_list = [f for f in os.listdir(git_db_dir) if not f.startswith('.')]
        git_checkout_list = os.listdir(git_checkout_dir)

        for d in list(set(git_db_list + git_checkout_list)):
            crate_name = d.replace("-{}".format(d.split("-")[-1]), "")
            if not packages["git"].get(crate_name, False):
                packages["git"][crate_name] = {
                    "current": [],
                    "exist": [],
                }
            if os.path.isdir(path.join(git_checkout_dir, d)):
                with cd(path.join(git_checkout_dir, d)):
                    git_crate_hash = glob.glob('*')
                if not git_crate_hash or not os.path.isdir(path.join(git_db_dir, d)):
                    packages["git"][crate_name]["exist"].append(("del", d, ""))
                    continue
                for d2 in git_crate_hash:
                    dep_path = path.join(git_checkout_dir, d, d2)
                    if os.path.isdir(dep_path):
                        packages["git"][crate_name]["exist"].append((path.getmtime(dep_path), d, d2))
            elif os.path.isdir(path.join(git_db_dir, d)):
                packages["git"][crate_name]["exist"].append(("del", d, ""))

        for d in os.listdir(crates_src_dir):
            crate_name = re.sub(r"\-\d+(\.\d+){1,3}.+", "", d)
            if not packages["crates"].get(crate_name, False):
                packages["crates"][crate_name] = {
                    "current": [],
                    "exist": [],
                }
            packages["crates"][crate_name]["exist"].append(d)

        total_size = 0
        for packages_type in ["git", "crates"]:
            sorted_packages = sorted(packages[packages_type])
            for crate_name in sorted_packages:
                crate_count = 0
                existed_crates = packages[packages_type][crate_name]["exist"]
                for exist in sorted(existed_crates, reverse=True):
                    current_crate = packages[packages_type][crate_name]["current"]
                    size = 0
                    exist_name = path.join(exist[1], exist[2]) if packages_type == "git" else exist
                    exist_item = exist[2] if packages_type == "git" else exist
                    if exist_item not in current_crate:
                        crate_count += 1
                        if int(crate_count) >= int(keep) or not current_crate or \
                           exist[0] == "del" or exist[2] == "master":
                            removing_anything = True
                            crate_paths = []
                            if packages_type == "git":
                                exist_checkout_path = path.join(git_checkout_dir, exist[1])
                                exist_db_path = path.join(git_db_dir, exist[1])
                                exist_path = path.join(git_checkout_dir, exist_name)

                                if exist[0] == "del":
                                    if os.path.isdir(exist_checkout_path):
                                        crate_paths.append(exist_checkout_path)
                                    if os.path.isdir(exist_db_path):
                                        crate_paths.append(exist_db_path)
                                    crate_count += -1
                                else:
                                    crate_paths.append(exist_path)

                                    exist_checkout_list = glob.glob(path.join(exist_checkout_path, '*'))
                                    if len(exist_checkout_list) <= 1:
                                        crate_paths.append(exist_checkout_path)
                                        if os.path.isdir(exist_db_path):
                                            crate_paths.append(exist_db_path)
                            else:
                                crate_paths.append(path.join(crates_cache_dir, "{}.crate".format(exist)))
                                crate_paths.append(path.join(crates_src_dir, exist))

                            size = sum(get_size(p) for p in crate_paths) if show_size else 0
                            total_size += size
                            print_msg = (exist_name, " ({}MB)".format(round(size, 2)) if show_size else "", cargo_dir)
                            if force:
                                print("Removing `{}`{} package from {}".format(*print_msg))
                                for crate_path in crate_paths:
                                    if os.path.exists(crate_path):
                                        delete(crate_path)
                            else:
                                print("Would remove `{}`{} package from {}".format(*print_msg))

        if removing_anything and show_size:
            print("\nTotal size of {} MB".format(round(total_size, 2)))

        if not removing_anything:
            print("Nothing to remove.")
        elif not force:
            print("\nNothing done. "
                  "Run `./mach clean-cargo-cache -f` to actually remove.")
コード例 #46
def Allocate_ALEPH_SYS(parameters, curdir, form, user_info=None):
    """
       Get the next available ALEPH SYS from the counter file, and allocate it as the
       SYS for this record. Increment the counter by one.
       ALEPH SYS allocation works in "slots" of free numbers. For example,
       000425201 -> 000634452 for a given database may be available.
       This means that it is necessary to take care not to overstep the maximum
       boundary. To this end, two counters (for each ALEPH Database) must be present:
          - last_SYS_<DATABASE>      (this contains the last SYS allocated for
            a database)
          - maximum_SYS_<DATABASE>   (this contains the MAXIMUM SYS allowed for a
            database)
       So, for example, for the CER database, there would be:
          - last_SYS_CER
          - maximum_SYS_CER
       When the maximum SYS has been reached, all further attempts to obtain ALEPH SYSs
       will fail, as this function will fail with an error.  To prevent this from coming
       as a surprise, however, when "last_SYS_<DATABASE>" gets somewhere near to the value
       stored in "maximum_SYS_<DATABASE>", a mail will be sent to the Admin with every
       SYS allocated, warning them that only N numbers remain free for the XY database.
       The number until MAX SYS which determines this period of warning emails is
       determined by a variable "warn_admin_at_N_sys_remaining".  It is set to 2000 by
       default, but can be changed.
       When the system allocates a new sys and there are 2000 or less free SYS remaining,
       the warning mails to ADMIN will be sent.

       @param alephdatabase: (string) the name of the ALEPH database for which a SYS is to be
        allocated.  E.g. "CER".  The absence of this will cause the function to fail.
        Also, the absence of either of the 2 counter files "last_SYS_${database}" and
        "maximum_SYS_${database}" will cause the function to fail.
    """
    mailfrom_addr = '%s Submission Engine <%s>' % (CFG_SITE_NAME,
                                                   CFG_SITE_SUPPORT_EMAIL)
    database = parameters['alephdatabase'].strip()
    counter_lastsys = "last_SYS_%s" % database
    counter_maxsys = "maximum_SYS_%s" % database

    ## ensure that "database" param is not empty, and exists in the list of legal DBs
    if database == "" or database not in CFG_LEGAL_ALEPH_DATABASES:
        ## error with supplied database
        msg = """ERROR: When trying to allocate an ALEPH SYS for a record, an invalid database name was"""\
              """ supplied: [%s]. It was therefore not possible to allocate the SYS.""" % database
        raise InvenioWebSubmitFunctionError(msg)

    ## before trying to make a lockfile, test if one exists and whether it is older than "CFG_MAX_AGE_LOCKFILE" seconds
    ## if so, raise an error and warn the admin:
    counter_lockfile = "last_SYS_%s.lock" % database
    try:
        lockfile_modtime = getmtime(
            "%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, counter_lockfile))
        time_now = mktime(localtime())
        time_since_last_lockfile_mod = time_now - lockfile_modtime
        if time_since_last_lockfile_mod > CFG_MAX_AGE_LOCKFILE:
            ## lockfile is old - warn admin and stop
            admin_msg = """ERROR: When trying to allocate an ALEPH SYS for a record in the [%s] DB, it was not possible """\
                        """to create a lockfile. An attempt was made at [%s], but a lockfile already existed with a """\
                        """last modification time of [%s]. It was therefore not possible to allocate the SYS.""" \
                        % (database, strftime("%d/%m/%Y %H:%M:%S", localtime(time_now)),
                           strftime("%d/%m/%Y %H:%M:%S", localtime(lockfile_modtime)))
            send_email(
                fromaddr=mailfrom_addr,
                toaddr=CFG_SITE_ADMIN_EMAIL,
                subject="WebSubmit ERROR - OLD ALEPH SYS LOCKFILE ENCOUNTERED!",
                content=admin_msg)
            user_msg = """ERROR: When trying to allocate an ALEPH SYS for a record in the [%s] DB, it was not possible""" \
                       """ to create a lockfile. It was therefore not possible to allocate the SYS.""" \
                       % database
            raise InvenioWebSubmitFunctionError(user_msg)
    except OSError:
        ## no lockfile
        pass

    ## before any counter operations, create a lockfile:
    got_lock = _create_SYS_counter_lockfile(database)

    if got_lock == 0:
        ## unable to create lockfile!
        msg = """ERROR: When trying to allocate an ALEPH SYS for a record in the [%s] DB, it was not possible"""\
              """ to create a lockfile within 60 seconds. It was therefore not possible to allocate the SYS.""" % database
        send_email(fromaddr=mailfrom_addr,
                   toaddr=CFG_SITE_ADMIN_EMAIL,
                   subject="WebSubmit ERROR - CANNOT CREATE LOCKFILE!",
                   content=msg)
        raise InvenioWebSubmitFunctionError(msg)

    ## test that counter files exist for "database":
    rw_count_lastsys_ok = access(
        "%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, counter_lastsys), R_OK | W_OK)
    rw_count_maxsys_ok = access(
        "%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, counter_maxsys), R_OK | W_OK)

    if not rw_count_lastsys_ok or not rw_count_maxsys_ok:
        ## cannot access the ALEPH counter files - critical error
        msg = """ERROR: When trying to allocate an ALEPH SYS for a record, either [%s] or [%s] (or both) was not"""\
              """ accessible. It was therefore not possible to allocate the SYS.""" % (counter_lastsys, counter_maxsys)
        lockfile_removed = _unlink_SYS_counter_lockfile(database)
        if lockfile_removed == 0:
            ## couldn't remove lockfile - mail ADMIN
            _mail_admin_because_lockfile_not_removeable(
                lockfilename="last_SYS_%s" % database, extramsg="\n\n" + msg)
        send_email(
            fromaddr=mailfrom_addr,
            toaddr=CFG_SITE_ADMIN_EMAIL,
            subject="WebSubmit ERROR - CANNOT ACCESS ALEPH SYS COUNTER(S)!",
            content=msg)
        raise InvenioWebSubmitFunctionError(msg)

    ## read last-sys and max-sys:
    try:
        fp = open("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, counter_lastsys), "r")
        fileval_lastsys = fp.read()
        fp.close()
        fp = open("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, counter_maxsys), "r")
        fileval_maxsys = fp.read()
        fp.close()
    except IOError:
        ## could not read one or both of the files
        msg = """ERROR: When trying to allocate an ALEPH SYS for a record, either [%s] or [%s] (or both) could not"""\
              """ be read. It was therefore not possible to allocate the SYS.""" % (counter_lastsys, counter_maxsys)
        lockfile_removed = _unlink_SYS_counter_lockfile(database)
        if lockfile_removed == 0:
            ## couldn't remove lockfile - mail ADMIN
            _mail_admin_because_lockfile_not_removeable(
                lockfilename="last_SYS_%s" % database, extramsg="\n\n" + msg)
        send_email(
            fromaddr=mailfrom_addr,
            toaddr=CFG_SITE_ADMIN_EMAIL,
            subject="WebSubmit ERROR - CANNOT ACCESS ALEPH SYS COUNTER(S)!",
            content=msg)
        raise InvenioWebSubmitFunctionError(msg)

    ## for the values from both files, clean any whitespace from beginning or end of file text and cast the result to an integer:
    try:
        lastsys = int(fileval_lastsys.strip())
        maxsys = int(fileval_maxsys.strip())
    except ValueError:
        ## the value in one or both of the files did not cast to an int!
        msg = """ERROR: When trying to allocate an ALEPH SYS for a record, either [%s] or [%s] (or both) contained invalid"""\
              """ (non-integer) values. It was therefore not possible to allocate the SYS.""" % (counter_lastsys, counter_maxsys)
        lockfile_removed = _unlink_SYS_counter_lockfile(database)
        if lockfile_removed == 0:
            ## couldn't remove lockfile - mail ADMIN
            _mail_admin_because_lockfile_not_removeable(
                lockfilename="last_SYS_%s" % database, extramsg="\n\n" + msg)
        send_email(
            fromaddr=mailfrom_addr,
            toaddr=CFG_SITE_ADMIN_EMAIL,
            subject=
            "WebSubmit ERROR - ALEPH SYS COUNTER(S) CONTAINS INVALID DATA!",
            content=msg)
        raise InvenioWebSubmitFunctionError(msg)

    ## check that "fileval_lastsys" is less than "fileval_maxsys". If yes, proceed - else fail and mail ADMIN
    if not (lastsys < maxsys):
        ## MAX SYS EXCEEDED
        msg = """ERROR: When trying to allocate an ALEPH SYS for a record, the value of [%s -> %d] is not less than the """\
              """value of [%s -> %d]. It was therefore not possible to allocate the SYS. A new SYS range must be allocated!"""\
              % (counter_lastsys, lastsys, counter_maxsys, maxsys)
        ## mail admin:
        send_email(
            fromaddr=mailfrom_addr,
            toaddr=CFG_SITE_ADMIN_EMAIL,
            subject=
            "WebSubmit ERROR - MAXIMUM ALEPH SYS COUNTER VALUE EXCEEDED!",
            content=msg)
        lockfile_removed = _unlink_SYS_counter_lockfile(database)
        if lockfile_removed == 0:
            ## couldn't remove lockfile - mail ADMIN
            _mail_admin_because_lockfile_not_removeable(
                lockfilename="last_SYS_%s" % database, extramsg="\n\n" + msg)
        raise InvenioWebSubmitFunctionError(msg)

    if maxsys - lastsys < CFG_WARNING_MAX_SYS_APPROACHING:
        ## WARN admin that MAX ALEPH SYS for this DB is approaching:
        _warn_admin_counterlimit_approaching(db=database,
                                             lastsys=lastsys,
                                             maxsys=maxsys)

    ## increment the value of the last SYS
    lastsys += 1

    ## cast sys to a string and pad the value on the left with leading zeros to 9 characters:
    cursys = "%09d%s" % (lastsys, database[0:3].upper().strip())

    ## now write out the new value of lastsys to the relevant counter file:
    ## make temporary file then move it later
    tmpfname = "%s_%s_%s" % (counter_lastsys,
                             strftime("%Y%m%d%H%M%S", localtime()), getpid())

    ## open temp counter file for writing:
    try:
        fp = open("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, tmpfname), "w")
        fp.write("%d" % (lastsys, ))
        fp.flush()
        fp.close()
    except IOError:
        ## could not write to temp file
        msg = """ERROR: When trying to allocate an ALEPH SYS for a record, could not write out new value for last SYS used """\
              """to a temporary file [%s]. It was therefore not possible to allocate a SYS for the record ([%s] was not """\
              """incremented.)""" % ("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, tmpfname), counter_lastsys)
        ## remove the "lock file"
        lockfile_removed = _unlink_SYS_counter_lockfile(database)
        if lockfile_removed == 0:
            ## couldn't remove lockfile - mail ADMIN
            _mail_admin_because_lockfile_not_removeable(
                lockfilename="last_SYS_%s" % database, extramsg="\n\n" + msg)
        send_email(
            fromaddr=mailfrom_addr,
            toaddr=CFG_SITE_ADMIN_EMAIL,
            subject=
            "WebSubmit ERROR - CANNOT CREATE TEMPORARY ALEPH SYS COUNTER FILE!",
            content=msg)
        raise InvenioWebSubmitFunctionError(msg)

    ## copy old counter file to backup version:
    try:
        copyfile("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, counter_lastsys),
                 "%s/%s.bk" % (CFG_WEBSUBMIT_COUNTERSDIR, counter_lastsys))
    except IOError:
        ## unable to make backup of counter file:
        msg = """ERROR: When trying to allocate an ALEPH SYS for a record, could not write out new value for last SYS used."""\
              """ Couldn't make a back-up copy of the SYS counter file [%s].""" % ("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, counter_lastsys),)
        ## remove the "lock file"
        lockfile_removed = _unlink_SYS_counter_lockfile(database)
        if lockfile_removed == 0:
            ## couldn't remove lockfile - mail ADMIN
            _mail_admin_because_lockfile_not_removeable(
                lockfilename="last_SYS_%s" % database, extramsg="\n\n" + msg)
        send_email(
            fromaddr=mailfrom_addr,
            toaddr=CFG_SITE_ADMIN_EMAIL,
            subject="WebSubmit ERROR - CANNOT WRITE BACK-UP ALEPH SYS COUNTER!",
            content=msg)
        raise InvenioWebSubmitFunctionError(msg)

    ## rename temp counter file to final counter file:
    try:
        rename("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, tmpfname),
               "%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, counter_lastsys))
    except OSError:
        ## couldnt rename the tmp file to final file name
        msg = """ERROR: When trying to allocate an ALEPH SYS for a record, could not write out new value for last SYS used."""\
              """ Created the temporary last SYS counter file [%s], but couldn't then rename it to the final counter file [%s]."""\
              """ It was therefore not possible to allocate a SYS for the record ([%s] was not incremented.)"""\
              % ("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, tmpfname), "%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, counter_lastsys), counter_lastsys)
        lockfile_removed = _unlink_SYS_counter_lockfile(database)
        if lockfile_removed == 0:
            ## couldn't remove lockfile - mail ADMIN
            _mail_admin_because_lockfile_not_removeable(
                lockfilename="last_SYS_%s" % database, extramsg="\n\n" + msg)
        send_email(
            fromaddr=mailfrom_addr,
            toaddr=CFG_SITE_ADMIN_EMAIL,
            subject="WebSubmit ERROR - CANNOT WRITE ALEPH SYS COUNTER FILE!",
            content=msg)
        raise InvenioWebSubmitFunctionError(msg)

    ## now that counter has been successfully incremented, write cursys out to the file "SNa500":
    try:
        fp = open("%s/SNa500" % curdir, "w")
        fp.write("%s" % cursys)
        fp.flush()
        fp.close()
    except IOError:
        ## unable to write out the SYS!
        msg = """ERROR: When trying to allocate an ALEPH SYS for a record, could not write out new SYS to file [%s/SNa500]."""\
              """ It was therefore not possible to allocate the SYS ([%s] was not incremented.)"""\
              % (curdir, counter_lastsys)
        lockfile_removed = _unlink_SYS_counter_lockfile(database)
        if lockfile_removed == 0:
            ## couldn't remove lockfile - mail ADMIN
            _mail_admin_because_lockfile_not_removeable(
                lockfilename="last_SYS_%s" % database, extramsg="\n\n" + msg)
        raise InvenioWebSubmitFunctionError(msg)

    ## finally, unlink the lock file:
    lockfile_removed = _unlink_SYS_counter_lockfile(database)
    if lockfile_removed == 0:
        ## couldn't remove lockfile - mail ADMIN
        msg = """ERROR: After allocating an ALEPH SYS for a record, it was not possible to remove the lock file [last_SYS_%s.lock] after the """\
              """SYS was allocated.""" % ("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, database),)
        _mail_admin_because_lockfile_not_removeable(
            lockfilename="last_SYS_%s" % database, extramsg="\n\n" + msg)
        raise InvenioWebSubmitFunctionError(msg)

    return ""
コード例 #47
 def check_file_change(self):
     """Restart if any files we're watching have changed."""
     for f, mtime in watched_files_mtimes:
         if getmtime(f) > mtime:
             self.restart()
コード例 #48
from os import path as op
import time

ospath = op.abspath("os库之路径操作.py")
print(ospath)

print(op.normpath("D://PYE//file.txt"))

print(
    op.relpath(
        r"C:\Users\PangFei\Desktop\PythonLearn\【mooc】Python语言程序设计\第7周 文件和数据格式化\7.1 文件的使用"
    ))

print(op.dirname("D://PYE//file.txt"))

print(op.basename("D://PYE//file.txt"))

print(op.join("D:/", "PYE/file.txt"))

print(op.exists("D://PYE//file.txt"))

print(op.isfile("D://PYE//file.txt"))

print(op.isdir("D://PYE//file.txt"))  # this is a file, not a directory, so the result is False

print(op.getatime(ospath))
print(op.getmtime(ospath))
print(
    time.ctime(op.getctime(ospath))
)  # time.ctime formats a time tuple or struct_time like this: 'Sun Jun 20 23:21:05 1993'.
print(op.getsize(ospath))
コード例 #49
def extract_subject(subj,
                    mask_name,
                    summary_func=np.mean,
                    residual=False,
                    exp_name=None):
    """Extract timeseries from within a mask, summarizing flexibly.

    Parameters
    ----------
    subj : string
        subject name
    mask_name : string
        name of mask in data hierarchy
    summary_func : callable or None
        callable to reduce data over voxel dimensions. can take an
        ``axis`` argument to operate over each frame, if this
        argument does not exist the function will be called on the
        n_tr x n_voxel array. if None, simply returns all voxels.
    residual : boolean
        If True, extract from the registered residual timecourse.
    exp_name : string
        experiment name, if not using the default experiment

    Returns
    -------
    data : dict with ndarray
        data array is n_runs x n_timepoint x n_dimension,
        data are not otherwise altered

    """
    project = gather_project_info()
    if exp_name is None:
        exp_name = project["default_exp"]

    # Get a path to the file where
    cache_dir = op.join(project["analysis_dir"], exp_name, subj, "evoked")

    try:
        os.makedirs(cache_dir)
    except OSError:
        pass

    if summary_func is None:
        func_name = ""
    else:
        func_name = summary_func.__name__
    cache_fname = mask_name + "_" + func_name
    cache_fname = cache_fname.strip("_") + ".npz"
    cache_file = op.join(cache_dir, cache_fname)

    # Get paths to the relevant files
    mask_file = op.join(project["data_dir"], subj, "masks",
                        "%s.nii.gz" % mask_name)
    ts_dir = op.join(project["analysis_dir"], exp_name, subj, "reg", "epi",
                     "unsmoothed")
    n_runs = len(glob(op.join(ts_dir, "run_*")))

    ftemp = op.join(ts_dir, "run_{:d}/{}_xfm.nii.gz")
    fstem = "res4d" if residual else "timeseries"
    ts_files = [ftemp.format(r_i, fstem) for r_i in range(n_runs)]

    # Get the hash value for this extraction
    cache_hash = hashlib.sha1()
    cache_hash.update(mask_name)
    cache_hash.update(str(op.getmtime(mask_file)))
    for ts_file in ts_files:
        cache_hash.update(str(op.getmtime(ts_file)))
    cache_hash = cache_hash.hexdigest()

    # If the file exists and the hash matches, return the data
    if op.exists(cache_file):
        with np.load(cache_file) as cache_obj:
            if cache_hash == str(cache_obj["hash"]):
                return dict(cache_obj.items())

    # Otherwise, do the extraction
    data = []
    mask = nib.load(mask_file).get_data().astype(bool)
    for run, ts_file in enumerate(ts_files):
        ts_data = nib.load(ts_file).get_data()
        roi_data = ts_data[mask].T

        if summary_func is None:
            data.append(roi_data)
            continue

        # Try to use the axis argument to summarize over voxels
        try:
            roi_data = summary_func(roi_data, axis=1)
        # Catch a TypeError and just call the function
        # This lets us do e.g. a PCA
        except TypeError:
            roi_data = summary_func(roi_data)

        data.append(roi_data)

    data = np.array(list(map(np.squeeze, data)))

    # Save the results and return them
    data_dict = dict(data=data, subj=subj, hash=cache_hash)
    np.savez(cache_file, **data_dict)

    return data_dict
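
The cache key above is a SHA-1 digest over the mask's and every timeseries file's modification time, so any change to the inputs invalidates the cached extraction. The same idea in isolation (with the strings encoded so the hashing also works under Python 3):

import hashlib
from os import path as op

def mtime_hash(files):
    # Digest of the inputs' modification times: if any file changes on
    # disk, the digest changes and a cache keyed on it is treated as stale.
    h = hashlib.sha1()
    for f in files:
        h.update(str(op.getmtime(f)).encode("utf-8"))
    return h.hexdigest()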
コード例 #50
def _static_file_get_modified_time(f):
    f = joinp(DIR.static, f)
    if path.exists(f):
        return dt.fromtimestamp(path.getmtime(f)).replace(tzinfo=tz.tzlocal())
    return None
コード例 #51
    def run(self):

        self.run_first()
        self.output_status()

        t_ts = t_out = t_loop = t_mtime = datetime.utcnow()

        while True:

            self.get_futures()
            # Directional
            # 0: none
            # 1: StochRSI
            #
            # Price
            # 0: best bid/offer
            # 1: vwap
            # 2: BitMex Index Difference
            # 3: BBands %B
            #
            # Volatility
            # 0: none
            # 1: ewma
            # 2: BBands Width
            # 3: ATR
            #
            # Quantity
            # 0: none
            # 1: BitMex Index Difference
            # 2: PPO
            # 3: Relative Volume
            # 4: BBands %B
            with open('bitmex-settings.json', 'r') as read_file:
                data = json.load(read_file)
                self.maxMaxDD = data['maxMaxDD']
                self.minMaxDD = data['minMaxDD']
                self.directional = data['directional']
                self.price = data['price']
                self.volatility = data['volatility']
                self.quantity_switch = data['quantity']
            # Restart if a new contract is listed
            if len(self.futures) != len(self.futures_prv):
                self.restart()

            self.update_positions()

            t_now = datetime.utcnow()

            # Update time series and vols
            if (t_now - t_ts).total_seconds() >= WAVELEN_TS:
                t_ts = t_now
                self.update_timeseries()
                self.update_vols()
                r = requests.get(
                    'https://testnet.bitmex.com/api/v1/instrument?symbol=XBTUSD'
                )
                r = r.json()
                j = r[0]['lastPrice']

                j2 = r[0]['markPrice']
                diff = j / j2
                print(diff)
                if diff < 1:
                    diff = 1 / diff
                diff = -1 * (1 - diff) * 100
                print(diff)

                diff = diff * INDEX_MOD
                diff = diff / 100 + 1
                self.diff = diff
                print(self.diff)

            self.place_orders()

            # Display status to terminal
            if self.output:
                t_now = datetime.utcnow()
                if (t_now - t_out).total_seconds() >= WAVELEN_OUT:
                    self.output_status()
                    t_out = t_now

            # Restart if file change detected
            t_now = datetime.utcnow()
            if (t_now - t_mtime).total_seconds() > WAVELEN_MTIME_CHK:
                t_mtime = t_now
                if getmtime(__file__) > self.this_mtime:
                    self.restart()

            t_now = datetime.utcnow()
            looptime = (t_now - t_loop).total_seconds()

            # Estimate mean looptime
            w1 = EWMA_WGT_LOOPTIME
            w2 = 1.0 - w1
            t1 = looptime
            t2 = self.mean_looptime

            self.mean_looptime = w1 * t1 + w2 * t2

            t_loop = t_now
            sleep_time = MIN_LOOP_TIME - looptime
            if sleep_time > 0:
                time.sleep(sleep_time)
            if self.monitor:
                time.sleep(WAVELEN_OUT)
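
The restart trigger near the end compares getmtime(__file__) against self.this_mtime, which is not set in this excerpt; presumably it is recorded once at start-up. A minimal sketch of that pattern, with restart() re-executing the interpreter (an assumption, the real restart() is not shown):

import os
import sys
from os.path import getmtime

class SelfReloading(object):
    def __init__(self):
        # Remember when our own source file was last modified.
        self.this_mtime = getmtime(__file__)

    def restart(self):
        # Replace the current process with a fresh interpreter running the
        # same script and arguments.
        os.execv(sys.executable, [sys.executable] + sys.argv)

    def maybe_restart(self):
        if getmtime(__file__) > self.this_mtime:
            self.restart()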
コード例 #52
from datetime import datetime, timedelta
from os.path import getmtime
import random
import requests
import atexit
import signal
import sys
import numpy as np

from market_maker import bitmex
from market_maker.settings import settings
from market_maker.utils import log, constants, errors, math

# Used for reloading the bot - saves modified times of key files
import os

watched_files_mtimes = [(f, getmtime(f)) for f in settings.WATCHED_FILES]

#
# Helpers
#
logger = log.setup_custom_logger('root')


class ExchangeInterface:
    def __init__(self, dry_run=False):
        self.dry_run = dry_run
        if len(sys.argv) > 1:
            self.symbol = sys.argv[1]
        else:
            self.symbol = settings.SYMBOL
        self.bitmex = bitmex.BitMEX(base_url=settings.BASE_URL,
コード例 #53
0
def uptodate():
    try:
        return path.getmtime(filename) == mtime
    except OSError:
        return False
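
The fragment above is a cache-validity closure: it captures filename and the mtime recorded when the file was read, and later reports whether the file is still unchanged. A generic sketch of building and using such a closure is shown below; the function and variable names are hypothetical.

from os import path

def read_with_validator(filename):
    """Read a file and return (contents, uptodate), where uptodate()
    tells the caller whether the cached contents are still current."""
    with open(filename, 'rb') as f:
        contents = f.read()
    mtime = path.getmtime(filename)

    def uptodate():
        try:
            return path.getmtime(filename) == mtime
        except OSError:
            return False

    return contents, uptodate

# source, still_fresh = read_with_validator('template.html')
# ... later, before reusing the cached source ...
# if not still_fresh():
#     source, still_fresh = read_with_validator('template.html')
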
コード例 #54
0
ファイル: sar.py プロジェクト: leelasdSI/rdkit_ipynb_tools
import time        # used below for the mtime-based timestamp fallback
import os.path as op

try:
    from misc_tools import apl_tools as apt
    AP_TOOLS = True
except ImportError:
    AP_TOOLS = False

if AP_TOOLS:
    #: Library version
    VERSION = apt.get_commit(__file__)
    # I use this to keep track of the library versions I use in my project notebooks
    print("{:45s} (commit: {})".format(__name__, VERSION))
else:
    print("{:45s} ({})".format(
        __name__,
        time.strftime("%y%m%d-%H:%M", time.localtime(op.getmtime(__file__)))))

BGCOLOR = "#94CAEF"
IMG_GRID_SIZE = 235

TABLE_INTRO = """<table id="sar_table" width="" cellspacing="1" cellpadding="1" border="1" align="center" height="60" summary="">"""
HTML_INTRO = """<!DOCTYPE html>
<html>
<head>
  <title>%s</title>
  <meta charset="UTF-8">

  <link rel="stylesheet" type="text/css" href="css/style.css" />

  <script src="lib/float.js"></script>
コード例 #55
0
def package(ictx, aTag):
    '''
    Package bitfile with address table and file list
        Generate the bitfile if it doesn't exist
    '''

    def get_max_mtime_in_dir(path):
        root, _, files = next(os.walk(path))

        # from datetime import datetime
        # for f in files:
        #     cprint(f"{join(root, f)} {datetime.fromtimestamp(getmtime(join(root, f)))}")
        return max(os.path.getmtime(join(root, f)) for f in files)

    ensure_vivado(ictx)

    if not exists(ictx.vivadoProjFile):
        cprint('Vivado project does not exist. Creating the project...', style='yellow')
        genproject(ictx, True, True, None, False)

    lProjName = ictx.currentproj.name
    lDepFileParser = ictx.depParser
    lTopEntity = lDepFileParser.settings.get('top_entity', kTopEntity)

    # Create bitfile if missing
    lBaseName = ictx.vivadoProdFileBase
    lBitPath = lBaseName + '.bit'
    gen_bitfile = False
    if not exists(lBitPath):
        cprint("Bitfile does not exist. Starting a build ...", style='yellow')
        gen_bitfile = True
    elif get_max_mtime_in_dir(ictx.vivado_impl_dir) > getmtime(lBitPath):
        cprint(f"Bitfile exists but it's older than the content of {ictx.vivado_impl_dir}. Rebuilding ...", style='yellow')
        gen_bitfile = True

    if gen_bitfile:
        bitfile(ictx)

    # Create SVF file if requested
    lSVFPath = None
    try:
        lVivadoCfg = lDepFileParser.settings['vivado']
        if _svfSettingName in lVivadoCfg:
            lSVFPath = lBaseName + '.svf'
            if not exists(lSVFPath):
                _svffile(ictx)
    except KeyError as e:
        lSVFPath = None

    # Create configuration memory files if requested and missing
    try:
        lVivadoCfg = lDepFileParser.settings['vivado']
        lActiveMemCfgs = [k for k, o in _memCfgKinds.items() if o in lVivadoCfg]
        lMemCfgFiles = [lBaseName + '.' + k for k in lActiveMemCfgs]

        if any(not exists(f) for f in lMemCfgFiles):
            memcfg(ictx)
    except KeyError as e:
        lMemCfgFiles = []

    lDebugProbesPath = lBaseName + '.ltx'
    if not os.path.exists(lDebugProbesPath):
        lDebugProbesPath = None

    lPkgPath = 'package'
    lPkgSrcPath = join(lPkgPath, 'src')

    # Cleanup first
    sh.rm('-rf', lPkgPath, _out=sys.stdout)

    # Create the folders
    try:
        os.makedirs(join(lPkgSrcPath, 'addrtab'))
    except OSError:
        pass

    # -------------------------------------------------------------------------
    # Generate a json signature file

    console.log("Generating summary files", style='blue')

    # -------------------------------------------------------------------------

    lHash = hash(ictx, output=join(lPkgSrcPath, 'hashes.txt'), verbose=True)
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    lSummary = dict(ictx.currentproj.settings)
    lSummary.update(
        {
            'build host': socket.gethostname().replace('.', '_'),
            'time': time.strftime("%a, %d %b %Y %H:%M:%S +0000"),
            'md5': lHash.hexdigest(),
        }
    )

    with open(join(lPkgSrcPath, 'summary.txt'), 'w') as lSummaryFile:
        yaml.safe_dump(lSummary, lSummaryFile, indent=2, default_flow_style=False)
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # Copy bitfile, memcfg, and address table into the packaging area
    console.log("Collecting bitfile", style='blue')
    sh.cp('-av', lBitPath, lPkgSrcPath, _out=sys.stdout)

    if lSVFPath is not None:
        console.log("Collecting SVF file {}".format(lSVFPath), style='blue')
        sh.cp('-av', lSVFPath, lPkgSrcPath, _out=sys.stdout)

    for f in lMemCfgFiles:
        console.log("Collecting memcfg {}".format(f), style='blue')
        sh.cp('-av', f, lPkgSrcPath, _out=sys.stdout)

    if lDebugProbesPath:
        console.log("Collecting debug-probes file", style='blue')
        sh.cp('-av', lDebugProbesPath, lPkgSrcPath, _out=sys.stdout)

    console.log("Collecting address tables", style='blue')
    for addrtab in ictx.depParser.commands['addrtab']:
        sh.cp('-avL', addrtab.filepath, join(lPkgSrcPath, 'addrtab'), _out=sys.stdout)
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # Tar everything up
    console.log("Creating tarball", style='blue')

    lTgzBaseName = '_'.join(
        [ictx.currentproj.settings['name']]
        + ([aTag] if aTag is not None else [])
        + [socket.gethostname().replace('.', '_'), time.strftime('%y%m%d_%H%M')]
    )
    lTgzPath = join(lPkgPath, lTgzBaseName + '.tgz')

    # Zip everything
    sh.tar(
        'cvfz',
        abspath(lTgzPath),
        '-C',
        lPkgPath,
        '--transform',
        's|^src|' + lTgzBaseName + '|',
        'src',
        _out=sys.stdout,
    )

    console.log(
        f"Package {lTgzPath} successfully created.",
        style='green',
    )
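
The bitfile check near the top of package() is a make-style staleness test: rebuild when the product is missing or older than the newest file in the implementation directory. A small standalone sketch of that check follows, with hypothetical names.

import os
from os.path import exists, getmtime, join

def needs_rebuild(product, input_dir):
    """Return True when product is missing or older than the newest file
    directly inside input_dir (subdirectories are not scanned)."""
    if not exists(product):
        return True
    root, _, files = next(os.walk(input_dir))
    if not files:
        return False
    newest_input = max(getmtime(join(root, f)) for f in files)
    return newest_input > getmtime(product)

# Example: only regenerate the bitfile when an implementation output changed.
# if needs_rebuild('top.bit', 'proj/top.runs/impl_1'):
#     bitfile(ictx)
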
コード例 #56
0
ファイル: views.py プロジェクト: wawrzek/graphite-web
    def check(self):
        if getmtime(settings.DASHBOARD_CONF) > self.last_read:
            self.load()
コード例 #57
0
    def _get_date(self):
        return strftime('%m/%d/%Y',
                        localtime(getmtime(self.file_name)))
コード例 #58
0
def ds_traverse(rootds,
                parent=None,
                json=None,
                recursive=False,
                all_=False,
                long_=False):
    """Hierarchical dataset traverser

    Parameters
    ----------
    rootds: Dataset
      Root dataset to be traversed
    parent: Dataset
      Parent dataset of the current rootds
    recursive: bool
       Recurse into subdirectories of the current dataset
    all_: bool
       Recurse into subdatasets of the root dataset

    Returns
    -------
    list of dict
      extracts and returns a (recursive) list of dataset(s) info at path
    """
    # extract parent info to pass to traverser
    fsparent = fs_extract(parent.path, parent.repo,
                          basepath=rootds.path) if parent else None

    # (recursively) traverse file tree of current dataset
    fs = fs_traverse(rootds.path,
                     rootds.repo,
                     render=False,
                     parent=fsparent,
                     recursive=all_,
                     json=json)
    size_list = [fs['size']]

    # (recursively) traverse each subdataset
    children = []
    # yoh: was in return results branch returning full datasets:
    # for subds in rootds.subdatasets(result_xfm='datasets'):
    # but since rpath is needed/used, decided to return relpaths
    for subds_rpath in rootds.subdatasets(result_xfm='relpaths'):

        subds_path = opj(rootds.path, subds_rpath)
        subds = Dataset(subds_path)
        subds_json = metadata_locator(path='.', ds_path=subds_path)

        def handle_not_installed():
            # for now just traverse as fs
            lgr.warning("%s is either not installed or lacks meta-data", subds)
            subfs = fs_extract(subds_path, rootds, basepath=rootds.path)
            # but add a custom type that it is a not installed subds
            subfs['type'] = 'uninitialized'
            # we need to kick it out from 'children'
            # TODO:  this is inefficient and cruel -- "ignored" should be made
            # smarter to ignore submodules for the repo
            if fs['nodes']:
                fs['nodes'] = [
                    c for c in fs['nodes'] if c['path'] != subds_rpath
                ]
            return subfs

        if not subds.is_installed():
            subfs = handle_not_installed()
        elif recursive:
            subfs = ds_traverse(subds,
                                json=json,
                                recursive=recursive,
                                all_=all_,
                                parent=rootds)
            subfs.pop('nodes', None)
            size_list.append(subfs['size'])
        # else just pick the data from metadata_file of each subdataset
        else:
            lgr.info(subds.path)
            if exists(subds_json):
                with open(subds_json) as data_file:
                    subfs = js.load(data_file)
                    subfs.pop('nodes', None)  # remove children
                    subfs['path'] = subds_rpath  # reassign the path
                    size_list.append(subfs['size'])
            else:
                # the same drill as if not installed
                lgr.warning("%s is installed but no meta-data yet", subds)
                subfs = handle_not_installed()

        children.extend([subfs])

    # sum sizes of all 1st level children dataset
    children_size = {}
    for subdataset_size in size_list:
        for size_type, subds_size in subdataset_size.items():
            children_size[size_type] = children_size.get(
                size_type, 0) + machinesize(subds_size)

    # update current dataset sizes to the humanized aggregate subdataset sizes
    fs['size'] = {
        size_type: humanize.naturalsize(size)
        for size_type, size in children_size.items()
    }
    # update self's size in the nodes sublist too
    fs['nodes'][0]['size'] = fs['size']

    # add dataset specific entries to its dict
    rootds_model = GitModel(rootds.repo)
    fs['tags'] = rootds_model.describe
    fs['branch'] = rootds_model.branch
    index_file = opj(rootds.path, '.git', 'index')
    fs['index-mtime'] = time.strftime(
        u"%Y-%m-%d %H:%M:%S", time.localtime(
            getmtime(index_file))) if exists(index_file) else ''

    # append children datasets info to current dataset
    fs['nodes'].extend(children)

    # render current dataset
    lgr.info('Dataset: %s', rootds.path)
    fs_render(fs, json=json, ds_path=rootds.path)
    return fs
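
A possible invocation of the traverser, assuming a DataLad dataset is installed at the given path (the import path and dataset location are illustrative and may differ between DataLad versions):

from datalad.distribution.dataset import Dataset  # import path may vary by version

ds = Dataset('/data/my-dataset')  # hypothetical dataset location
fs_info = ds_traverse(ds, recursive=True, all_=True)
print(fs_info['size'], len(fs_info['nodes']))
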
コード例 #59
0
    def _get_time(self):
        return strftime('%I:%M:%S %p',
                        localtime(getmtime(self.file_name)))
コード例 #60
0
ファイル: viewer.py プロジェクト: Sascha-Roedding/screenly
def check_update():
    """
    Check whether a newer version of Screenly OSE is available,
    performing the check at most once per day.
    Return True if the latest remote SHA was written to disk,
    False if no check was needed, and None if the check could
    not be completed.
    """

    sha_file = path.join(settings.get_configdir(), 'latest_screenly_sha')
    device_id_file = path.join(settings.get_configdir(), 'device_id')

    if path.isfile(sha_file):
        sha_file_mtime = path.getmtime(sha_file)
        last_update = datetime.fromtimestamp(sha_file_mtime)
    else:
        last_update = None

    if not path.isfile(device_id_file):
        device_id = ''.join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(15))
        with open(device_id_file, 'w') as f:
            f.write(device_id)
    else:
        with open(device_id_file, 'r') as f:
            device_id = f.read()

    logging.debug('Last update: %s' % str(last_update))

    git_branch = sh.git('rev-parse', '--abbrev-ref', 'HEAD').strip()
    git_hash = sh.git('rev-parse', '--short', 'HEAD').strip()

    if last_update is None or last_update < (datetime.now() -
                                             timedelta(days=1)):

        if not settings['analytics_opt_out'] and not is_ci():
            mp = Mixpanel('d18d9143e39ffdb2a4ee9dcc5ed16c56')
            try:
                mp.track(device_id, 'Version', {
                    'Branch': str(git_branch),
                    'Hash': str(git_hash),
                })
            except MixpanelException:
                pass

        if remote_branch_available(git_branch):
            latest_sha = fetch_remote_hash(git_branch)

            if latest_sha:
                with open(sha_file, 'w') as f:
                    f.write(latest_sha)
                return True
            else:
                logging.debug('Unable to fetch latest hash.')
                return
        else:
            touch(sha_file)
            logging.debug(
                'Unable to check if branch exist. Checking again tomorrow.')
            return
    else:
        return False
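
The once-per-day throttle above hinges on the marker file's mtime. A self-contained sketch of that gate, with hypothetical function names and the interval made explicit:

from datetime import datetime, timedelta
from os import path, utime

def should_check(marker_file, min_interval=timedelta(days=1)):
    """Return True if marker_file is missing or older than min_interval."""
    if not path.isfile(marker_file):
        return True
    last_check = datetime.fromtimestamp(path.getmtime(marker_file))
    return datetime.now() - last_check >= min_interval

def record_check(marker_file):
    """Create or touch the marker so the next check waits a full interval."""
    with open(marker_file, 'a'):
        pass
    utime(marker_file, None)  # bump mtime to "now"

# Example, guarding the expensive network check above:
# sha_file = path.join(settings.get_configdir(), 'latest_screenly_sha')
# if should_check(sha_file):
#     ...fetch the remote hash, write it, then record_check(sha_file)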