Example #1
 def test_SmallRulesFile_OneDiameter_SpecifyOutfile(self):
     for format in ['csv', 'tsv']:
         with self.subTest(format=format):
             diam = '2'
             outfile = NamedTemporaryFile(delete=False)
             parse_rules(
                 rules_file    = self.rules_file,
                 outfile       = outfile.name,
                 diameters     = diam,
                 output_format = format
             )
             self.assertListEqual(
                 list(
                     io_open(
                         outfile.name
                     )
                 ),
                 list(
                     io_open(
                         getattr(
                             self,
                             'ref_d2_'+format
                         )
                     )
                 )
             )
             outfile.close()
             unlink(outfile.name)
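
The test above depends on parse_rules and reference fixtures from its test class, but the underlying round-trip pattern can be exercised in isolation. A minimal self-contained sketch (file contents are illustrative):

from io import open as io_open
from os import unlink
from tempfile import NamedTemporaryFile

# Write known content, re-open it with io_open, and compare line lists --
# the same assertion style the test applies against its reference file.
outfile = NamedTemporaryFile(mode='w', suffix='.csv', delete=False)
outfile.write(u'a,b\n1,2\n')
outfile.close()
assert list(io_open(outfile.name)) == [u'a,b\n', u'1,2\n']
unlink(outfile.name)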
Example #2
def _get(cache_key, file_name, override_expire_secs=None, pickle=False):

	expire_datetime = None
	cache_path = xbmc_helper().get_file_path(CONST['CACHE_DIR'], file_name)

	if (override_expire_secs is not None):
		expire_datetime = datetime.now() - timedelta(seconds=override_expire_secs)
	elif 'expires' in CONST['CACHE'][cache_key].keys() and CONST['CACHE'][cache_key]['expires'] is not None:
		expire_datetime = datetime.now() - timedelta(seconds=CONST['CACHE'][cache_key]['expires'])

	cache_data = {
	        'data': None,
	        'is_expired': True,
	}

	if path.exists(cache_path):

		filectime = datetime.fromtimestamp(path.getctime(cache_path))
		filemtime = datetime.fromtimestamp(path.getmtime(cache_path))

		if filemtime is None or filectime > filemtime:
			filemtime = filectime
		if pickle is False:
			with io_open(file=cache_path, mode='r', encoding='utf-8') as cache_infile:
				cache_data.update({'data': cache_infile.read()})
		else:
			with io_open(file=cache_path, mode='rb') as cache_infile:
				cache_data.update({'data': pickle_load(cache_infile)})

		if expire_datetime is None or filemtime >= expire_datetime:
			cache_data.update({'is_expired': False})

	return cache_data
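
Stripped of the Kodi-specific helpers (xbmc_helper, CONST), the freshness check above reduces to comparing the file's mtime against "now minus TTL". A self-contained sketch of just that comparison (the function name is ours):

from datetime import datetime, timedelta
from os import path

def is_expired(cache_path, expire_secs):
    # Fresh if the file was modified within the last expire_secs seconds.
    expire_datetime = datetime.now() - timedelta(seconds=expire_secs)
    filemtime = datetime.fromtimestamp(path.getmtime(cache_path))
    return filemtime < expire_datetime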
Example #3
def open(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True):
    if 'b' in mode:
        return io_open(ensure_fs_path_encoding(file), str(mode), buffering=buffering,
                       errors=errors, newline=newline, closefd=closefd)
    else:
        return io_open(ensure_fs_path_encoding(file), str(mode), buffering=buffering,
                       encoding=encoding or 'utf-8', errors=errors, newline=newline,
                       closefd=closefd)
Example #4
File: compat.py  Project: zyxws012/PTVS
def open(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True):
    if 'b' in mode:
        return io_open(file, str(mode), buffering=buffering,
                       errors=errors, newline=newline, closefd=closefd)
    else:
        return io_open(file, str(mode), buffering=buffering,
                       encoding=encoding or 'utf-8', errors=errors, newline=newline,
                       closefd=closefd)
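
Both wrappers implement the same dispatch rule: binary modes must not receive an encoding, and text modes fall back to UTF-8 when none is given. A condensed sketch of that rule (open_compat is our name, not either project's):

from io import open as io_open

def open_compat(file, mode='r', encoding=None):
    # Binary handles carry no encoding; text handles default to UTF-8.
    if 'b' in mode:
        return io_open(file, mode)
    return io_open(file, mode, encoding=encoding or 'utf-8')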
Example #5
def _set(cache_key, file_name, data, pickle=False):

	cache_path = xbmc_helper().get_file_path(CONST['CACHE_DIR'], file_name)
	if pickle is False:
		with io_open(file=cache_path, mode='w', encoding='utf-8') as cache_outfile:
			cache_outfile.write(compat._unicode(data))
	else:
		with io_open(file=cache_path, mode='wb') as cache_outfile:
			pickle_dump(data, cache_outfile, protocol=0)
Example #6
 def test_GoodInputFormatCSV(self):
     diam = '2'
     outfile = NamedTemporaryFile(delete=False)
     parse_rules(rules_file=self.rules_file,
                 input_format='csv',
                 diameters=diam,
                 outfile=outfile.name)
     self.assertListEqual(list(io_open(outfile.name)),
                          list(io_open(self.ref_d2_csv)))
     outfile.close()
     unlink(outfile.name)
Example #7
    def _open_csv_file(self, charset):
        """
        Open the file in mode dependent on the python version.

        :charset: File encoding
        :returns: handle to the newly-opened file
        :raises: IOError if the file cannot be read
        """
        if six.PY2:
            return io_open(self.filename, "rb")
        return io_open(self.filename, "rt", encoding=charset)
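
The six.PY2 branch exists because Python 2's csv module consumes raw bytes while Python 3's consumes decoded text. The same dispatch without the six dependency might look like this (a sketch; open_csv_file is our name):

import sys
from io import open as io_open

def open_csv_file(filename, charset='utf-8'):
    # Python 2's csv module wants bytes; Python 3's wants decoded text.
    if sys.version_info[0] < 3:
        return io_open(filename, 'rb')
    return io_open(filename, 'rt', encoding=charset)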
Example #8
def main(options):
    if options.extension.startswith(
            "."
    ):  # Ensure user specified extension does not include leading '.'
        options.extension = options.extension[1:]
    try:
        config_file = io_open(options.config, encoding="utf8")
    except IOError:
        sys.stderr.write(
            "\nConfiguration file not found (specify with -c or use the default 'config.ini')\n"
        )
        sys.exit()
    depedit = DepEdit(config_file=config_file, options=options)
    if sys.platform == "win32":  # Print \n new lines in Windows
        import os
        import msvcrt
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
    files = glob(options.file)
    for filename in files:
        infile = io_open(filename, encoding="utf8")
        basename = os.path.basename(filename)
        docname = basename[:basename.rfind(
            ".")] if options.docname or options.sent_id else filename
        output_trees = depedit.run_depedit(infile,
                                           docname,
                                           sent_id=options.sent_id,
                                           docname=options.docname,
                                           stepwise=options.stepwise)
        if len(files) == 1:
            # Single file being processed, just print to STDOUT
            if sys.version_info[0] < 3:
                print(output_trees.encode("utf-8"), end="")
            else:
                print(output_trees, end="")
        else:
            # Multiple files, add '.depedit' or other infix from options before extension and write to file
            if options.outdir != "":
                if not options.outdir.endswith(os.sep):
                    options.outdir += os.sep
            outname = options.outdir + basename
            if "." in filename:
                extension = outname[outname.rfind(".") + 1:]
                if options.extension != "":
                    extension = options.extension
                outname = outname[:outname.rfind(".")]
                outname += options.infix + "." + extension
            else:
                outname += options.infix + "." + options.extension if options.extension else options.infix
            if sys.version_info[0] < 3:
                with open(outname, 'wb') as f:
                    f.write(output_trees.encode("utf-8"))
            else:
                with open(outname, 'w', encoding="utf8") as f:
                    f.write(output_trees)  # already text on Python 3; encoding is handled by open()
Example #9
 def test_print_rpSBML(self):
     rpsbml = rpSBML(name='rpSBML_test')
     rpsbml.genericModel('RetroPath_Pathway_test', 'RP_model_test',
                         self.comp_xref, 'MNXC3', 999999, 0)
     rpsbml.createPathway('rp_pathway')
     rpsbml.createPathway('central_species')
     with NamedTemporaryFile() as tempf:
         rpsbml.writeSBML(tempf.name)
         self.assertListEqual(
             list(io_open(tempf.name)),
             list(io_open(os_path.join('data', 'rpSBML_test_sbml.xml'))))
Example #10
 def test_OneDiameter(self):
     for diam in ['2']:
         with self.subTest(diam=diam):
             outfile = NamedTemporaryFile(delete=False)
             parse_rules(
                 rules_file=self.rules_file,
                 diameters=diam,
                 outfile=outfile.name
             )
             self.assertListEqual(
                 list(io_open(outfile.name)),
                 list(io_open(self.ref_d2_csv))
             )
             outfile.close()
             unlink(outfile.name)
Example #11
    def scrape_file(self):
        """Scrape file."""
        if not self._check_wellformed and self._only_wellformed:
            self._messages.append("Skipping scraper: Well-formed check not "
                                  "used.")
            return

        # Check file header
        with io_open(self.filename, "rb") as input_file:
            first_line = input_file.readline()
        if SPSS_PORTABLE_HEADER not in first_line:
            self._errors.append("File is not SPSS Portable format.")

        # Try to convert file with pspp-convert. If conversion is successful
        # (converted.por file is produced), the original file is well-formed.
        temp_dir = tempfile.mkdtemp()
        temp_file = os.path.join(temp_dir, "converted.por")

        try:
            shell = Shell([PSPP_PATH, self.filename, temp_file])
            if shell.stderr:
                self._errors.append(shell.stderr)
            self._messages.append(shell.stdout)
            if os.path.isfile(temp_file):
                self._messages.append("File conversion was succesful.")
            else:
                self._errors.append("File conversion failed.")
        finally:
            shutil.rmtree(temp_dir)
            for md_class in self._supported_metadata:
                self.streams.append(
                    md_class(self._given_mimetype, self._given_version))
            self._check_supported(allow_unav_mime=True,
                                  allow_unav_version=True)
Example #12
 def readFile(filename):
     printDebugMessage(u'readFile', u'Begin', 1)
     fh = io_open(filename, u'r')
     data = fh.read()
     fh.close()
     printDebugMessage(u'readFile', u'End', 1)
     return data
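
The explicit open/close pair above leaks the handle if read() raises. An equivalent using a context manager (a sketch, with the debug tracing omitted):

from io import open as io_open

def read_file(filename):
    # The with-statement closes the handle on every exit path.
    with io_open(filename, u'r') as fh:
        return fh.read()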
Example #13
    def load(self, rule_file, encoding='utf-8'):
        self.body = '@external\n'

        # parse rule file
        with io_open(rule_file, encoding=encoding) as f:
            for line in f:
                # parse input line
                if line.strip().lower().startswith('input'):
                    input_header, input_line = line.split(None, 1)

                    # sanity check
                    if input_header.lower() != 'input':
                        raise ValueError

                    # parse *param0="value0",*param1="value1",...
                    for pair in input_line.split(','):
                        label, value = pair.split('=')
                        self.params[label.strip()] = value.strip()

                # parse output line
                elif line.strip().lower().startswith('output'):
                    output_header, output_line = line.split(None, 1)

                    # sanity check
                    if output_header.lower() != 'output':
                        raise ValueError

                    # use line as is
                    self.output = output_line.strip()

                # parse rule
                else:
                    self.body += line
Example #14
    def scrape_file(self):
        """Scrape file."""
        # Check file header
        with io_open(self.filename, "rb") as input_file:
            first_line = input_file.readline()
        if first_line.count(SPSS_PORTABLE_HEADER) != 1:
            self._errors.append("File is not SPSS Portable format.")

        # Try to convert file with pspp-convert. If conversion is successful
        # (converted.por file is produced), the original file is well-formed.
        temp_dir = tempfile.mkdtemp()
        temp_file = os.path.join(temp_dir, "converted.por")

        try:
            shell = Shell([PSPP_PATH, self.filename, temp_file])
            if shell.stderr:
                self._errors.append(shell.stderr)
            self._messages.append(shell.stdout)
            if os.path.isfile(temp_file):
                self._messages.append("File conversion was succesful.")
            else:
                self._errors.append("File conversion failed.")
        finally:
            shutil.rmtree(temp_dir)
            self.streams = list(
                self.iterate_models(well_formed=self.well_formed))
            self._check_supported(allow_unav_mime=True,
                                  allow_unav_version=True)
Example #15
def serve_landing_page(request, target, engagement):
    with io_open(engagement.landing_page.path, 'r', encoding='utf-8') as f:
        page_source = f.read()

    page_source = replace_shortcodes(page_source, engagement, target)

    return render(request, 'index.html', {'soup': page_source})
Example #16
def test_comppath():
    """Test CLI --comppath"""
    if IS_WIN:
        skip("no manpages on windows")
    tmp = mkdtemp()
    man = path.join(tmp, "tqdm_completion.sh")
    assert not path.exists(man)
    try:
        main(argv=['--comppath', tmp], fp=NULL)
    except SystemExit:
        pass
    else:
        raise SystemExit("Expected system exit")
    assert path.exists(man)

    # check most important options appear
    with io_open(man, mode='r', encoding='utf-8') as fd:
        script = fd.read()
    opts = set([
        '--help', '--desc', '--total', '--leave', '--ncols', '--ascii',
        '--dynamic_ncols', '--position', '--bytes', '--nrows', '--delim',
        '--manpath', '--comppath'
    ])
    assert all(args in script for args in opts)
    rmtree(tmp, True)
Example #17
def ctm_to_textgrid(phone_ctm, out_directory, utt2dur, frameshift=0.01):
    textgrid_write_errors = {}
    frameshift = Decimal(str(frameshift))
    if not os.path.exists(out_directory):
        os.makedirs(out_directory)

    utt2dur_mapping = generate_utt2dur(utt2dur)

    for i, (k, v) in enumerate(sorted(phone_ctm.items())):
        maxtime = Decimal(str(utt2dur_mapping[k]))
        try:
            tg = TextGrid(maxTime=maxtime)
            phonetier = IntervalTier(name='phones', maxTime=maxtime)
            for interval in v:
                if maxtime - interval[1] < frameshift:
                    interval[1] = maxtime
                # remove B/E/I position markers and stress (0,1) digits from the phoneme
                interval[2] = re.sub(r"\d+", "", interval[2].split('_')[0])
                phonetier.add(*interval)
            tg.append(phonetier)
            outpath = os.path.join(out_directory, k + '.TextGrid')
            tg.write(outpath)
        except Exception as e:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            textgrid_write_errors[k] = '\n'.join(
                traceback.format_exception(exc_type, exc_value, exc_traceback))
    if textgrid_write_errors:
        error_log = os.path.join(out_directory, 'output_errors.txt')
        with io_open(error_log, 'w', encoding='utf-8') as f:
            f.write(
                u'The following exceptions were encountered during the output of the alignments to TextGrids:\n\n'
            )
            for k, v in textgrid_write_errors.items():
                f.write(u'{}:\n'.format(k))
                f.write(u'{}\n\n'.format(v))
Example #18
def open_binary(package, resource):
    """Return a file-like object opened for binary reading of the resource."""
    resource = _normalize_path(resource)
    package = _get_package(package)
    # Using pathlib doesn't work well here due to the lack of 'strict' argument
    # for pathlib.Path.resolve() prior to Python 3.6.
    package_path = os.path.dirname(package.__file__)
    relative_path = os.path.join(package_path, resource)
    full_path = os.path.abspath(relative_path)
    try:
        return io_open(full_path, 'rb')
    except IOError:
        # This might be a package in a zip file.  zipimport provides a loader
        # with a functioning get_data() method, however we have to strip the
        # archive (i.e. the .zip file's name) off the front of the path.  This
        # is because the zipimport loader in Python 2 doesn't actually follow
        # PEP 302.  It should allow the full path, but actually requires that
        # the path be relative to the zip file.
        try:
            loader = package.__loader__
            full_path = relative_path[len(loader.archive) + 1:]
            data = loader.get_data(full_path)
        except (IOError, AttributeError):
            package_name = package.__name__
            message = '{!r} resource not found in {!r}'.format(
                resource, package_name)
            raise FileNotFoundError(message)
        return BytesIO(data)
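
Whether the package lives on disk (the io_open branch) or inside a zip archive (the BytesIO branch), the caller sees the same file-like interface. A hedged usage sketch (package and resource names are purely illustrative):

# 'mypackage' and 'data.bin' are placeholders for a real package/resource pair.
with open_binary('mypackage', 'data.bin') as fp:
    payload = fp.read()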
Example #19
	def set_data(self, filename, data, dir_type='DATA_DIR'):
		data_file_path = self.get_file_path(CONST[dir_type], filename)

		with io_open(file=data_file_path, mode='w', encoding='utf-8') as data_outfile:
			data_outfile.write(compat._decode(compat._unicode(data)))

		return data_file_path
Example #20
 def readFile(filename):
     printDebugMessage(u"readFile", u"Begin", 1)
     fh = io_open(filename, u"r")
     data = fh.read()
     fh.close()
     printDebugMessage(u"readFile", u"End", 1)
     return data
Example #21
 def get_file_contents(self, file_path):
     data = None
     if os.path.exists(file_path):
         with io_open(file=file_path, mode='r',
                      encoding='utf-8') as data_infile:
             data = data_infile.read()
     return data
Example #22
def obj_select(image_id):
    """fetch all object run-length data for one image"""
    object_list = []
    with io_open(TRAIN_LABEL_CSV, 'r') as f:
        for line in f:
            if search(image_id, line) is not None:
                object_list.append(sub(image_id + ",", "", line.strip("\n")))
    return object_list
Example #23
def find_version():
    """Get version from loudml_py/_version.py"""
    _locals = locals()
    src_dir = os.path.abspath(os.path.dirname(__file__))
    version_file = os.path.join(src_dir, 'loudml_py', '_version.py')
    with io_open(version_file, mode='r') as fd:
        exec(fd.read())  # __version__ is set in the exec call.
        return _locals['__version__']
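
An alternative that avoids exec entirely is to scan for the assignment with a regular expression. A sketch, assuming the version file contains a line of the form __version__ = '1.2.3':

import re
from io import open as io_open

def find_version_re(version_file):
    # Assumes a single line like: __version__ = '1.2.3'
    with io_open(version_file, mode='r') as fd:
        match = re.search(r"^__version__\s*=\s*['\"]([^'\"]+)['\"]",
                          fd.read(), re.M)
    if match:
        return match.group(1)
    raise RuntimeError('Unable to find version string.')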
Example #24
 def getCompiledCode(self, relativeFilePath):
     filename = path.basename(relativeFilePath)
     name = path.splitext(filename)[0]
     if name in ContractsFixture.compiledCode:
         return ContractsFixture.compiledCode[name]
     dependencySet = set()
     self.getAllDependencies(relativeFilePath, dependencySet)
     ContractsFixture.ensureCacheDirectoryExists()
     compiledOutputPath = path.join(COMPILATION_CACHE, name)
     lastCompilationTime = path.getmtime(compiledOutputPath) if path.isfile(
         compiledOutputPath) else 0
     needsRecompile = False
     for dependencyPath in dependencySet:
         if (path.getmtime(dependencyPath) > lastCompilationTime):
             needsRecompile = True
             break
     if (needsRecompile):
         print('compiling ' + name + '...')
         extension = path.splitext(filename)[1]
         compiledCode = None
         if extension == '.sol':
             compiledCode = bytearray.fromhex(
                 self.compileSolidity(relativeFilePath)['evm']['bytecode']
                 ['object'])
         else:
             raise ValueError('Unsupported contract extension: ' + extension)
         with io_open(compiledOutputPath, mode='wb') as file:
             file.write(compiledCode)
     else:
         pass  #print('using cached compilation for ' + name)
     with io_open(compiledOutputPath, mode='rb') as file:
         compiledCode = file.read()
         contractSize = len(compiledCode)
         if (contractSize >= CONTRACT_SIZE_LIMIT):
             print('%sContract %s is OVER the size limit by %d bytes%s' %
                   (bcolors.FAIL, name, contractSize - CONTRACT_SIZE_LIMIT,
                    bcolors.ENDC))
         elif (contractSize >= CONTRACT_SIZE_WARN_LEVEL):
             print('%sContract %s is under size limit by only %d bytes%s' %
                   (bcolors.WARN, name, CONTRACT_SIZE_LIMIT - contractSize,
                    bcolors.ENDC))
         elif (contractSize > 0):
             pass  #print('Size: %i' % contractSize)
         ContractsFixture.compiledCode[name] = compiledCode
         return (compiledCode)
Example #25
    def test_retrieve_std_streams_from_rule(self):
        '''
        Tests running a rule from a client-side .r file.
        The rule writes things to its stdout that we
        get back on the client side
        '''

        # Wrong buffer length on older versions
        if self.sess.server_version < (4, 1, 7):
            self.skipTest('For iRODS 4.1.7 and newer')

        session = self.sess

        # test metadata
        some_string = u'foo'
        some_other_string = u'我喜欢麦当劳'
        err_string = u'⛔'

        # make rule file
        ts = time.time()
        rule_file_path = "/tmp/test_{ts}.r".format(**locals())
        rule = textwrap.dedent(u'''\
                                test {{
                                    # write stuff
                                    writeLine("stdout", *some_string);
                                    writeLine("stdout", *some_other_string);
                                    writeLine("stderr", *err_string);
                                }}
                                INPUT *some_string="{some_string}",*some_other_string="{some_other_string}",*err_string="{err_string}"
                                OUTPUT ruleExecOut'''.format(**locals()))

        with io_open(rule_file_path, "w", encoding='utf-8') as rule_file:
            rule_file.write(rule)

        # run test rule
        myrule = Rule(session, rule_file_path)
        out_array = myrule.execute()

        # retrieve out buffer
        buf = out_array.MsParam_PI[0].inOutStruct.stdoutBuf.buf

        # it's binary data (BinBytesBuf) so must be decoded
        buf = buf.decode('utf-8')

        # check that we get our strings back
        self.assertIn(some_string, buf)
        self.assertIn(some_other_string, buf)

        # same thing stderr buffer
        buf = out_array.MsParam_PI[0].inOutStruct.stderrBuf.buf

        # decode and check
        buf = buf.decode('utf-8')
        self.assertIn(err_string, buf)

        # remove rule file
        os.remove(rule_file_path)
Example #26
def read_html_file_as_reencoded_text(htmlpath):
    txt = ""
    try:
        # using io.open w/ newline='' to preserve the file's original newlines
        with io_open(str(htmlpath), "rt", encoding="cp1252", newline='') as f:
            txt = f.read()
    except (IOError, UnicodeError):
        pass

    txt = txt.split('charset=windows-1252')

    if len(txt) > 1:
        txt = 'charset=utf-8'.join(txt)
    else:
        with io_open(str(htmlpath), "rt", encoding="utf-8", newline='') as f:
            txt = f.read()

    return txt
Example #27
File: pro.py  Project: CI-WATER/gsshapy
    def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
        """
        Projection File Read from File Method
        """
        # Set file extension property
        self.fileExtension = extension

        # Open file and parse into a data structure
        with io_open(path, 'r') as f:
            self.projection = f.read()
Example #28
 def get_local_file(self, filename):
     """Attempts to load a user from a locally-saved HTML file.  If successful, returns a BeautifulSoup HTML parse tree.  Useful for testing without hammering OKC's servers."""
     try:
         with io_open(filename, 'r', encoding='utf8') as htmlfile:
             html = htmlfile.read()
         return BeautifulSoup(html, 'html5lib')
     except Exception as e:
         print('Could not load local HTML file: %s' % filename)
         print(e)
         return None
Example #29
def generate_mapping(mapping_file):
    mapping = {}
    with io_open(mapping_file, 'r', encoding='utf-8') as fid:
        word_num_pairs = fid.readlines()
        for item in word_num_pairs:
            word = item.strip().split()[0]
            num = item.strip().split()[1]
            mapping[int(num)] = word

    return mapping
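
A quick self-check of the parser against a throwaway file (contents are illustrative):

from os import unlink
from tempfile import NamedTemporaryFile

tmp = NamedTemporaryFile(mode='w', delete=False)
tmp.write(u'hello 0\nworld 1\n')
tmp.close()
assert generate_mapping(tmp.name) == {0: u'hello', 1: u'world'}
unlink(tmp.name)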
Example #30
 def toSide(self, fpath, ftype=None, fname=None):
     file_type = ftype or fpath.rsplit('.', 1)[-1]
     display_name = fname or path.split(fpath)[-1]
     if RE_URL.match(fpath):
         return self.comparisons.side_from_url(fpath,
                                               file_type=file_type,
                                               display_name=display_name)
     return self.comparisons.side_from_file(io_open(fpath, mode="rb"),
                                            file_type=file_type,
                                            display_name=display_name)
Example #31
def _integration_test(argv, io_open, Random, create_engine,
                      survey_section='saa_survey',
                      config_file='integration-test.ini'):  # pragma: nocover
    logging.basicConfig(level=logging.DEBUG)

    email_addr = argv[1]
    survey_id, db_url = SecureSurvey._config(
        io_open(config_file), config_file, survey_section)

    saa = SecureSurvey(create_engine(db_url).connect, Random(), survey_id)
    _explore(email_addr, saa)
Example #32
    def _read(self, directory, filename, session, path, name, extension,
              spatial, spatialReferenceID, replaceParamFile):
        """
        Projection File Read from File Method
        """
        # Set file extension property
        self.fileExtension = extension

        # Open file and parse into a data structure
        with io_open(path, 'r') as f:
            self.projection = f.read()
Example #33
def _read_file(filename):
    here = os.path.abspath(os.path.dirname(__file__))
    if sys.version_info[0] > 2:
        with open(os.path.join(here, filename), 'r', encoding='utf-8') as file_handle:
            return file_handle.read()
    else:
        # With python 2.7, open has no encoding parameter, resulting in TypeError
        # Fix with io.open (slow but works)
        from io import open as io_open
        with io_open(os.path.join(here, filename), 'r', encoding='utf-8') as file_handle:
            return file_handle.read()
Example #34
 def __init__(self, here, ops):
     '''
     :param str here:
     '''
     io_open, path_join, path_exists, glob = ops
     make = lambda there: Path(there, ops)  # noqa
     self.joinpath = lambda there: make(path_join(here, there))
     self.open = lambda **kwargs: io_open(here, **kwargs)
     self.exists = lambda: path_exists(here)
     self.glob = lambda pattern: (make(it)
                                  for it in glob(path_join(here, pattern)))
     self._path = here
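
Because every filesystem call is injected through the ops tuple, production code wires in the real standard-library callables while tests can substitute fakes. A sketch of the production wiring:

from glob import glob
from io import open as io_open
from os.path import exists as path_exists, join as path_join

# Tuple order matches the unpacking in __init__:
# io_open, path_join, path_exists, glob.
ops = (io_open, path_join, path_exists, glob)
here = Path('.', ops)
assert here.exists()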
Example #35
def comp_open(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None):
    """
    Compatibility wrapper for :py:func:`open`. This function makes more advanced
    options from Python 3 available for Python 2. Similar to the Python 3 implementation
    of :py:func:`open`, this function may act as a :py:keyword:`with` statement context manager.

    Unlike the original :py:func:`open` function in Python 2, this function does not
    return a legacy file object (``<type 'file'>``) when used on Python 2. Instead, as in
    Python 3, it returns an :py:mod:`io` wrapper object, depending on what kind of file has
    been opened (binary or text). For text files, this will most likely be something like
    ``<type '_io.TextIOWrapper'>``.

    .. note::

       In case no encoding is specified, the default encoding as defined by
       :py:data:`magrathea.conf.default.DEFAULT_CHARSET` will be used.

    :param file: file is either a string or bytes object giving the pathname or
                 an integer file descriptor of the file to be wrapped
    :param mode: specifies the mode in which the file is opened
    :param buffering: optional integer used to set the buffering policy
    :param encoding: name of the encoding used to decode or encode the file
    :param errors: optional string that specifies how encoding and decoding errors are to be handled
    :param newline: controls how universal newlines mode works
    :param closefd: if False and a file descriptor rather than a filename was given, the underlying
                    file descriptor will be kept open when the file is closed
    :param opener: custom opener
    :returns: a :py:term:`file object`
    """
    if not encoding:
        encoding = get_conf('DEFAULT_CHARSET')
    if sys.version_info < (3, 0, 0):
        fp = io_open(
            file,
            mode=mode,
            buffering=buffering,
            encoding=encoding,
            errors=errors,
            newline=newline,
            closefd=closefd
        )
    else:
        fp = open(
            file,
            mode=mode,
            buffering=buffering,
            encoding=encoding,
            errors=errors,
            newline=newline,
            closefd=closefd,
            opener=opener
        )
    return fp
Example #36
def read(path: Path) -> OrderedDict:
    """Get contents of JSON at `path`

    Arguments:
        path {Path} -- path to file

    Returns:
        OrderedDict -- JSON data
    """
    with io_open(str(path), 'r', encoding='utf-8') as stream:
        data = json_load(stream, object_pairs_hook=OrderedDict)

    return data
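
A round-trip usage sketch (the file name is illustrative; read, io_open, and json_load are as defined and imported above):

from collections import OrderedDict
from io import open as io_open
from json import dump as json_dump
from pathlib import Path

path = Path('settings.json')  # illustrative path
with io_open(str(path), 'w', encoding='utf-8') as stream:
    json_dump({'answer': 42}, stream)
assert read(path) == OrderedDict([('answer', 42)])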
Example #37
 def getCompiledCode(self, relativeFilePath):
     filename = path.basename(relativeFilePath)
     name = path.splitext(filename)[0]
     if name in ContractsFixture.compiledCode:
         return ContractsFixture.compiledCode[name]
     dependencySet = set()
     self.getAllDependencies(relativeFilePath, dependencySet)
     ContractsFixture.ensureCacheDirectoryExists()
     compiledOutputPath = path.join(COMPILATION_CACHE, name)
     lastCompilationTime = path.getmtime(compiledOutputPath) if path.isfile(compiledOutputPath) else 0
     needsRecompile = False
     for dependencyPath in dependencySet:
         if (path.getmtime(dependencyPath) > lastCompilationTime):
             needsRecompile = True
             break
     if (needsRecompile):
         print('compiling ' + name + '...')
         extension = path.splitext(filename)[1]
         compiledCode = None
         if extension == '.sol':
             compiledCode = bytearray.fromhex(self.compileSolidity(relativeFilePath)['evm']['bytecode']['object'])
         else:
             raise ValueError('Unsupported contract extension: ' + extension)
         with io_open(compiledOutputPath, mode='wb') as file:
             file.write(compiledCode)
     else:
         pass#print('using cached compilation for ' + name)
     with io_open(compiledOutputPath, mode='rb') as file:
         compiledCode = file.read()
         contractSize = len(compiledCode)
         if (contractSize >= CONTRACT_SIZE_LIMIT):
             print('%sContract %s is OVER the size limit by %d bytes%s' % (bcolors.FAIL, name, contractSize - CONTRACT_SIZE_LIMIT, bcolors.ENDC))
         elif (contractSize >= CONTRACT_SIZE_WARN_LEVEL):
             print('%sContract %s is under size limit by only %d bytes%s' % (bcolors.WARN, name, CONTRACT_SIZE_LIMIT - contractSize, bcolors.ENDC))
         elif (contractSize > 0):
             pass#print('Size: %i' % contractSize)
         ContractsFixture.compiledCode[name] = compiledCode
         return(compiledCode)
Example #38
    def run(self, edit):
        file_name = self.view.file_name()
        if not file_name or not str(file_name).endswith("js"):
            return

        selection = self.view.sel()
        for region in selection:
            if not region.empty():
                s = self.view.substr(region)
                js_tmpfile = mkstemp(suffix='.js', text=True)
                with io_open(js_tmpfile[1], 'w+') as tmpf:
                    tmpf.write(s)
                try:
                    fn_execute([get_command(), "-g", "-t", js_tmpfile[1]])
                except Exception:
                    remove(js_tmpfile[1])
                    raise  # don't continue to read back a file we just removed
                with io_open(js_tmpfile[1], 'r') as tmpf:
                    file_size = tmpf.seek(0, 2)
                    tmpf.seek(0, 0)
                    data = tmpf.read(file_size)
                    if data != '':
                        self.view.replace(edit, region, data)
                remove(js_tmpfile[1])
Example #39
    def getResult(jobId):
        printDebugMessage(u"getResult", u"Begin", 1)
        printDebugMessage(u"getResult", u"jobId: " + jobId, 1)
        # Check status and wait if necessary
        clientPoll(jobId)
        # Get available result types
        resultTypes = serviceGetResultTypes(jobId)

        # function modified by mana to allow more than one output file to be
        # written when 'outformat' is defined; the original script allowed only
        # one file to be written.
        for resultType in resultTypes:
            # Derive the filename for the result
            if options.outfile:
                filename = (
                    options.outfile
                    + u"."
                    + unicode(resultType[u"identifier"])
                    + u"."
                    + unicode(resultType[u"fileSuffix"])
                )
            else:
                filename = jobId + u"." + unicode(resultType[u"identifier"]) + u"." + unicode(resultType[u"fileSuffix"])
                # Write a result file

            outformat_parm = str(options.outformat).split(",")
            for outformat_type in outformat_parm:
                outformat_type = outformat_type.replace(" ", "")

                if outformat_type == "None":
                    outformat_type = None

                if not outformat_type or outformat_type == unicode(resultType[u"identifier"]):
                    # Get the result
                    result = serviceGetResult(jobId, unicode(resultType[u"identifier"]))
                    if (
                        unicode(resultType[u"mediaType"]) == u"image/png"
                        or unicode(resultType[u"mediaType"]) == u"image/jpeg"
                        or unicode(resultType[u"mediaType"]) == u"application/gzip"
                    ):
                        fmode = u"wb"
                    else:
                        fmode = u"w"

                    fh = io_open(filename, fmode)

                    fh.write(result)
                    fh.close()
                    print(filename)
        printDebugMessage(u"getResult", u"End", 1)
Example #40
    def write(self, session, directory, name, replaceParamFile=None, **kwargs):
        """
        Write from database back to file.

        Args:
            session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.
            directory (str): Directory where the file will be written.
            name (str): The name of the file that will be created (including the file extension is optional).
            replaceParamFile (:class:`gsshapy.orm.ReplaceParamFile`, optional): ReplaceParamFile instance. Use this if
                the file you are writing contains replacement parameters.
        """

        # Assemble Path to file
        name_split = name.split('.')
        name = name_split[0]

        # Default extension
        extension = ''

        if len(name_split) >= 2:
            extension = name_split[-1]

        # Run name preprocessor method if present
        try:
            name = self._namePreprocessor(name)
        except:
            pass  # DO NOTHING

        if extension == '':
            filename = '{0}.{1}'.format(name, self.fileExtension)
        else:
            filename = '{0}.{1}'.format(name, extension)

        filePath = os.path.join(directory, filename)

        with io_open(filePath, 'w') as openFile:
            # Write Lines
            self._write(session=session,
                        openFile=openFile,
                        replaceParamFile=replaceParamFile,
                        **kwargs)
Example #41
 def __init__(self, temp_filename, final_filename):
     if os_path.isfile(final_filename):
         raise Exception('Destination file already exists')
     self._temp_filename = temp_filename
     self._final_filename = final_filename
     self._fd = io_open(self._temp_filename, 'wb')
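
The class above opens the temp-then-rename idiom; completing it typically means renaming the temp file into place once the write has succeeded. A minimal sketch of the whole pattern (file names are illustrative):

from io import open as io_open
from os import rename
from os import path as os_path

temp_filename, final_filename = 'out.tmp', 'out.bin'  # illustrative names
assert not os_path.isfile(final_filename)
with io_open(temp_filename, 'wb') as fd:
    fd.write(b'payload')
rename(temp_filename, final_filename)  # atomic on POSIX within one filesystem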
Example #42
    def _load(self):
        try:
            mtime = getmtime(self.cache_path_json)
        except (IOError, OSError):
            log.debug("No local cache found for %s at %s", self.url_w_subdir, self.cache_path_json)
            if context.use_index_cache or (context.offline
                                           and not self.url_w_subdir.startswith('file://')):
                log.debug("Using cached data for %s at %s forced. Returning empty repodata.",
                          self.url_w_subdir, self.cache_path_json)
                return {
                    '_package_records': (),
                    '_names_index': defaultdict(list),
                    '_track_features_index': defaultdict(list),
                }
            else:
                mod_etag_headers = {}
        else:
            mod_etag_headers = read_mod_and_etag(self.cache_path_json)

            if context.use_index_cache:
                log.debug("Using cached repodata for %s at %s because use_cache=True",
                          self.url_w_subdir, self.cache_path_json)

                _internal_state = self._read_local_repdata(mod_etag_headers.get('_etag'),
                                                           mod_etag_headers.get('_mod'))
                return _internal_state

            if context.local_repodata_ttl > 1:
                max_age = context.local_repodata_ttl
            elif context.local_repodata_ttl == 1:
                max_age = get_cache_control_max_age(mod_etag_headers.get('_cache_control', ''))
            else:
                max_age = 0

            timeout = mtime + max_age - time()
            if (timeout > 0 or context.offline) and not self.url_w_subdir.startswith('file://'):
                log.debug("Using cached repodata for %s at %s. Timeout in %d sec",
                          self.url_w_subdir, self.cache_path_json, timeout)
                _internal_state = self._read_local_repdata(mod_etag_headers.get('_etag'),
                                                           mod_etag_headers.get('_mod'))
                return _internal_state

            log.debug("Local cache timed out for %s at %s",
                      self.url_w_subdir, self.cache_path_json)

        try:
            raw_repodata_str = fetch_repodata_remote_request(self.url_w_credentials,
                                                             mod_etag_headers.get('_etag'),
                                                             mod_etag_headers.get('_mod'))
        except Response304ContentUnchanged:
            log.debug("304 NOT MODIFIED for '%s'. Updating mtime and loading from disk",
                      self.url_w_subdir)
            touch(self.cache_path_json)
            _internal_state = self._read_local_repdata(mod_etag_headers.get('_etag'),
                                                       mod_etag_headers.get('_mod'))
            return _internal_state
        else:
            if not isdir(dirname(self.cache_path_json)):
                mkdir_p(dirname(self.cache_path_json))
            try:
                with io_open(self.cache_path_json, 'w') as fh:
                    fh.write(raw_repodata_str or '{}')
            except (IOError, OSError) as e:
                if e.errno in (EACCES, EPERM, EROFS):
                    raise NotWritableError(self.cache_path_json, e.errno, caused_by=e)
                else:
                    raise
            _internal_state = self._process_raw_repodata_str(raw_repodata_str)
            self._internal_state = _internal_state
            self._pickle_me()
            return _internal_state
Example #43
def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, force=False, language=None,
              exclude_failures=False, **options):
    """
    Compile a set of source modules into C/C++ files and return a list of distutils
    Extension objects for them.

    :param module_list: As module list, pass either a glob pattern, a list of glob
                        patterns or a list of Extension objects.  The latter
                        allows you to configure the extensions separately
                        through the normal distutils options.
                        You can also pass Extension objects that have
                        glob patterns as their sources. Then, cythonize
                        will resolve the pattern and create a
                        copy of the Extension for every matching file.

    :param exclude: When passing glob patterns as ``module_list``, you can exclude certain
                    module names explicitly by passing them into the ``exclude`` option.

    :param nthreads: The number of concurrent builds for parallel compilation
                     (requires the ``multiprocessing`` module).

    :param aliases: If you want to use compiler directives like ``# distutils: ...`` but
                    can only know at compile time (when running the ``setup.py``) which values
                    to use, you can use aliases and pass a dictionary mapping those aliases
                    to Python strings when calling :func:`cythonize`. As an example, say you
                    want to use the compiler
                    directive ``# distutils: include_dirs = ../static_libs/include/``
                    but this path isn't always fixed and you want to find it when running
                    the ``setup.py``. You can then do ``# distutils: include_dirs = MY_HEADERS``,
                    find the value of ``MY_HEADERS`` in the ``setup.py``, put it in a python
                    variable called ``foo`` as a string, and then call
                    ``cythonize(..., aliases={'MY_HEADERS': foo})``.

    :param quiet: If True, Cython won't print error and warning messages during the compilation.

    :param force: Forces the recompilation of the Cython modules, even if the timestamps
                  don't indicate that a recompilation is necessary.

    :param language: To globally enable C++ mode, you can pass ``language='c++'``. Otherwise, this
                     will be determined at a per-file level based on compiler directives.  This
                     affects only modules found based on file names.  Extension instances passed
                     into :func:`cythonize` will not be changed. It is recommended to rather
                     use the compiler directive ``# distutils: language = c++`` than this option.

    :param exclude_failures: For a broad 'try to compile' mode that ignores compilation
                             failures and simply excludes the failed extensions,
                             pass ``exclude_failures=True``. Note that this only
                             really makes sense for compiling ``.py`` files which can also
                             be used without compilation.

    :param annotate: If ``True``, will produce an HTML file for each of the ``.pyx`` or ``.py``
                     files compiled. The HTML file gives an indication
                     of how much Python interaction there is in
                     each of the source code lines, compared to plain C code.
                     It also allows you to see the C/C++ code
                     generated for each line of Cython code. This report is invaluable when
                     optimizing a function for speed,
                     and for determining when to :ref:`release the GIL <nogil>`:
                     in general, a ``nogil`` block may contain only "white" code.
                     See examples in :ref:`determining_where_to_add_types` or
                     :ref:`primes`.

    :param compiler_directives: Allows you to set compiler directives in the ``setup.py`` like this:
                                ``compiler_directives={'embedsignature': True}``.
                                See :ref:`compiler-directives`.
    """
    if exclude is None:
        exclude = []
    if 'include_path' not in options:
        options['include_path'] = ['.']
    if 'common_utility_include_dir' in options:
        safe_makedirs(options['common_utility_include_dir'])

    pythran_options = None
    if pythran_version:
        pythran_options = CompilationOptions(**options)
        pythran_options.cplus = True
        pythran_options.np_pythran = True

    c_options = CompilationOptions(**options)
    cpp_options = CompilationOptions(**options); cpp_options.cplus = True
    ctx = c_options.create_context()
    options = c_options
    module_list, module_metadata = create_extension_list(
        module_list,
        exclude=exclude,
        ctx=ctx,
        quiet=quiet,
        exclude_failures=exclude_failures,
        language=language,
        aliases=aliases)
    deps = create_dependency_tree(ctx, quiet=quiet)
    build_dir = getattr(options, 'build_dir', None)

    def copy_to_build_dir(filepath, root=os.getcwd()):
        filepath_abs = os.path.abspath(filepath)
        if os.path.isabs(filepath):
            filepath = filepath_abs
        if filepath_abs.startswith(root):
            # distutils extension depends are relative to cwd
            mod_dir = join_path(build_dir,
                                os.path.dirname(_relpath(filepath, root)))
            copy_once_if_newer(filepath_abs, mod_dir)

    modules_by_cfile = collections.defaultdict(list)
    to_compile = []
    for m in module_list:
        if build_dir:
            for dep in m.depends:
                copy_to_build_dir(dep)

        cy_sources = [
            source for source in m.sources
            if os.path.splitext(source)[1] in ('.pyx', '.py')]
        if len(cy_sources) == 1:
            # normal "special" case: believe the Extension module name to allow user overrides
            full_module_name = m.name
        else:
            # infer FQMN from source files
            full_module_name = None

        new_sources = []
        for source in m.sources:
            base, ext = os.path.splitext(source)
            if ext in ('.pyx', '.py'):
                if m.np_pythran:
                    c_file = base + '.cpp'
                    options = pythran_options
                elif m.language == 'c++':
                    c_file = base + '.cpp'
                    options = cpp_options
                else:
                    c_file = base + '.c'
                    options = c_options

                # setup for out of place build directory if enabled
                if build_dir:
                    if os.path.isabs(c_file):
                        warnings.warn("build_dir has no effect for absolute source paths")
                    c_file = os.path.join(build_dir, c_file)
                    dir = os.path.dirname(c_file)
                    safe_makedirs_once(dir)

                if os.path.exists(c_file):
                    c_timestamp = os.path.getmtime(c_file)
                else:
                    c_timestamp = -1

                # Priority goes first to modified files, second to direct
                # dependents, and finally to indirect dependents.
                if c_timestamp < deps.timestamp(source):
                    dep_timestamp, dep = deps.timestamp(source), source
                    priority = 0
                else:
                    dep_timestamp, dep = deps.newest_dependency(source)
                    priority = 2 - (dep in deps.immediate_dependencies(source))
                if force or c_timestamp < dep_timestamp:
                    if not quiet and not force:
                        if source == dep:
                            print("Compiling %s because it changed." % source)
                        else:
                            print("Compiling %s because it depends on %s." % (source, dep))
                    if not force and options.cache:
                        fingerprint = deps.transitive_fingerprint(source, m, options)
                    else:
                        fingerprint = None
                    to_compile.append((
                        priority, source, c_file, fingerprint, quiet,
                        options, not exclude_failures, module_metadata.get(m.name),
                        full_module_name))
                new_sources.append(c_file)
                modules_by_cfile[c_file].append(m)
            else:
                new_sources.append(source)
                if build_dir:
                    copy_to_build_dir(source)
        m.sources = new_sources

    if options.cache:
        if not os.path.exists(options.cache):
            os.makedirs(options.cache)
    to_compile.sort()
    # Drop "priority" component of "to_compile" entries and add a
    # simple progress indicator.
    N = len(to_compile)
    progress_fmt = "[{0:%d}/{1}] " % len(str(N))
    for i in range(N):
        progress = progress_fmt.format(i+1, N)
        to_compile[i] = to_compile[i][1:] + (progress,)

    if N <= 1:
        nthreads = 0
    if nthreads:
        import multiprocessing
        pool = multiprocessing.Pool(
            nthreads, initializer=_init_multiprocessing_helper)
        # This is a bit more involved than it should be, because KeyboardInterrupts
        # break the multiprocessing workers when using a normal pool.map().
        # See, for example:
        # http://noswap.com/blog/python-multiprocessing-keyboardinterrupt
        try:
            result = pool.map_async(cythonize_one_helper, to_compile, chunksize=1)
            pool.close()
            while not result.ready():
                try:
                    result.get(99999)  # seconds
                except multiprocessing.TimeoutError:
                    pass
        except KeyboardInterrupt:
            pool.terminate()
            raise
        pool.join()
    if not nthreads:
        for args in to_compile:
            cythonize_one(*args)

    if exclude_failures:
        failed_modules = set()
        for c_file, modules in modules_by_cfile.items():
            if not os.path.exists(c_file):
                failed_modules.update(modules)
            elif os.path.getsize(c_file) < 200:
                f = io_open(c_file, 'r', encoding='iso8859-1')
                try:
                    if f.read(len('#error ')) == '#error ':
                        # dead compilation result
                        failed_modules.update(modules)
                finally:
                    f.close()
        if failed_modules:
            for module in failed_modules:
                module_list.remove(module)
            print("Failed compilations: %s" % ', '.join(sorted([
                module.name for module in failed_modules])))

    if options.cache:
        cleanup_cache(options.cache, getattr(options, 'cache_size', 1024 * 1024 * 100))
    # cythonize() is often followed by the (non-Python-buffered)
    # compiler output, flush now to avoid interleaving output.
    sys.stdout.flush()
    return module_list
Example #44
def cythonize(module_list, exclude=[], nthreads=0, aliases=None, quiet=False, force=False,
              exclude_failures=False, **options):
    """
    Compile a set of source modules into C/C++ files and return a list of distutils
    Extension objects for them.

    As module list, pass either a glob pattern, a list of glob patterns or a list of
    Extension objects.  The latter allows you to configure the extensions separately
    through the normal distutils options.

    When using glob patterns, you can exclude certain module names explicitly
    by passing them into the 'exclude' option.

    For parallel compilation, set the 'nthreads' option to the number of
    concurrent builds.

    For a broad 'try to compile' mode that ignores compilation failures and
    simply excludes the failed extensions, pass 'exclude_failures=True'. Note
    that this only really makes sense for compiling .py files which can also
    be used without compilation.

    Additional compilation options can be passed as keyword arguments.
    """
    if 'include_path' not in options:
        options['include_path'] = ['.']
    if 'common_utility_include_dir' in options:
        if options.get('cache'):
            raise NotImplementedError("common_utility_include_dir does not yet work with caching")
        if not os.path.exists(options['common_utility_include_dir']):
            os.makedirs(options['common_utility_include_dir'])
    c_options = CompilationOptions(**options)
    cpp_options = CompilationOptions(**options); cpp_options.cplus = True
    ctx = c_options.create_context()
    options = c_options
    module_list = create_extension_list(
        module_list,
        exclude=exclude,
        ctx=ctx,
        quiet=quiet,
        exclude_failures=exclude_failures,
        aliases=aliases)
    deps = create_dependency_tree(ctx, quiet=quiet)
    build_dir = getattr(options, 'build_dir', None)

    modules_by_cfile = {}
    to_compile = []
    for m in module_list:
        if build_dir:
            root = os.path.realpath(os.path.abspath(find_root_package_dir(m.sources[0])))
            def copy_to_build_dir(filepath, root=root):
                filepath_abs = os.path.realpath(os.path.abspath(filepath))
                if os.path.isabs(filepath):
                    filepath = filepath_abs
                if filepath_abs.startswith(root):
                    mod_dir = os.path.join(build_dir,
                            os.path.dirname(_relpath(filepath, root)))
                    if not os.path.isdir(mod_dir):
                        os.makedirs(mod_dir)
                    shutil.copy(filepath, mod_dir)
            for dep in m.depends:
                copy_to_build_dir(dep)

        new_sources = []
        for source in m.sources:
            base, ext = os.path.splitext(source)
            if ext in ('.pyx', '.py'):
                if m.language == 'c++':
                    c_file = base + '.cpp'
                    options = cpp_options
                else:
                    c_file = base + '.c'
                    options = c_options

                # setup for out of place build directory if enabled
                if build_dir:
                    c_file = os.path.join(build_dir, c_file)
                    dir = os.path.dirname(c_file)
                    if not os.path.isdir(dir):
                        os.makedirs(dir)

                if os.path.exists(c_file):
                    c_timestamp = os.path.getmtime(c_file)
                else:
                    c_timestamp = -1

                # Priority goes first to modified files, second to direct
                # dependents, and finally to indirect dependents.
                if c_timestamp < deps.timestamp(source):
                    dep_timestamp, dep = deps.timestamp(source), source
                    priority = 0
                else:
                    dep_timestamp, dep = deps.newest_dependency(source)
                    priority = 2 - (dep in deps.immediate_dependencies(source))
                if force or c_timestamp < dep_timestamp:
                    if not quiet:
                        if source == dep:
                            print("Compiling %s because it changed." % source)
                        else:
                            print("Compiling %s because it depends on %s." % (source, dep))
                    if not force and hasattr(options, 'cache'):
                        extra = m.language
                        fingerprint = deps.transitive_fingerprint(source, extra)
                    else:
                        fingerprint = None
                    to_compile.append((priority, source, c_file, fingerprint, quiet,
                                       options, not exclude_failures))
                new_sources.append(c_file)
                if c_file not in modules_by_cfile:
                    modules_by_cfile[c_file] = [m]
                else:
                    modules_by_cfile[c_file].append(m)
            else:
                new_sources.append(source)
                if build_dir:
                    copy_to_build_dir(source)
        m.sources = new_sources

    if hasattr(options, 'cache'):
        if not os.path.exists(options.cache):
            os.makedirs(options.cache)
    to_compile.sort()
    if nthreads:
        # Requires multiprocessing (or Python >= 2.6)
        try:
            import multiprocessing
            pool = multiprocessing.Pool(nthreads)
        except (ImportError, OSError):
            print("multiprocessing required for parallel cythonization")
            nthreads = 0
        else:
            try:
                pool.map(cythonize_one_helper, to_compile)
            finally:
                pool.close()
    if not nthreads:
        for args in to_compile:
            cythonize_one(*args[1:])

    if exclude_failures:
        failed_modules = set()
        for c_file, modules in modules_by_cfile.iteritems():
            if not os.path.exists(c_file):
                failed_modules.update(modules)
            elif os.path.getsize(c_file) < 200:
                f = io_open(c_file, 'r', encoding='iso8859-1')
                try:
                    if f.read(len('#error ')) == '#error ':
                        # dead compilation result
                        failed_modules.update(modules)
                finally:
                    f.close()
        if failed_modules:
            for module in failed_modules:
                module_list.remove(module)
            print("Failed compilations: %s" % ', '.join(sorted([
                module.name for module in failed_modules])))

    if hasattr(options, 'cache'):
        cleanup_cache(options.cache, getattr(options, 'cache_size', 1024 * 1024 * 100))
    # cythonize() is often followed by the (non-Python-buffered)
    # compiler output, flush now to avoid interleaving output.
    sys.stdout.flush()
    return module_list
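
The rebuild test above boils down to an mtime comparison between the generated C file and the newest of the source and its dependencies. A minimal standalone sketch of that decision (the helper and file names are illustrative, not Cython's API):

import os

def needs_rebuild(source, c_file, dep_files, force=False):
    # A missing C file gets timestamp -1, which predates any real mtime.
    c_timestamp = os.path.getmtime(c_file) if os.path.exists(c_file) else -1
    # Newest mtime among the source itself and everything it depends on
    # (assumes every path in dep_files exists).
    dep_timestamp = max(os.path.getmtime(f) for f in [source] + list(dep_files))
    return force or c_timestamp < dep_timestamp

# e.g. needs_rebuild('mod.pyx', 'mod.c', ['mod.pxd'])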
Example #45
0
File: cmt.py Project: CI-WATER/gsshapy
    def _read(self, directory, filename, session, path, name, extension,
              spatial=False, spatialReferenceID=4236, replaceParamFile=None,
              readIndexMaps=True):
        """
        Mapping Table Read from File Method
        """
        # Set file extension property
        self.fileExtension = extension

        # Dictionary of keywords/cards and parse function names
        KEYWORDS = {'INDEX_MAP': mtc.indexMapChunk,
                    'ROUGHNESS': mtc.mapTableChunk,
                    'INTERCEPTION': mtc.mapTableChunk,
                    'RETENTION': mtc.mapTableChunk,
                    'GREEN_AMPT_INFILTRATION': mtc.mapTableChunk,
                    'GREEN_AMPT_INITIAL_SOIL_MOISTURE': mtc.mapTableChunk,
                    'RICHARDS_EQN_INFILTRATION_BROOKS': mtc.mapTableChunk,
                    'RICHARDS_EQN_INFILTRATION_HAVERCAMP': mtc.mapTableChunk,
                    'EVAPOTRANSPIRATION': mtc.mapTableChunk,
                    'WELL_TABLE': mtc.mapTableChunk,
                    'OVERLAND_BOUNDARY': mtc.mapTableChunk,
                    'TIME_SERIES_INDEX': mtc.mapTableChunk,
                    'GROUNDWATER': mtc.mapTableChunk,
                    'GROUNDWATER_BOUNDARY': mtc.mapTableChunk,
                    'AREA_REDUCTION': mtc.mapTableChunk,
                    'WETLAND_PROPERTIES': mtc.mapTableChunk,
                    'MULTI_LAYER_SOIL': mtc.mapTableChunk,
                    'SOIL_EROSION_PROPS': mtc.mapTableChunk,
                    'CONTAMINANT_TRANSPORT': mtc.contamChunk,
                    'SEDIMENTS': mtc.sedimentChunk}

        indexMaps = dict()
        mapTables = []

        # Parse file into chunks associated with keywords/cards
        with io_open(path, 'r') as f:
            chunks = pt.chunk(KEYWORDS, f)

        # Parse chunks associated with each key
        for key, chunkList in iteritems(chunks):
            # Parse each chunk in the chunk list
            for chunk in chunkList:
                # Call chunk specific parsers for each chunk
                result = KEYWORDS[key](key, chunk)

                # Index Map handler
                if key == 'INDEX_MAP':

                    # Create GSSHAPY IndexMap object from result object
                    indexMap = IndexMap(name=result['idxName'])

                    # Dictionary used to map index maps to mapping tables
                    indexMaps[result['idxName']] = indexMap

                    # Associate IndexMap with MapTableFile
                    indexMap.mapTableFile = self

                    if readIndexMaps:
                        # Invoke IndexMap read method
                        indexMap.read(directory=directory, filename=result['filename'], session=session,
                                      spatial=spatial, spatialReferenceID=spatialReferenceID)
                    else:
                        # add path to file
                        indexMap.filename = result['filename']

                # Map Table handler
                else:
                    # Create a list of all the map tables in the file
                    if result:
                        mapTables.append(result)

        # Create GSSHAPY ORM objects with the resulting objects that are
        # returned from the parser functions
        self._createGsshaPyObjects(mapTables, indexMaps, replaceParamFile, directory, session, spatial, spatialReferenceID)
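
The read method above relies on pt.chunk to split the file into card-keyed chunks before dispatching to the per-card parsers in KEYWORDS. A rough sketch of what such a chunker might do (an illustrative stand-in, not gsshapy's actual pt.chunk):

def chunk(keywords, lines):
    """Group lines into chunks, starting a new chunk at each known card."""
    chunks = {}
    current = None
    for line in lines:
        token = line.split(None, 1)[0] if line.strip() else None
        if token in keywords:
            # A recognized card opens a new chunk under that keyword.
            current = token
            chunks.setdefault(current, []).append([line])
        elif current is not None:
            # Other lines belong to the most recently opened chunk.
            chunks[current][-1].append(line)
    return chunks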
Example #46
0
File: setup.py Project: gaulinmp/tqdm
        # (incl quoted strings and comments)
        parsed_cmd = shlex.split(cmd, comments=True)
        # Execute command if not empty (ie, not just a comment)
        if parsed_cmd:
            if verbose:
                print("Running command: " + cmd)
            # Launch the command and wait to finish (synchronized call)
            check_call(parsed_cmd)


# Main setup.py config #

# Get version from tqdm/_version.py
__version__ = None
version_file = os.path.join(os.path.dirname(__file__), "tqdm", "_version.py")
with io_open(version_file, mode="r") as fd:
    exec(fd.read())

# Executing makefile commands if specified
if sys.argv[1].lower().strip() == "make":
    # Filename of the makefile
    fpath = "Makefile"
    # Parse the makefile, substitute the aliases and extract the commands
    commands = parse_makefile_aliases(fpath)

    # If no alias (only `python setup.py make`), print the list of aliases
    if len(sys.argv) < 3 or sys.argv[-1] == "--help":
        print("Shortcut to use commands via aliases. List of aliases:")
        print("\n".join(alias for alias in sorted(commands.keys())))

    # Else process the commands for this alias
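
Example #46 above (and #47 below) read __version__ by exec-ing _version.py. When the file assigns a plain string literal, a regex read avoids executing code at all; a sketch with a hypothetical read_version helper (note that tqdm's own _version.py computes the string from version_info, which is why it uses exec):

import re
from io import open as io_open

def read_version(path):
    # Look for a line of the form: __version__ = '1.2.3'
    with io_open(path, mode='r', encoding='utf-8') as fd:
        match = re.search(r"^__version__\s*=\s*['\"]([^'\"]+)['\"]",
                          fd.read(), flags=re.M)
    if match is None:
        raise RuntimeError('Unable to find __version__ in %s' % path)
    return match.group(1)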
Example #47
0
File: setup.py Project: casperdcl/argopt
# For Makefile parsing
import shlex
try:  # pragma: no cover
    import ConfigParser
    import StringIO
except ImportError:  # pragma: no cover
    # Python 3 compatibility
    import configparser as ConfigParser
    import io as StringIO
import re

__author__ = None
__licence__ = None
__version__ = None
main_file = os.path.join(os.path.dirname(__file__), 'argopt', '_argopt.py')
for l in io_open(main_file, mode='r'):
    if any(l.startswith(i) for i in ('__author__', '__licence__')):
        exec(l)
version_file = os.path.join(os.path.dirname(__file__), 'argopt', '_version.py')
with io_open(version_file, mode='r') as fd:
    exec(fd.read())

# Makefile auxiliary functions #

RE_MAKE_CMD = re.compile(r'^\t(@\+?)(make)?', flags=re.M)


def parse_makefile_aliases(filepath):
    """
    Parse a makefile to find commands and substitute variables. Expects a
    makefile with only aliases and a line return between each command.
Example #48
0
File: _version.py Project: ssebs/tqdm
__all__ = ["__version__"]

# major, minor, patch, -extra
version_info = 4, 26, 0

# Nice string for the version
__version__ = '.'.join(map(str, version_info))


# auto -extra based on commit hash (if not tagged as release)
scriptdir = os.path.dirname(__file__)
gitdir = os.path.abspath(os.path.join(scriptdir, "..", ".git"))
if os.path.isdir(gitdir):  # pragma: nocover
    extra = None
    # Open config file to check if we are in tqdm project
    with io_open(os.path.join(gitdir, "config"), 'r') as fh_config:
        if 'tqdm' in fh_config.read():
            # Open the HEAD file
            with io_open(os.path.join(gitdir, "HEAD"), 'r') as fh_head:
                extra = fh_head.readline().strip()
            # in a branch => HEAD points to file containing last commit
            if 'ref:' in extra:
                # reference file path
                ref_file = extra[5:]
                branch_name = ref_file.rsplit('/', 1)[-1]

                ref_file_path = os.path.abspath(os.path.join(gitdir, ref_file))
                # check that we are in git folder
                # (by stripping the git folder from the ref file path)
                if os.path.relpath(
                        ref_file_path, gitdir).replace('\\', '/') != ref_file:
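
The idea behind this check: .git/HEAD either holds a commit hash directly (detached HEAD) or a 'ref: refs/heads/<branch>' pointer to a file containing the hash. A condensed sketch of resolving the current commit along those lines (illustrative; it ignores packed-refs and assumes the standard .git layout):

import os
from io import open as io_open

def current_commit(gitdir):
    with io_open(os.path.join(gitdir, 'HEAD'), mode='r') as fh_head:
        head = fh_head.readline().strip()
    if not head.startswith('ref:'):
        return head  # detached HEAD: the line is the commit hash itself
    ref_file = head[5:]  # e.g. 'refs/heads/master'
    with io_open(os.path.join(gitdir, ref_file), mode='r') as fh_ref:
        return fh_ref.readline().strip()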
Example #49
0
    def history(self, parameter_s = ''):
        """Print input history (_i<n> variables), with most recent last.

        %history [-o -p -t -n] [-f filename] [range | -g pattern | -l number]

        By default, input history is printed without line numbers so it can be
        directly pasted into an editor. Use -n to show them.

        By default, all input history from the current session is displayed.
        Ranges of history can be indicated using the syntax:
        4      : Line 4, current session
        4-6    : Lines 4-6, current session
        243/1-5: Lines 1-5, session 243
        ~2/7   : Line 7, session 2 before current
        ~8/1-~6/5 : From the first line of 8 sessions ago, to the fifth line
                    of 6 sessions ago.
        Multiple ranges can be entered, separated by spaces

        The same syntax is used by %macro, %save, %edit, %rerun

        Options:

          -n: print line numbers for each input.
          This feature is only available if numbered prompts are in use.

          -o: also print outputs for each input.

          -p: print classic '>>>' python prompts before each input.  This is
           useful for making documentation, and in conjunction with -o, for
           producing doctest-ready output.

          -r: (default) print the 'raw' history, i.e. the actual commands you
           typed.

          -t: print the 'translated' history, as IPython understands it.
          IPython filters your input and converts it all into valid Python
          source before executing it (things like magics or aliases are turned
          into function calls, for example). With this option, you'll see the
          native history instead of the user-entered version: '%cd /' will be
          seen as 'get_ipython().magic("%cd /")' instead of '%cd /'.

          -g: treat the arg as a pattern to grep for in (full) history.
          This includes the saved history (almost all commands ever written).
          The pattern may contain '?' to match one unknown character and '*'
          to match any number of unknown characters. Use '%hist -g' to show
          full saved history (may be very long).

          -l: get the last n lines from all sessions. Specify n as a single
          arg, or the default is the last 10 lines.

          -f FILENAME: instead of printing the output to the screen, redirect
           it to the given file.  The file is always overwritten, though *when
           it can*, IPython asks for confirmation first. In particular, running
           the command 'history -f FILENAME' from the IPython Notebook
           interface will replace FILENAME even if it already exists *without*
           confirmation.

        Examples
        --------
        ::

          In [6]: %history -n 4-6
          4:a = 12
          5:print a**2
          6:%history -n 4-6

        """

        if not self.shell.displayhook.do_full_cache:
            print('This feature is only available if numbered prompts '
                  'are in use.')
            return
        opts,args = self.parse_options(parameter_s,'noprtglf:',mode='string')

        # For brevity
        history_manager = self.shell.history_manager

        def _format_lineno(session, line):
            """Helper function to format line numbers properly."""
            if session in (0, history_manager.session_number):
                return str(line)
            return "%s/%s" % (session, line)

        # Check if output to specific file was requested.
        try:
            outfname = opts['f']
        except KeyError:
            outfile = io.stdout  # default
            # We don't want to close stdout at the end!
            close_at_end = False
        else:
            if os.path.exists(outfname):
                try:
                    ans = io.ask_yes_no("File %r exists. Overwrite?" % outfname)
                except StdinNotImplementedError:
                    ans = True
                if not ans:
                    print('Aborting.')
                    return
                print("Overwriting file.")
            outfile = io_open(outfname, 'w', encoding='utf-8')
            close_at_end = True

        print_nums = 'n' in opts
        get_output = 'o' in opts
        pyprompts = 'p' in opts
        # Raw history is the default
        raw = 't' not in opts

        pattern = None

        if 'g' in opts:         # Glob search
            pattern = "*" + args + "*" if args else "*"
            hist = history_manager.search(pattern, raw=raw, output=get_output)
            print_nums = True
        elif 'l' in opts:       # Get 'tail'
            try:
                n = int(args)
            except (ValueError, IndexError):
                n = 10
            hist = history_manager.get_tail(n, raw=raw, output=get_output)
        else:
            if args:            # Get history by ranges
                hist = history_manager.get_range_by_str(args, raw, get_output)
            else:               # Just get history for the current session
                hist = history_manager.get_range(raw=raw, output=get_output)

        # We could be displaying the entire history, so let's not try to pull
        # it into a list in memory. Anything that needs more space will just
        # misalign.
        width = 4

        for session, lineno, inline in hist:
            # Print user history with tabs expanded to 4 spaces.  The GUI
            # clients use hard tabs for easier usability in auto-indented code,
            # but we want to produce PEP-8 compliant history for safe pasting
            # into an editor.
            if get_output:
                inline, output = inline
            inline = inline.expandtabs(4).rstrip()

            multiline = "\n" in inline
            line_sep = '\n' if multiline else ' '
            if print_nums:
                print(u'%s:%s' % (_format_lineno(session, lineno).rjust(width),
                        line_sep),  file=outfile, end=u'')
            if pyprompts:
                print(u">>> ", end=u"", file=outfile)
                if multiline:
                    inline = "\n... ".join(inline.splitlines()) + "\n..."
            print(inline, file=outfile)
            if get_output and output:
                print(output, file=outfile)

        if close_at_end:
            outfile.close()
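
The -f handling above (open a file when a name is given, fall back to stdout, and close only what was opened) is a reusable idiom; a minimal sketch with a context manager so the close_at_end bookkeeping disappears:

import sys
import contextlib
from io import open as io_open

@contextlib.contextmanager
def open_or_stdout(filename=None):
    if filename:
        fh = io_open(filename, 'w', encoding='utf-8')
        try:
            yield fh
        finally:
            fh.close()
    else:
        # Never close the caller's stdout.
        yield sys.stdout

# Usage sketch:
# with open_or_stdout(outfname) as outfile:
#     print(inline, file=outfile)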
Example #50
0
 def write_local_html(self, soup, filename):
     """Simple method for writing a BeautifulSoup's HTML to a local file.  Didn't know where else to put it."""
     with io_open(filename, 'w', encoding='utf8') as htmlfile:
         htmlfile.write(str(soup))  # serialize the soup to text (unicode(soup) on Python 2)
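
A hypothetical call, assuming a parsed BeautifulSoup document and an instance of the surrounding class:

from bs4 import BeautifulSoup

soup = BeautifulSoup('<html><body><p>hello</p></body></html>', 'html.parser')
scraper.write_local_html(soup, 'page.html')  # 'scraper' is a hypothetical instance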
Example #51
0
 def access():
     logging.basicConfig(level=logging.INFO)
     dest = argv[1]
     save = lambda obj: json.dump(obj, io_open(dest, 'w', encoding='utf-8'), indent=2)  # text mode: json.dump writes str
     return mkInputs(argv, io_open, splitext), save
Example #52
0
File: mkdocs.py Project: tqdm/tqdm

def doc2rst(doc, arglist=True):
    """
    arglist  : bool, whether to create argument lists
    """
    doc = dedent(doc).replace('`', '``')
    if arglist:
        doc = '\n'.join([i if not i or i[0] == ' ' else '* ' + i + '  '
                         for i in doc.split('\n')])
    return doc


src_dir = path.abspath(path.dirname(__file__))
README_rst = path.join(src_dir, '.readme.rst')
with io_open(README_rst, mode='r', encoding='utf-8') as fd:
    README_rst = fd.read()
DOC_tqdm = doc2rst(tqdm.tqdm.__doc__, False).replace('\n', '\n      ')
DOC_tqdm_init = doc2rst(tqdm.tqdm.__init__.__doc__)
DOC_tqdm_init_args = DOC_tqdm_init.partition(doc2rst(HEAD_ARGS))[-1]\
    .replace('\n      ', '\n    ')
DOC_tqdm_init_args, _, DOC_tqdm_init_rets = DOC_tqdm_init_args\
    .partition(doc2rst(HEAD_RETS))
DOC_cli = doc2rst(tqdm._main.CLI_EXTRA_DOC).partition(doc2rst(HEAD_CLI))[-1]

# special cases
DOC_tqdm_init_args = DOC_tqdm_init_args.replace(' *,', ' ``*``,')
DOC_tqdm_init_args = DOC_tqdm_init_args.partition('* gui  : bool, optional')[0]

README_rst = README_rst.replace('{DOC_tqdm}', DOC_tqdm)\
    .replace('{DOC_tqdm.tqdm.__init__.Parameters}', DOC_tqdm_init_args)\
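
To see what doc2rst produces, a small illustrative run (assumes doc2rst from the snippet above is in scope; the sample docstring is made up):

sample = """
    n  : int, number of items
    verbose  : bool, optional
"""
print(doc2rst(sample))
# After dedent, each non-indented line becomes a bullet with two trailing
# spaces (reST line breaks):
# * n  : int, number of items
# * verbose  : bool, optional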
Example #53
0
	parser.add_argument('-c', '--config', action="store", dest="config", default="config.ini", help="Configuration file defining transformation")
	parser.add_argument('-d', '--docname', action="store_true", dest="docname", help="Begin output with # newdoc id =...")
	parser.add_argument('-s', '--sent_id', action="store_true", dest="sent_id", help="Add running sentence ID comments")
	parser.add_argument('-q', '--quiet', action="store_true", dest="quiet", help="Do not output warnings and messages")
	group = parser.add_argument_group('Batch mode options')
	group.add_argument('-o', '--outdir', action="store", dest="outdir", default="", help="Output directory in batch mode")
	group.add_argument('-e', '--extension', action="store", dest="extension", default="", help="Extension for output files in batch mode")
	group.add_argument('-i', '--infix', action="store", dest="infix", default=".depedit", help="Infix to denote edited files in batch mode (default: .depedit)")
	parser.add_argument('--version', action='version', version=depedit_version)
	options = parser.parse_args()

	if options.extension.startswith("."):  # Ensure user specified extension does not include leading '.'
		options.extension = options.extension[1:]

	try:
		config_file = io_open(options.config, encoding="utf8")
	except IOError:
		sys.stderr.write("\nConfiguration file not found (specify with -c or use the default 'config.ini')\n")
		sys.exit()
	depedit = DepEdit(config_file=config_file,options=options)

	if sys.platform == "win32":  # Print \n new lines in Windows
		import os, msvcrt
		msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)

	files = glob(options.file)
	for filename in files:
		infile = io_open(filename, encoding="utf8")
		basename = os.path.basename(filename)
		if options.docname or options.sent_id:
			docname = basename[:basename.rfind(".")]
Example #54
0
def cythonize(module_list, exclude=[], nthreads=0, aliases=None, quiet=False, force=False, language=None,
              exclude_failures=False, **options):
    """
    Compile a set of source modules into C/C++ files and return a list of distutils
    Extension objects for them.

    As module list, pass either a glob pattern, a list of glob patterns or a list of
    Extension objects.  The latter allows you to configure the extensions separately
    through the normal distutils options.

    When using glob patterns, you can exclude certain module names explicitly
    by passing them into the 'exclude' option.

    To globally enable C++ mode, you can pass language='c++'.  Otherwise, this
    will be determined at a per-file level based on compiler directives.  This
    affects only modules found based on file names.  Extension instances passed
    into cythonize() will not be changed.

    For parallel compilation, set the 'nthreads' option to the number of
    concurrent builds.

    For a broad 'try to compile' mode that ignores compilation failures and
    simply excludes the failed extensions, pass 'exclude_failures=True'. Note
    that this only really makes sense for compiling .py files which can also
    be used without compilation.

    Additional compilation options can be passed as keyword arguments.
    """
    if 'include_path' not in options:
        options['include_path'] = ['.']
    if 'common_utility_include_dir' in options:
        if options.get('cache'):
            raise NotImplementedError("common_utility_include_dir does not yet work with caching")
        if not os.path.exists(options['common_utility_include_dir']):
            os.makedirs(options['common_utility_include_dir'])
    c_options = CompilationOptions(**options)
    cpp_options = CompilationOptions(**options); cpp_options.cplus = True
    ctx = c_options.create_context()
    options = c_options
    module_list, module_metadata = create_extension_list(
        module_list,
        exclude=exclude,
        ctx=ctx,
        quiet=quiet,
        exclude_failures=exclude_failures,
        language=language,
        aliases=aliases)
    deps = create_dependency_tree(ctx, quiet=quiet)
    build_dir = getattr(options, 'build_dir', None)

    modules_by_cfile = {}
    to_compile = []
    for m in module_list:
        if build_dir:
            root = os.path.realpath(os.path.abspath(find_root_package_dir(m.sources[0])))
            def copy_to_build_dir(filepath, root=root):
                filepath_abs = os.path.realpath(os.path.abspath(filepath))
                if os.path.isabs(filepath):
                    filepath = filepath_abs
                if filepath_abs.startswith(root):
                    mod_dir = os.path.join(build_dir,
                            os.path.dirname(_relpath(filepath, root)))
                    if not os.path.isdir(mod_dir):
                        os.makedirs(mod_dir)
                    shutil.copy(filepath, mod_dir)
            for dep in m.depends:
                copy_to_build_dir(dep)

        new_sources = []
        for source in m.sources:
            base, ext = os.path.splitext(source)
            if ext in ('.pyx', '.py'):
                if m.language == 'c++':
                    c_file = base + '.cpp'
                    options = cpp_options
                else:
                    c_file = base + '.c'
                    options = c_options

                # setup for out of place build directory if enabled
                if build_dir:
                    c_file = os.path.join(build_dir, c_file)
                    dir = os.path.dirname(c_file)
                    if not os.path.isdir(dir):
                        os.makedirs(dir)

                if os.path.exists(c_file):
                    c_timestamp = os.path.getmtime(c_file)
                else:
                    c_timestamp = -1

                # Priority goes first to modified files, second to direct
                # dependents, and finally to indirect dependents.
                if c_timestamp < deps.timestamp(source):
                    dep_timestamp, dep = deps.timestamp(source), source
                    priority = 0
                else:
                    dep_timestamp, dep = deps.newest_dependency(source)
                    priority = 2 - (dep in deps.immediate_dependencies(source))
                if force or c_timestamp < dep_timestamp:
                    if not quiet:
                        if source == dep:
                            print("Compiling %s because it changed." % source)
                        else:
                            print("Compiling %s because it depends on %s." % (source, dep))
                    if not force and hasattr(options, 'cache'):
                        extra = m.language
                        fingerprint = deps.transitive_fingerprint(source, extra)
                    else:
                        fingerprint = None
                    to_compile.append((priority, source, c_file, fingerprint, quiet,
                                       options, not exclude_failures, module_metadata.get(m.name)))
                new_sources.append(c_file)
                if c_file not in modules_by_cfile:
                    modules_by_cfile[c_file] = [m]
                else:
                    modules_by_cfile[c_file].append(m)
            else:
                new_sources.append(source)
                if build_dir:
                    copy_to_build_dir(source)
        m.sources = new_sources

    if hasattr(options, 'cache'):
        if not os.path.exists(options.cache):
            os.makedirs(options.cache)
    to_compile.sort()
    # Drop "priority" component of "to_compile" entries and add a
    # simple progress indicator.
    N = len(to_compile)
    progress_fmt = "[{0:%d}/{1}] " % len(str(N))
    for i in range(N):
        progress = progress_fmt.format(i+1, N)
        to_compile[i] = to_compile[i][1:] + (progress,)

    if N <= 1:
        nthreads = 0
    if nthreads:
        # Requires multiprocessing (or Python >= 2.6)
        try:
            import multiprocessing
            pool = multiprocessing.Pool(
                nthreads, initializer=_init_multiprocessing_helper)
        except (ImportError, OSError):
            print("multiprocessing required for parallel cythonization")
            nthreads = 0
        else:
            # This is a bit more involved than it should be, because KeyboardInterrupts
            # break the multiprocessing workers when using a normal pool.map().
            # See, for example:
            # http://noswap.com/blog/python-multiprocessing-keyboardinterrupt
            try:
                result = pool.map_async(cythonize_one_helper, to_compile, chunksize=1)
                pool.close()
                while not result.ready():
                    try:
                        result.get(99999)  # seconds
                    except multiprocessing.TimeoutError:
                        pass
            except KeyboardInterrupt:
                pool.terminate()
                raise
            pool.join()
    if not nthreads:
        for args in to_compile:
            cythonize_one(*args)

    if exclude_failures:
        failed_modules = set()
        for c_file, modules in modules_by_cfile.items():
            if not os.path.exists(c_file):
                failed_modules.update(modules)
            elif os.path.getsize(c_file) < 200:
                f = io_open(c_file, 'r', encoding='iso8859-1')
                try:
                    if f.read(len('#error ')) == '#error ':
                        # dead compilation result
                        failed_modules.update(modules)
                finally:
                    f.close()
        if failed_modules:
            for module in failed_modules:
                module_list.remove(module)
            print("Failed compilations: %s" % ', '.join(sorted([
                module.name for module in failed_modules])))

    if hasattr(options, 'cache'):
        cleanup_cache(options.cache, getattr(options, 'cache_size', 1024 * 1024 * 100))
    # cythonize() is often followed by the (non-Python-buffered)
    # compiler output, flush now to avoid interleaving output.
    sys.stdout.flush()
    return module_list
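
For context, a sketch of how this function is normally invoked from a setup.py (the package name and globs are placeholders):

from setuptools import setup
from Cython.Build import cythonize

setup(
    name='myproject',  # hypothetical package
    ext_modules=cythonize(
        'myproject/*.pyx',          # glob, list of globs, or Extension objects
        exclude=['myproject/skip_me.pyx'],
        nthreads=4,                 # parallel cythonization
        exclude_failures=False,     # True only really makes sense for .py sources
    ),
)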
Example #55
0
File: history.py Project: 2t7/ipython
    def history(self, parameter_s = ''):
        """Print input history (_i<n> variables), with most recent last.

        By default, input history is printed without line numbers so it can be
        directly pasted into an editor. Use -n to show them.

        By default, all input history from the current session is displayed.
        Ranges of history can be indicated using the syntax:
        
        ``4``
            Line 4, current session
        ``4-6``
            Lines 4-6, current session
        ``243/1-5``
            Lines 1-5, session 243
        ``~2/7``
            Line 7, session 2 before current
        ``~8/1-~6/5``
            From the first line of 8 sessions ago, to the fifth line of 6
            sessions ago.
        
        Multiple ranges can be entered, separated by spaces

        The same syntax is used by %macro, %save, %edit, %rerun

        Examples
        --------
        ::

          In [6]: %history -n 4-6
          4:a = 12
          5:print a**2
          6:%history -n 4-6

        """

        args = parse_argstring(self.history, parameter_s)

        # For brevity
        history_manager = self.shell.history_manager

        def _format_lineno(session, line):
            """Helper function to format line numbers properly."""
            if session in (0, history_manager.session_number):
                return str(line)
            return "%s/%s" % (session, line)

        # Check if output to specific file was requested.
        outfname = args.filename
        if not outfname:
            outfile = io.stdout  # default
            # We don't want to close stdout at the end!
            close_at_end = False
        else:
            if os.path.exists(outfname):
                try:
                    ans = io.ask_yes_no("File %r exists. Overwrite?" % outfname)
                except StdinNotImplementedError:
                    ans = True
                if not ans:
                    print('Aborting.')
                    return
                print("Overwriting file.")
            outfile = io_open(outfname, 'w', encoding='utf-8')
            close_at_end = True

        print_nums = args.print_nums
        get_output = args.get_output
        pyprompts = args.pyprompts
        raw = args.raw

        pattern = None
        limit = None if args.limit is _unspecified else args.limit

        if args.pattern is not None:
            if args.pattern:
                pattern = "*" + " ".join(args.pattern) + "*"
            else:
                pattern = "*"
            hist = history_manager.search(pattern, raw=raw, output=get_output,
                                          n=limit, unique=args.unique)
            print_nums = True
        elif args.limit is not _unspecified:
            n = 10 if limit is None else limit
            hist = history_manager.get_tail(n, raw=raw, output=get_output)
        else:
            if args.range:      # Get history by ranges
                hist = history_manager.get_range_by_str(" ".join(args.range),
                                                        raw, get_output)
            else:               # Just get history for the current session
                hist = history_manager.get_range(raw=raw, output=get_output)

        # We could be displaying the entire history, so let's not try to pull
        # it into a list in memory. Anything that needs more space will just
        # misalign.
        width = 4

        for session, lineno, inline in hist:
            # Print user history with tabs expanded to 4 spaces.  The GUI
            # clients use hard tabs for easier usability in auto-indented code,
            # but we want to produce PEP-8 compliant history for safe pasting
            # into an editor.
            if get_output:
                inline, output = inline
            inline = inline.expandtabs(4).rstrip()

            multiline = "\n" in inline
            line_sep = '\n' if multiline else ' '
            if print_nums:
                print(u'%s:%s' % (_format_lineno(session, lineno).rjust(width),
                        line_sep),  file=outfile, end=u'')
            if pyprompts:
                print(u">>> ", end=u"", file=outfile)
                if multiline:
                    inline = "\n... ".join(inline.splitlines()) + "\n..."
            print(inline, file=outfile)
            if get_output and output:
                print(cast_unicode_py2(output), file=outfile)

        if close_at_end:
            outfile.close()
Example #56
0
def magic_history(self, parameter_s = ''):
    """Print input history (_i<n> variables), with most recent last.

    %history [-o -p -t -n] [-f filename] [range | -g pattern | -l number]

    By default, input history is printed without line numbers so it can be
    directly pasted into an editor. Use -n to show them.

    By default, all input history from the current session is displayed.
    Ranges of history can be indicated using the syntax: 
    4      : Line 4, current session
    4-6    : Lines 4-6, current session
    243/1-5: Lines 1-5, session 243
    ~2/7   : Line 7, session 2 before current
    ~8/1-~6/5 : From the first line of 8 sessions ago, to the fifth line
                of 6 sessions ago.
    Multiple ranges can be entered, separated by spaces

    The same syntax is used by %macro, %save, %edit, %rerun

    Options:

      -n: print line numbers for each input.
      This feature is only available if numbered prompts are in use.

      -o: also print outputs for each input.

      -p: print classic '>>>' python prompts before each input.  This is useful
       for making documentation, and in conjunction with -o, for producing
       doctest-ready output.

      -r: (default) print the 'raw' history, i.e. the actual commands you typed.

      -t: print the 'translated' history, as IPython understands it.  IPython
      filters your input and converts it all into valid Python source before
      executing it (things like magics or aliases are turned into function
      calls, for example). With this option, you'll see the native history
      instead of the user-entered version: '%cd /' will be seen as
      'get_ipython().magic("%cd /")' instead of '%cd /'.

      -g: treat the arg as a pattern to grep for in (full) history.
      This includes the saved history (almost all commands ever written).
      Use '%hist -g' to show full saved history (may be very long).

      -l: get the last n lines from all sessions. Specify n as a single arg, or
      the default is the last 10 lines.

      -f FILENAME: instead of printing the output to the screen, redirect it to
       the given file.  The file is always overwritten, though *when it can*,
       IPython asks for confirmation first. In particular, running the command
       "history -f FILENAME" from the IPython Notebook interface will replace
       FILENAME even if it already exists *without* confirmation.

    Examples
    --------
    ::

      In [6]: %hist -n 4-6
      4:a = 12
      5:print a**2
      6:%hist -n 4-6

    """

    if not self.shell.displayhook.do_full_cache:
        print('This feature is only available if numbered prompts are in use.')
        return
    opts,args = self.parse_options(parameter_s,'noprtglf:',mode='string')

    # For brevity
    history_manager = self.shell.history_manager

    def _format_lineno(session, line):
        """Helper function to format line numbers properly."""
        if session in (0, history_manager.session_number):
            return str(line)
        return "%s/%s" % (session, line)

    # Check if output to specific file was requested.
    try:
        outfname = opts['f']
    except KeyError:
        outfile = io.stdout  # default
        # We don't want to close stdout at the end!
        close_at_end = False
    else:
        if os.path.exists(outfname):
            try:
                ans = io.ask_yes_no("File %r exists. Overwrite?" % outfname)
            except StdinNotImplementedError:
                ans = True
            if not ans:
                print('Aborting.')
                return
            print("Overwriting file.")
        outfile = io_open(outfname, 'w', encoding='utf-8')
        close_at_end = True

    print_nums = 'n' in opts
    get_output = 'o' in opts
    pyprompts = 'p' in opts
    # Raw history is the default
    raw = 't' not in opts

    default_length = 40
    pattern = None

    if 'g' in opts:         # Glob search
        pattern = "*" + args + "*" if args else "*"
        hist = history_manager.search(pattern, raw=raw, output=get_output)
        print_nums = True
    elif 'l' in opts:       # Get 'tail'
        try:
            n = int(args)
        except (ValueError, IndexError):
            n = 10
        hist = history_manager.get_tail(n, raw=raw, output=get_output)
Example #57
0
"""
Setup for keepa
"""
from setuptools import setup
import os
from io import open as io_open

package_name = 'keepa'

# Get version from ./_version.py
__version__ = None
version_file = os.path.join(os.path.dirname(__file__), package_name, '_version.py')

with io_open(version_file, mode='r') as fd:
    exec(fd.read())

filepath = os.path.dirname(__file__)
readme_file = os.path.join(filepath, 'README.rst')

setup(
    name=package_name,
    packages=[package_name],
    version=__version__,
    description='Interfaces with keepa.com',
    long_description=io_open(readme_file, encoding='utf-8').read(),
    author='Alex Kaszynski',
    author_email='*****@*****.**',
    license='Apache Software License',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: End Users/Desktop',
Example #58
0
File: setup.py Project: casperdcl/argopt
def parse_makefile_aliases(filepath):
    """
    Parse a makefile to find commands and substitute variables. Expects a
    makefile with only aliases and a line return between each command.

    Returns a dict, with a list of commands for each alias.
    """

    # -- Parsing the Makefile using ConfigParser
    # Adding a fake section to make the Makefile a valid Ini file
    ini_str = '[root]\n'
    with io_open(filepath, mode='r') as fd:
        ini_str = ini_str + RE_MAKE_CMD.sub('\t', fd.read())
    ini_fp = StringIO.StringIO(ini_str)
    # Parse using ConfigParser
    config = ConfigParser.RawConfigParser()
    config.readfp(ini_fp)
    # Fetch the list of aliases
    aliases = config.options('root')

    # -- Extracting commands for each alias
    commands = {}
    for alias in aliases:
        if alias.lower() in ['.phony']:
            continue
        # strip the first line return, and then split by any line return
        commands[alias] = config.get('root', alias).lstrip('\n').split('\n')

    # -- Commands substitution
    # Loop until all aliases are substituted by their commands:
    # Check each command of each alias, and if there is one command that is to
    # be substituted by an alias, try to do it right away. If this is not
    # possible because this alias itself points to other aliases, then stop
    # and put the current alias back in the queue to be processed again later.

    # Create the queue of aliases to process
    aliases_todo = list(commands.keys())
    # Create the dict that will hold the full commands
    commands_new = {}
    # Loop until we have processed all aliases
    while aliases_todo:
        # Pick the first alias in the queue
        alias = aliases_todo.pop(0)
        # Create a new entry in the resulting dict
        commands_new[alias] = []
        # For each command of this alias
        for cmd in commands[alias]:
            # Ignore self-referencing (alias points to itself)
            if cmd == alias:
                pass
            # Substitute full command
            elif cmd in aliases and cmd in commands_new:
                # Append all the commands referenced by the alias
                commands_new[alias].extend(commands_new[cmd])
            # Delay substituting another alias, waiting for the other alias to
            # be substituted first
            elif cmd in aliases and cmd not in commands_new:
                # Delete the current entry to avoid other aliases
                # to reference this one wrongly (as it is empty)
                del commands_new[alias]
                aliases_todo.append(alias)
                break
            # Full command (no aliases)
            else:
                commands_new[alias].append(cmd)
    commands = commands_new
    del commands_new

    # -- Prepending prefix to avoid conflicts with standard setup.py commands
    # for alias in commands.keys():
    #     commands['make_'+alias] = commands[alias]
    #     del commands[alias]

    return commands
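
A usage sketch: write a tiny two-alias Makefile and resolve it (assumes the compat imports and RE_MAKE_CMD from the top of this file; the file name is arbitrary):

from io import open as io_open

with io_open('Makefile.demo', mode='w') as fd:
    fd.write(u'all:\n\tflake8\n\ttest\ntest:\n\tnosetests\n')

commands = parse_makefile_aliases('Makefile.demo')
# The 'test' alias is substituted into 'all':
# {'test': ['nosetests'], 'all': ['flake8', 'nosetests']}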
Example #59
0
File: setup.py Project: tqdm/tqdm
except ImportError:
    from distutils.core import setup

    def find_packages(where='.'):
        # os.walk -> list[(dirname, list[subdirs], list[files])]
        return [folder.replace("/", ".").lstrip(".")
                for (folder, _, fils) in os.walk(where)
                if "__init__.py" in fils]
import sys
from io import open as io_open

# Get version from tqdm/_version.py
__version__ = None
src_dir = os.path.abspath(os.path.dirname(__file__))
version_file = os.path.join(src_dir, 'tqdm', '_version.py')
with io_open(version_file, mode='r') as fd:
    exec(fd.read())

# Executing makefile commands if specified
if sys.argv[1].lower().strip() == 'make':
    import pymake
    # Filename of the makefile
    fpath = os.path.join(src_dir, 'Makefile')
    pymake.main(['-f', fpath] + sys.argv[2:])
    # Stop to avoid setup.py raising non-standard command error
    sys.exit(0)

extras_require = {}
requirements_dev = os.path.join(src_dir, 'requirements-dev.txt')
with io_open(requirements_dev, mode='r') as fd:
    extras_require['dev'] = [i.strip().split('#', 1)[0].strip()