コード例 #1
0
def update_gherkin(ctx, dry_run=False):
    """Update "gherkin-languages.json" file from cucumber-repo.

    * Download "gherkin-languages.json" from cucumber repo
    * Update "gherkin-languages.json"
    * Generate "i18n.py" file from "gherkin-languages.json"
    * Update "behave/i18n.py" file (optional; not in dry-run mode)

    :param ctx: Invoke context used to run shell commands.
    :param dry_run: If true, do not overwrite "behave/i18n.py".
    """
    with cd("etc/gherkin"):
        # -- BACKUP-FILE: Keep the previous JSON around for comparison.
        gherkin_languages_file = Path("gherkin-languages.json")
        gherkin_languages_file.copy("gherkin-languages.json.SAVED")

        print('Downloading "gherkin-languages.json" from github:cucumber ...')
        download_request = requests.get(GHERKIN_LANGUAGES_URL)
        assert download_request.ok
        print('Download finished: OK (size={0})'.format(
            len(download_request.content)))
        with open(gherkin_languages_file, "wb") as f:
            f.write(download_request.content)

        print('Generating "i18n.py" ...')
        ctx.run("./convert_gherkin-languages.py")
        # -- FIX: "diff" exits with a non-zero code when the files differ,
        # which makes ctx.run() raise and abort the task; warn=True lets the
        # task continue so the new i18n.py can still be installed below.
        ctx.run("diff i18n.py ../../behave/i18n.py", warn=True)
        if not dry_run:
            print("Updating behave/i18n.py ...")
            Path("i18n.py").move("../../behave/i18n.py")
コード例 #2
0
ファイル: tango_doc.py プロジェクト: smunix/Tango-D2
def write_PDF(DIL, SRC, VERSION, TMP):
    """Generate the "Tango.<VERSION>.API.pdf" file from the HTML docs.

    Args:
        DIL: source location handed to PDFGenerator.fetch_files().
        SRC: folder containing the generated "*.html" files; the PDF is
            written into this folder as well.
        VERSION: Tango version string used in titles and the file name.
        TMP: scratch folder; a "pdf" subfolder is created inside it.
    """
    TMP = TMP / "pdf"
    # -- FIX: "exists" is a method on Path objects; the bare attribute is
    # always truthy, so "TMP.exists or TMP.mkdir()" never created the
    # directory. Call it explicitly.
    if not TMP.exists():
        TMP.mkdir()

    pdf_gen = PDFGenerator()
    pdf_gen.fetch_files(DIL, TMP)
    html_files = SRC.glob("*.html")
    # Exclude index.html from the PDF page set.
    html_files = filter(lambda path: path.name != "index.html", html_files)
    # Get the logo if present (same ".exists()" fix as above).
    logo_png = Path("logo_tango.png")
    if logo_png.exists():
        logo_png.copy(TMP)
        logo = "<br/>" * 2 + "<img src='%s'/>" % logo_png.name
    else:
        logo = ''
    symlink = "http://dil.googlecode.com/svn/doc/Tango_%s" % VERSION
    params = {
        "pdf_title": "Tango %s API" % VERSION,
        "cover_title": "Tango %s<br/><b>API</b>%s" % (VERSION, logo),
        "author": "Tango Team",
        "subject": "Programming API",
        "keywords": "Tango standard library API documentation",
        "x_html": "HTML",
        "nested_toc": True,
        "symlink": symlink
    }
    pdf_gen.run(html_files, SRC / ("Tango.%s.API.pdf" % VERSION), TMP, params)
コード例 #3
0
ファイル: tango_doc.py プロジェクト: fawzi/oldTango
def write_PDF(DIL, SRC, VERSION, TMP):
    """Generate the "Tango.<VERSION>.API.pdf" file from the HTML docs.

    Args:
        DIL: source location handed to PDFGenerator.fetch_files().
        SRC: folder with the generated "*.html" files; PDF written here too.
        VERSION: Tango version string used in titles and the file name.
        TMP: scratch folder; a "pdf" subfolder is created inside it.
    """
    TMP = TMP / "pdf"
    # -- FIX: "exists" is a method on Path objects; the bare attribute is
    # always truthy, so "TMP.exists or TMP.mkdir()" never created the
    # directory. Call it explicitly.
    if not TMP.exists():
        TMP.mkdir()

    pdf_gen = PDFGenerator()
    pdf_gen.fetch_files(DIL, TMP)
    html_files = SRC.glob("*.html")
    # Exclude index.html from the PDF page set.
    html_files = filter(lambda path: path.name != "index.html", html_files)
    # Get the logo if present (same ".exists()" fix as above).
    logo_png = Path("logo_tango.png")
    if logo_png.exists():
        logo_png.copy(TMP)
        logo = "<br/>" * 2 + "<img src='%s'/>" % logo_png.name
    else:
        logo = ''
    symlink = "http://dil.googlecode.com/svn/doc/Tango_%s" % VERSION
    params = {
        "pdf_title": "Tango %s API" % VERSION,
        "cover_title": "Tango %s<br/><b>API</b>%s" % (VERSION, logo),
        "author": "Tango Team",
        "subject": "Programming API",
        "keywords": "Tango standard library API documentation",
        "x_html": "HTML",
        "nested_toc": True,
        "symlink": symlink
    }
    pdf_gen.run(html_files, SRC / ("Tango.%s.API.pdf" % VERSION), TMP, params)
コード例 #4
0
    def render(dir, f):
        """Render one Eagle-derived POV-Ray scene (.pov) to a PNG image.

        NOTE(review): nested function relying on closure variables from the
        enclosing scope (pcb_rotate, size, eagle3d, output, Proc,
        EagleError). The "dir" parameter is unused here and shadows the
        builtin — confirm it is required by the caller's interface.
        """
        # povray has strange file access policy,
        # better to generate under tmp

        # cli doc:
        # http://library.thinkquest.org/3285/language/cmdln.html

        # Template line used to locate/patch board rotations in the .pov file.
        templ = '#local pcb_rotate_%s = %s'
        pov = Path(f.replace('.brd', '.pov'))
        if pcb_rotate != (0, 0, 0):
            # Replace the default zero rotations with the requested angles.
            s = pov.text()
            s = s.replace(templ % ('x', 0), templ % ('x', pcb_rotate[0]))
            s = s.replace(templ % ('y', 0), templ % ('y', pcb_rotate[1]))
            s = s.replace(templ % ('z', 0), templ % ('z', pcb_rotate[2]))
            pov.write_text(s)
        fpng = Path(f.replace('.brd', '.png'))
        cmd = []
        cmd += ["povray"]
        cmd += ["-d"]  # no display
        cmd += ["-a"]  # anti-aliasing
        cmd += ['+W' + str(size[0])]  # width
        cmd += ['+H' + str(size[1])]  # height
        cmd += ['-o' + fpng]  # output image file
        cmd += ['-L' + eagle3d]  # include/library search path (eagle3d)
        cmd += [pov]
        p = Proc(cmd).call()
        if not fpng.exists():
            # A missing output PNG is treated as a povray failure.
            raise EagleError('povray error, proc=%s' % p)
        fpng.copy(output)
コード例 #5
0
    def pre_setup(self):
        """Make a copy of the ini files and set the port number and host in
        the new testing config.

        Side effects: copies all "*.ini" files from self.config_dir into
        self.workspace, rewrites the working config with the test port/host
        and self.extra_config_vars, and sets self._uri.
        """
        self.working_config = self.workspace / self.config_filename

        # We need the other ini files as well here as they may be chained
        for filename in glob.glob(os.path.join(self.config_dir, '*.ini')):
            shutil.copy(filename, self.workspace)

        Path.copy(self.original_config, self.working_config)

        parser = configparser.ConfigParser()
        parser.read(self.original_config)
        parser.set('server:main', 'port', str(self.port))
        parser.set('server:main', 'host', self.hostname)
        # -- Use plain loops: a list comprehension executed only for its
        # side effects builds a throwaway list and obscures intent.
        for section, cfg in self.extra_config_vars.items():
            for key, value in cfg.items():
                parser.set(section, key, value)
        with open(str(self.working_config), 'w') as fp:
            parser.write(fp)

        # Set the uri to be the external hostname and the url prefix
        self._uri = "http://%s:%s/%s" % (os.uname()[1], self.port,
                                         parser.get('app:main', 'url_prefix'))
コード例 #6
0
ファイル: utils.py プロジェクト: juju-solutions/jujubigdata
def get_ssh_key(user):
    """Ensure the user's SSH key exists and return its public part."""
    generate_ssh_key(user)
    # Authorize the key for localhost logins; useful for e.g. start_dfs.sh.
    auth_keys = ssh_key_dir(user) / 'authorized_keys'
    if not auth_keys.exists():
        Path.copy(ssh_pub_key(user), auth_keys)
    return ssh_pub_key(user).text()
コード例 #7
0
 def build(self, dockerfile):
     """Build a docker image from *dockerfile* and return the streamed
     build-output lines."""
     dockerfile = Path(dockerfile)
     # Image tag: "rf-" plus the dockerfile name without its ".dkf" suffix.
     tag = 'rf-' + dockerfile.basename().replace('.dkf', '')
     dockerfile.copy('redfish-client/tests/Dockerfile')
     build_stream = self.cli.build(path='redfish-client/tests',
                                   tag=tag,
                                   rm=True)
     response = list(build_stream)
     return response
コード例 #8
0
 def build(self, dockerfile):
     """Build a docker image from *dockerfile* and return the streamed
     build-output lines."""
     dockerfile = Path(dockerfile)
     # Image tag: "rf" plus the dockerfile name without the "Dockerfile."
     # prefix.
     tag = 'rf' + dockerfile.basename().replace('Dockerfile.', '')
     dockerfile.copy('redfish-client/tests/Dockerfile')
     build_stream = self.cli.build(path='redfish-client/tests',
                                   tag=tag,
                                   rm=True)
     response = list(build_stream)
     return response
コード例 #9
0
    def copyfiles(self, skip_sky=False, skip_pattern=False, skip_opt=False):
        """Copy (or materialize) the input files into the temp directory.

        Each of scene/sky/pattern/sensor is either an existing file path
        (then it is copied into self.tempdir) or raw file content (then it
        is written under a canonical name). Afterwards the corresponding
        attribute holds the file name relative to the temp directory.

        Args:
            skip_sky: do not process self.sky.
            skip_pattern: do not process self.pattern (pattern is only
                handled when self.infinity is set).
            skip_opt: do not process the optical files.

        Raises:
            CaribuOptionError: if optnames is shorter than opticals.
        """
        d = self.tempdir

        if os.path.exists(self.scene):
            fn = Path(self.scene)
            fn.copy(d / fn.basename())
        else:
            # Not an existing path: treat the value as raw scene content.
            fn = d / 'cscene.can'
            fn.write_text(self.scene)
        self.scene = Path(fn.basename())

        if not skip_sky:
            if os.path.exists(self.sky):
                fn = Path(self.sky)
                fn.copy(d / fn.basename())
            else:
                fn = d / 'sky.light'
                fn.write_text(self.sky)
            self.sky = Path(fn.basename())

        if not skip_pattern:
            if self.infinity:
                if os.path.exists(self.pattern):
                    fn = Path(self.pattern)
                    fn.copy(d / fn.basename())
                else:
                    fn = d / 'pattern.8'
                    fn.write_text(self.pattern)
                self.pattern = Path(fn.basename())

        if self.sensor is not None:
            if os.path.exists(self.sensor):
                fn = Path(self.sensor)
                fn.copy(d / fn.basename())
            else:
                fn = d / 'sensor.can'
                fn.write_text(self.sensor)
            self.sensor = Path(fn.basename())

        if not skip_opt:
            # -- FIX: "lambda (x): ..." used Python-2-only tuple parameter
            # unpacking, and the map() result was indexed below, which fails
            # on Python 3 where map() is a lazy iterator. Build real lists.
            optn = [name + '.opt' for name in _safe_iter(self.optnames)]
            try:
                for i, opt in enumerate(_safe_iter(self.opticals)):
                    # safe_iter allows not to iterate along character composing the optfile name when only one optfile is given
                    if os.path.exists(opt):
                        fn = Path(opt)
                        fn.copy(d / optn[i])
                    else:
                        fn = d / optn[i]
                        fn.write_text(opt)
                self.opticals = [Path(name) for name in _safe_iter(optn)]
            except IndexError:
                raise CaribuOptionError(
                    "Optnames list must be None or as long as optfiles list")
コード例 #10
0
    def copyfiles(self, skip_sky=False, skip_pattern=False, skip_opt=False):
        """Copy (or materialize) the input files into the temp directory.

        Each of scene/sky/pattern/sensor is either an existing file path
        (then it is copied into self.tempdir) or raw file content (then it
        is written under a canonical name). Afterwards the corresponding
        attribute holds the file name relative to the temp directory.

        Args:
            skip_sky: do not process self.sky.
            skip_pattern: do not process self.pattern (pattern is only
                handled when self.infinity is set).
            skip_opt: do not process the optical files.

        Raises:
            CaribuOptionError: if optnames is shorter than opticals.
        """
        d = self.tempdir

        if os.path.exists(self.scene):
            fn = Path(self.scene)
            fn.copy(d / fn.basename())
        else:
            # Not an existing path: treat the value as raw scene content.
            fn = d / 'cscene.can'
            fn.write_text(self.scene)
        self.scene = Path(fn.basename())

        if not skip_sky:
            if os.path.exists(self.sky):
                fn = Path(self.sky)
                fn.copy(d / fn.basename())
            else:
                fn = d / 'sky.light'
                fn.write_text(self.sky)
            self.sky = Path(fn.basename())

        if not skip_pattern:
            if self.infinity:
                if os.path.exists(self.pattern):
                    fn = Path(self.pattern)
                    fn.copy(d / fn.basename())
                else:
                    fn = d / 'pattern.8'
                    fn.write_text(self.pattern)
                self.pattern = Path(fn.basename())

        if self.sensor is not None:
            if os.path.exists(self.sensor):
                fn = Path(self.sensor)
                fn.copy(d / fn.basename())
            else:
                fn = d / 'sensor.can'
                fn.write_text(self.sensor)
            self.sensor = Path(fn.basename())

        if not skip_opt:
            # -- FIX: "lambda (x): ..." used Python-2-only tuple parameter
            # unpacking, and the map() result was indexed below, which fails
            # on Python 3 where map() is a lazy iterator. Build real lists.
            optn = [name + '.opt' for name in _safe_iter(self.optnames)]
            try:
                for i, opt in enumerate(_safe_iter(self.opticals)):
                    # safe_iter allows not to iterate along character composing the optfile name when only one optfile is given
                    if os.path.exists(opt):
                        fn = Path(opt)
                        fn.copy(d / optn[i])
                    else:
                        fn = d / optn[i]
                        fn.write_text(opt)
                self.opticals = [Path(name) for name in _safe_iter(optn)]
            except IndexError:
                raise CaribuOptionError("Optnames list must be None or as long as optfiles list")
コード例 #11
0
    def configure_examples(self):
        """
        Install sparkpi.sh and sample data to /home/ubuntu.

        The sparkpi.sh script demonstrates spark-submit with the SparkPi class
        included with Spark. This small script is packed into the spark charm
        source in the ./scripts subdirectory.

        The sample data is used for benchmarks (only PageRank for now). This
        may grow quite large in the future, so we utilize Juju Resources for
        getting this data onto the unit. Sample data originated as follows:

        - PageRank: https://snap.stanford.edu/data/web-Google.html
        """
        # Handle sparkpi.sh
        script_source = 'scripts/sparkpi.sh'
        script_path = Path(script_source)
        if script_path.exists():
            script_target = '/home/ubuntu/sparkpi.sh'
            # Compare the packed script's hash against the hash recorded at
            # the previous install; skip the copy when nothing changed.
            new_hash = host.file_hash(script_source)
            old_hash = unitdata.kv().get('sparkpi.hash')
            if new_hash != old_hash:
                hookenv.log('Installing SparkPi script')
                script_path.copy(script_target)
                # Make the installed script executable and give it to the
                # ubuntu user / hadoop group.
                Path(script_target).chmod(0o755)
                Path(script_target).chown('ubuntu', 'hadoop')
                unitdata.kv().set('sparkpi.hash', new_hash)
                hookenv.log('SparkPi script was installed successfully')

        # Handle sample data
        # NOTE(review): the guards below suggest resource_get() can return a
        # falsy value (resource not attached) and that empty placeholder
        # files (st_size == 0) must be ignored — confirm against charm docs.
        sample_source = hookenv.resource_get('sample-data')
        sample_path = sample_source and Path(sample_source)
        if sample_path and sample_path.exists() and sample_path.stat().st_size:
            sample_target = '/home/ubuntu'
            new_hash = host.file_hash(sample_source)
            old_hash = unitdata.kv().get('sample-data.hash')
            if new_hash != old_hash:
                hookenv.log('Extracting Spark sample data')
                # Extract the sample data; since sample data does not impact
                # functionality, log any extraction error but don't fail.
                try:
                    archive.extract(sample_path, destpath=sample_target)
                except Exception:
                    hookenv.log(
                        'Unable to extract Spark sample data: {}'.format(
                            sample_path))
                else:
                    # Record the hash only after a successful extraction so a
                    # failed attempt is retried on the next hook run.
                    unitdata.kv().set('sample-data.hash', new_hash)
                    hookenv.log('Spark sample data was extracted successfully')
コード例 #12
0
ファイル: utils.py プロジェクト: ktsakalozos/jujubigdata
def get_ssh_key(user):
    """Create the user's SSH keypair if missing and return the public key."""
    ssh_dir = Path("/home/%s/.ssh" % user)
    if not ssh_dir.exists():
        host.mkdir(ssh_dir, owner=user, group="hadoop", perms=0o755)
    private_key = ssh_dir / "id_rsa"
    public_key = ssh_dir / "id_rsa.pub"
    authorized = ssh_dir / "authorized_keys"
    if not public_key.exists():
        # Turn off host-key prompting for every host before first use.
        config_lines = ["Host *", "    StrictHostKeyChecking no"]
        (ssh_dir / "config").write_lines(config_lines, append=True)
        check_call(["ssh-keygen", "-t", "rsa", "-P", "", "-f", private_key])
        host.chownr(ssh_dir, user, "hadoop")
    # Authorize our own key so ssh'ing to localhost works (e.g. start_dfs.sh).
    if not authorized.exists():
        Path.copy(public_key, authorized)
    return public_key.text()
コード例 #13
0
def select_data(root, output, num_samples=20):
    """Copy a random sample of image/LiDAR pairs plus calibration files.

    Args:
        root: dataset root containing "whole_data" and the calib files.
        output: destination folder for the sampled files.
        num_samples: how many images to sample (default 20, as before;
            random.sample raises ValueError if fewer images exist).
    """
    data_dir = Path(root) / 'whole_data'
    output = Path(output)
    images = data_dir.files('*.png')
    sample_images = random.sample(images, num_samples)
    for img in sample_images:
        img.copy(output / img.name)
        # Each image has a matching ".bin" velodyne scan with the same stem.
        velodyne_name = img.stem + '.bin'
        velodyne_path = data_dir / velodyne_name
        velodyne_path.copy(output / velodyne_name)

    # The calibration files apply to the whole dataset; copy them verbatim.
    cam2cam = Path(root) / 'calib_cam_to_cam.txt'
    velo2cam = Path(root) / 'calib_velo_to_cam.txt'
    cam2cam.copy(output / 'calib_cam_to_cam.txt')
    velo2cam.copy(output / 'calib_velo_to_cam.txt')
コード例 #14
0
def cfy(module_tmpdir, logger):
    """Build and return a `cfy` CLI callable bound to a temp workdir.

    Side effects: sets the CFY_WORKDIR environment variable process-wide,
    and copies the user's local CLI config (if present) into the tmp
    workdir so local customizations also apply when running tests.
    """
    os.environ['CFY_WORKDIR'] = module_tmpdir
    logger.info('CFY_WORKDIR is set to %s', module_tmpdir)
    # Copy CLI configuration file if exists in home folder
    # this way its easier to customize the configuration when running
    # tests locally.
    cli_config_path = Path(os.path.expanduser('~/.cloudify/config.yaml'))
    if cli_config_path.exists():
        logger.info('Using CLI configuration file from: %s', cli_config_path)
        new_cli_config_dir = module_tmpdir / '.cloudify'
        # NOTE(review): mkdir() would fail if '.cloudify' already exists in
        # module_tmpdir — presumably the fixture dir is always fresh; confirm.
        new_cli_config_dir.mkdir()
        cli_config_path.copy(new_cli_config_dir / 'config.yaml')
    cfy = util.sh_bake(sh.cfy)
    # Smoke-check the binary before handing it to the tests.
    cfy(['--version'])
    return cfy
コード例 #15
0
    def set_values_and_compare(
            self,
            rhythmdb_without_cout_rating: Path,
            itunes_library_path: Path,
            expected_rhythmboxdb: Path,
            output_file_name: str,
            assert_something_was_changed: bool,
            itunes_library_root: str = "D:/Music/",
            rhythmbox_library_root: str = "/home/pha/Music/"
    ) -> IntegrationLog:
        """Run the count/rating integrator on a copy of a rhythmdb file and
        compare the resulting XML with an expected reference file.

        Args:
            rhythmdb_without_cout_rating: source rhythmdb fixture; copied to
                the target folder first so the fixture stays untouched.
            itunes_library_path: iTunes library XML to read songs from.
            expected_rhythmboxdb: reference file for the comparison.
            output_file_name: name of the copy created in self.target_folder.
            assert_something_was_changed: expected value of
                log.something_was_changed().
            itunes_library_root / rhythmbox_library_root: path prefixes used
                to map iTunes locations onto rhythmbox locations.

        Returns:
            The IntegrationLog produced by set_values().
        """
        target_rhythmdb = self.target_folder.joinpath(output_file_name)
        # Work on a copy so the input fixture file is never modified.
        rhythmdb_without_cout_rating.copy(target_rhythmdb)
        itunes_library = str(itunes_library_path)
        songs = itunes_library_reader.read_songs(itunes_library)
        log = rhythmbox_count_rating_integrator.set_values(
            itunes_songs=songs,
            target_rhythmdb=target_rhythmdb,
            itunes_library_root=itunes_library_root,
            rhythmbox_library_root=rhythmbox_library_root)
        print("Expect something has changed: {}".format(
            assert_something_was_changed))
        if assert_something_was_changed:
            self.assertTrue(
                log.something_was_changed(),
                "No song entries was changed! But they should be!")
        else:
            self.assertFalse(
                log.something_was_changed(),
                "A song entries was changed! But they shouldn't be!")

        print("Compare content of {} (actual) with {} (expected)".format(
            target_rhythmdb, expected_rhythmboxdb))
        with expected_rhythmboxdb.open(
                mode="r", encoding="UTF-8"
        ) as expected_rhythmboxdb_opened, target_rhythmdb.open(
                "r") as target_rhythmdb_opened:
            actual_playlist_xml = target_rhythmdb_opened.read()
            expected_playlist_xml = expected_rhythmboxdb_opened.read()
        # comparing xml is a pain. simple string comparision doesn't work due to different tag order and formatting (newline after each tag or not).
        # so let's sort each character in both xml strings. this leads to rubbish. but if the sorted rubbish is equal, the origin is xml is very likely to be equal.
        actual_playlist_xml_normalized = sort_and_clean(actual_playlist_xml)
        expected_playlist_xml_normalized = sort_and_clean(
            expected_playlist_xml)
        self.assertEqual(
            actual_playlist_xml_normalized, expected_playlist_xml_normalized,
            "Normalized content of {} and {} are different!".format(
                expected_rhythmboxdb, target_rhythmdb))
        return log
コード例 #16
0
ファイル: bigtop_spark.py プロジェクト: apache/bigtop
    def configure_examples(self):
        """
        Install sparkpi.sh and sample data to /home/ubuntu.

        The sparkpi.sh script demonstrates spark-submit with the SparkPi class
        included with Spark. This small script is packed into the spark charm
        source in the ./scripts subdirectory.

        The sample data is used for benchmarks (only PageRank for now). This
        may grow quite large in the future, so we utilize Juju Resources for
        getting this data onto the unit. Sample data originated as follows:

        - PageRank: https://snap.stanford.edu/data/web-Google.html
        """
        # Handle sparkpi.sh
        script_source = 'scripts/sparkpi.sh'
        script_path = Path(script_source)
        if script_path.exists():
            script_target = '/home/ubuntu/sparkpi.sh'
            # Compare the packed script's hash against the hash recorded at
            # the previous install; skip the copy when nothing changed.
            new_hash = host.file_hash(script_source)
            old_hash = unitdata.kv().get('sparkpi.hash')
            if new_hash != old_hash:
                hookenv.log('Installing SparkPi script')
                script_path.copy(script_target)
                # Make the installed script executable and give it to the
                # ubuntu user / hadoop group.
                Path(script_target).chmod(0o755)
                Path(script_target).chown('ubuntu', 'hadoop')
                unitdata.kv().set('sparkpi.hash', new_hash)
                hookenv.log('SparkPi script was installed successfully')

        # Handle sample data
        # NOTE(review): the guards below suggest resource_get() can return a
        # falsy value (resource not attached) and that empty placeholder
        # files (st_size == 0) must be ignored — confirm against charm docs.
        sample_source = hookenv.resource_get('sample-data')
        sample_path = sample_source and Path(sample_source)
        if sample_path and sample_path.exists() and sample_path.stat().st_size:
            sample_target = '/home/ubuntu'
            new_hash = host.file_hash(sample_source)
            old_hash = unitdata.kv().get('sample-data.hash')
            if new_hash != old_hash:
                hookenv.log('Extracting Spark sample data')
                # Extract the sample data; since sample data does not impact
                # functionality, log any extraction error but don't fail.
                try:
                    archive.extract(sample_path, destpath=sample_target)
                except Exception:
                    hookenv.log('Unable to extract Spark sample data: {}'
                                .format(sample_path))
                else:
                    # Record the hash only after a successful extraction so a
                    # failed attempt is retried on the next hook run.
                    unitdata.kv().set('sample-data.hash', new_hash)
                    hookenv.log('Spark sample data was extracted successfully')
コード例 #17
0
    def build_arscons(self, sources=None):
        """Compile *sources* with the bundled arscons SConstruct build and
        record the resulting .elf in self.output."""
        # TODO: remove tempdir
        build_dir = tmpdir(dir=tmpdir())

        # Place the bundled SConstruct next to the sources to compile.
        sconstruct_src = Path(__file__).parent / 'SConstruct'
        sconstruct_src.copy(build_dir / 'SConstruct')

        allfiles = self.setup_sources(build_dir, sources)
        projname = self.guess_projname(allfiles)
        build_dir = rename(build_dir, build_dir.parent / projname)

        command = self.command_list()
        self.proc = Proc(command, cwd=build_dir).call()
        if not self.ok:
            raise ArduinoCompileError(command, sources, self.error_text)
        self.output = build_dir.files('*.elf')[0]
コード例 #18
0
ファイル: arduino.py プロジェクト: ponty/pyavrutils
    def build_arscons(self, sources=None):
        """Compile *sources* with the bundled arscons SConstruct build and
        record the resulting .elf in self.output."""
        # TODO: remove tempdir
        build_dir = tmpdir(dir=tmpdir())

        # Place the bundled SConstruct next to the sources to compile.
        sconstruct_src = Path(__file__).parent / "SConstruct"
        sconstruct_src.copy(build_dir / "SConstruct")

        allfiles = self.setup_sources(build_dir, sources)
        projname = self.guess_projname(allfiles)
        build_dir = rename(build_dir, build_dir.parent / projname)

        command = self.command_list()
        self.proc = Proc(command, cwd=build_dir).call()
        if not self.ok:
            raise ArduinoCompileError(command, sources, self.error_text)
        self.output = build_dir.files("*.elf")[0]
コード例 #19
0
    def get_background_path(self, background_name, file_name):
        """Get the accurate path of one background

        Args:
            background_name (str): display name of the background, used in
                log and error messages only.
            file_name (str): name of the background file.

        Returns:
            path.Path: absolute path to the background file.

        Raises:
            BackgroundNotFoundError: if the file is found neither in the
                custom directory nor in the package resources.
        """
        # trying to load from custom directory
        if self.directory:
            file_path = self.directory / file_name
            if file_path.exists():
                logger.debug(
                    "Loading custom %s background file '%s'", background_name, file_name
                )
                # NOTE(review): assumes Path.copy returns the destination
                # path (path.py wraps shutil.copy) — confirm.
                return file_path.copy(self.destination)

        # trying to load from package by default
        try:
            with path(self.package, file_name) as file:
                logger.debug(
                    "Loading default %s background file '%s'",
                    background_name,
                    file_name,
                )
                # The resource may be extracted to a temp location; copy it
                # out before the context manager cleans up.
                file_path = Path(file)
                return file_path.copy(self.destination)

        except FileNotFoundError as error:
            raise BackgroundNotFoundError(
                f"No {background_name} background file found for '{file_name}'"
            ) from error
コード例 #20
0
ファイル: path_learn.py プロジェクト: thinker3/py_learn
def path_file_operations():
    """Exploratory demo of path.py ("pathpy") file operations (Python 2).

    Creates scratch files/dirs under the home folder, exercises
    expanduser/normpath/touch/copy/move/rename, then removes everything.
    NOTE(review): has real side effects in the user's home directory.
    """
    testdir = pathpy('~/testdir').expanduser()
    print type(testdir)

    assert isinstance(testdir, pathpy)
    testdir = testdir.normpath()
    print type(testdir)

    abspath = testdir.abspath()
    basename = testdir.basename()
    print type(abspath)
    print type(basename)

    # pathpy appears to subclass str: slicing and "in" work on the raw text.
    print testdir[2:]
    print 'test' in basename

    if not testdir.exists():
        testdir.makedirs()

    # TypeError: unbound method expanduser() must be called with Path instance
    # as first argument (got str instance instead)
    #print pathpy.expanduser('~/temp')

    testfile = pathpy('~/testfile').expanduser()
    assert isinstance(testfile, pathpy)

    if not testfile.exists():
        testfile.touch()
        # Move via the bound method ...
        testfile.move(testdir)

    testfile = pathpy('~/testfile2').expanduser()
    if not testfile.exists():
        testfile.touch()
        # ... and via the unbound method; both forms are equivalent.
        pathpy.move(testfile, testdir)

    testfile = pathpy('~/testfile3').expanduser()
    if not testfile.exists():
        testfile.touch()
        r = pathpy.copy(testfile, testdir)
        print 'New path is %s' % r  # None
        #testfile.rename('abcd')  # move to the working folder and renamed

        # TypeError: descriptor 'join' requires a 'unicode' object but received a 'list'
        #new_name = pathpy.join([testfile.dirname(), 'abcd'])

        # pathpy.join is str.join here, hence the surprising interleaving.
        new_name = pathpy.join(testfile.dirname(), 'abcd')  # problematic
        print new_name  # a/Users/kenb/Users/kenc/Users/kend

        # os.path.join behaves as expected, unlike str.join above.
        new_name = os.path.join(testfile.dirname(), 'abcd')
        print new_name
        testfile.rename(new_name)  # no error even the file exists

    # remove_p(): remove without raising when the file is already gone.
    testfile.remove_p()

    for f in testdir.files():
        assert isinstance(f, pathpy)
        f.remove()
    testdir.rmdir()
コード例 #21
0
def path_file_operations():
    """Exploratory demo of path.py ("pathpy") file operations.

    Creates scratch files/dirs under the home folder, exercises
    expanduser/normpath/touch/copy/move/rename, then removes everything.
    NOTE(review): has real side effects in the user's home directory.
    """
    testdir = pathpy('~/testdir').expanduser()
    print(type(testdir))

    assert isinstance(testdir, pathpy)
    testdir = testdir.normpath()
    print(type(testdir))

    abspath = testdir.abspath()
    basename = testdir.basename()
    print(type(abspath))
    print(type(basename))

    # pathpy appears to subclass str: slicing and "in" work on the raw text.
    print(testdir[2:])
    print('test' in basename)

    if not testdir.exists():
        testdir.makedirs()

    # TypeError: unbound method expanduser() must be called with Path instance
    # as first argument (got str instance instead)
    #print pathpy.expanduser('~/temp')

    testfile = pathpy('~/testfile').expanduser()
    assert isinstance(testfile, pathpy)

    if not testfile.exists():
        testfile.touch()
        # Move via the bound method ...
        testfile.move(testdir)

    testfile = pathpy('~/testfile2').expanduser()
    if not testfile.exists():
        testfile.touch()
        # ... and via the unbound method; both forms are equivalent.
        pathpy.move(testfile, testdir)

    testfile = pathpy('~/testfile3').expanduser()
    if not testfile.exists():
        testfile.touch()
        r = pathpy.copy(testfile, testdir)
        print('New path is %s' % r)  # None
        #testfile.rename('abcd')  # move to the working folder and renamed

        # TypeError: descriptor 'join' requires a 'unicode' object but received a 'list'
        #new_name = pathpy.join([testfile.dirname(), 'abcd'])

        # pathpy.join is str.join here, hence the surprising interleaving.
        new_name = pathpy.join(testfile.dirname(), 'abcd')  # problematic
        print(new_name)  # a/Users/kenb/Users/kenc/Users/kend

        # os.path.join behaves as expected, unlike str.join above.
        new_name = os.path.join(testfile.dirname(), 'abcd')
        print(new_name)
        testfile.rename(new_name)  # no error even the file exists

    # remove_p(): remove without raising when the file is already gone.
    testfile.remove_p()

    for f in testdir.files():
        assert isinstance(f, pathpy)
        f.remove()
    testdir.rmdir()
コード例 #22
0
ファイル: utils.py プロジェクト: ktsakalozos/jujubigdata-dev
def get_ssh_key(user):
    """Create the user's SSH keypair if missing and return the public key."""
    ssh_dir = Path('/home/%s/.ssh' % user)
    if not ssh_dir.exists():
        host.mkdir(ssh_dir, owner=user, group='hadoop', perms=0o755)
    private_key = ssh_dir / 'id_rsa'
    public_key = ssh_dir / 'id_rsa.pub'
    authorized = ssh_dir / 'authorized_keys'
    if not public_key.exists():
        # Turn off host-key prompting for every host before first use.
        config_lines = [
            'Host *',
            '    StrictHostKeyChecking no'
        ]
        (ssh_dir / 'config').write_lines(config_lines, append=True)
        check_call(['ssh-keygen', '-t', 'rsa', '-P', '', '-f', private_key])
        host.chownr(ssh_dir, user, 'hadoop')
    # Authorize our own key so ssh'ing to localhost works (e.g. start_dfs.sh).
    if not authorized.exists():
        Path.copy(public_key, authorized)
    return public_key.text()
コード例 #23
0
    def pre_setup(self):
        """Make a copy of the ini files and set the port number and host in
        the new testing config.

        Side effects: copies all "*.ini" files from self.config_dir into
        self.workspace, rewrites the working config with the test port/host
        and self.extra_config_vars, and sets self._uri.
        """
        self.working_config = self.workspace / self.config_filename

        # We need the other ini files as well here as they may be chained
        for filename in glob.glob(os.path.join(self.config_dir, '*.ini')):
            shutil.copy(filename, self.workspace)

        Path.copy(self.original_config, self.working_config)

        parser = configparser.ConfigParser()
        parser.read(self.original_config)
        parser.set('server:main', 'port', str(self.port))
        parser.set('server:main', 'host', self.hostname)
        # -- Use plain loops: a list comprehension executed only for its
        # side effects builds a throwaway list and obscures intent.
        for section, cfg in self.extra_config_vars.items():
            for key, value in cfg.items():
                parser.set(section, key, value)
        with open(str(self.working_config), 'w') as fp:
            parser.write(fp)

        # Set the uri to be the external hostname and the url prefix
        self._uri = "http://%s:%s/%s" % (os.uname()[1], self.port, parser.get('app:main', 'url_prefix'))
コード例 #24
0
ファイル: develop.py プロジェクト: nsone/behave-fork
def update_gherkin(ctx, dry_run=False, verbose=False):
    """Update "gherkin-languages.json" file from cucumber-repo.

    Downloads the current "gherkin-languages.json", regenerates "i18n.py"
    from it and — unless nothing changed or dry-run mode is active —
    installs the regenerated module as "behave/i18n.py".
    """
    with cd("etc/gherkin"):
        # -- BACKUP-FILE:
        languages_file = Path("gherkin-languages.json")
        languages_file.copy("gherkin-languages.json.SAVED")

        print('Downloading "gherkin-languages.json" from github:cucumber ...')
        response = requests.get(GHERKIN_LANGUAGES_URL)
        assert response.ok
        print('Download finished: OK (size={0})'.format(len(response.content)))
        with open(languages_file, "wb") as outfile:
            outfile.write(response.content)

        print('Generating "i18n.py" ...')
        ctx.run("./convert_gherkin-languages.py")

        # -- DIFF: Returns normally w/ non-zero exitcode => NEEDS: warn=True
        result = ctx.run("diff i18n.py ../../behave/i18n.py",
                         warn=True, hide=True)
        languages_have_changed = not result.ok
        if languages_have_changed and verbose:
            # -- SHOW DIFF:
            print(result.stdout)

        if not languages_have_changed:
            print("NO_CHANGED: gherkin-languages.json")
        elif not dry_run:
            print("Updating behave/i18n.py ...")
            Path("i18n.py").move("../../behave/i18n.py")
コード例 #25
0
    def run_one(
        self,
        idf: Union[Path, eppy_IDF, str],
        epw_file: Path,
        backup_strategy: str = "on_error",
        backup_dir: Path = "./backup",
        simulation_name: Optional[str] = None,
        custom_process: Optional[Callable[[Simulation], None]] = None,
        version_mismatch_action: str = "raise",
    ) -> Simulation:
        """Run an EnergyPlus simulation with the provided idf and weather file.

        The IDF can be either a filename or an eppy IDF
        object.

        This function is process safe (as opposite as the one available in `eppy`).

        Arguments:
            idf {Union[Path, eppy_IDF, str]} -- idf file as filename or eppy IDF object.
            epw_file {Path} -- Weather file emplacement.

        Keyword Arguments:
            backup_strategy {str} -- when to save the files generated by e+
                (either"always", "on_error" or None) (default: {"on_error"})
            backup_dir {Path} -- where to save the files generated by e+
                (default: {"./backup"})
            simulation_name {str, optional} -- The simulation name. A random will be
                generated if not provided.
            custom_process {Callable[[Simulation], None], optional} -- overwrite the
                simulation post - process. Used to customize how the EnergyPlus files
                are treated after the simulation, but before cleaning the folder.
            version_mismatch_action {str} -- should be either ["raise", "warn",
                "ignore"] (default: {"raise"})

        Returns:
            Simulation -- the simulation object

        Raises:
            ValueError: if backup_strategy is not one of the allowed values.
        """
        if simulation_name is None:
            simulation_name = generate_slug()

        # Validate eagerly, before any files are created.
        if backup_strategy not in ["on_error", "always", None]:
            raise ValueError(
                "`backup_strategy` argument should be either 'on_error', 'always'"
                " or None.")
        backup_dir = Path(backup_dir)

        # Each run gets a private scratch folder, which is what makes this
        # method process-safe.
        with tempdir(prefix="energyplus_run_", dir=self.temp_dir) as td:
            if isinstance(idf, eppy_IDF):
                # eppy IDF object: serialize it to a file EnergyPlus can read.
                idf = idf.idfstr()
                idf_file = td / "eppy_idf.idf"
                with open(idf_file, "w") as idf_descriptor:
                    idf_descriptor.write(idf)
            else:
                idf_file = idf
                # Only filename-based IDFs are version-checked here.
                if version_mismatch_action in ["raise", "warn"]:
                    self.check_version_compat(
                        idf_file,
                        version_mismatch_action=version_mismatch_action)
            idf_file, epw_file = (Path(f).abspath()
                                  for f in (idf_file, epw_file))
            # NOTE(review): "with td:" presumably chdirs into the scratch
            # folder for the duration (path.py context-manager behavior) —
            # confirm.
            with td:
                logger.debug((idf_file, epw_file, td))
                # Bring the input files into the run folder (the serialized
                # eppy IDF is already there).
                if idf_file not in td.files():
                    idf_file.copy(td)
                epw_file.copy(td)
                sim = Simulation(
                    simulation_name,
                    self.eplus_bin,
                    idf_file,
                    epw_file,
                    self.idd_file,
                    working_dir=td,
                    post_process=custom_process,
                )
                try:
                    sim.run()
                except (ProcessExecutionError, KeyboardInterrupt):
                    # Preserve the e+ artifacts on failure (incl. Ctrl-C),
                    # then let the error propagate.
                    if backup_strategy == "on_error":
                        sim.backup(backup_dir)
                    raise
                finally:
                    if backup_strategy == "always":
                        sim.backup(backup_dir)

        return sim
コード例 #26
0
def post(request, obj):
    """Apply tag and thumbnail edits from a POST request to a gallery object.

    Recognized payload keys (form-encoded POST, or a JSON body under the
    top-level "body" key): "tags" (comma-separated tag ids or names),
    "reset-thumbnail" (truthy to clear the custom thumbnail), and "crop"
    (box coordinates used to cut a thumbnail from the source image).  An
    uploaded file under "file" in request.FILES replaces the thumbnail.

    Returns:
        JsonResponse wrapping a Result payload; res.value carries the
        updated object's JSON representation on success.
    """
    try:
        # Prefer form-encoded data; fall back to a JSON body under "body".
        data = request.POST or json.loads(request.body)["body"]
    except RawPostDataException:
        # request.body is no longer readable once the form data stream
        # has been consumed; use the parsed POST dict instead.
        data = request.POST
    tags = data.get("tags", "").split(",")
    resetthumbnail = data.get("reset-thumbnail", False)
    crop = data.get("crop")
    res = Result()

    for tag in tags:
        try:
            # Numeric entries are primary keys of existing tags.
            t = Tag.objects.get(pk=int(tag))
        except ValueError:
            # Non-numeric entries are tag names: create on demand and
            # report newly created tags back to the client.
            t, created = Tag.objects.get_or_create(name=tag)
            if created:
                res.append(t.json())
        obj.tags.add(t)

    # Any thumbnail-affecting change first removes the old thumbnail file.
    if obj.custom_thumbnail and (crop or request.FILES or resetthumbnail):
        try:
            os.unlink(getRoot() / obj.custom_thumbnail.name)
        except OSError:
            # Best-effort cleanup: the file may already be gone.
            pass

    if crop:
        # -- Cut a thumbnail out of the object's source image.
        box = [int(_) for _ in crop]
        # -- Handle thumbnail upload
        source = Path(obj.source.name)
        # Timestamped file name avoids clobbering earlier thumbnails.
        relativedest = obj.getPath(True) / "{:.0f}{}".format(
            time.time(), source.ext
        )
        dest = getRoot() / relativedest
        source = getRoot() / source
        if not dest.parent.exists():
            dest.parent.makedirs()
        source.copy(dest)
        obj.custom_thumbnail = relativedest

        image = pilImage.open(dest)

        # Crop from center
        image = image.crop(box)
        image.load()
        # Resize
        image.thumbnail(
            (FROG_THUMB_SIZE, FROG_THUMB_SIZE), pilImage.ANTIALIAS
        )
        image.save(dest)

        obj.save()

    if request.FILES:
        # -- Handle thumbnail upload
        f = request.FILES.get("file")
        relativedest = obj.getPath(True) / f.name
        dest = getRoot() / relativedest
        handle_uploaded_file(dest, f)
        obj.custom_thumbnail = relativedest

        try:
            # PSD files need the psd_tools loader; PIL handles the rest.
            if dest.ext == ".psd":
                image = psd_tools.PSDLoad(dest).as_PIL()
            else:
                image = pilImage.open(dest)
        except IOError as err:
            # NOTE(review): err is captured but unused; the generic message
            # below is returned for any unreadable/unsupported image.
            res.isError = True
            res.message = "{} is not a supported thumbnail image type".format(
                f.name
            )
            return JsonResponse(res.asDict())

        box, width, height = cropBox(*image.size)
        # Resize
        image.thumbnail((width, height), pilImage.ANTIALIAS)
        # Crop from center
        # Recompute the crop box because thumbnail() changed image.size.
        box = cropBox(*image.size)[0]
        image.crop(box).save(dest)

        obj.save()

    if resetthumbnail:
        obj.custom_thumbnail = None
        obj.save()

    res.value = obj.json()

    return JsonResponse(res.asDict())
コード例 #27
0
ファイル: job.py プロジェクト: mfiers/kea3
    def get_template(self):
        """Find, copy and load the template.

        Resolves ``self.template`` in order: a directory that must contain
        exactly one ``k3/<name>/template.k3``; a bare template name looked
        up in the local ``k3/`` folder and then in the configured default
        template directory; or a direct path to a ``*.k3`` file.  Sets
        ``self.name``, ensures ``self.template_file`` exists, and records
        the name in ``self.ctx``.  Exits the process on any error.
        """
        # Expand "~" so users can point at templates in their home dir.
        if '~' in self.template:
            self.template = str(Path(self.template).expanduser())

        if Path(self.template).isdir():
            if self.transient:
                lg.error("must specify a template name or file, not a dir")
                lg.error("when in transient mode")
                exit(-1)

            k3dir = Path(self.template) / 'k3'
            subdirs = k3dir.dirs() if k3dir.exists() else []
            if len(subdirs) == 0:
                lg.warning("No template found")
                exit(-1)
            if len(subdirs) > 1:
                lg.error("Multiple templates found: %s",
                         ", ".join([x.basename() for x in subdirs]))
                lg.error("Please specify one")
                # Fix: this is an error path, so exit non-zero like the
                # other failure branches (was exit(), i.e. status 0).
                exit(-1)
            self.name = subdirs[0].basename()
            lg.info('template "%s" found in  %s', self.name, self.template)

            if not self.template == '.':
                # not pointing at the current folder
                # copy the template (& argument file)
                template_file = Path(self.template) / 'k3' / \
                    self.name / 'template.k3'

                if template_file != self.template_file:
                    template_file.copy(self.template_file)

        elif not self.template.endswith('.k3') and \
                re.match(r'[A-Za-z_]\w*', self.template):
            # Fix: raw string for the regex -- '\w' in a plain string is an
            # invalid escape (SyntaxWarning on modern Python).
            # A bare identifier: look in ./k3/ first, then the default dir.
            if (Path('k3') / self.template / 'template.k3').exists():
                if self.transient:
                    lg.error("Not possible to use k3/template")
                    lg.error("when in transient mode")
                    exit(-1)

                # the template is present
                self.name = self.template
            else:
                template_file = Path(self.app.conf['default_template_dir'])\
                    .expanduser() / ('%s.k3' % self.template)
                if not template_file.exists():
                    lg.error("Cannot find template")
                    exit(-1)
                self.name = self.template
                self.retrieve_template_file(template_file)

        elif Path(self.template).exists() and self.template.endswith('.k3'):
            # template points to a file - get it
            lg.debug("Found template file: %s", self.template)
            template_file = Path(self.template)
            self.name = template_file.basename().replace('.k3', '')
            self.retrieve_template_file(template_file)
        else:
            raise NotImplementedError("Need other source for templates: %s",
                                      self.template)

        lg.debug("Template name is: %s", self.name)
        if not self.template_file.exists():
            lg.error("No valid template found in %s", self.workdir)
            exit(-1)

        lg.debug("Found template: %s", self.name)

        self.ctx['template']['name'] = self.name
コード例 #28
0
def test_sources():
    """Build an sdist and stage the tarball for the redfish-client tests.

    Runs ``python setup.py sdist``, parses the distribution base name from
    the "removing '<name>'" cleanup line in the build output, then copies
    the resulting tarball next to the redfish-client test fixtures.
    """
    output = subprocess.check_output(["python", "setup.py", "sdist"])
    # setup.py prints "removing '<pkg-version>'" while cleaning its build
    # directory; that token doubles as the distribution base name.
    search = re.search(r"removing '(\S+)'", str(output))
    # Fix: fail with a clear message instead of an opaque AttributeError
    # on search.group() when the build output format changes.
    assert search is not None, "package name not found in sdist output"
    filename = Path('dist/' + search.group(1) + '.tar.gz')
    filename.copy('redfish-client/tests/python-redfish.src.tar.gz')
    assert Path('redfish-client/tests/python-redfish.src.tar.gz').isfile()
コード例 #29
0
def test_sources():
    """Build the source distribution and stage it for redfish-client tests."""
    target = 'redfish-client/tests/python-redfish.src.tar.gz'
    build_log = subprocess.check_output(["python", "setup.py", "sdist"])
    # The "removing '<name>'" cleanup line carries the distribution name.
    match = re.search(r"removing '(\S+)'", str(build_log))
    tarball = Path('dist/' + match.group(1) + '.tar.gz')
    tarball.copy(target)
    assert Path(target).isfile()
コード例 #30
0
ファイル: job.py プロジェクト: mfiers/kea3
    def get_template(self):
        """Find, copy and load the template.

        Resolves ``self.template`` in order: a directory that must contain
        exactly one ``k3/<name>/template.k3``; a bare template name looked
        up in the local ``k3/`` folder and then in the configured default
        template directory; or a direct path to a ``*.k3`` file.  Sets
        ``self.name``, ensures ``self.template_file`` exists, and records
        the name in ``self.ctx``.  Exits the process on any error.
        """
        # Expand "~" so users can point at templates in their home dir.
        if '~' in self.template:
            self.template = str(Path(self.template).expanduser())

        if Path(self.template).isdir():
            if self.transient:
                lg.error("must specify a template name or file, not a dir")
                lg.error("when in transient mode")
                exit(-1)

            k3dir = Path(self.template) / 'k3'
            subdirs = k3dir.dirs() if k3dir.exists() else []
            if len(subdirs) == 0:
                lg.warning("No template found")
                exit(-1)
            if len(subdirs) > 1:
                lg.error("Multiple templates found: %s",
                         ", ".join([x.basename() for x in subdirs]))
                lg.error("Please specify one")
                # Fix: this is an error path, so exit non-zero like the
                # other failure branches (was exit(), i.e. status 0).
                exit(-1)
            self.name = subdirs[0].basename()
            lg.info('template "%s" found in  %s', self.name, self.template)

            if not self.template == '.':
                # not pointing at the current folder
                # copy the template (& argument file)
                template_file = Path(self.template) / 'k3' / \
                    self.name / 'template.k3'

                if template_file != self.template_file:
                    template_file.copy(self.template_file)

        elif not self.template.endswith('.k3') and \
                re.match(r'[A-Za-z_]\w*', self.template):
            # Fix: raw string for the regex -- '\w' in a plain string is an
            # invalid escape (SyntaxWarning on modern Python).
            # A bare identifier: look in ./k3/ first, then the default dir.
            if (Path('k3') / self.template / 'template.k3').exists():
                if self.transient:
                    lg.error("Not possible to use k3/template")
                    lg.error("when in transient mode")
                    exit(-1)

                # the template is present
                self.name = self.template
            else:
                template_file = Path(self.app.conf['default_template_dir'])\
                    .expanduser() / ('%s.k3' % self.template)
                if not template_file.exists():
                    lg.error("Cannot find template")
                    exit(-1)
                self.name = self.template
                self.retrieve_template_file(template_file)

        elif Path(self.template).exists() and self.template.endswith('.k3'):
            # template points to a file - get it
            lg.debug("Found template file: %s", self.template)
            template_file = Path(self.template)
            self.name = template_file.basename().replace('.k3', '')
            self.retrieve_template_file(template_file)
        else:
            raise NotImplementedError("Need other source for templates: %s",
                                      self.template)

        lg.debug("Template name is: %s", self.name)
        if not self.template_file.exists():
            lg.error("No valid template found in %s", self.workdir)
            exit(-1)

        lg.debug("Found template: %s", self.name)

        self.ctx['template']['name'] = self.name
コード例 #31
0
ファイル: piece.py プロジェクト: krashman/Frog
def post(request, obj):
    """Apply tag and thumbnail edits from a POST request to a gallery object.

    Recognized payload keys (form-encoded POST, or a JSON body under the
    top-level 'body' key): 'tags' (comma-separated tag ids or names),
    'reset-thumbnail' (truthy to clear the custom thumbnail), and 'crop'
    (box coordinates used to cut a thumbnail from the source image).  An
    uploaded file under 'file' in request.FILES replaces the thumbnail.

    Returns:
        JsonResponse wrapping a Result payload; res.value carries the
        updated object's JSON representation.
    """
    try:
        # Prefer form-encoded data; fall back to a JSON body under 'body'.
        data = request.POST or json.loads(request.body)['body']
    except RawPostDataException:
        # request.body is no longer readable once the form data stream
        # has been consumed; use the parsed POST dict instead.
        data = request.POST
    tags = data.get('tags', '').split(',')
    resetthumbnail = data.get('reset-thumbnail', False)
    crop = data.get('crop')
    res = Result()

    for tag in tags:
        try:
            # Numeric entries are primary keys of existing tags.
            t = Tag.objects.get(pk=int(tag))
        except ValueError:
            # Non-numeric entries are tag names: create on demand and
            # report newly created tags back to the client.
            t, created = Tag.objects.get_or_create(name=tag)
            if created:
                res.append(t.json())
        obj.tags.add(t)

    # Any thumbnail-affecting change first removes the old thumbnail file.
    if obj.custom_thumbnail and (crop or request.FILES or resetthumbnail):
        try:
            os.unlink(getRoot() / obj.custom_thumbnail.name)
        except OSError:
            # Best-effort cleanup: the file may already be gone.
            pass

    if crop:
        # -- Cut a thumbnail out of the object's source image.
        box = [int(_) for _ in crop]
        # -- Handle thumbnail upload
        source = Path(obj.source.name)
        # Timestamped name in the source's directory avoids clobbering.
        relativedest = source.parent / '{:.0f}{}'.format(
            time.time(), source.ext)
        dest = getRoot() / relativedest
        source = getRoot() / source
        source.copy(dest)
        obj.custom_thumbnail = relativedest

        image = pilImage.open(dest)

        # Crop from center
        image = image.crop(box)
        image.load()
        # Resize
        size = abs(box[2] - box[0])
        image.thumbnail((FROG_THUMB_SIZE, FROG_THUMB_SIZE), pilImage.ANTIALIAS)
        # NOTE(review): resizing back up to the crop width after thumbnail()
        # looks redundant/lossy -- confirm the intended final dimensions.
        image.resize((size, size)).save(dest)

        obj.save()

    if request.FILES:
        # -- Handle thumbnail upload
        f = request.FILES.get('file')
        relativedest = Path(obj.source.name).parent / f.name
        dest = getRoot() / relativedest
        handle_uploaded_file(dest, f)
        obj.custom_thumbnail = relativedest

        # NOTE(review): no IOError handling here -- an unsupported upload
        # raises instead of returning an error Result; confirm intent.
        image = pilImage.open(dest)
        # Adapt image.size to an object with .width/.height attributes,
        # presumably the interface cropBox expects here -- verify.
        sizeinterface = namedtuple('sizeinterface', 'width,height')
        size = sizeinterface(*image.size)
        box, width, height = cropBox(size)
        # Resize
        image.thumbnail((width, height), pilImage.ANTIALIAS)
        # Crop from center
        image.crop(box).save(dest)

        obj.save()

    if resetthumbnail:
        obj.custom_thumbnail = None
        obj.save()

    res.value = obj.json()

    return JsonResponse(res.asDict())