Example #1
def _setxattr_inner(func, path, name, value):
    if isinstance(path, str):
        path = os.fsencode(path)
    name = os.fsencode(name)
    value = value and os.fsencode(value)
    size = len(value) if value else 0
    _check(func(path, name, value, size), path, detect_buffer_too_small=False)
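
For reference, every example on this page leans on the same two guarantees of os.fsencode(): str arguments are encoded with the filesystem encoding (using the surrogateescape error handler on POSIX), and bytes arguments pass through unchanged. A minimal standalone sketch:

import os

print(os.fsencode('data/café.txt'))    # b'data/caf\xc3\xa9.txt' under a UTF-8 locale
print(os.fsencode(b'already-bytes'))   # bytes are returned unchanged
print(os.fsdecode(os.fsencode('round-trip')))  # 'round-trip'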
Example #2
def tester(fn, wantResult):
    fn = fn.replace("\\", "\\\\")
    gotResult = eval(fn)
    if wantResult != gotResult:
        raise TestFailed("%s should return: %s but returned: %s" \
              %(str(fn), str(wantResult), str(gotResult)))

    # then with bytes
    fn = fn.replace("('", "(b'")
    fn = fn.replace('("', '(b"')
    fn = fn.replace("['", "[b'")
    fn = fn.replace('["', '[b"')
    fn = fn.replace(", '", ", b'")
    fn = fn.replace(', "', ', b"')
    fn = os.fsencode(fn).decode('latin1')
    fn = fn.encode('ascii', 'backslashreplace').decode('ascii')
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DeprecationWarning)
        gotResult = eval(fn)
    if isinstance(wantResult, str):
        wantResult = os.fsencode(wantResult)
    elif isinstance(wantResult, tuple):
        wantResult = tuple(os.fsencode(r) for r in wantResult)

    gotResult = eval(fn)
    if wantResult != gotResult:
        raise TestFailed("%s should return: %s but returned: %s" \
              %(str(fn), str(wantResult), repr(gotResult)))
Example #3
File: rot.py Project: Ark444/PoCz
    def __init__(self, shift, input_file = None, output_file = None,
                 bf=False, alpha=True, incremental='', decremental='',
                 increment_val=1, decrement_val=1):
        """
        initializes a Shifter
        """
        self._shift = int(shift) if shift != None else None
        self._infile = input_file
        self._outfile = output_file
        self._bf = bf
        self._alpha = alpha
        if self._infile != None:
            try:
                self._in = open(self._infile, "rb")
            except IOError as e:
                raise e
        else:
            self._in = sys.stdin

        if self._outfile != None:
            try:
                self._out = open(self._outfile, "wb")
            except IOError as e:
                raise e
        else:
            self._out = sys.stdout

        self._inc = os.fsencode(incremental if incremental != None else '')
        self._dec = os.fsencode(decremental if decremental != None else '')
        self._inc_v = int(increment_val, 0) if increment_val != None else 1
        self._dec_v = int(decrement_val, 0) if decrement_val != None else 1
Example #4
File: parser.py Project: tristan/nuus
def parse_articles(articles):
    """From the list of articles generate a map of releases->files->segments"""
    releases = dict()
    for n, article in articles:
        try:
            subject = os.fsencode(article['subject']).decode('latin-1')
        except:
            print("ERROR PARSING UTF-8:", article['subject'].encode('utf-8'))
            subject = article['subject']
        for pat in PATTERNS:
            rlsmatch = pat.match(subject)
            if rlsmatch:
                rlsmatch = rlsmatch.groupdict()
                date = parse_date(article['date'])
                size = int(article[':bytes'])
                poster = os.fsencode(article['from']).decode('latin-1')
                rlskey = (rlsmatch.get('release_name'), poster)
                if rlskey in releases:
                    release = releases.get(rlskey)
                else:
                    release = dict(date=date, total_parts=rlsmatch.get('release_total'), files={})
                    releases[rlskey] = release
                # get lowest date for the release
                if date < release['date']:
                    release['date'] = date
                if rlsmatch.get('file_name') in release['files']:
                    file = release['files'].get(rlsmatch.get('file_name'))
                else:
                    file = dict(segments=[], total_parts=rlsmatch.get('file_total'))
                    release['files'][rlsmatch.get('file_name')] = file
                file['segments'].append(dict(number=rlsmatch.get('file_part'),size=size,article_id=os.fsencode(article['message-id'][1:-1]).decode('latin-1')))
                break
    return releases
Example #5
File: uuid.py Project: adrian17/cpython
def _arp_getnode():
    """Get the hardware address on Unix by running arp."""
    import os, socket
    try:
        ip_addr = socket.gethostbyname(socket.gethostname())
    except OSError:
        return None

    # Try getting the MAC addr from arp based on our IP address (Solaris).
    mac = _find_mac('arp', '-an', [os.fsencode(ip_addr)], lambda i: -1)
    if mac:
        return mac

    # This works on OpenBSD
    mac = _find_mac('arp', '-an', [os.fsencode(ip_addr)], lambda i: i+1)
    if mac:
        return mac

    # This works on Linux, FreeBSD and NetBSD
    mac = _find_mac('arp', '-an', [os.fsencode('(%s)' % ip_addr)],
                    lambda i: i+2)
    # Return None instead of 0.
    if mac:
        return mac
    return None
Example #6
    def readdir(self, fh, offset):
        """A basic implementation `llfuse.Operations.readdir` method."""
        logging.debug('readdir %s %s', fh, offset)
        directory = self.getattr(fh)
        parent = directory.parent

        if parent is None:
            # For the ROOT_INODE the parent is itself; this seems to work
            # for some weird reason.
            parent = directory

        special_entries = []
        if directory.inode > offset:
            special_entries.append((os.fsencode('.'),
                                    directory,
                                    directory.inode))
        if parent and parent.inode > offset:
            special_entries.append((os.fsencode('..'),
                                    parent,
                                    parent.inode))

        entries = [(os.fsencode(c.name), c, c.inode) for c in
                   directory.children.values() if c.inode > offset]
        entries += special_entries
        entries = sorted(entries, key=lambda x: x[2])

        return entries
Example #7
 def test_non_ascii_name(self):
     if PY3 and is_coverage_running():
         try:
             os.fsencode(u"t\u00e9st.html")
         except UnicodeEncodeError:
             self.skipTest("coverage tries to access unencodable filename")
     loader = DictLoader({u"t\u00e9st.html": "hello"})
     self.assertEqual(loader.load(u"t\u00e9st.html").generate(), b"hello")
Example #8
 def read_mdl(self, filepath, options):
     """Parse a single mdl file."""
     with open(os.fsencode(filepath), 'rb') as f:
         if bytes(f.read(1)) == b'\x00':
             self.read_binary_mdl(options)
             return
     with open(os.fsencode(filepath), 'r') as f:
         self.read_ascii_mdl(f.read(), options)
Example #9
 def read_wkm(self, filepath, wkm_type, options):
     """Parse a single walkmesh file."""
     with open(os.fsencode(filepath), 'rb') as f:
         if bytes(f.read(1)) == b'\x00':
             self.read_binary_wkm(options)
             return
     with open(os.fsencode(filepath), 'r') as f:
         self.read_ascii_wkm(f.read(), wkm_type, options)
Example #10
File: xattr.py Project: maugier/shatag
    def read(self):

        try:
            self.ts = xattr.getxattr(fsencode(self.filename), 'user.shatag.ts').decode('ascii')
            self.shatag = xattr.getxattr(fsencode(self.filename), 'user.shatag.sha256').decode('ascii')
        except IOError as e:
            if e.errno != errno.ENODATA:  # no tag present
                raise e
Example #11
def path_to_bytes(path):
    """
    Return a `path` string as a byte string using the filesystem encoding.
    """
    if isinstance(path, bytes):
        return path
    if TRACE: logger_debug('path_to_bytes:' , repr(fsencode(path)))
    return fsencode(path)
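
A quick usage sketch (hypothetical paths): os.fsencode() already returns bytes arguments unchanged, so the isinstance() check above only short-circuits the call.

path_to_bytes('docs/readme.txt')   # -> b'docs/readme.txt' under a UTF-8 locale
path_to_bytes(b'docs/readme.txt')  # -> b'docs/readme.txt', returned as-is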
Example #12
def ssh_execute(ssh, cmd, process_input=None, addl_env=None, check_exit_code=True, binary=False):
    """Run a command through SSH.

    .. versionchanged:: 1.9
       Added *binary* optional parameter.
    """
    sanitized_cmd = strutils.mask_password(cmd)
    LOG.debug("Running cmd (SSH): %s", sanitized_cmd)
    if addl_env:
        raise InvalidArgumentError(_("Environment not supported over SSH"))

    if process_input:
        # This is (probably) fixable if we need it...
        raise InvalidArgumentError(_("process_input not supported over SSH"))

    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
    channel = stdout_stream.channel

    # NOTE(justinsb): This seems suspicious...
    # ...other SSH clients have buffering issues with this approach
    stdout = stdout_stream.read()
    stderr = stderr_stream.read()

    stdin_stream.close()

    exit_status = channel.recv_exit_status()

    if six.PY3:
        # Decode from the locale using the surrogateescape error handler
        # (decoding cannot fail). Decode even if binary is True because
        # mask_password() requires Unicode on Python 3
        stdout = os.fsdecode(stdout)
        stderr = os.fsdecode(stderr)
    stdout = strutils.mask_password(stdout)
    stderr = strutils.mask_password(stderr)

    # exit_status == -1 if no exit code was returned
    if exit_status != -1:
        LOG.debug("Result was %s" % exit_status)
        if check_exit_code and exit_status != 0:
            raise ProcessExecutionError(exit_code=exit_status, stdout=stdout, stderr=stderr, cmd=sanitized_cmd)

    if binary:
        if six.PY2:
            # On Python 2, stdout is a bytes string if mask_password() failed
            # to decode it, or a Unicode string otherwise. Encode to the
            # default encoding (ASCII) because mask_password() decodes from
            # the same encoding.
            if isinstance(stdout, unicode):
                stdout = stdout.encode()
            if isinstance(stderr, unicode):
                stderr = stderr.encode()
        else:
            # fsencode() is the reverse operation of fsdecode()
            stdout = os.fsencode(stdout)
            stderr = os.fsencode(stderr)

    return (stdout, stderr)
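
The fsdecode()/fsencode() pairing used above is lossless even for output that is not valid in the locale encoding, because surrogateescape maps undecodable bytes to lone surrogates and back. A small sketch of that round trip (assuming a UTF-8 POSIX locale):

import os

raw = b'\xff partial output'       # not valid UTF-8
text = os.fsdecode(raw)            # '\udcff partial output'
assert os.fsencode(text) == raw    # round-trips exactly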
Example #13
    def test_unicode_name(self):
        try:
            os.fsencode("\xf1")
        except UnicodeError:
            raise unittest.SkipTest("Unable to encode for this platform.")

        self.file1 += "\xf1"
        self.file2 = self.file1 + "2"
        self._test_link(self.file1, self.file2)
Example #14
File: fuse.py Project: FabioPedretti/borg
    def process_leaf(self, name, item, parent, prefix, is_dir, item_inode):
        def file_version(item, path):
            if 'chunks' in item:
                file_id = blake2b_128(path)
                current_version, previous_id = self.versions_index.get(file_id, (0, None))

                chunk_ids = [chunk_id for chunk_id, _, _ in item.chunks]
                contents_id = blake2b_128(b''.join(chunk_ids))

                if contents_id != previous_id:
                    current_version += 1
                    self.versions_index[file_id] = current_version, contents_id

                return current_version

        def make_versioned_name(name, version, add_dir=False):
            if add_dir:
                # add intermediate directory with same name as filename
                path_fname = name.rsplit(b'/', 1)
                name += b'/' + path_fname[-1]
            # keep original extension at end to avoid confusing tools
            name, ext = os.path.splitext(name)
            version_enc = os.fsencode('.%05d' % version)
            return name + version_enc + ext

        if self.versions and not is_dir:
            parent = self.process_inner(name, parent)
            path = os.fsencode(item.path)
            version = file_version(item, path)
            if version is not None:
                # regular file, with contents - maybe a hardlink master
                name = make_versioned_name(name, version)
                self.file_versions[path] = version

        path = item.path
        del item.path  # save some space
        if 'source' in item and hardlinkable(item.mode):
            # a hardlink, no contents, <source> is the hardlink master
            source = os.fsencode(item.source)
            if self.versions:
                # adjust source name with version
                version = self.file_versions[source]
                source = make_versioned_name(source, version, add_dir=True)
                name = make_versioned_name(name, version)
            try:
                inode = self._find_inode(source, prefix)
            except KeyError:
                logger.warning('Skipping broken hard link: %s -> %s', path, item.source)
                return
            item = self.cache.get(inode)
            item.nlink = item.get('nlink', 1) + 1
            self.items[inode] = item
        else:
            inode = item_inode
        self.parent[inode] = parent
        if name:
            self.contents[parent][name] = inode
Example #15
    def test_stat(self):
        self.assertTrue(posix.stat(support.TESTFN))
        self.assertTrue(posix.stat(os.fsencode(support.TESTFN)))
        self.assertTrue(posix.stat(bytearray(os.fsencode(support.TESTFN))))

        self.assertRaisesRegex(TypeError, "can't specify None for path argument", posix.stat, None)
        self.assertRaisesRegex(TypeError, "should be string, bytes or integer, not", posix.stat, list(support.TESTFN))
        self.assertRaisesRegex(
            TypeError, "should be string, bytes or integer, not", posix.stat, list(os.fsencode(support.TESTFN))
        )
Example #16
    def test_expandvars_nonascii(self):
        if self.pathmodule.__name__ == 'macpath':
            self.skipTest('macpath.expandvars is a stub')
        expandvars = self.pathmodule.expandvars
        def check(value, expected):
            self.assertEqual(expandvars(value), expected)
        with support.EnvironmentVarGuard() as env:
            env.clear()
            nonascii = support.FS_NONASCII
            env['spam'] = nonascii
            env[nonascii] = 'ham' + nonascii
            check(nonascii, nonascii)
            check('$spam bar', '%s bar' % nonascii)
            check('${spam}bar', '%sbar' % nonascii)
            check('${%s}bar' % nonascii, 'ham%sbar' % nonascii)
            check('$bar%s bar' % nonascii, '$bar%s bar' % nonascii)
            check('$spam}bar', '%s}bar' % nonascii)

            check(os.fsencode(nonascii), os.fsencode(nonascii))
            check(b'$spam bar', os.fsencode('%s bar' % nonascii))
            check(b'${spam}bar', os.fsencode('%sbar' % nonascii))
            check(os.fsencode('${%s}bar' % nonascii),
                  os.fsencode('ham%sbar' % nonascii))
            check(os.fsencode('$bar%s bar' % nonascii),
                  os.fsencode('$bar%s bar' % nonascii))
            check(b'$spam}bar', os.fsencode('%s}bar' % nonascii))
Example #17
def process_all():
    '''
    Processes all mdl files in the input directory
    '''
    # Load an empty file
    try:
        bpy.ops.wm.open_mainfile(filepath=empty_path,
                                 load_ui=False)
    except:
        log('ERROR: Unable to load empty.blend')
        return

    for filename in os.listdir(input_path):
        if filename.endswith('.mdl'):
            log('Processing ' + filename)

            # Import mdl file
            mdlfile = os.fsencode(os.path.join(input_path, filename))
            try:
                bpy.ops.nvb.mdlimport(filepath=mdlfile,
                                      importGeometry=True,
                                      importWalkmesh=False,
                                      importSmoothGroups=False,
                                      importAnim=False,
                                      materialMode='MUL',
                                      textureSearch=False,
                                      minimapMode=True,
                                      minimapSkipFade=skip_fading)
            except RuntimeError as ex:
                error_report = '\n'.join(ex.args)
                log('    ERROR: ', error_report)

            log('    Import successful')
            # Get mdl root
            mdlRoot = None
            for obj in bpy.data.objects:
                if neverblender.nvb.nvb_utils.isRootDummy(obj, neverblender.nvb.nvb_def.Dummytype.MDLROOT):
                    mdlRoot = obj
                    break

            # Render minimap
            if mdlRoot:
                filename = 'mi_' + mdlRoot.name
                scene = bpy.context.scene
                scene.render.filepath = os.fsencode(os.path.join(output_path, filename))
                mdlRoot.nvb.minimapsize = minimap_size
                mdlRoot.nvb.minimapzoffset = z_offset
                neverblender.nvb.nvb_utils.setupMinimapRender(mdlRoot, scene, light_color, 'SKY')
                bpy.ops.render.render(animation=False, write_still=True)
                log('    Rendered to ' + filename)
            else:
                log('    ERROR: No rootdummy')

            # Load empty blend for next mdl file
            bpy.ops.wm.open_mainfile(filepath=empty_path, load_ui=False)
Example #18
    def test_expandvars_nonascii(self):
        if self.pathmodule.__name__ == "macpath":
            self.skipTest("macpath.expandvars is a stub")
        expandvars = self.pathmodule.expandvars

        def check(value, expected):
            self.assertEqual(expandvars(value), expected)

        with support.EnvironmentVarGuard() as env:
            env.clear()
            nonascii = support.FS_NONASCII
            env["spam"] = nonascii
            env[nonascii] = "ham" + nonascii
            check(nonascii, nonascii)
            check("$spam bar", "%s bar" % nonascii)
            check("${spam}bar", "%sbar" % nonascii)
            check("${%s}bar" % nonascii, "ham%sbar" % nonascii)
            check("$bar%s bar" % nonascii, "$bar%s bar" % nonascii)
            check("$spam}bar", "%s}bar" % nonascii)

            check(os.fsencode(nonascii), os.fsencode(nonascii))
            check(b"$spam bar", os.fsencode("%s bar" % nonascii))
            check(b"${spam}bar", os.fsencode("%sbar" % nonascii))
            check(os.fsencode("${%s}bar" % nonascii), os.fsencode("ham%sbar" % nonascii))
            check(os.fsencode("$bar%s bar" % nonascii), os.fsencode("$bar%s bar" % nonascii))
            check(b"$spam}bar", os.fsencode("%s}bar" % nonascii))
Example #19
def loadMdl(operator, context,
            filepath='',
            importGeometry=True,
            importWalkmesh=True,
            importSmoothGroups=True,
            importAnimations=True,
            importSupermodel=False,
            materialMode='SIN',
            textureSearch=False,
            minimapMode=False,
            minimapSkipFade=False):
    """Called from blender ui."""
    options = nvb_def.ImportOptions()
    options.importGeometry = importGeometry
    options.importSmoothGroups = importSmoothGroups
    options.importAnimations = importAnimations
    options.importSupermodel = importSupermodel

    options.materialMode = materialMode
    options.texturePath = os.path.dirname(filepath)
    options.textureSearch = textureSearch

    options.minimapMode = minimapMode
    options.minimapSkipFade = minimapSkipFade

    (mdlPath, mdlFilename) = os.path.split(filepath)
    options.mdlname = os.path.splitext(mdlFilename)[0]
    options.scene = bpy.context.scene

    with open(os.fsencode(filepath), 'r') as mdlfile:
        print('Neverblender: Loading ' + options.mdlname + ' ...')
        mdl = nvb_mdl.Mdl()
        asciiMdl = mdlfile.read()
        mdl.loadAscii(asciiMdl, options)
        print('Neverblender: ... done')
        # Try to load walkmeshes ... pwk (placeable) and dwk (door)
        if importWalkmesh:
            for wkmtype in ['pwk', 'dwk']:
                wkmFilename = options.mdlname + '.' + wkmtype
                wkmPath = os.fsencode(os.path.join(mdlPath, wkmFilename))
                try:
                    wkmFile = open(wkmPath, 'r')
                except IOError:
                    pass  # There is no such file
                else:
                    print('Neverblender: Loading ' + wkmFilename)
                    asciiWkm = wkmFile.read()
                    mdl.loadAsciiWalkmesh(asciiWkm, options)
                    wkmFile.close()
                    print('Neverblender: ... done')
        print('Neverblender: Creating objects ...')
        mdl.create(options)
        print('Neverblender: ... done')

    return {'FINISHED'}
Example #20
 def test_warn_explicit_non_ascii_filename(self):
     with original_warnings.catch_warnings(record=True, module=self.module) as w:
         self.module.resetwarnings()
         self.module.filterwarnings("always", category=UserWarning)
         for filename in ("nonascii\xe9\u20ac", "surrogate\udc80"):
             try:
                 os.fsencode(filename)
             except UnicodeEncodeError:
                 continue
             self.module.warn_explicit("text", UserWarning, filename, 1)
             self.assertEqual(w[-1].filename, filename)
Example #21
File: xattr.py Project: Herover/borg
 def setxattr(path, name, value, *, follow_symlinks=True):
     name = os.fsencode(name)
     value = value and os.fsencode(value)
     if isinstance(path, str):
         path = os.fsencode(path)
     if isinstance(path, int):
         func = libc.fsetxattr
     elif follow_symlinks:
         func = libc.setxattr
     else:
         func = libc.lsetxattr
     _check(func(path, name, value, len(value) if value else 0, 0), path)
Example #22
File: xattr.py Project: Herover/borg
 def setxattr(path, name, value, *, follow_symlinks=True):
     name = os.fsencode(name)
     value = value and os.fsencode(value)
     func = libc.setxattr
     flags = 0
     if isinstance(path, str):
         path = os.fsencode(path)
     if isinstance(path, int):
         func = libc.fsetxattr
     elif not follow_symlinks:
         flags = XATTR_NOFOLLOW
     _check(func(path, name, value, len(value) if value else 0, 0, flags), path)
Example #23
File: xattr.py Project: Herover/borg
 def setxattr(path, name, value, *, follow_symlinks=True):
     name = os.fsencode(name)
     value = value and os.fsencode(value)
     if isinstance(path, str):
         path = os.fsencode(path)
     if isinstance(path, int):
         func = libc.extattr_set_fd
     elif follow_symlinks:
         func = libc.extattr_set_file
     else:
         func = libc.extattr_set_link
     _check(func(path, EXTATTR_NAMESPACE_USER, name, value, len(value) if value else 0), path)
Example #24
File: test_glob.py Project: 0jpq0/kbengine
 def glob(self, *parts):
     if len(parts) == 1:
         pattern = parts[0]
     else:
         pattern = os.path.join(*parts)
     p = os.path.join(self.tempdir, pattern)
     res = glob.glob(p)
     self.assertEqual(list(glob.iglob(p)), res)
     bres = [os.fsencode(x) for x in res]
     self.assertEqual(glob.glob(os.fsencode(p)), bres)
     self.assertEqual(list(glob.iglob(os.fsencode(p))), bres)
     return res
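Example #25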
    def testBytesPath(self):
        filename = support.TESTFN + ".zip"
        self.addCleanup(support.unlink, filename)
        with ZipFile(filename, "w") as z:
            zinfo = ZipInfo(TESTMOD + ".py", time.localtime(NOW))
            zinfo.compress_type = self.compression
            z.writestr(zinfo, test_src)

        zipimport.zipimporter(filename)
        zipimport.zipimporter(os.fsencode(filename))
        zipimport.zipimporter(bytearray(os.fsencode(filename)))
        zipimport.zipimporter(memoryview(os.fsencode(filename)))
Example #26
File: test_fileio.py Project: Qointum/pypy
 def test_open_non_existent_unicode(self):
     import _io
     import os
     path = os.path.join(self.tmpdir, '_pypy-日本')
     try:
         os.fsencode(path)
     except UnicodeEncodeError:
         import sys
         skip("can't run this test with %s as filesystem encoding" %
              sys.getfilesystemencoding())
     exc = raises(IOError, _io.FileIO, path)
     expected = "[Errno 2] No such file or directory: %r" % path
     assert str(exc.value) == expected
Example #27
def _getxattr_inner(func, path, name):
    if isinstance(path, str):
        path = os.fsencode(path)
    name = os.fsencode(name)
    size = len(buffer)
    while True:
        buf = buffer.get(size)
        try:
            n = _check(func(path, name, buf, size), path, detect_buffer_too_small=True)
        except BufferTooSmallError:
            size *= 2
        else:
            return n, buf.raw
Example #28
def saveMdl(operator, context,
            filepath='',
            exportAnimations=True,
            exportWalkmesh=True,
            exportSmoothGroups=True,
            applyModifiers=True):
    """Called from blender ui."""
    options = nvb_def.ExportOptions()
    options.exportAnim = exportAnimations
    options.exportWalkmesh = exportWalkmesh
    options.exportSmoothGroups = exportSmoothGroups
    options.applyModifiers = applyModifiers

    if bpy.ops.object.mode_set.poll():
        bpy.ops.object.mode_set(mode='OBJECT')

    rootDummy = nvb_utils.findRootDummy(bpy.context.object)
    if rootDummy:
        print('Neverblender: Exporting ' + rootDummy.name + ' ...')
        options.mdlname = rootDummy.name
        options.classification = rootDummy.nvb.classification
        asciiLines = []
        nvb_mdl.Mdl.generateAscii(rootDummy, asciiLines, options)
        with open(os.fsencode(filepath), 'w') as f:
            f.write('\n'.join(asciiLines))
        print('Neverblender: ... done')
        if options.exportWalkmesh:
            print('Neverblender: Exporting walkmesh ...')
            # Get walkmesh type
            wkmtype = '.pwk'
            if rootDummy.nvb.classification == \
                    nvb_def.Classification.DOOR:
                wkmtype = '.dwk'
            elif rootDummy.nvb.classification == \
                    nvb_def.Classification.TILE:
                wkmtype = '.wok'
            print('Neverblender: ... detected type: ' + wkmtype + ' ...')
            # Only write to file if there is actually any data
            asciiLines = []
            nvb_mdl.Mdl.generateAsciiWalkmesh(rootDummy, asciiLines, options)
            if asciiLines:
                wkmPath = os.path.splitext(filepath)[0] + wkmtype
                with open(os.fsencode(wkmPath), 'w') as f:
                    f.write('\n'.join(asciiLines))
                print('Neverblender: ... done')
            else:
                print('Neverblender: ... no nodes found')
    else:
        return {'CANCELLED'}

    return {'FINISHED'}
Example #29
 def test_glob_bytes_directory_with_trailing_slash(self):
     # Same as test_glob_directory_with_trailing_slash, but with a
     # bytes argument.
     res = glob.glob(os.fsencode(os.path.join(self.tempdir, 'Z*Z') + os.sep))
     self.assertEqual(res, [])
     res = glob.glob(os.fsencode(os.path.join(self.tempdir, 'ZZZ') + os.sep))
     self.assertEqual(res, [])
     res = glob.glob(os.fsencode(os.path.join(self.tempdir, 'aa*') + os.sep))
     self.assertEqual(len(res), 2)
     # either of these results are reasonable
     self.assertIn({os.fsdecode(x) for x in res}, [
                   {self.norm('aaa'), self.norm('aab')},
                   {self.norm('aaa') + os.sep, self.norm('aab') + os.sep},
                   ])
Example #30
File: cmd_fsck.py Project: Grumbel/dirtool
def check_utf8_path(path, recursive, verbose):
    if recursive and os.path.isdir(path):
        for root, dirs, files in os.walk(path):
            if len(dirs) > 1000:
                sys.stdout.buffer.write(b"MANY DIRS FAIL ")
                sys.stdout.buffer.write(os.fsencode(root))
                sys.stdout.buffer.write(b"\n")
            if len(files) > 1000:
                sys.stdout.buffer.write(b"MANY FILES FAIL ")
                sys.stdout.buffer.write(os.fsencode(root))
                sys.stdout.buffer.write(b"\n")
            for f in files:
                check_file(os.path.join(root, f), verbose)
    else:
        check_file(path, verbose)
Example #31
import csv
import os
import numpy as np
import scipy.stats
from shared import crunch_number


# https://stackoverflow.com/a/15034143/6397601
def mean_confidence_interval(data, confidence=0.95):
    a = 1.0 * np.array(data)
    n = len(a)
    m, se = np.mean(a), scipy.stats.sem(a)
    h = se * scipy.stats.t.ppf((1 + confidence) / 2., n - 1)
    return m, m - h, m + h
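
A worked check of the arithmetic (hypothetical data):

# mean_confidence_interval([1.0, 2.0, 3.0])
# m = 2.0, se = 1/sqrt(3) ≈ 0.577, t.ppf(0.975, df=2) ≈ 4.303
# h ≈ 0.577 * 4.303 ≈ 2.484  ->  returns approximately (2.0, -0.484, 4.484)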


directory = os.fsencode("variations")
files = []
for file in os.listdir(directory):
    filename = os.fsdecode(file)
    if filename == ".DS_Store":
        continue
    files.append(filename)

files.sort()

keys = [
    'wall', 'user', 'system', 'read', 'write', 'bitrate', 'avg_memory',
    'max_memory'
]
rows = [['name'] + keys + [
    'bitrate gamma', 'bitrate t_u', 'bitrate t_v', 'cpu gamma', 'cpu t_u',
Example #32
import arrow
import collections
import xml.etree.ElementTree as ET
import os
import sys
import pandas
import csv

csv_writer = csv.writer(sys.stdout)
csv_writer.writerow(('day', 'count', 'mean', 'std', 'min', '25%', '50%', '75%',
                     '90%', '99%', 'max'))

for directory in sys.argv[1:]:

    vehicle_activity_deltas = []

    for file in os.listdir(os.fsencode(directory)):
        filename = os.path.join(directory, os.fsdecode(file))
        if not filename.endswith(".xml"):
            continue

        # Reciept timestamp, from filename
        basename = os.path.basename(filename)
        timestamp = basename[:10]
        our_timestamp = arrow.get(timestamp)

        tree = ET.parse(filename)
        root = tree.getroot()

        service_delivery = root.find(
            '{http://www.siri.org.uk/siri}ServiceDelivery')
        if service_delivery is None:
Example #33
File: _api.py Project: Julian/libraw-cffi
 def from_path(cls, path):
     raw = cls()
     raw.open_file(os.fsencode(path))
     return raw
Example #34
def decode_arg(value):
    return os.fsencode(value).decode('iso-8859-1')
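Example #35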
    if args.resume is None and args.soft_resume is not None:
        if not os.path.exists(args.soft_resume):
            print(
                'WARNING: resume path ({}) was not found, starting from scratch'
                .format(args.soft_resume))
        else:
            args.resume = args.soft_resume
    if args.resume is not None and (config is None or 'override' not in config
                                    or not config['override']):
        if args.config is not None:
            logger.warning('Warning: --config overridden by --resume')
        config = torch.load(args.resume)['config']
    elif args.config is not None and args.resume is None:
        path = os.path.join(config['trainer']['save_dir'], config['name'])
        if os.path.exists(path):
            directory = os.fsencode(path)
            for file in os.listdir(directory):
                filename = os.fsdecode(file)
                if filename != 'config.json':
                    assert False, "Path {} already used!".format(path)

    assert config is not None

    if args.gpu is not None:
        config['gpu'] = args.gpu
        print('override gpu to ' + str(config['gpu']))

    if config['cuda']:
        with torch.cuda.device(config['gpu']):
            main(config, args.resume)
    else:
Example #36
def gettempprefixb():
    """The default prefix for temporary directories as bytes."""
    return _os.fsencode(gettempprefix())
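
Since gettempprefixb() is simply the bytes form of gettempprefix(), the relationship can be checked directly (a small sketch using the standard tempfile module):

import os
import tempfile

assert tempfile.gettempprefixb() == os.fsencode(tempfile.gettempprefix())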
Example #37
			chance_3 = data_cell.try_3/(data_cell.try_2 + data_cell.try_3)

			shot_cell['chance_2'] = chance_2
			shot_cell['chance_3'] = chance_3
			shot_cell['acc_2'] = data_cell.acc_2
			shot_cell['acc_3'] = data_cell.acc_3

	return shot_chances

################################################################################
################################################################################
################################################################################
################################################################################

evenDir = 'data/splitted/even/'
evenDirCod = os.fsencode(evenDir)
evenResultsFile = open('results/evenGames.txt', 'w')

oneDir = 'data/splitted/one_sided/'
oneDirCod = os.fsencode(oneDir)
oneResultsFile = open('results/oneGames.txt', 'w')

dirs = [evenDir, oneDir]

data = build_data_dict()

ignore_files = [
	'201710200PHI.txt',
	'201710240ORL.txt',
	'201710250PHI.txt',
	'201710280MEM.txt',
Example #38
NC = "\x1b[0m"  # No Color
Blink = "\x1b[5m"

# file = "/home/tegwyn/ultrasonic_classifier/my_audio/noctula_Oct_31_2019_01.wav"
file = "/home/tegwyn/ultrasonic_classifier/my_audio/11_oct_2019_01.wav"  # 110 Mb
file2 = "/home/tegwyn/ultrasonic_classifier/Final_result.txt"
file3 = "/home/tegwyn/ultrasonic_classifier/Final_result_copy.txt"
file4 = '/home/tegwyn/ultrasonic_classifier/helpers/combo_01.txt'
file5 = "/home/tegwyn/ultrasonic_classifier/helpers/toggled_02.txt"  # text or spectigram or graph.
folder1 = "/home/tegwyn/ultrasonic_classifier/"
folder2 = "/home/tegwyn/ultrasonic_classifier/processed_audio/"
folder3 = "/home/tegwyn/ultrasonic_classifier/unknown_bat_audio/"
folder4 = "/home/tegwyn/ultrasonic_classifier/my_audio"
folder5 = "/home/tegwyn/ultrasonic_classifier/temp/"
folder6 = "/home/tegwyn/ultrasonic_classifier/helpers/"
directory = os.fsencode("/home/tegwyn/ultrasonic_classifier/my_audio")

# Define command and arguments
command = 'Rscript'
command_python = "python3"
command_bash = "bash"

path_to_create_spectogram = "/home/tegwyn/ultrasonic_classifier/create_spectogram.py"
# path_to_create_spectogram = "/home/tegwyn/ultrasonic_classifier/create_spectogram_batch_process.py"       # Use this for experiments with spectrographs.
path_to_create_graph = "/home/tegwyn/ultrasonic_classifier/create_barchart.py"
path_to_battery = "/home/tegwyn/ultrasonic_classifier/battery_info.sh"  # Not used anymore.

n = 1
line = [1, 2, 3, 4, 5]
newText = ""
Example #39
def gorhammap():
    import folium
    import os
    import json

    m = folium.Map(location=[43.6819, -70.4490],
                   tiles='OpenStreetMap',
                   prefer_canvas=True,
                   zoom_control=False,
                   min_zoom=10)

    residentiallots = "/home/carter/PycharmProjects/campusParkingMap/flaskr/Resources/GeoJSON/Gorham/Residential Lots"
    commuterlots = "/home/carter/PycharmProjects/campusParkingMap/flaskr/Resources/GeoJSON/Gorham/Commuter Lots"
    stafflots = "/home/carter/PycharmProjects/campusParkingMap/flaskr/Resources/GeoJSON/Gorham/Staff Lots"

    commuterstyle = {
        'fillColor': 'red',
        'color': 'black',
        'fillOpacity': '0.5'
    }
    residentialstyle = {
        'fillColor': 'green',
        'color': 'black',
        'fillOpacity': '0.6'
    }
    staffstyle = {'fillColor': 'blue', 'color': 'black', 'fillOpacity': '0.6'}
    closedstyle = {
        'fillColor': 'black',
        'color': 'black',
        'fillOpacity': '0.6'
    }

    gorhambancounter = 0

    # Residential Lots
    resdirectory = os.fsencode(residentiallots)
    for file in os.listdir(resdirectory):
        filename = os.fsdecode(file)
        if filename.endswith(".json5"):
            with open(residentiallots + "/" + filename) as f:
                data = json.load(f)
                if data['features'][0]['properties']['lot_status'] == 'closed':
                    folium.GeoJson(os.path.join(residentiallots, filename),
                                   style_function=lambda x: closedstyle,
                                   highlight_function=lambda x: {
                                       'weight': 3,
                                       'color': 'black',
                                       'fillColor': 'grey'
                                   },
                                   popup=folium.GeoJsonPopup(
                                       fields=[
                                           'lot_name', 'lot_type',
                                           'lot_status', 'handicapped_spots'
                                       ],
                                       aliases=[
                                           'Lot Name:', 'Lot Type:',
                                           'Lot Status:', 'Handicapped Spots:'
                                       ],
                                       class_name="respopup")).add_to(m)
                    gorhambancounter += 1
                else:
                    folium.GeoJson(os.path.join(residentiallots, filename),
                                   style_function=lambda x: residentialstyle,
                                   highlight_function=lambda x: {
                                       'weight': 3,
                                       'color': 'green',
                                       'fillColor': 'grey'
                                   },
                                   popup=folium.GeoJsonPopup(
                                       fields=[
                                           'lot_name', 'lot_type',
                                           'lot_status', 'handicapped_spots'
                                       ],
                                       aliases=[
                                           'Lot Name:', 'Lot Type:',
                                           'Lot Status:', 'Handicapped Spots:'
                                       ],
                                       class_name="respopup")).add_to(m)
            continue

    # Commuter Lots
    commdirectory = os.fsencode(commuterlots)
    for file in os.listdir(commdirectory):
        filename = os.fsdecode(file)
        if filename.endswith(".json5"):
            with open(commuterlots + "/" + filename) as f:
                data = json.load(f)
                if data['features'][0]['properties']['lot_status'] == 'closed':
                    folium.GeoJson(os.path.join(commuterlots, filename),
                                   style_function=lambda x: closedstyle,
                                   highlight_function=lambda x: {
                                       'weight': 3,
                                       'color': 'black',
                                       'fillColor': 'grey'
                                   },
                                   popup=folium.GeoJsonPopup(
                                       fields=[
                                           'lot_name', 'lot_type',
                                           'lot_status', 'handicapped_spots'
                                       ],
                                       aliases=[
                                           'Lot Name:', 'Lot Type:',
                                           'Lot Status:', 'Handicapped Spots:'
                                       ],
                                       class_name="commpopup")).add_to(m)
                    gorhambancounter += 1
                else:
                    folium.GeoJson(os.path.join(commuterlots, filename),
                                   style_function=lambda x: commuterstyle,
                                   highlight_function=lambda x: {
                                       'weight': 3,
                                       'color': 'red',
                                       'fillColor': 'grey'
                                   },
                                   popup=folium.GeoJsonPopup(
                                       fields=[
                                           'lot_name', 'lot_type',
                                           'lot_status', 'handicapped_spots'
                                       ],
                                       aliases=[
                                           'Lot Name:', 'Lot Type:',
                                           'Lot Status:', 'Handicapped Spots:'
                                       ],
                                       class_name="commpopup")).add_to(m)
            continue

    # Staff Lots
    staffdirectory = os.fsencode(stafflots)
    for file in os.listdir(staffdirectory):
        filename = os.fsdecode(file)
        if filename.endswith(".json5"):
            with open(stafflots + "/" + filename) as f:
                data = json.load(f)
                if data['features'][0]['properties']['lot_status'] == 'closed':
                    folium.GeoJson(os.path.join(stafflots, filename),
                                   style_function=lambda x: closedstyle,
                                   highlight_function=lambda x: {
                                       'weight': 3,
                                       'color': 'black',
                                       'fillColor': 'grey'
                                   },
                                   popup=folium.GeoJsonPopup(
                                       fields=[
                                           'lot_name', 'lot_type',
                                           'lot_status', 'handicapped_spots'
                                       ],
                                       aliases=[
                                           'Lot Name:', 'Lot Type:',
                                           'Lot Status:', 'Handicapped Spots:'
                                       ],
                                       class_name="staffpopup")).add_to(m)
                    gorhambancounter += 1
                else:
                    folium.GeoJson(os.path.join(stafflots, filename),
                                   style_function=lambda x: staffstyle,
                                   highlight_function=lambda x: {
                                       'weight': 3,
                                       'color': 'blue',
                                       'fillColor': 'grey'
                                   },
                                   popup=folium.GeoJsonPopup(
                                       fields=[
                                           'lot_name', 'lot_type',
                                           'lot_status', 'handicapped_spots'
                                       ],
                                       aliases=[
                                           'Lot Name:', 'Lot Type:',
                                           'Lot Status:', 'Handicapped Spots:'
                                       ],
                                       class_name="staffpopup")).add_to(m)
        continue

    m.fit_bounds([[43.6785, -70.4521], [43.6856, -70.4455]])
    m.save(
        "/home/carter/PycharmProjects/campusParkingMap/flaskr/static/GorhamMap.html"
    )
    return gorhambancounter
Example #40
 def _load_pending_archive(self, inode):
     # Check if this is an archive we need to load
     archive = self.pending_archives.pop(inode, None)
     if archive:
         self.process_archive(archive, [os.fsencode(archive.name)])
Example #41
 def _find_inode(self, path, prefix=[]):
     segments = prefix + os.fsencode(os.path.normpath(path)).split(b'/')
     inode = 1
     for segment in segments:
         inode = self.contents[inode][segment]
     return inode
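
To see what the b'/'-split walk above consumes, here is a sketch of the segment computation for a hypothetical path:

import os

os.fsencode(os.path.normpath('home/user/./file')).split(b'/')
# -> [b'home', b'user', b'file']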
Example #42
 def readlink(self, inode, ctx=None):
     item = self.get_item(inode)
     return os.fsencode(item[b'source'])
Example #43
def main():
    '''
        Main method which iterates over all inputs and calls `solve` on each.
        The student should modify `solve` to return their solution and modify
        the portion which writes it to a file to make sure their output is
        formatted correctly.
    '''
    # ws = [1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    ws = [
        0.02, 0.04, 0.02, 0.02, 0.0, 0.0, 0.02, 0.08, 0.02, 0.0, 0.02, 0.06,
        0.0, 0.14, 0.08, 0.02, 0.0, 0.02, 0.02, 0.0, 0.0, 0.06, 0.02, 0.0,
        0.02, 0.0, 0.0, 0.02, 0.02, 0.06, 0.02, 0.02, 0.0, 0.02, 0.02, 0.08,
        0.12, 0.02, 0.0, 0.12, 0.18, 0.14, 0.06, 0.0, 0.02, 0.0, 0.0, 0.02,
        0.0, 0.0, 0.12, 0.02, 0.02, 0.0, 0.14, 0.0, 0.02, 0.0, 0.0, 0.02, 0.08,
        0.04, 0.0, 0.0, 0.02, 0.02, 0.08, 0.0, 0.06, 0.08, 0.08, 0.04, 0.04,
        0.02, 0.06, 0.0, 0.0, 0.0, 0.04, 0.02, 0.02, 0.0, 0.02, 0.06, 0.08,
        0.0, 0.04, 0.0, 0.0, 0.02, 0.04, 0.02, 0.02, 0.0, 0.0, 0.04, 0.08,
        0.04, 0.02, 0.02, 0.02, 0.0, 0.0, 0.0, 0.02, 0.02, 0.08, 0.0, 0.02,
        0.0, 0.04, 0.02, 0.02, 0.02, 0.06, 0.02, 0.0, 0.0, 0.02, 0.02, 0.02,
        0.02, 0.0, 0.02, 0.02, 0.0, 0.0, 0.02, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.0, 0.02, 0.02, 0.02, 0.0, 0.02,
        0.0, 0.02, 0.02, 0.0, 0.0, 0.02, 0.02, 0.0, 0.02, 0.0, 0.0, 0.0, 0.08,
        0.02, 0.08, 0.0, 0.02, 0.02, 0.08, 0.06, 0.0, 0.08, 0.02, 0.02, 0.0,
        0.02, 0.0, 0.02, 0.04, 0.04, 0.02, 0.02, 0.0, 0.06, 0.04, 0.08, 0.02,
        0.0, 0.0, 0.0, 0.04, 0.02, 0.02, 0.06, 0.04, 0.0, 0.02, 0.0, 0.02,
        0.04, 0.02, 0.04, 0.02, 0.08, 0.08, 0.06, 0.02, 0.0, 0.0, 0.02, 0.02,
        0.0, 0.0, 0.02, 0.02, 0.04, 0.02, 0.02, 0.04, 0.08, 0.0, 0.0, 0.0,
        0.02, 0.04, 0.0, 0.02, 0.04, 0.02, 0.0, 0.06, 0.02, 0.08, 0.02, 0.0,
        0.06, 0.02, 0.0, 0.04, 0.06, 0.02, 0.0, 0.02, 0.0, 0.02, 0.02, 0.08,
        0.02, 0.02, 0.0, 0.08, 0.06, 0.0, 0.06, 0.06, 0.0, 0.02, 0.08, 0.02,
        0.02, 0.0, 0.0, 0.04, 0.0, 0.02, 0.08, 0.0, 0.0, 0.02, 0.02, 0.06,
        0.08, 0.02, 0.0, 0.02, 0.02, 0.06, 0.0, 0.0, 0.02, 0.0, 0.02, 0.02,
        0.06, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.04, 0.0, 0.0, 0.02,
        0.0, 0.04, 0.16, 0.02, 0.06, 0.02, 0.14, 0.02, 0.0, 0.02, 0.02, 0.08,
        0.0, 0.0, 0.1, 0.0, 0.12, 0.02, 0.06
    ]
    size_categories = ["all_large"]
    if not os.path.isdir(path_to_outputs):
        os.mkdir(path_to_outputs)

    print('=========reading data=======')
    if os.path.exists('./all_large.pkl'):
        cache = open('./all_large.pkl', 'rb')
        work = pickle.load(cache)
        cache.close()

    else:
        work = []
        count = 0
        for size in size_categories:
            category_path = path_to_inputs + "/" + size
            output_category_path = path_to_outputs + "/" + size
            category_dir = os.fsencode(category_path)

            if not os.path.isdir(output_category_path):
                os.mkdir(output_category_path)

            for input_folder in os.listdir(category_dir):
                input_name = os.fsdecode(input_folder)
                print('reading', category_path, input_name)
                pack = parse_input(category_path + "/" + input_name)
                work.extend([(pack, ws[count], category_path,
                              output_category_path, input_name)])
                count += 1

        cache = open('all_large.pkl', 'wb')
        pickle.dump(work, cache)
        cache.close()
        print('saving done')

    print('============done reading')

    # work = itertools.permutations(work)
    start_time = time.time()
    p = Pool()
    towrite = p.map(threadwork, work)
    elapsed_time = time.time() - start_time

    print('saving result ====================')

    avg = 0.0

    # newws = []
    for f, sol, score in towrite:
        avg += score
        # newws.append(bestw)
        print(f, score)
        output_file = open(f, "w")
        output_file.write(sol)
        output_file.close()
    # print(newws)
    print(elapsed_time, 's for computing')
    print('avg score', avg / len(towrite))
Example #44
        sys.stdout.flush()

if __name__ == '__main__' :

	if len(sys.argv) < 4 :
		print("usage : \n")
		print(str(sys.argv[0]) + " <input directory> <output directory> <file_name_start_counter>\n")
		sys.exit(0)

	from_dir = str(sys.argv[1])
	to_dir = str(sys.argv[2])
	start_count = int( str(sys.argv[3]) )

	# os.mkdir(to_dir)

	directory = os.fsencode(from_dir)
	file_num = start_count
    
	# Convert images to jpg and place into image_jpg directory
	num_files = 0
	for file in os.listdir(directory) :
	    num_files += 1 

	for file in os.listdir(directory):
	    filename = os.fsdecode(file)
	    
	    copyfile(from_dir + "\\" + filename, to_dir + "\\" + str(file_num))

	    new_fname = to_dir + "\\" + "image_" + str(file_num).zfill(4) + ".jpg"

	    Printer(str(file_num + 1) + " / " + str(num_files))
Example #45
    if not os.path.isdir(folder):
        os.makedirs(folder)
    pickle.dump(files, open(folder + name + '.encd', 'wb'), pickle.HIGHEST_PROTOCOL)

if __name__ == "__main__":
    
    hiddev = DmdDeviceHID()

    import PIL.Image

    try:
        #hiddev.reset()
        images=[]
         
        directory_in_str = "X:\\Gianmaria\\DMD\\Patterns\\DMD_patterns\\bin_sinusoidal_pattern\\"
        directory = os.fsencode(directory_in_str)
        i = 0
        for file in sorted(os.listdir(directory)):
            filename = os.fsdecode(file)
            if filename.endswith("320.png"):
                """
                Here is necessary to speciofy the array as boolean, since, otherwise, python
                sees an 8 bit image, adn, when we merge images, there are overflow issues, besides
                of issues in adding patterns. With boolean, I think, Python automatically transforms
                the image in a "boolean" image.
                """
                arr = numpy.asarray(PIL.Image.open(directory_in_str+filename), dtype=bool)
                images.append(arr)
                i += 1
            if i > 1:
                break
Example #46
import os
import io
import torch
import pickle
import tarfile
import time
from zipfile import ZipFile
from util import fetch_and_cache

FORCE_DOWNLOAD = False
start_time = time.time()
print("Starting setup... This might take a while.")
print("Making directories...", end=" ")
if not os.path.isdir("./data_zipped"):
    os.mkdir(os.fsencode('./data_zipped'))
if not os.path.isdir("./data"):
    os.mkdir(os.fsencode('./data'))
if not os.path.isdir("./pickles/word_embeddings"):
    os.mkdir(os.fsencode("./pickles/word_embeddings"))
if not os.path.isdir("./pickles/models"):
    os.mkdir(os.fsencode("./pickles/models"))
if not os.path.isdir("./pickles/nuswide_metadata"):
    os.mkdir(os.fsencode("./pickles/nuswide_metadata"))
print("Done!")

print("Downloading NUSWIDE_metadata...")
fetch_and_cache(
    data_url='http://dl.nextcenter.org/public/nuswide/NUS_WID_Tags.zip',
    file='tags.zip',
    data_dir='./data_zipped',
Example #47
 def test_listdir_bytes(self):
     # When listdir is called with a bytes object,
     # the returned strings are of type bytes.
     self.assertTrue(os.fsencode(support.TESTFN) in posix.listdir(b'.'))
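Example #48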
#Import required Image library
from PIL import Image
import os

directory = os.fsencode('/Code/Uni/P8-Accelerated_EnDP/Raw_images')

for file in os.listdir(directory):
    filename = os.fsdecode(file)
    if filename.endswith(".JPEG"):
        # Create an Image Object from an Image
        im = Image.open(f'/Code/Uni/P8-Accelerated_EnDP/Raw_images/{filename}')

        # Display actual image
        # im.show()

        # Make the new image half the width and half the height of the original image
        resized_im = im.resize((224, 224))

        # Display the resized imaged
        # resized_im.show()

        # Save the cropped image
        resized_im.save(
            f'/Code/Uni/P8-Accelerated_EnDP/Resized_images/resized_{filename}')
    else:
        pass
Example #49
def gettempdirb():
    """A bytes version of tempfile.gettempdir()."""
    return _os.fsencode(gettempdir())
Example #50
def LocalMain(config):

    if not input_json_str and not input_json_dir:
        logger.error("JSON input file is not provided")
        exit(1)

    if not output_json_file_name:
        logger.error("JSON output file is not provided")
        exit(1)

    if not server_uri:
        logger.error("Server URI is not provided")
        exit(1)

    logger.info('Execute work order')
    uri_client = GenericServiceClient(server_uri)
    response = None
    if input_json_dir:
        directory = os.fsencode(input_json_dir)
        files = os.listdir(directory)

        for file in sorted(files):
            logger.info(
                "------------------Input file name: %s ---------------\n",
                file.decode("utf-8"))
            input_json_str1 = helper.read_json_file(
                (directory.decode("utf-8") + file.decode("utf-8")))

            #----------------------------------------------------------------------------------

            # If the client request is WorkOrderSubmit, a requester payload signature is generated with the requester's private signing key.
            if "WorkOrderSubmit" in input_json_str1:
                session_iv = helper.generate_sessioniv()
                encrypted_session_key = helper.generate_encrypted_session_key(
                    session_iv, worker_obj.worker_encryption_key)
                input_json_obj = json.loads(input_json_str1)
                wo_id = None
                input_json_str1 = sig_obj.generate_client_signature(
                    input_json_str1, worker_obj, private_key, session_iv,
                    encrypted_session_key)
                if input_json_str1 is None:
                    logger.error(
                        "Request could not be signed hence rejecting this workorder submit request"
                    )
                    wo_id = input_json_obj["params"]["workOrderId"]
                    continue
            #----------------------------------------------------------------------------------

            # Update the worker ID
            if response:
                if "workerId" in input_json_str1:
                    #Retrieving the worker id from the "WorkerRetrieve" response and update the worker id information for the further json requests
                    if 'result' in response and 'ids' in response[
                            "result"].keys():
                        input_json_final = json.loads(input_json_str1)
                        input_json_final['params']['workerId'] = response[
                            'result']['ids'][0]
                        input_json_str1 = json.dumps(input_json_final)
                        logger.info(
                            "**********Worker details Updated with Worker ID*********\n%s\n",
                            response['result']['ids'][0])
            #-----------------------------------------------------------------------------------

            logger.info("*********Request Json********* \n%s\n",
                        input_json_str1)
            response = uri_client._postmsg(input_json_str1)
            logger.info("**********Received Response*********\n%s\n", response)
            #-----------------------------------------------------------------------------------

            #Worker details are loaded into Worker_Obj
            if "WorkerRetrieve" in input_json_str1 and 'result' in response:
                worker_obj.load_worker(response)
            #----------------------------------------------------------------------------------

            # Polling for the "WorkOrderGetResult" and break when you get the result
            while ('WorkOrderGetResult' in input_json_str1
                   and 'result' not in response):
                if response["error"]["code"] == 9:
                    break
                input_json_obj = json.loads(input_json_str1)
                if wo_id == input_json_obj["params"]["workOrderId"]:
                    logger.error(
                        "This work order request was rejected by client. Hence no result "
                    )
                    break
                response = uri_client._postmsg(input_json_str1)
                logger.info(" Received Response : %s, \n \n ", response)
                time.sleep(3)
            #----------------------------------------------------------------------------------

            #Verify the signature
            if ('WorkOrderGetResult' in input_json_str1):
                sig_bool = sig_obj.verify_signature(json.dumps(response),
                                                    worker_obj)
                try:
                    if sig_bool > 0:
                        logger.info('Signature Verified')
                    else:
                        logger.info('Signature Failed')
                except Exception:
                    logger.error(
                        "ERROR: Failed to analyze Signature Verification")
                helper.decrypted_response(json.dumps(response),
                                          encrypted_session_key)

            #----------------------------------------------------------------------------------
    else:
        logger.info('Input Request %s', input_json_str)
        response = uri_client._postmsg(input_json_str)
        logger.info("Received Response: %s\n", response)

    exit(0)
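Aside: the WorkOrderGetResult loop above is an instance of a generic poll-until-result pattern. The sketch below isolates it; post_json is a hypothetical stand-in for uri_client._postmsg, and the error-code-9 early exit simply mirrors the loop above.

import json
import time

def poll_for_result(post_json, request, max_attempts=20, delay=3):
    # post_json is a hypothetical callable: it posts a JSON string and
    # returns the decoded JSON-RPC response as a dict.
    for _ in range(max_attempts):
        response = post_json(json.dumps(request))
        if 'result' in response:
            return response
        # Mirror the example above: error code 9 means polling is pointless.
        if response.get('error', {}).get('code') == 9:
            return response
        time.sleep(delay)
    raise TimeoutError('no result after {} attempts'.format(max_attempts))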
Example #51
0
    def do_test_with_pip(self, system_site_packages):
        rmtree(self.env_dir)
        with EnvironmentVarGuard() as envvars:
            # pip's cross-version compatibility may trigger deprecation
            # warnings in current versions of Python. Ensure related
            # environment settings don't cause venv to fail.
            envvars["PYTHONWARNINGS"] = "e"
            # ensurepip is different enough from a normal pip invocation
            # that we want to ensure it ignores the normal pip environment
            # variable settings. We set PIP_NO_INSTALL here specifically
            # to check that ensurepip (and hence venv) ignores it.
            # See http://bugs.python.org/issue19734
            envvars["PIP_NO_INSTALL"] = "1"
            # Also check that we ignore the pip configuration file
            # See http://bugs.python.org/issue20053
            with tempfile.TemporaryDirectory() as home_dir:
                envvars["HOME"] = home_dir
                bad_config = "[global]\nno-install=1"
                # Write to both config file names on all platforms to reduce
                # cross-platform variation in test code behaviour
                win_location = ("pip", "pip.ini")
                posix_location = (".pip", "pip.conf")
                # Skips win_location due to http://bugs.python.org/issue20541
                for dirname, fname in (posix_location, ):
                    dirpath = os.path.join(home_dir, dirname)
                    os.mkdir(dirpath)
                    fpath = os.path.join(dirpath, fname)
                    with open(fpath, 'w') as f:
                        f.write(bad_config)

                # Actually run the create command with all that unhelpful
                # config in place to ensure we ignore it
                try:
                    self.run_with_capture(
                        venv.create,
                        self.env_dir,
                        system_site_packages=system_site_packages,
                        with_pip=True)
                except subprocess.CalledProcessError as exc:
                    # The output this produces can be a little hard to read,
                    # but at least it has all the details
                    details = exc.output.decode(errors="replace")
                    msg = "{}\n\n**Subprocess Output**\n{}"
                    self.fail(msg.format(exc, details))
        # Ensure pip is available in the virtual environment
        envpy = os.path.join(os.path.realpath(self.env_dir), self.bindir,
                             self.exe)
        # Ignore DeprecationWarning since pip code is not part of Python
        out, err = check_output([
            envpy, '-W', 'ignore::DeprecationWarning', '-I', '-m', 'pip',
            '--version'
        ])
        # We force everything to text, so unittest gives the detailed diff
        # if we get unexpected results
        err = err.decode("latin-1")  # Force to text, prevent decoding errors
        self.assertEqual(err, "")
        out = out.decode("latin-1")  # Force to text, prevent decoding errors
        expected_version = "pip {}".format(ensurepip.version())
        self.assertEqual(out[:len(expected_version)], expected_version)
        env_dir = os.fsencode(self.env_dir).decode("latin-1")
        self.assertIn(env_dir, out)

        # http://bugs.python.org/issue19728
        # Check the private uninstall command provided for the Windows
        # installers works (at least in a virtual environment)
        with EnvironmentVarGuard() as envvars:
            out, err = check_output([
                envpy, '-W', 'ignore::DeprecationWarning', '-I', '-m',
                'ensurepip._uninstall'
            ])
        # We force everything to text, so unittest gives the detailed diff
        # if we get unexpected results
        err = err.decode("latin-1")  # Force to text, prevent decoding errors
        # Ignore the warning:
        #   "The directory '$HOME/.cache/pip/http' or its parent directory
        #    is not owned by the current user and the cache has been disabled.
        #    Please check the permissions and owner of that directory. If
        #    executing pip with sudo, you may want sudo's -H flag."
        # where $HOME is replaced by the HOME environment variable.
        err = re.sub(
            "^The directory .* or its parent directory is not owned "
            "by the current user .*$",
            "",
            err,
            flags=re.MULTILINE)
        self.assertEqual(err.rstrip(), "")
        # Being fairly specific regarding the expected behaviour for the
        # initial bundling phase in Python 3.4. If the output changes in
        # future pip versions, this test can likely be relaxed further.
        out = out.decode("latin-1")  # Force to text, prevent decoding errors
        self.assertIn("Successfully uninstalled pip", out)
        self.assertIn("Successfully uninstalled setuptools", out)
        # Check pip is now gone from the virtual environment. This only
        # applies in the system_site_packages=False case, because in the
        # other case, pip may still be available in the system site-packages
        if not system_site_packages:
            self.assert_pip_not_installed()
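Stripped of the test harness, the behaviour this test exercises is compact: venv.create(..., with_pip=True) bootstraps pip via ensurepip, and the environment's own interpreter can then report its pip version. A minimal sketch, assuming a POSIX-style layout for the interpreter path:

import os
import subprocess
import sys
import tempfile
import venv

with tempfile.TemporaryDirectory() as env_dir:
    venv.create(env_dir, with_pip=True)
    # 'bin/python' is the POSIX layout; Windows uses Scripts\python.exe.
    bindir = 'Scripts' if sys.platform == 'win32' else 'bin'
    envpy = os.path.join(env_dir, bindir, 'python')
    out = subprocess.check_output([envpy, '-I', '-m', 'pip', '--version'])
    print(out.decode())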
Example #52
0
    # folder that contains all xvgs
    xvg_folder = sys.argv[1]

    # pulling rate
    velocity = float(sys.argv[2])
    
    # option to plot search work
    plot_search_work = bool(int(sys.argv[3]))

    # customize title
    if len(sys.argv) > 4:
        fig_title = sys.argv[4]
    else:
        fig_title = ""
    
    directory = os.fsencode(xvg_folder)

    # data structure that contains all runs
    work_runs = []
    
    # data structure that contains all forces
    force_runs = []
    
    
    for file in os.scandir(directory):
        filename = os.fsdecode(file)
        # only read .xvg files
        if filename.endswith(".xvg"):
            one_time, N, one_work, force = get_one_work(filename, velocity)
            work_runs.append(one_work)
            force_runs.append(force)
Example #53
0
            qt += r.tolist()
            qt.append(execute_query_dens(b, data))
            queries.append(qt)
        elif t == 'aggr':
            qt = q.tolist()
            qt += r.tolist()
            qt.append(execute_query_aggr(b, data))
            queries.append(qt)
        i += 1
    logger.debug("Generated {0} queries".format(len(queries)))
    return queries


if __name__ == '__main__':
    # Generate queries
    directory = os.fsencode('../input')
    for file in os.listdir(directory):
        qs = []
        filename = os.fsdecode(file)
        if not filename.endswith(".csv") and filename.startswith("data"):
            a = filename.split('_')
            t = a[1]
            dim = int(a[2].split('=')[1])
            multi = a[-1]
            # Skip if the query file has already been generated
            qf = '../input/queries/queries-uniform-{0}-multi_{1}-{2}'.format(
                dim, multi, t)
            if Path(qf).exists():
                print("Query file '{0}' already exists skipping ".format(qf))
                continue
            logger.debug('Loading file')
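Both of the previous examples iterate a directory through os.fsencode/os.fsdecode so that undecodable file names survive the round trip. Factored out, the shared pattern looks like this (iter_files is just an illustrative name):

import os

def iter_files(folder, suffix):
    # Encode the folder to bytes, list it, then decode each entry back;
    # this tolerates file names that are not valid in the locale encoding.
    directory = os.fsencode(folder)
    for entry in os.listdir(directory):
        filename = os.fsdecode(entry)
        if filename.endswith(suffix):
            yield filename

# e.g. for name in iter_files('../input', '.csv'): ...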
Example #54
0
File: appenv.py Project: sallner/batou
    def _prepare(self):
        # copy used requirements.txt into the target directory so we can use that
        # to check later
        # - when to clean up old versions? keep like one or two old revisions?
        # - enumerate the revisions and just copy the requirements.txt, check
        #   for ones that are clean or rebuild if necessary
        os.chdir(self.base)
        if not os.path.exists('requirements.lock'):
            print('Running unclean installation from requirements.txt')
            env_dir = os.path.join(self.appenv_dir, 'unclean')
            ensure_venv(env_dir)
            print('Ensuring unclean install ...')
            cmd('{env_dir}/bin/python -m pip install -r requirements.txt --upgrade'
                .format(env_dir=env_dir))
        else:
            hash_content = []
            with open("requirements.lock", "rb") as f:
                requirements = f.read()
            hash_content.append(os.fsencode(os.path.realpath(sys.executable)))
            hash_content.append(requirements)
            with open(__file__, "rb") as f:
                hash_content.append(f.read())
            env_hash = hashlib.new("sha256",
                                   b"".join(hash_content)).hexdigest()[:8]
            env_dir = os.path.join(self.appenv_dir, env_hash)

            whitelist = set(
                [env_dir, os.path.join(self.appenv_dir, "unclean")])
            for path in glob.glob(
                    "{appenv_dir}/*".format(appenv_dir=self.appenv_dir)):
                if path not in whitelist:
                    print(
                        "Removing expired path: {path} ...".format(path=path))
                    if not os.path.isdir(path):
                        os.unlink(path)
                    else:
                        shutil.rmtree(path)
            if os.path.exists(env_dir):
                # check whether the existing environment is OK, it might be nice
                # to rebuild in a separate place if necessary to avoid interruptions
                # to running services, but that isn't what we're using it for at the
                # moment
                try:
                    if not os.path.exists(
                            "{env_dir}/appenv.ready".format(env_dir=env_dir)):
                        raise Exception()
                except Exception:
                    print("Existing envdir not consistent, deleting")
                    cmd("rm -rf {env_dir}".format(env_dir=env_dir))

            if not os.path.exists(env_dir):
                ensure_venv(env_dir)

                with open(os.path.join(env_dir, "requirements.lock"),
                          "wb") as f:
                    f.write(requirements)

                print("Installing ...")
                cmd("{env_dir}/bin/python -m pip install --no-deps -r {env_dir}/requirements.lock"
                    .format(env_dir=env_dir))

                cmd("{env_dir}/bin/python -m pip check".format(
                    env_dir=env_dir))

                with open(os.path.join(env_dir, "appenv.ready"), "w") as f:
                    f.write("Ready or not, here I come, you can't hide\n")

        self.env_dir = env_dir
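The central trick in _prepare is naming the virtualenv directory after a content hash, so a changed interpreter or lock file automatically selects a fresh directory. A condensed sketch of just that piece (unlike the original, it skips hashing the script's own source):

import hashlib
import os
import sys

def env_fingerprint(lockfile='requirements.lock'):
    # Hash the real interpreter path plus the pinned requirements; if
    # either changes, the short digest (and the venv directory) changes.
    hasher = hashlib.sha256()
    hasher.update(os.fsencode(os.path.realpath(sys.executable)))
    with open(lockfile, 'rb') as f:
        hasher.update(f.read())
    return hasher.hexdigest()[:8]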
Example #55
0
    async def readlink(self, inode: INode, ctx: RequestContext) -> bytes:
        try:
            return os.fsencode(os.readlink(self.paths[inode]))
        except OSError as exc:
            raise FUSEError(exc.errno) from None
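The FUSE layer expects path data as raw bytes, hence the os.fsencode around os.readlink before the result is handed back. The same round trip in isolation (assumes a platform that permits creating symlinks):

import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    target = os.path.join(tmp, 'target')
    link = os.path.join(tmp, 'link')
    open(target, 'w').close()
    os.symlink(target, link)
    raw = os.fsencode(os.readlink(link))   # bytes, e.g. b'/tmp/.../target'
    print(raw)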
Example #56
0
if __name__ == '__main__':
    # Read configuration file
    with open('../config.yaml') as file:
        config = yaml.safe_load(file)

    available_gpu = torch.cuda.is_available()
    if available_gpu:
        print(f'GPU is available: {torch.cuda.get_device_name(torch.cuda.current_device())}')
        flair.device = torch.device('cuda')
    else:
        flair.device = torch.device('cpu')


    seed = config['seed']

    flair.set_seed(seed)
    torch.cuda.empty_cache()

    
    actual_path = os.getcwd()
    directory = os.fsencode(f"{config['data_folder']}/")

    for file in os.listdir(directory):
        entity_type = os.fsdecode(file)
        corpus = NERCorpus(config['data_folder'], entity_type).create_corpus()
        tag_dictionary = corpus.make_label_dictionary(label_type='ner')
        embeddings = Embeddings(config).create_embeddings()
        tagger = NERTagger(embeddings=embeddings, tag_dictionary=tag_dictionary, config=config).create_tagger()
        trainer = NERTrainer(corpus=corpus, tagger=tagger, entity_type=entity_type, config=config).train()
Example #57
0
for i in range(len(bhca)):
    BHCK_items.append('BHCA' + str(bhca[i]))

for i in range(len(bhcw)):
    BHCK_items.append('BHCW' + str(bhcw[i]))

# one long list
ItemList = np.concatenate(
    (RSSD_items, BHCK_items, BHCB_items, BHOD_items, BHCT_items, BHFN_items,
     BHDM_items, BHCA_items, BHCW_items))

# set directory
# FRY9_location = os.getcwd() + '/Data/FRY9/'
FRY9_location = 'Data/FRY9/'
directory = os.fsencode(FRY9_location)
"""
Filtering Data
"""
# initialize dataframe
GenDF = pd.DataFrame()

iterator = 1
# merge all data files in the folder
for file in os.listdir(directory):

    print('Iteration', iterator, ' out of', len(os.listdir(directory)))
    iterator = iterator + 1

    # initialize a temporary dataframe
    newDF = pd.DataFrame()
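The merge loop is cut off here, but its shape is conventional: read each report file into a temporary frame and concatenate. A compressed sketch, assuming the FR Y-9 files can be read as CSVs (the actual read call falls outside this excerpt):

import os
import pandas as pd

FRY9_location = 'Data/FRY9/'
frames = []
for entry in os.listdir(os.fsencode(FRY9_location)):
    filename = os.fsdecode(entry)
    # Hypothetical read step; the original's parser is not shown.
    frames.append(pd.read_csv(os.path.join(FRY9_location, filename)))
GenDF = pd.concat(frames, ignore_index=True)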
Example #58
0
    def __bytes__(self):
        """Return the bytes representation of the path.  This is only
        recommended to use under Unix."""
        return os.fsencode(str(self))
gene_file = args["gene"]
gene = " ".join(gene_file.split(".")[:1])

host_dict = {
    "Horse": "Equus_caballus",
    "horses": "Equus_caballus",
    "horse": "Equus_caballus",
    "Equus caballus": "Equus_caballus",
    "Equus_caballus": "Equus_caballus"
}

host_name = host_dict[host_folder]
host_fname = host_name.replace("_", " ")

#directory = os.fsencode(host_folder)
directory = os.fsencode(host_name)

gfile = os.fsencode(gene_file)

for files in os.listdir(directory):
    #print(files)
    if files == gfile:
        with open(gfile, "r") as f1:
            for line in f1:
                if line.startswith("EXPRESS"):
                    #print(line)
                    line = " ".join(line.split()[1:])
                    tissues = line.split("|")
                    tissues = [x.strip(' ') for x in tissues]
                    print("\nFound Gene", gene, "for", host_fname)
                    print("In", host_fname, "there are", len(tissues),
Example #60
0
def hdf5_loader(path: str, pattern: str = '_[A-Z][0-9]{2}_', suffix_data: str = '.h5',
                suffix_data_zipped: str = '.h5.zip', suffix_label: str = '_label.h5',
                gp_current: int = 0, gp_max: int = 0, normalize_enum: int = 1, n_jobs: int = 1,
                skip_predicted: bool = False, force_verbose: bool = False,
                load_labels: bool = True, load_samples: bool = True):
    '''Helper function which loads all datasets from the hdf5 files
    found at the specified path.

    # Arguments
        The pattern argument describes the well token in the file names
        (used to group files by well) and is passed on to the workers.
        The suffix arguments define the way the datasets are looked up:
        (training) data should always end in .h5 and corresponding labels
        should carry the same name and end in _label.h5.
        normalize_enum is an enum to determine normalisation as follows:
         0 = no normalisation
         1 = normalize between 0 and 255
         2 = normalize every cell individually with every color channel independent
         3 = normalize every cell individually with every color channel using the min / max of all three
         4 = normalize every cell but with bounds determined by the brightest cell in the same well

    # Returns
        X and y lists containing all of the data, a list of load errors,
        and a flag that is True when the path was skipped because
        predictions already existed.

    # Usage
        path = 'path/to/folder'
        X, y, errors, skipped = hdf5_loader(path)
        X = np.asarray(X)
        y = np.asarray(y)
        print(X.shape)
        print(y.shape)
    '''

    X = []
    y = []

    os.chdir(path)
    directory = os.fsencode(path)

    if platform == "linux" or platform == "linux2":
        os.system('ls ' + str(path) + ' > /dev/null')

    directory_contents = os.listdir(directory)
    directory_contents.sort()
    # well_regex = "(\w\d+)_\d+$"
    # well_regex = re.compile(well_regex)

    try:
        terminal_rows, terminal_columns = os.popen('stty size', 'r').read().split()
        terminal_columns = int(terminal_columns)
    except Exception:
        terminal_columns = None

    assert isinstance(suffix_label, str)
    assert isinstance(suffix_data, str)

    n_jobs = max(n_jobs, 1)
    verbose: bool = n_jobs == 1
    if force_verbose:
        verbose = force_verbose

    file_count = len(directory_contents)
    if verbose:
        print('Reading ' + str(file_count) + ' files with normalization strategy index: ' + str(
            normalize_enum) + '. Skipping already predicted: ' + str(skip_predicted))

    executor = ThreadPoolExecutor(max_workers=n_jobs)
    future_list = []
    worker_verbose: bool = n_jobs == 1

    if verbose:
        print('Setting up futures for path: ' + path)
    for i in range(file_count):
        filename = os.fsdecode(directory_contents[i])
        if verbose:
            line_print(str(i) + '/' + str(file_count) + ': Preparing future to load: ' + filename)

        future = executor.submit(hdf5_loader_worker,  # the actual function to run; its arguments follow
                                 filename,  # filename
                                 path,  # path
                                 pattern,  # pattern
                                 suffix_data,  # suffix_data
                                 suffix_data_zipped,  # suffix_data_zipped
                                 suffix_label,  # suffix_label
                                 gp_current,  # gp_current
                                 gp_max,  # gp_max
                                 normalize_enum,  # normalize_enum
                                 worker_verbose,  # verbose
                                 terminal_columns,  # terminal_columns#
                                 load_labels,  # load_labels
                                 load_samples  # load_samples
                                 )
        future_list.append(future)

        # Run through the future list again to check whether previously
        # added futures have already finished. On the ProDi server "Ehrlich",
        # adding tasks is so slow that it can be more efficient to check
        # even while futures are still being added.
        for ft in future_list:
            if ft.done():
                e = ft.exception()
                if e is None:
                    _, _, prediction_future = ft.result()
                    if prediction_future and skip_predicted:
                        if verbose:
                            print(
                                '\n' + 'While adding tasks: predicted well(s) already found. Skipping this experiment.')
                        executor.shutdown(wait=False)
                        return None, None, None, True

    if verbose:
        print(gct() + ' Starting to read ' + str(file_count) + ' label / data .h5 files on ' + str(
            n_jobs) + ' thread(s).')
    start_time = gct(raw=True)
    all_finished: bool = False
    executor.shutdown(wait=False)

    while not all_finished:
        finished_count = 0
        predicted_count = 0
        error_count = 0

        for future in future_list:
            if future.done():
                finished_count = finished_count + 1

                e = future.exception()
                if e is None:
                    _, _, prediction_future = future.result()
                    if prediction_future:
                        predicted_count = predicted_count + 1
                else:
                    error_count = error_count + 1

        if verbose:
            line_print('[' + str(gp_current) + ' / ' + str(gp_max) + '] ' + str(
                n_jobs) + ' Threads running. Finished: ' + str(finished_count) + '/' + str(
                len(future_list)) + '. Already predicted: ' + str(predicted_count) + '. Errors: ' + str(
                error_count) + '. Running: ' + get_time_diff(
                start_time) + '. ' + gct(), max_width=terminal_columns)
        all_finished = finished_count == len(future_list)
        time.sleep(1)

        if skip_predicted and predicted_count > 0:
            if verbose:
                print('\n' + str(predicted_count) + ' predicted well(s) found. Skipping this experiment.\n')
            executor.shutdown(wait=False)
            return None, None, None, True

    if verbose:
        print('\n' + gct() + ' Finished concurrent execution. Fetching results.')

    error_list = []
    errors_found = False
    for i in range(len(future_list)):
        future = future_list[i]
        if verbose:
            line_print('Extracting future: ' + str(i) + '/' + str(len(future_list)))

        e = future.exception()
        if e is None:
            X_w, y_w, prediction_file = future.result()
            if X_w is not None:
                X.extend(X_w)
            if y_w is not None:
                y.extend(y_w)
        else:
            print('\n' + gct() + ' Error extracting future results: "' + str(e) + '"!\n')
            error_list.append(e)
            errors_found = True

    if verbose:
        print(gct() + ' Fully Finished Loading Path.')

    if errors_found:
        # Print that there were errors, regardless of verbosity
        print('## WARNING! ##\nTHERE WAS AT LEAST ONE ERROR WHILE LOADING DATA! - Errors: ' + str(len(error_list)))
        print('Errors: ' + str(len(error_list)) + '/' + str(len(future_list)))

    # Delete the futures and the future list to release the memory immediately.
    del future_list[:]
    del future_list

    return X, y, error_list, False
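The normalize_enum contract is the part of this loader most worth pinning down. The real work happens in hdf5_loader_worker, which this example does not include, so the sketch below is only a plausible reading of the five strategies; well_min and well_max are hypothetical per-well bounds for strategy 4.

import numpy as np

def normalize_cell(img, normalize_enum, well_min=None, well_max=None):
    img = img.astype(np.float64)
    eps = 1e-12  # guard against flat images
    if normalize_enum == 0:    # no normalisation
        return img
    if normalize_enum == 1:    # scale into 0..255
        return (img - img.min()) / (img.max() - img.min() + eps) * 255.0
    if normalize_enum == 2:    # per cell, each channel on its own min/max
        for c in range(img.shape[-1]):
            ch = img[..., c]
            img[..., c] = (ch - ch.min()) / (ch.max() - ch.min() + eps)
        return img
    if normalize_enum == 3:    # per cell, min/max shared across channels
        return (img - img.min()) / (img.max() - img.min() + eps)
    if normalize_enum == 4:    # bounds from the brightest cell in the well
        return (img - well_min) / (well_max - well_min + eps)
    raise ValueError('unknown normalize_enum: {}'.format(normalize_enum))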