Example No. 1
    def close(self):
        """Close this file, saving the lockfile over the original.

        :note: If this method fails, it will attempt to delete the lockfile.
            However, it is not guaranteed to do so (e.g. if a filesystem
            becomes suddenly read-only), which will prevent future writes to
            this file until the lockfile is removed manually.
        :raises OSError: if the original file could not be overwritten. The
            lock file is still closed, so further attempts to write to the same
            file object will raise ValueError.
        """
        if self._closed:
            return
        os.fsync(self._file.fileno())
        self._file.close()
        try:
            if getattr(os, 'replace', None) is not None:
                os.replace(self._lockfilename, self._filename)
            else:
                if sys.platform != 'win32':
                    os.rename(self._lockfilename, self._filename)
                else:
                    # Windows versions prior to Vista don't support atomic
                    # renames
                    _fancy_rename(self._lockfilename, self._filename)
        finally:
            self.abort()
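Note on the fallback chain above: it only matters on Python versions older than 3.3, where os.replace() does not exist and os.rename() refuses to overwrite an existing file on Windows. On any modern Python the commit step reduces to a single call. A minimal sketch (the helper name is illustrative), assuming the lockfile and the target sit on the same filesystem:

import os

def commit_lockfile(lockfilename, filename):
    # os.replace() renames atomically and overwrites an existing destination
    # on both POSIX and Windows, so no platform-specific branch is needed.
    os.replace(lockfilename, filename)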
Example No. 2
    def _dump(self):
        temp = '%s-%s.tmp' % (uuid.uuid4(), self.name)
        with open(temp, 'w', encoding='utf-8') as tmp:
            json.dump(self._db.copy(), tmp, ensure_ascii=True, cls=self.encoder, separators=(',', ':'))

        # atomically move the file
        os.replace(temp, self.name)
Example No. 3
def save_json(filename: str, data: Union[List, Dict],
              private: bool = False) -> None:
    """Save JSON data to a file.

    Raises SerializationError or WriteError on failure.
    """
    tmp_filename = filename + "__TEMP__"
    try:
        json_data = json.dumps(data, sort_keys=True, indent=4)
        mode = 0o600 if private else 0o644
        with open(os.open(tmp_filename, O_WRONLY | O_CREAT | O_TRUNC, mode),
                  'w', encoding='utf-8') as fdesc:
            fdesc.write(json_data)
        os.replace(tmp_filename, filename)
    except TypeError as error:
        _LOGGER.exception('Failed to serialize to JSON: %s',
                          filename)
        raise SerializationError(error)
    except OSError as error:
        _LOGGER.exception('Saving JSON file failed: %s',
                          filename)
        raise WriteError(error)
    finally:
        if os.path.exists(tmp_filename):
            try:
                os.remove(tmp_filename)
            except OSError as err:
                # If we are cleaning up then something else went wrong, so
                # we should suppress likely follow-on errors in the cleanup
                _LOGGER.error("JSON replacement cleanup failed: %s", err)
Example No. 4
def sanitizeTree(filetree):
    chapterNames = {}
    for root, dirs, files in os.walk(filetree, False):
        for name in files:
            if name.startswith('.') or name.lower() == 'thumbs.db':
                os.remove(os.path.join(root, name))
            else:
                splitname = os.path.splitext(name)
                slugified = slugify(splitname[0])
                while os.path.exists(os.path.join(root, slugified + splitname[1])) and splitname[0].upper()\
                        != slugified.upper():
                    slugified += "A"
                newKey = os.path.join(root, slugified + splitname[1])
                key = os.path.join(root, name)
                if key != newKey:
                    os.replace(key, newKey)
        for name in dirs:
            if name.startswith('.'):
                os.remove(os.path.join(root, name))
            else:
                tmpName = name
                slugified = slugify(name)
                while os.path.exists(os.path.join(root, slugified)) and name.upper() != slugified.upper():
                    slugified += "A"
                chapterNames[slugified] = tmpName
                newKey = os.path.join(root, slugified)
                key = os.path.join(root, name)
                if key != newKey:
                    os.replace(key, newKey)
    return chapterNames
Example No. 5
File: cli.py Project: dcos/dcos
def _write_file_bytes(path, data, mode):
    """
    Atomically write `data` to `path` using the file permissions
    `stat.S_IMODE(mode)`.

    File consumers can rely on seeing valid file contents once they are able to
    open the file. This is achieved by performing all relevant operations on a
    temporary file followed by a `os.replace()` which, if successful, renames to
    the desired path (and overwrites upon conflict) in an atomic operation (on
    both, Windows, and Linux).

    If acting on the temporary file fails (be it writing, closing, chmodding,
    replacing) an attempt is performed to remove the temporary file; and the
    original exception is re-raised.
    """
    assert isinstance(data, bytes)

    basename = os.path.basename(path)
    tmpfile_dir = os.path.dirname(os.path.realpath(path))

    fd, tmpfile_path = tempfile.mkstemp(prefix=basename, dir=tmpfile_dir)

    try:
        try:
            os.write(fd, data)
        finally:
            os.close(fd)
        os.chmod(tmpfile_path, stat.S_IMODE(mode))
        os.replace(tmpfile_path, path)
    except Exception:
        os.remove(tmpfile_path)
        raise
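A possible call site for the helper above, shown only as a sketch: the path, payload, and fallback mode are illustrative rather than taken from the dcos code, and the existing file's permission bits are reused so a rewrite does not silently change them.

import os

path = "/tmp/example.conf"            # illustrative path
try:
    mode = os.stat(path).st_mode      # keep the current permission bits
except FileNotFoundError:
    mode = 0o644                      # default for a file that does not exist yet
_write_file_bytes(path, b"key = value\n", mode)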
Example No. 6
    def sync_save_log(self, reason):
        started = time.time()
        new_file = os.path.join(self.log_dir, f"{int(started)}.pickle")
        try:
            with open(new_file, 'xb') as f:
                pickle.dump(self.event_log, f)
        except FileExistsError:
            log.exception(
                f"unable to dump the log, file {new_file} already exists, "
                f"too many updates/sec? current: {self.log_updates}, "
                f"threshold: {self.log_save_updates}"
            )
            return False

        # atomically replace `current` symlink
        tmplink = os.path.join(self.log_dir, "tmp")
        try:
            os.remove(tmplink)
        except FileNotFoundError:
            pass
        os.symlink(new_file, tmplink)
        os.replace(tmplink, self.log_current)

        duration = time.time() - started
        log.info(f"log dumped to disk because {reason}, took {duration:.4}s")
        return True
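The update of the current pointer above works because os.replace() can atomically rename a freshly created symlink over an existing one, so readers always see either the old or the new target. A standalone sketch of that idiom; the function name and the current/current.tmp link names are illustrative, not taken from the original class:

import os

def repoint_current(log_dir, target):
    # Build the new link under a temporary name, then swap it into place.
    tmplink = os.path.join(log_dir, "current.tmp")
    try:
        os.remove(tmplink)  # drop a stale temporary link left by a crashed run
    except FileNotFoundError:
        pass
    os.symlink(target, tmplink)
    os.replace(tmplink, os.path.join(log_dir, "current"))  # atomic swap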
Example No. 7
def save_json(filename: str, data: Union[List, Dict],
              private: bool = False, *,
              encoder: Optional[json.JSONEncoder] = None) -> None:
    """Save JSON data to a file.

    Raises SerializationError or WriteError on failure.
    """
    tmp_filename = ""
    tmp_path = os.path.split(filename)[0]
    try:
        json_data = json.dumps(data, sort_keys=True, indent=4, cls=encoder)
        # Modern versions of Python tempfile create this file with mode 0o600
        with tempfile.NamedTemporaryFile(mode="w", encoding='utf-8',
                                         dir=tmp_path, delete=False) as fdesc:
            fdesc.write(json_data)
            tmp_filename = fdesc.name
        if not private:
            os.chmod(tmp_filename, 0o644)
        os.replace(tmp_filename, filename)
    except TypeError as error:
        _LOGGER.exception('Failed to serialize to JSON: %s',
                          filename)
        raise SerializationError(error)
    except OSError as error:
        _LOGGER.exception('Saving JSON file failed: %s',
                          filename)
        raise WriteError(error)
    finally:
        if os.path.exists(tmp_filename):
            try:
                os.remove(tmp_filename)
            except OSError as err:
                # If we are cleaning up then something else went wrong, so
                # we should suppress likely follow-on errors in the cleanup
                _LOGGER.error("JSON replacement cleanup failed: %s", err)
Example No. 8
def updateSingleDocumentToC(input_file, min_toc_len, verbose=False):
    """Add or update table of contents in specified file. Return 1 if file changed, 0 otherwise."""
    if verbose:
        print('file: {}'.format(input_file))

    output_file = input_file + '.tmp'

    markdownToclify(
        input_file=input_file,
        output_file=output_file,
        min_toc_len=min_toc_len,
        github=True,
        back_to_top=False,
        nolink=False,
        no_toc_header=False,
        spacer=False,
        placeholder=False,
        exclude_h=excludeHeadingsFor(input_file))

    # prevent race-condition (Python 3.3):
    if sys.version_info >= (3, 3):
        os.replace(output_file, input_file)
    else:
        os.remove(input_file)
        os.rename(output_file, input_file)

    return 1
Example No. 9
 def move(cls, item, to_collection, to_href):
     os.replace(
         path_to_filesystem(item.collection._filesystem_path, item.href),
         path_to_filesystem(to_collection._filesystem_path, to_href))
     cls._sync_directory(to_collection._filesystem_path)
     if item.collection._filesystem_path != to_collection._filesystem_path:
         cls._sync_directory(item.collection._filesystem_path)
Example No. 10
    def _save_file(self, path, config, is_theme):
        kind = 'theme' if is_theme else 'configuration'

        new_path = path + '_.new'
        old_path = path + '_.old'

        # Write new data
        out_data = json.dumps(config, indent=4, cls=_ConfigEncoder, sort_keys=True)
        assert len(out_data) <= self._MAX_CHARS
        try:
            with open(new_path, 'w', encoding='utf-8') as f:
                f.write(out_data)
        except OSError as e:
            print('Could not write new {} file: {}'.format(kind, e.strerror),
                    file=sys.stderr)
            return False

        # Replace old file with the new one
        try:
            if os.path.exists(path):
                os.replace(path, old_path)
            os.replace(new_path, path)
            if os.path.exists(old_path):
                os.remove(old_path)
        except OSError as e:
            print('An error occurred while replacing old {} file: {}'.format(
                kind, e.strerror),
                file=sys.stderr)
            return False

        return True
Example No. 11
def replaceFile( output_path, input_path ):
    # prevent race-condition (Python 3.3):
    if sys.version_info >= (3, 3):
        os.replace( output_path, input_path )
    else:
        os.remove( input_path )
        os.rename( output_path, input_path )
Example No. 12
def main():
    infilename = "program_11_10.cpp"
    outfilename = "_program_11_10.cpp"

    try:
        infile = open(infilename, "r")
    except OSError:
        print("File " + infilename + " did not open")
        raise SystemExit(1)

    try:
        outfile = open(outfilename, "w")
    except OSError:
        print("File " + outfilename + " did not open")
        raise SystemExit(1)

    for line_of_text in infile:
        match = re.match(r"^[0-9]*", line_of_text)
        line_of_text = line_of_text[:match.start()] + line_of_text[match.end():]
        outfile.write(line_of_text)

    infile.close()
    outfile.close()


    if os.path.isfile(infilename):
        os.remove(infilename)
        os.replace(outfilename, infilename)
        print_file_list(".", get_file_list("."))
Example No. 13
def download(module: str):
    name, address = module.split('/')[1], 'https://github.com/{module}/archive/master.zip'.format(module=module)
    print('loading module', name, 'from github:', address)

    print('starting download ...')
    try:
        zip_file = request.urlopen(address)
    except urllib.error.HTTPError:
        print('\nError: No such GitHub project')
        raise SystemExit
    os.makedirs(os.path.dirname('AElfi/modules/_temp/download.zip'), exist_ok=True)
    temporary_zip = open('AElfi/modules/_temp/download.zip', 'wb')
    temporary_zip.write(zip_file.read())
    zip_file.close()
    temporary_zip.close()
    print('download complete ...')

    temporary_zip = zipfile.ZipFile('AElfi/modules/_temp/download.zip', 'r')
    temporary_zip.extractall('AElfi/modules/_temp/download')
    temporary_zip.close()
    os.remove('AElfi/modules/_temp/download.zip')
    print('extracted zip ...')

    shutil.rmtree('AElfi/modules/{}/'.format(name), ignore_errors=True)
    location, directories = next(os.walk('AElfi/modules/_temp/download'))[:2]
    directory = directories[0]
    os.replace(location + '/' + directory, 'AElfi/modules/{}/'.format(name))
    print('moved to', 'AElfi/modules/{}/'.format(name), '...')
    shutil.rmtree('AElfi/modules/_temp/')

    print('cleaning up', '...')
    for dirname, dirs, files in os.walk('AElfi/modules/{}/'.format(name)):
        for file in files:
            os.chmod(dirname + '/' + file, 0o755)
    print('finished!')
Example No. 14
def fileRead(mydir, commit):
    """Reads files recursively and renames it if need."""
    data = defaultdict(list)
    try:
        for fName in os.listdir(mydir):
            pathSrc = os.path.join(mydir, fName)
            if os.path.isfile(pathSrc):
                """Shrinks only files over 255-bytes length currenty."""
                if (utf8len(fName) > 255):
                    if '.' in fName:
                        pieces = fName.split('.')
                        fExt = "."+ pieces[len(pieces)-1]
                        newfName = " ".join(pieces[:-1])
                    else:
                        newfName = fName
                        fExt = ''
                    newfName = fineCut(newfName, 255-utf8len(fExt)) + fExt
                    pathDst = os.path.join(mydir, newfName)
                    data[mydir].append(fName)
                    if commit:
                        os.replace(pathSrc, pathDst)
            else:
                for k, v in fileRead(pathSrc, commit).items():
                    data[k].append(str(v).strip("'[]'"))
    except OSError as e:
        print(e, file=sys.stderr)
    return data
Example No. 15
def replace_file(src, dst):
    """
    Overwrite a file (in an atomic fashion when possible).

    :param src: The pathname of the source file (a string).
    :param dst: The pathname of the destination file (a string).
    """
    # Try os.replace() which was introduced in Python 3.3
    # (this should work on POSIX as well as Windows systems).
    try:
        os.replace(src, dst)
        return
    except AttributeError:
        pass
    # Try os.rename() which is atomic on UNIX but refuses to overwrite existing
    # files on Windows.
    try:
        os.rename(src, dst)
        return
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    # Finally we fall back to the dumb approach required only on Windows.
    # See https://bugs.python.org/issue8828 for a long winded discussion.
    os.remove(dst)
    os.rename(src, dst)
Example No. 16
def ini_file_helper_add_or_update_key_value(ini_file_name, ini_key_val_pair):

    if not Path(ini_file_name).is_file():
        return False

    if ini_key_val_pair is None or ini_key_val_pair == "":
        return False

    ini_key_val_pair_splitted = ini_key_val_pair.split(":", maxsplit=1)
    key_to_replace = ini_key_val_pair_splitted[0]
    val_to_replace = ini_key_val_pair_splitted[1]

    ## remove 'old' key
    ini_out_file_name = ini_file_name + ".tmpout"
    with open(ini_file_name, 'r') as ini_file:
        with open(ini_out_file_name, 'w') as out_file:
            for ini_file_line in ini_file:
                if not re.match("^" + key_to_replace + "=", ini_file_line):
                    out_file.write(ini_file_line)

    ## append to end
    with open(ini_out_file_name, 'a') as out_file:
        print(key_to_replace + "=" + val_to_replace, file=out_file)

    os.replace(ini_out_file_name, ini_file_name)

    return True
Example No. 17
def content_write(file, content, append=False):
	"""Write content to file. Content may be str or bytes."""
	file = path.expanduser(file)

	prepare_folder(path.dirname(file))

	original = file

	if append:
		mode = "a"
	else:
		mode = "w"
		file = file + time.strftime("@%Y-%m-%d_%H%M%S")

	if isinstance(content, bytes):
		mode += "b"
		with io.open(file, mode) as f:
			write_big(f, content)

	else:
		if not isinstance(content, str):
			content = pprint.pformat(content)

		with io.open(file, mode, encoding="utf-8") as f:
			write_big(f, content)

	if file != original:
		os.replace(file, original)
Example No. 18
def save_yaml(fname: str, data: JSON_TYPE) -> None:
    """Save a YAML file."""
    yaml = YAML(typ='rt')
    yaml.indent(sequence=4, offset=2)
    tmp_fname = fname + "__TEMP__"
    try:
        try:
            file_stat = os.stat(fname)
        except OSError:
            file_stat = stat_result(
                (0o644, -1, -1, -1, -1, -1, -1, -1, -1, -1))
        with open(os.open(tmp_fname, O_WRONLY | O_CREAT | O_TRUNC,
                          file_stat.st_mode), 'w', encoding='utf-8') \
                as temp_file:
            yaml.dump(data, temp_file)
        os.replace(tmp_fname, fname)
        if hasattr(os, 'chown') and file_stat.st_ctime > -1:
            try:
                os.chown(fname, file_stat.st_uid, file_stat.st_gid)
            except OSError:
                pass
    except YAMLError as exc:
        _LOGGER.error(str(exc))
        raise HomeAssistantError(exc)
    except OSError as exc:
        _LOGGER.exception('Saving YAML file %s failed: %s', fname, exc)
        raise WriteError(exc)
    finally:
        if os.path.exists(tmp_fname):
            try:
                os.remove(tmp_fname)
            except OSError as exc:
                # If we are cleaning up then something else went wrong, so
                # we should suppress likely follow-on errors in the cleanup
                _LOGGER.error("YAML replacement cleanup failed: %s", exc)
Example No. 19
def atomicupdate(filepath):
    dirname, basename = os.path.split(filepath)

    if not basename:
        raise ValueError("must name a file")

    basename, ext = os.path.splitext(basename)

    f = _mktemp(dirname, prefix=basename + "-", suffix=ext, delete=False)
    tempfilename = f.name

    try:
        stat = os.stat(filepath)
    except FileNotFoundError:
        os.chmod(tempfilename, 0o644)
    else:
        # Copy user, group and access flags
        os.chown(tempfilename, stat.st_uid, stat.st_gid)
        os.chmod(tempfilename, stat.st_mode)

    try:
        yield f
    except BaseException:
        f.close()
        os.remove(tempfilename)
        raise
    else:
        f.close()

    try:
        os.replace(tempfilename, filepath)
    except OSError:
        os.remove(tempfilename)
        raise
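atomicupdate is written as a generator, so in its original module it is presumably wrapped with contextlib.contextmanager (that decorator, like the _mktemp helper, is not shown in the snippet). Assuming that, usage would look roughly like the sketch below; the file name and payload are made up for illustration:

# Hypothetical usage, assuming @contextlib.contextmanager is applied to
# atomicupdate in its defining module.
with atomicupdate("settings.json") as f:
    f.write('{"debug": false}\n')
# On a clean exit the temporary file is os.replace()d over settings.json;
# if the body raises, the temporary file is removed and the original is untouched.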
Example No. 20
    def write_two_phases(filename, data, io):
        """
        Writes a file in two phase to the filesystem.

        First write the data to a temporary file (in the same directory) and than renames the temporary file. If the
        file already exists and its content is equal to the data that must be written no action is taken. This has the
        following advantages:
        * In case of some write error (e.g. disk full) the original file is kep in tact and no file with partially data
        is written.
        * Renaming a file is atomic. So, running processes will never read a partially written data.

        :param str filename: The name of the file were the data must be stored.
        :param str data: The data that must be written.
        :param pystratum.style.PyStratumStyle.PyStratumStyle io: The output decorator.
        """
        write_flag = True
        if os.path.exists(filename):
            with open(filename, 'r') as file:
                old_data = file.read()
                if data == old_data:
                    write_flag = False

        if write_flag:
            tmp_filename = filename + '.tmp'
            with open(tmp_filename, 'w+') as file:
                file.write(data)
            os.replace(tmp_filename, filename)
            io.text('Wrote: <fso>{0}</fso>'.format(filename))
        else:
            io.text('File <fso>{0}</fso> is up to date'.format(filename))
Example No. 21
 def move(cls, item, to_collection, to_href):
     if not pathutils.is_safe_filesystem_path_component(to_href):
         raise pathutils.UnsafePathError(to_href)
     os.replace(
         pathutils.path_to_filesystem(
             item.collection._filesystem_path, item.href),
         pathutils.path_to_filesystem(
             to_collection._filesystem_path, to_href))
     cls._sync_directory(to_collection._filesystem_path)
     if item.collection._filesystem_path != to_collection._filesystem_path:
         cls._sync_directory(item.collection._filesystem_path)
     # Move the item cache entry
     cache_folder = os.path.join(item.collection._filesystem_path,
                                 ".Radicale.cache", "item")
     to_cache_folder = os.path.join(to_collection._filesystem_path,
                                    ".Radicale.cache", "item")
     cls._makedirs_synced(to_cache_folder)
     try:
         os.replace(os.path.join(cache_folder, item.href),
                    os.path.join(to_cache_folder, to_href))
     except FileNotFoundError:
         pass
     else:
         cls._makedirs_synced(to_cache_folder)
         if cache_folder != to_cache_folder:
             cls._makedirs_synced(cache_folder)
     # Track the change
     to_collection._update_history_etag(to_href, item)
     item.collection._update_history_etag(item.href, None)
     to_collection._clean_history()
     if item.collection._filesystem_path != to_collection._filesystem_path:
         item.collection._clean_history()
Example No. 22
def update_torrent(client, torrent, new_data, to_save_customs, to_set_customs):
    data_file_path = tools.make_sub_name(torrent.get_path(), ".", ".newdata")
    with open(data_file_path, "wb") as data_file:
        data_file.write(new_data)
    with client_hooks(client, torrent, to_save_customs, to_set_customs):
        os.replace(data_file_path, torrent.get_path())
        torrent.load_from_data(new_data, torrent.get_path())
Example No. 23
def dotranscode(action, args):
    if action.action != 'transcode':
        print('error: dotranscode got non-transcode action:', action)
        return

    # TODO: make sure the directory exists

    # Instead of writing directly to $destpath, write to $destpath.partial,
    # so that if we crash we don't leave partially-encoded files laying around
    tmppath = action.destpath + ".partial"
    cmd = [opusenc_path, '--quiet', '--bitrate', str(args.bitrate), action.filepath, tmppath]
    kwargs = dict(
        stdin=subprocess.DEVNULL,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.PIPE,
    )
    with subprocess.Popen(cmd, **kwargs) as process:
        try:
            _, stderr = process.communicate()
        except:
            process.kill()
            process.wait()
            raise
        returncode = process.poll()

    if returncode != 0:
        print("error: transcode failed:", stderr.decode('utf-8', 'replace').strip())
        print("info: failed command:", " ".join(cmd))
        return

    try:
        os.replace(tmppath, action.destpath)
    except OSError as e:
        print("error: rename failed: %s: %s" % (action.destpath, e))
        return
Example No. 24
def _write_cache():
    # We *could* load the cache here, but calling write_cache indicates an error up the stack
    assert _cache is not None
    with open('cache/index.json.next', 'w') as fp:
        json.dump(_cache, fp, sort_keys=True, indent=0)  # No indent to save some space
    # Hope that os.replace doesn't corrupt the file *and* the git backup.
    os.replace('cache/index.json.next', 'cache/index.json')
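The comment above is right to worry: os.replace() guarantees that readers see either the old or the new index.json, but on its own it does not make the new contents durable across a power loss. For that the temporary file should be fsync'd before the rename and, on POSIX, the containing directory after it. A more defensive sketch of the same helper, keeping the paths from the example:

import json
import os

def _write_cache_durable(cache, path="cache/index.json"):
    tmp = path + ".next"
    with open(tmp, "w") as fp:
        json.dump(cache, fp, sort_keys=True, indent=0)
        fp.flush()
        os.fsync(fp.fileno())  # push the file data to disk before renaming
    os.replace(tmp, path)      # atomic swap of the visible file
    try:
        # fsync the directory so the rename itself survives a crash;
        # skip quietly on platforms where this is not supported.
        dir_fd = os.open(os.path.dirname(path) or ".", os.O_RDONLY)
        try:
            os.fsync(dir_fd)
        finally:
            os.close(dir_fd)
    except OSError:
        pass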
Example No. 25
File: util.py Project: dcos/dcos
def write_string(filename, data):
    """
    Write a string to a file.
    Overwrite any data in that file.

    We use an atomic write practice of creating a temporary file and then
    moving that temporary file to the given ``filename``. This prevents race
    conditions such as the file being read by another process after it is
    opened here but not yet written to.

    It also prevents us from creating or truncating a file before we fail to
    write data to it because of low disk space.

    If no file already exists at ``filename``, the new file is created with
    permissions 0o644.
    """
    prefix = os.path.basename(filename)
    tmp_file_dir = os.path.dirname(os.path.realpath(filename))
    fd, temporary_filename = tempfile.mkstemp(prefix=prefix, dir=tmp_file_dir)

    try:
        permissions = os.stat(filename).st_mode
    except FileNotFoundError:
        permissions = 0o644

    try:
        try:
            os.write(fd, data.encode())
        finally:
            os.close(fd)
        os.chmod(temporary_filename, stat.S_IMODE(permissions))
        os.replace(temporary_filename, filename)
    except Exception:
        os.remove(temporary_filename)
        raise
Example No. 26
    def save(self):
      if self.headers['Content-Length'] == None:
        self.my_error(411)
        return
      length = int(self.headers['Content-Length'])

      if self.headers['X-File'] in editable_files:
        filename = self.headers['X-File']
      else:
        self.my_error(403)
        return

      # we're going to be saving really often, so it's likely
      # that if the system crashes it'll be during a write,
      # so make sure writes are atomic
      temp_filename = join(os.path.dirname(abspath(filename)),
        'temp-hilarious-editor-'+create_token()+'.txt~')
      with open(temp_filename, 'wt', encoding='utf-8') as f:
        f.write(self.rfile.read(length).decode('utf-8'))
      os.replace(temp_filename, filename)
      if on_save != None:
        on_save(filename)
      self.send_response(204)
      self.send_header('Content-Type', 'text/plain')
      self.boilerplate_headers()
      self.end_headers()
Example No. 27
    def write_two_phases(the_filename, the_data):
        """
        Writes a file in two phase to the filesystem.

        First write the data to a temporary file (in the same directory) and than renames the temporary file. If the
        file already exists and its content is equal to the data that must be written no action is taken. This has the
        following advantages:
        * In case of some write error (e.g. disk full) the original file is kep in tact and no file with partially data
        is written.
        * Renaming a file is atomic. So, running processes will never read a partially written data.

        :param str the_filename: The name of the file were the data must be stored.
        :param str the_data: The data that must be written.
        """
        write_flag = True
        if os.path.exists(the_filename):
            with open(the_filename, 'r') as file:
                old_data = file.read()
                if the_data == old_data:
                    write_flag = False

        if write_flag:
            tmp_filename = the_filename + '.tmp'
            with open(tmp_filename, 'w+') as file:
                file.write(the_data)
            os.replace(tmp_filename, the_filename)
            print("Wrote: '%s'." % the_filename)
Example No. 28
def set_info(fname, info):
    """
        Writes the magic to the first line that starts with # validated
        or # novalidate. If no such line exists, write to the first line
        of the file
    """

    with open(fname, "r") as fin, tempfile.NamedTemporaryFile(dir=dirname(fname), mode="w", delete=False) as fout:

        found = False
        written = False

        # search for the line first
        for line in fin:
            if line.startswith("# validated") or line.startswith("# novalidate"):
                found = True
                break

        fin.seek(0)

        # Now rewrite the file
        for line in fin:
            if not written:
                if not found:
                    fout.write(info.line)
                    written = True

                elif line.startswith("# validated") or line.startswith("# novalidate"):
                    line = info.line
                    written = True

            fout.write(line)

    os.replace(fout.name, fname)
Example No. 29
    async def _download(self):
        if self._is_downloading:
            return

        self._is_downloading = True
        try:
            result = await extract_info(self.playlist.loop, self.url, download=True)

            unmoved_filename = ytdl.prepare_filename(result)
            unmoved_filename = md5sum(unmoved_filename, 8).join('-.').join(unmoved_filename.rsplit('.',1))

            self.filename = os.path.join(AUDIO_CACHE_PATH, unmoved_filename)

            # Ensure the folder that we're going to move into exists.
            directory = os.path.dirname(self.filename)
            if not os.path.exists(directory):
                os.makedirs(directory)

            # Move the temporary file to its final location.
            os.replace(ytdl.prepare_filename(result), self.filename)

            # Trigger ready callbacks.
            self._for_each_future(lambda future: future.set_result(self))

        except Exception as e:
            self._for_each_future(lambda future: future.set_exception(e))

        finally:
            self._is_downloading = False
Example No. 30
def write_header(header_text, user_filepath, insert_linenum=0, cut_lines=0):
    """Insert header into `user_filepath` starting at line `insert_linenum`
    (zero based). Removes `cut_lines` amount of lines after the header.
    `cut_lines` is useful for cases where existing header lines need to be
    removed.
    """
    if insert_linenum > HEADER_IN_FIRST_N_LINES:
        raise ValueError(
            (
                "Header should not be written at line {}. It should be placed"
                " near the top of the file."
            ).format(insert_linenum)
        )

    try:
        with open(user_filepath, "rt") as user_file:
            with tempfile.NamedTemporaryFile(
                mode="wt",
                delete=False) as outfile:
                for i, line in enumerate(user_file):
                    if i == insert_linenum:
                        outfile.write(header_text)

                    if i < insert_linenum or i >= insert_linenum + cut_lines:
                        outfile.write(line)

        os.replace(outfile.name, user_filepath)
    except UnicodeDecodeError:
        print((
            "WARNING: File {} is not standard unicode and has been skipped. It"
            " may be a binary."
        ).format(user_filepath))
Example No. 31
def backup_dir(src,bkp):
    print (datetime.datetime.now()," Backing up " + src + " to " + bkp)
    os.makedirs(bkp, mode=0o700, exist_ok=True)
    for root, dirs, files in os.walk(src):
        for name in files:
            os.replace(os.path.join(root, name),os.path.join(bkp, name))
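Both backup_dir and recover_dir (Example No. 37 below) assume that the source and backup directories are on the same filesystem: os.replace() renames in place and raises OSError (typically EXDEV) instead of copying across devices. A per-file helper that tolerates the cross-device case could look like this sketch; move_file is an illustrative name, not part of the original script:

import errno
import os
import shutil

def move_file(src_path, dst_path):
    try:
        os.replace(src_path, dst_path)   # atomic when both paths share a filesystem
    except OSError as exc:
        if exc.errno != errno.EXDEV:
            raise
        shutil.move(src_path, dst_path)  # cross-device fallback: copy then delete, not atomic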
Example No. 32
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
) -> Optional[str]:
    """
    Given a URL, look for the corresponding file in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    Return:
        None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk).
        Local path (string) otherwise
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url,
                                     allow_redirects=True,
                                     proxies=proxies,
                                     timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename +
                                           ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False.")
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):

        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile,
                                        dir=cache_dir,
                                        delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            logger.info(
                "%s not found in cache or force_download set to True, downloading to %s",
                url, temp_file.name)

            http_get(url,
                     temp_file,
                     proxies=proxies,
                     resume_size=resume_size,
                     user_agent=user_agent)

        logger.info("storing %s in cache at %s", url, cache_path)
        os.replace(temp_file.name, cache_path)

        logger.info("creating metadata file for %s", cache_path)
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
Example No. 33
def cached_path(
    url_or_filename: Union[str, PathLike],
    cache_dir: Union[str, Path] = None,
    extract_archive: bool = False,
    force_extract: bool = False,
) -> str:
    """
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.

    # Parameters

    url_or_filename : `Union[str, Path]`
        A URL or local file to parse and possibly download.

    cache_dir : `Union[str, Path]`, optional (default = `None`)
        The directory to cache downloads.

    extract_archive : `bool`, optional (default = `False`)
        If `True`, then zip or tar.gz archives will be automatically extracted.
        In which case the directory is returned.

    force_extract : `bool`, optional (default = `False`)
        If `True` and the file is an archive file, it will be extracted regardless
        of whether or not the extracted directory already exists.
    """
    if cache_dir is None:
        cache_dir = CACHE_DIRECTORY

    if isinstance(url_or_filename, PathLike):
        url_or_filename = str(url_or_filename)

    file_path: str

    # If we're using the /a/b/foo.zip!c/d/file.txt syntax, handle it here.
    exclamation_index = url_or_filename.find("!")
    if extract_archive and exclamation_index >= 0:
        archive_path = url_or_filename[:exclamation_index]
        file_name = url_or_filename[exclamation_index + 1 :]

        # Call 'cached_path' recursively now to get the local path to the archive itself.
        cached_archive_path = cached_path(archive_path, cache_dir, True, force_extract)
        if not os.path.isdir(cached_archive_path):
            raise ValueError(
                f"{url_or_filename} uses the ! syntax, but does not specify an archive file."
            )

        # Now return the full path to the desired file within the extracted archive,
        # provided it exists.
        file_path = os.path.join(cached_archive_path, file_name)
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"file {file_name} not found within {archive_path}")

        return file_path

    url_or_filename = os.path.expanduser(url_or_filename)
    parsed = urlparse(url_or_filename)

    extraction_path: Optional[str] = None

    if parsed.scheme in ("http", "https", "s3"):
        # URL, so get it from the cache (downloading if necessary)
        file_path = get_from_cache(url_or_filename, cache_dir)

        if extract_archive and (is_zipfile(file_path) or tarfile.is_tarfile(file_path)):
            # This is the path the file should be extracted to.
            # For example ~/.allennlp/cache/234234.21341 -> ~/.allennlp/cache/234234.21341-extracted
            extraction_path = file_path + "-extracted"

    elif os.path.exists(url_or_filename):
        # File, and it exists.
        file_path = url_or_filename

        if extract_archive and (is_zipfile(file_path) or tarfile.is_tarfile(file_path)):
            # This is the path the file should be extracted to.
            # For example model.tar.gz -> model-tar-gz-extracted
            extraction_dir, extraction_name = os.path.split(file_path)
            extraction_name = extraction_name.replace(".", "-") + "-extracted"
            extraction_path = os.path.join(extraction_dir, extraction_name)

    elif parsed.scheme == "":
        # File, but it doesn't exist.
        raise FileNotFoundError(f"file {url_or_filename} not found")

    else:
        # Something unknown
        raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path")

    if extraction_path is not None:
        # If the extracted directory already exists (and is non-empty), then no
        # need to extract again unless `force_extract=True`.
        if os.path.isdir(extraction_path) and os.listdir(extraction_path) and not force_extract:
            return extraction_path

        # Extract it.
        with FileLock(file_path + ".lock"):
            shutil.rmtree(extraction_path, ignore_errors=True)

            # We extract first to a temporary directory in case something goes wrong
            # during the extraction process so we don't end up with a corrupted cache.
            tmp_extraction_dir = tempfile.mkdtemp(dir=os.path.split(extraction_path)[0])
            try:
                if is_zipfile(file_path):
                    with ZipFile(file_path, "r") as zip_file:
                        zip_file.extractall(tmp_extraction_dir)
                        zip_file.close()
                else:
                    tar_file = tarfile.open(file_path)
                    tar_file.extractall(tmp_extraction_dir)
                    tar_file.close()
                # Extraction was successful, rename temp directory to final
                # cache directory.
                os.replace(tmp_extraction_dir, extraction_path)
            finally:
                shutil.rmtree(tmp_extraction_dir, ignore_errors=True)

        return extraction_path

    return file_path
Example No. 34
# 7/18/2022 Noelle Crump, to generate Run_All.sh
# this one is for Theobald Hose Stream and runs on batch4

import os
import pandas as pd

input_file_list = pd.read_csv('paramfile.csv',
                              header=0)['theobald_Template.fds']
forestring = '$QFDS $DEBUG $QUEUE -d $INDIR '

f = open('Run_All.sh', 'w')
f.write('#!/bin/bash\n\n')
f.write(
    '# This script runs a set of Validation Cases on a Linux machine with a batch queuing system.\n'
)
f.write('# See the file Validation/Common_Run_All.sh for more information.\n')
f.write(
    'export SVNROOT=`pwd`/../..\nsource $SVNROOT/Validation/Common_Run_All.sh\n\n'
)

for filename in input_file_list:
    f.write(forestring + filename + '\n')
f.write('\necho FDS cases submitted')
f.close()

# Move the Run_All.sh file up to the case folder
os.replace("Run_All.sh", "../../Run_All.sh")
os.system('chmod +x  ../../Run_All.sh')
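The final permission change shells out through os.system; the same effect can be achieved in-process with os.chmod, which avoids depending on an external chmod binary. A small sketch operating on the same ../../Run_All.sh path used above:

import os
import stat

script = "../../Run_All.sh"
mode = os.stat(script).st_mode
# equivalent of `chmod +x`: add execute permission for user, group and others
os.chmod(script, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)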
Example No. 35
numberNonCompliant = 0
numberFileSkipped = 0

clangFormatCommand = findClangFormat()
with tempfile.TemporaryDirectory(dir=nkt.getNetworKitRoot()) as tempDir:
    files = nkt.getCXXFiles()
    for file in files:
        if not subscribedToFormat(file):
            numberFileSkipped += 1
            continue

        tempFile = os.path.join(tempDir, 'cfOutput')
        runClangFormat(file, tempFile, clangFormatCommand)

        if not filecmp.cmp(file, tempFile, shallow=False):
            numberNonCompliant += 1
            nkt.reportChange(file + " is non-compliant")

            if nkt.doReportDiff():
                nkt.computeAndReportDiff(file, tempFile)

            if not nkt.isReadonly():
                os.replace(tempFile, file)

print(
    "Scanned %d files (skipped %d files without subscription). Non-compliant files: %d."
    % (len(files), numberFileSkipped, numberNonCompliant))

if numberNonCompliant > 0:
    nkt.failIfReadonly(__file__)
Example No. 36
def get_from_cache(
    url: str,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent: Union[Dict, str, None] = None,
    local_files_only=False,
) -> Optional[str]:
    """
    Given a URL, look for the corresponding file in the local cache. If it's not there, download it. Then return the
    path to the cached file.

    Return:
        Local path (string) of file or if networking is off, last version of file cached on disk.

    Raises:
        In case of non-recoverable file (non-existent or inaccessible url + no cache on disk).
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    url_to_download = url
    etag = None
    if not local_files_only:
        try:
            headers = {"user-agent": http_user_agent(user_agent)}
            r = requests.head(url, headers=headers, allow_redirects=False, proxies=proxies, timeout=etag_timeout)
            r.raise_for_status()
            etag = r.headers.get("X-Linked-Etag") or r.headers.get("ETag")
            # We favor a custom header indicating the etag of the linked resource, and
            # we fallback to the regular etag header.
            # If we don't have any of those, raise an error.
            if etag is None:
                raise OSError(
                    "Distant resource does not have an ETag, we won't be able to reliably ensure reproducibility."
                )
            # In case of a redirect,
            # save an extra redirect on the request.get call,
            # and ensure we download the exact atomic version even if it changed
            # between the HEAD and the GET (unlikely, but hey).
            if 300 <= r.status_code <= 399:
                url_to_download = r.headers["Location"]
        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None == we don't have a connection or we passed local_files_only.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename.split(".")[0] + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                else:
                    raise ValueError(
                        "Connection error, and we cannot find the requested files in the cached path."
                        " Please try again or make sure your Internet connection is on."
                    )

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):

        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager() -> "io.BufferedWriter":
                with open(incomplete_path, "ab") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, mode="wb", dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            logger.info("%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name)

            http_get(url_to_download, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent)

        logger.info("storing %s in cache at %s", url, cache_path)
        os.replace(temp_file.name, cache_path)

        logger.info("creating metadata file for %s", cache_path)
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
Example No. 37
def recover_dir(src,bkp):
    print (datetime.datetime.now()," Recover from " + bkp + " to " + src)
    for root, dirs, files in os.walk(bkp):
        for name in files:
            os.replace(os.path.join(root, name),os.path.join(src, name))
    os.rmdir(bkp)
Example No. 38
def parse(path, clean):
    # print an erasable parsing message, helps with debugging
    parsingMessage = 'Parsing ' + path + "   "
    print(parsingMessage, end='', flush=True)

    with open(path, "r") as inFile:
        inLines = inFile.readlines()

    found = False
    outLines = []
    i = 0

    while i < len(inLines):
        line = inLines[i]

        # find the next auto-generated block
        if line.startswith("//!!! AUTOGENERATED:"):
            found = True
            outLines.append(line)

            # output the template (unless we're cleaning, in which case we completely leave it out)
            if not clean:
                generate(line, outLines)

            # skip lines until END_AUTOGENERATED
            while True:
                i += 1
                if i == len(inLines):
                    raise Exception(
                        "Missing END_AUTOGENERATED block before end of file!")
                skip = inLines[i]
                if skip.startswith("//!!! AUTOGENERATED"):
                    raise Exception(
                        "Missing END_AUTOGENERATED block before next AUTOGENERATED block!"
                    )
                if skip.startswith("//!!! END_AUTOGENERATED"):
                    line = skip
                    break

        outLines.append(line)
        i += 1

    # erase the parsing message
    print('\r' + ' ' * len(parsingMessage) + '\r', end='')

    # if we didn't find any auto-generated blocks, nothing to do
    if not found:
        return False

    # if the file hasn't changed, there is nothing to do
    if inLines == outLines:
        #print("No change: " + path)
        return False

    # write the new file
    print("Meta-templates changed: " + path)
    temp = path + ".temp"
    with open(temp, "w", newline='\n') as outFile:
        outFile.writelines(outLines)
    os.replace(temp, path)
    return True
Example No. 39
def open_url(url: str,
             cache_dir: str = None,
             num_attempts: int = 10,
             verbose: bool = True) -> Any:
    """Download the given URL and return a binary-mode file object to access the data."""
    assert is_url(url, allow_file_urls=True)
    assert num_attempts >= 1

    # Handle file URLs.
    if url.startswith('file:///'):
        return open(url[len('file:///'):], "rb")

    # Lookup from cache.
    url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest()
    if cache_dir is not None:
        cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*"))
        if len(cache_files) == 1:
            return open(cache_files[0], "rb")

    # Download.
    url_name = None
    url_data = None
    with requests.Session() as session:
        if verbose:
            print("Downloading %s ..." % url, end="", flush=True)
        for attempts_left in reversed(range(num_attempts)):
            try:
                with session.get(url) as res:
                    res.raise_for_status()
                    if len(res.content) == 0:
                        raise IOError("No data received")

                    if len(res.content) < 8192:
                        content_str = res.content.decode("utf-8")
                        if "download_warning" in res.headers.get(
                                "Set-Cookie", ""):
                            links = [
                                html.unescape(link)
                                for link in content_str.split('"')
                                if "export=download" in link
                            ]
                            if len(links) == 1:
                                url = requests.compat.urljoin(url, links[0])
                                raise IOError("Google Drive virus checker nag")
                        if "Google Drive - Quota exceeded" in content_str:
                            raise IOError(
                                "Google Drive download quota exceeded -- please try again later"
                            )

                    match = re.search(
                        r'filename="([^"]*)"',
                        res.headers.get("Content-Disposition", ""))
                    url_name = match[1] if match else url
                    url_data = res.content
                    if verbose:
                        print(" done")
                    break
            except:
                if not attempts_left:
                    if verbose:
                        print(" failed")
                    raise
                if verbose:
                    print(".", end="", flush=True)

    # Save to cache.
    if cache_dir is not None:
        safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name)
        cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name)
        temp_file = os.path.join(
            cache_dir,
            "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name)
        os.makedirs(cache_dir, exist_ok=True)
        with open(temp_file, "wb") as f:
            f.write(url_data)
        os.replace(temp_file, cache_file)  # atomic

    # Return data as file object.
    return io.BytesIO(url_data)
Example No. 40
    def run(self, run_id=None):
        """
        Executes a simulation run with the conditions set by the user. (See also set_parameter, set_component_value,
        add_instruction)
        The run_id parameter can be used to override the naming protocol of the log files.
        :return (raw filename, log filename) if simulation is successful else (None, log file name)
        """
        # update number of simulation
        self.runno += 1  # Using internal simulation number in case a run_id is not supplied

        # decide sim required
        if self.netlist is not None:
            # Write the new settings
            run_netlist_file = "%s_%i.net" % (self.circuit_radic, self.runno)
            self.write_netlist(run_netlist_file)
            cmd_run = LTspice_exe + LTspice_arg.get('run') + [run_netlist_file]

            # run the simulation
            start_time = clock_function()
            print(time.asctime(), ": Starting simulation %d" % self.runno)

            # start execution
            retcode = run_function(cmd_run)

            # process the logfile, user can rename it
            dest_log = "%s_%i.log" % (self.circuit_radic, self.runno)
            # print simulation time
            sim_time = time.strftime(
                "%H:%M:%S", time.gmtime(clock_function() - start_time))
            # handle simstate
            if retcode == 0:
                # simulation successful
                print(time.asctime() +
                      ": Simulation Successful. Time elapsed %s:%s" %
                      (sim_time, END_LINE_TERM))
                self.write_log("%d%s" % (self.runno, END_LINE_TERM))
                self.okSim += 1
            else:
                # simulation failed
                self.failSim += 1
                # raise exception for try/except construct
                # SRC: https://stackoverflow.com/questions/2052390/manually-raising-throwing-an-exception-in-python
                # raise ValueError(time.asctime() + ': Simulation number ' + str(self.runno) + ' Failed !')
                print(time.asctime() +
                      ": Simulation Failed. Time elapsed %s:%s" %
                      (sim_time, END_LINE_TERM))
                # update failed parameters and counter
                dest_log += 'fail'

            try:
                os.replace(self.circuit_radic + '_run.log', dest_log)
            except FileNotFoundError:
                pass

            if retcode == 0:  # If simulation is successful
                return self.circuit_radic + '_run.raw', dest_log  # Return rawfile and logfile if simulation was OK
            else:
                return None, dest_log
        else:
            # no simulation required
            raise UserWarning('skipping simulation ' + str(self.runno))
Example No. 41
import sys
import csv
import os
import os.path

source = 'config'
ikeinputfile = 'output/' + source + '_ike.csv'
configinputfile = 'output/' + source + '_VPN.csv'
outputpath = 'output/ASA_' + source + '.txt'

if os.path.isfile(outputpath):
    print("ASA config file already exists; keeping a copy as " + outputpath + ".old")
    os.replace(outputpath, outputpath + '.old')
outputfile = outputpath


def create_ike_pol(id, ikev, enc, hash, grp, lifetime, ikeout):
    # print("starting ike_pol")
    # open the output file once per call instead of once per print
    if ikev == '1':
        with open(ikeout, 'a') as out:
            print("\ncrypto ikev1 policy", id, file=out)
            print("authentication pre-share", file=out)
            print("encryption", enc, file=out)
            print("hash", hash, file=out)
            print("group", grp, file=out)
            print("lifetime", lifetime, file=out)
            # print("exit\n", file=out)
    elif ikev == '2':
        with open(ikeout, 'a') as out:
            print("\ncrypto ikev2 policy", id, file=out)
            # print("authentication pre-share", file=out)
Exemplo n.º 42
0
            if (round(dsf.V[v[0]][v[1]][0], 7), round(dsf.V[v[0]][v[1]][1],
                                                      7)) in changedVertices:
                if dsf.V[v[0]][v[1]][2] != newheight:
                    print(
                        "Vertex at coordinates", dsf.V[v[0]][v[1]][0],
                        dsf.V[v[0]][v[1]][1], "and height",
                        dsf.getElevation(dsf.V[v[0]][v[1]][0],
                                         dsf.V[v[0]][v[1]][1],
                                         dsf.V[v[0]][v[1]][2]),
                        "needs also adjustment!")
                    dsf.V[v[0]][v[1]][2] = newheight
                    count += 1
print(
    count,
    "additional vertices have been changed to new height (only vertices no full triangles)."
)

print("Completed adapting height of vertices now.")
print("Renaming original dsf-file to:", dsffile)
print(
    "CAUTION: If you run this tool again this original file might be overwritten!"
)
try:
    os.replace(dsffile, dsffile + ".org")
except OSError:
    print('Error:', dsffile, 'could not be renamed!')
    sys.exit(1)
print("Applied changes will now be written to:", dsffile)
dsf.write(dsffile)
print("Done.")
Exemplo n.º 43
0
    def load(self,
             dirname,
             callback=None,
             mode=DBMODE_W,
             force_schema_upgrade=False,
             update=True,
             username=None,
             password=None):
        """
        Here we create a sqlite db, and copy the bsddb into it.
        The new db is initially in a new directory, when we finish the copy
        we replace the contents of the original directory with the new db.

        We alway raise an exception to complete this, as the new db still
        needs to be upgraded some more.  When we raise the exception, the new
        db is closed.
        """
        if not update:
            raise DbException("Not Available")
        if not force_schema_upgrade:  # make sure user wants to upgrade
            raise DbSupportedError(_("BSDDB"))

        UpdateCallback.__init__(self, callback)

        # Here we open the dbapi db (a new one) for writing
        new_path = find_next_db_dir()
        os.mkdir(new_path)
        # store dbid in new dir
        dbid = 'sqlite'
        backend_path = os.path.join(new_path, DBBACKEND)
        with open(backend_path, "w", encoding='utf8') as backend_file:
            backend_file.write(dbid)

        super().load(new_path,
                     callback=None,
                     mode='w',
                     force_schema_upgrade=False,
                     username=username,
                     password=password)

        # now read in the bsddb and copy to dbapi
        schema_vers = None
        total = 0
        tables = (('person', 'person'), ('family', 'family'),
                  ('event', 'event'), ('place', 'place'),
                  ('repo', 'repository'), ('source', 'source'),
                  ('citation', 'citation'), ('media', 'media'),
                  ('note', 'note'), ('tag', 'tag'), ('meta_data', 'metadata'))

        # open each dbmap, and get its length for the total
        file_name = os.path.join(dirname, 'name_group.db')
        if os.path.isfile(file_name):
            name_group_dbmap = DB()
            name_group_dbmap.set_flags(DB_DUP)
            name_group_dbmap.open(file_name, 'name_group', DB_HASH, DB_RDONLY)
            total += len(name_group_dbmap)
        else:
            name_group_dbmap = None

        table_list = []
        for old_t, new_t in tables:

            file_name = os.path.join(dirname, old_t + '.db')
            if not os.path.isfile(file_name):
                continue
            dbmap = DB()
            dbmap.open(file_name, old_t, DB_HASH, DB_RDONLY)
            total += len(dbmap)
            table_list.append((old_t, new_t, dbmap))

        self.set_total(total)
        # copy data from each dbmap to sqlite table
        for old_t, new_t, dbmap in table_list:
            self._txn_begin()
            if new_t == 'metadata':
                sql = ("REPLACE INTO metadata (setting, value) VALUES "
                       "(?, ?)")
            else:
                sql = ("INSERT INTO %s (handle, blob_data) VALUES "
                       "(?, ?)" % new_t)

            for key in dbmap.keys():
                self.update()
                data = pickle.loads(dbmap[key], encoding='utf-8')

                if new_t == 'metadata':
                    if key == b'version':
                        # found a schema version in metadata
                        schema_vers = data
                    elif key == b'researcher':
                        if len(data[0]) == 7:  # Pre-3.3 format
                            # Upgrade researcher data to include a locality
                            # field in the address.
                            addr = tuple([data[0][0], ''] + list(data[0][1:]))
                            new_data = (addr, data[1], data[2], data[3])
                        else:
                            new_data = data
                        data = Researcher().unserialize(new_data)
                    elif key == b'name_formats':
                        # upgrade formats if they were saved in the old way
                        for format_ix in range(len(data)):
                            fmat = data[format_ix]
                            if len(fmat) == 3:
                                fmat = fmat + (True, )
                                data[format_ix] = fmat
                    elif key == b'gender_stats':
                        # data is a dict, containing entries (see GenderStats)
                        self.dbapi.execute("DELETE FROM gender_stats")
                        g_sql = ("INSERT INTO gender_stats "
                                 "(given_name, female, male, unknown) "
                                 "VALUES (?, ?, ?, ?)")
                        for name in data:
                            female, male, unknown = data[name]
                            self.dbapi.execute(g_sql,
                                               [name, female, male, unknown])
                        continue  # don't need this in metadata anymore
                    elif key == b'default':
                        # convert to string and change key
                        if isinstance(data, bytes):
                            data = data.decode('utf-8')
                        key = b'default-person-handle'
                    elif key == b'mediapath':
                        # change key
                        key = b'media-path'
                    elif key in [
                            b'surname_list',  # created by db now
                            b'pevent_names',  # obsolete
                            b'fevent_names'
                    ]:  # obsolete
                        continue
                    elif (b'_names' in key or b'refs' in key
                          or b'_roles' in key or b'rels' in key
                          or b'_types' in key):
                        # These are lists, but need to be sets
                        data = set(data)

                self.dbapi.execute(sql,
                                   [key.decode('utf-8'),
                                    pickle.dumps(data)])

            # get schema version from file if not in metadata
            if new_t == 'metadata' and schema_vers is None:
                versionpath = os.path.join(dirname, str(SCHVERSFN))
                if os.path.isfile(versionpath):
                    with open(versionpath, "r") as version_file:
                        schema_vers = int(version_file.read().strip())
                else:
                    schema_vers = 0
                # and put schema version into metadata
                self.dbapi.execute(sql, ["version", schema_vers])
            self._txn_commit()
            dbmap.close()
            if new_t == 'metadata' and schema_vers < _MINVERSION:
                raise DbVersionError(schema_vers, _MINVERSION, _DBVERSION)

        if name_group_dbmap:
            self._txn_begin()
            for key in name_group_dbmap.keys():
                self.update()
                # name_group data (grouping) is NOT pickled
                data = name_group_dbmap[key]
                name = key.decode('utf-8')
                grouping = data.decode('utf-8')
                self.dbapi.execute(
                    "INSERT INTO name_group (name, grouping) VALUES (?, ?)",
                    [name, grouping])
            self._txn_commit()
            name_group_dbmap.close()

        # done with new sqlite db, close it.  Cannot use normal close as it
        # overwrites the metadata.
        self._close()
        try:
            clear_lock_file(self.get_save_path())
        except IOError:
            pass
        self.db_is_open = False
        self._directory = None

        # copy tree name to new dir
        old_db_name = os.path.join(dirname, NAME_FILE)
        db_name = os.path.join(new_path, NAME_FILE)
        with open(old_db_name, "r", encoding='utf8') as _file:
            name = _file.read().strip()
        with open(db_name, "w", encoding='utf8') as _file:
            _file.write(name)
        # remove files from old dir
        for filename in os.listdir(dirname):
            file_path = os.path.join(dirname, filename)
            try:
                os.unlink(file_path)
            except Exception as e:
                LOG.error('Failed to delete %s. Reason: %s' % (file_path, e))
        # copy new db files to old dir
        for filename in os.listdir(new_path):
            old_file_path = os.path.join(new_path, filename)
            file_path = os.path.join(dirname, filename)
            try:
                os.replace(old_file_path, file_path)
            except Exception as e:
                LOG.error('Failed to move %s. Reason: %s' % (old_file_path, e))
        os.rmdir(new_path)

        # done preparing new db, but we still need to finish schema upgrades
        raise DbUpgradeRequiredError(schema_vers, 'xx')
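load() above builds the converted database in a brand-new directory and only at the very end clears the old directory and moves the new files over, one os.replace() per file, so an interrupted conversion leaves the original data untouched. A reduced sketch of that build-aside-then-swap idea, assuming a flat directory of regular files and using invented names:

import os
import tempfile


def build_aside_then_swap(target_dir, build_fn):
    """Populate a sibling temp directory, then move its files into target_dir."""
    parent = os.path.dirname(os.path.abspath(target_dir))
    new_dir = tempfile.mkdtemp(prefix="new_db_", dir=parent)
    build_fn(new_dir)                      # build the new contents first
    for name in os.listdir(target_dir):    # then remove the old files (assumes no subdirectories)
        os.unlink(os.path.join(target_dir, name))
    for name in os.listdir(new_dir):       # and move the new files in, one os.replace per file
        os.replace(os.path.join(new_dir, name), os.path.join(target_dir, name))
    os.rmdir(new_dir)


# usage sketch
os.makedirs("dbdir", exist_ok=True)
build_aside_then_swap("dbdir", lambda d: open(os.path.join(d, "sqlite.db"), "wb").close())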
Exemplo n.º 44
0
import os
path = "."

# read in all filenames
print("Reading all file names")
file_names = set()
for file in os.listdir(path):
    if file.endswith(".asc") | file.endswith(".prj"):
        file_names.add(file)

# create needed dirs
print("Creating needed directories")
dirs = set()
for file in file_names:
    dirs.add(file[:2])

sorted_dirs = sorted(dirs)
for this_dir in sorted_dirs:
    if not os.path.exists(path + "/" + this_dir):
        os.makedirs(path + "/" + this_dir)

# move files
print("Moving files")
for file in file_names:
    os.replace(path + "/" + file, path + "/" + file[:2] + "/" + file)

print("done")
Exemplo n.º 45
0
    docmainfile = cfg[pkg]["docmainfile"]

    # open output file - write it under a temporary name and rename it at the end, otherwise the file would be read and written at the same time!
    outpf = open("%s.tmp" % outf, "w", encoding="utf-8")
    outpf.write(
        "# Complete table of contents\n\n___Do not edit - automatically created from DocMain!___\n\n"
    )

    # open DocMain
    docmain = open(docmainfile, encoding="utf-8")

    # get entries from TOC of DocMain
    toc_entries = gen_pageorder(docmain, outpf)

    # open all referenced MD file in the order given by DocMain
    openmdfiles = gen_open(toc_entries)

    # join all referenced MD files (Linux cat!)
    catopenmdfiles = gen_cat(openmdfiles)

    # get necessary output lines = lines with links from TOC and prepare output for printing
    lnklines = gen_output(catopenmdfiles)

    for l in lnklines:
        outpf.write(l)

    docmain.close()  # don't close too early - the generators must run first!
    outpf.close()

    os.replace("%s.tmp" % outf, outf)
Exemplo n.º 46
0
    def save_data(self, file_name, data_dict_to_save):

        # If directory does not exist, create with .temp_model_folder
        if not os.path.exists(self.folder_path):
            os.makedirs(self.folder_path)

        if file_name[-4:] != ".zip":
            file_name += ".zip"

        current_temp_folder = self._get_temp_folder(file_name)

        try:

            data_format = {}
            attribute_to_save_as_json = {}

            for attrib_name, attrib_data in data_dict_to_save.items():

                current_file_path = current_temp_folder + attrib_name

                if isinstance(attrib_data, DataFrame):
                    # attrib_data.to_hdf(current_file_path + ".h5", key="DataFrame", mode='w', append = False, format="table")
                    # Save human readable version as a precaution. Append "." so that it is classified as auxiliary file and not loaded
                    attrib_data.to_csv(current_temp_folder + "." +
                                       attrib_name + ".csv",
                                       index=True)

                    # Using "fixed" as a format causes a PerformanceWarning because it saves types that are not native of C
                    # This is acceptable because it provides the flexibility of using python objects as types (strings, None, etc..)
                    with warnings.catch_warnings():
                        warnings.filterwarnings("ignore")
                        attrib_data.to_hdf(current_file_path + ".h5",
                                           key="DataFrame",
                                           mode='w',
                                           append=False,
                                           format="fixed")

                elif isinstance(attrib_data, sps.spmatrix):
                    sps.save_npz(current_file_path, attrib_data)

                elif isinstance(attrib_data, np.ndarray):
                    # allow_pickle is FALSE to prevent using pickle and ensure portability
                    np.save(current_file_path, attrib_data, allow_pickle=False)

                else:
                    # Try to serialize it as json; if that fails and the data is a dictionary, recurse into another zip file
                    try:
                        _ = json.dumps(attrib_data,
                                       default=json_not_serializable_handler)
                        attribute_to_save_as_json[attrib_name] = attrib_data

                    except TypeError:

                        if isinstance(attrib_data, dict):
                            dataIO = DataIO(folder_path=current_temp_folder)
                            dataIO.save_data(file_name=attrib_name,
                                             data_dict_to_save=attrib_data)

                        else:
                            raise TypeError(
                                "Type not recognized for attribute: {}".format(
                                    attrib_name))

            # Save list objects
            if len(data_format) > 0:
                attribute_to_save_as_json[".data_format"] = data_format.copy()

            for attrib_name, attrib_data in attribute_to_save_as_json.items():
                current_file_path = current_temp_folder + attrib_name

                # if self._is_windows and len(current_file_path + ".json") >= self._MAX_PATH_LENGTH_WINDOWS:
                #     current_file_path = "\\\\?\\" + current_file_path

                absolute_path = current_file_path + ".json" if current_file_path.startswith(
                    os.getcwd()) else os.getcwd() + current_file_path + ".json"

                assert not self._is_windows or (self._is_windows and len(absolute_path) <= self._MAX_PATH_LENGTH_WINDOWS), \
                    "DataIO: Path of file exceeds {} characters, which is the maximum allowed under standard paths for Windows.".format(self._MAX_PATH_LENGTH_WINDOWS)

                with open(current_file_path + ".json", 'w') as outfile:
                    if isinstance(attrib_data, dict):
                        attrib_data = self._check_dict_key_type(attrib_data)

                    json.dump(attrib_data,
                              outfile,
                              default=json_not_serializable_handler)

            with zipfile.ZipFile(self.folder_path + file_name + ".temp",
                                 'w',
                                 compression=zipfile.ZIP_DEFLATED) as myzip:
                for file_to_compress in os.listdir(current_temp_folder):
                    myzip.write(current_temp_folder + file_to_compress,
                                arcname=file_to_compress)

            # Replace file only after the new archive has been successfully created
            # Prevents accidental deletion of previous versions of the file if the current write fails
            os.replace(self.folder_path + file_name + ".temp",
                       self.folder_path + file_name)

        except Exception as exc:

            shutil.rmtree(current_temp_folder, ignore_errors=True)
            raise exc

        shutil.rmtree(current_temp_folder, ignore_errors=True)
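save_data() above zips everything under a ".temp" name and only calls os.replace() once the archive has been written in full, so a failed save cannot clobber the previous version of the file. A stripped-down sketch of just that zip-then-swap step (folder and file names are placeholders, not the class's real attributes):

import os
import shutil
import tempfile
import zipfile


def save_zip_atomically(folder_to_pack, zip_path):
    """Create zip_path from folder_to_pack without ever exposing a half-written archive."""
    tmp_zip = zip_path + ".temp"
    with zipfile.ZipFile(tmp_zip, "w", compression=zipfile.ZIP_DEFLATED) as myzip:
        for name in os.listdir(folder_to_pack):
            myzip.write(os.path.join(folder_to_pack, name), arcname=name)
    os.replace(tmp_zip, zip_path)  # only a complete archive takes the final name


# usage sketch
workdir = tempfile.mkdtemp()
with open(os.path.join(workdir, "data.json"), "w") as f:
    f.write("{}")
save_zip_atomically(workdir, "model.zip")
shutil.rmtree(workdir)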
Exemplo n.º 47
0
#  https://www.youtube.com/watch?v=ve2pmm5JqmI&index=26&list=PL-osiE80TeTt2d9bfVyTiXJA-UTHn6WwU
'''
Python Tutorial: Automate Parsing and Renaming of Multiple Files
'''
import os

#Example1
# os.chdir('F:\Learning\Java\Derek Banas\Java')
# print(os.getcwd())
#
# for f in os.listdir():
#     f_name,f_ext = os.path.splitext(f)  # splits the text at the dot - gives the filename and extension separately
#     f_course,f_format,f_general,f_num=f_name.split() # split on the spaces in the file name
#     f_num=f_num.zfill(2)  # this will pad f_num with a leading 0 to 2 digits if necessary
#     new_name = '{}-{}{}'.format(f_num,f_course,f_ext)
#     os.rename(f,new_name)  # this will rename the old file to the new file name

#Example2
os.chdir(r'F:\Learning\Java\Derek Banas')  # raw string so the backslashes are not treated as escapes
i = 0
for f in os.listdir():
    i += 1
    f_name, f_space = os.path.splitext(f)
    f_name = '{}{}'.format(f_name, '.mp4')
    os.replace(f, f_name)
Exemplo n.º 48
0
import os
from datetime import datetime

# main_path = os.getcwd() # current working directory
main_path = input(
    "Enter the absolute path to the directory: ")  # the path has to be valid

for root, _, files in os.walk(main_path):
    for file in files:
        date = os.stat(os.path.join(root, file)).st_mtime
        date_str = datetime.fromtimestamp(date).strftime('%Y_%m_%d')
        new_dest = os.path.join(main_path, date_str)
        if not os.path.exists(new_dest):
            os.mkdir(os.path.join(main_path, date_str))
        os.replace(os.path.join(root, file), os.path.join(new_dest, file))
    # break # only one level

print("Zrobione!")
Exemplo n.º 49
0
    def upload_file(self):
        """Upload file to server and return response data"""
        payload = None
        try:
            if 'Content-Length' in self.headers:
                try:
                    content_len = int(self.headers['Content-Length'])
                except ValueError:
                    raise HTTPStatusError(HTTP_STATUS["BAD_REQUEST"],
                                          "Wrong parameters")
                if content_len:

                    # create a temp file with a bounded buffer size
                    payload = NamedTemporaryFile(dir=FILES_DIR,
                                                 buffering=CHUNK_SIZE,
                                                 delete=False)

                    # use rfile.read1() instead of rfile.read() since
                    # rfile.read() would block until exactly the requested
                    # number of bytes has been read
                    while True:
                        copied_bytes = self.rfile.read1(CHUNK_SIZE)
                        payload.write(copied_bytes)
                        if len(copied_bytes) < CHUNK_SIZE:
                            break
                    payload.seek(0)

                    file_hash = sha256_hash_hex(payload)

                    try:
                        mkdir(FILES_DIR)
                    except FileExistsError as err:
                        print(err)

                    file_dir = FILES_DIR + file_hash[:2] + '/'
                    file_path = file_dir + file_hash

                    if not path.exists(file_path):
                        try:
                            mkdir(file_dir)
                        except FileExistsError as err:
                            print(err)
                        # protection against a race condition: atomically
                        # replace the temporary file with file_path
                        tmp_file = payload.name
                        replace(tmp_file, file_path)

                        data = bytes(file_hash.encode('UTF-8'))
                        content_len = len(data)
                        return ResponseData(
                            status=HTTP_STATUS['CREATED'],
                            content_type='text/plain; charset=utf-8',
                            content_length=content_len,
                            data_stream=data)

                    return ResponseData(status=HTTP_STATUS['OK'])

            raise HTTPStatusError(HTTP_STATUS["BAD_REQUEST"],
                                  "Wrong parameters")

        except OSError as err:
            raise HTTPStatusError(HTTP_STATUS["INTERNAL_SERVER_ERROR"],
                                  str(err))
        finally:
            if payload is not None:
                try:
                    remove(payload.name)
                except OSError as err:
                    if err.errno == 2:  # ENOENT: the temp file is already gone
                        pass
                    else:
                        raise HTTPStatusError(
                            HTTP_STATUS["INTERNAL_SERVER_ERROR"], str(err))
Exemplo n.º 50
0
def export(dst_format,
           task_id=None,
           project_id=None,
           server_url=None,
           save_images=False):
    try:
        if task_id is not None:
            db_instance = Task.objects.get(pk=task_id)
            logger = slogger.task[task_id]
            cache_ttl = TASK_CACHE_TTL
            export_fn = task.export_task
        else:
            db_instance = Project.objects.get(pk=project_id)
            logger = slogger.project[project_id]
            cache_ttl = PROJECT_CACHE_TTL
            export_fn = project.export_project

        cache_dir = get_export_cache_dir(db_instance)

        exporter = EXPORT_FORMATS[dst_format]
        output_base = '%s_%s' % ('dataset' if save_images else 'annotations',
                                 make_file_name(to_snake_case(dst_format)))
        output_path = '%s.%s' % (output_base, exporter.EXT)
        output_path = osp.join(cache_dir, output_path)

        instance_time = timezone.localtime(
            db_instance.updated_date).timestamp()
        if isinstance(db_instance, Project):
            tasks_update = list(
                map(
                    lambda db_task: timezone.localtime(db_task.updated_date).
                    timestamp(), db_instance.tasks.all()))
            instance_time = max(tasks_update + [instance_time])
        if not (osp.exists(output_path) and \
                instance_time <= osp.getmtime(output_path)):
            os.makedirs(cache_dir, exist_ok=True)
            with tempfile.TemporaryDirectory(dir=cache_dir) as temp_dir:
                temp_file = osp.join(temp_dir, 'result')
                export_fn(db_instance.id,
                          temp_file,
                          dst_format,
                          server_url=server_url,
                          save_images=save_images)
                os.replace(temp_file, output_path)

            archive_ctime = osp.getctime(output_path)
            scheduler = django_rq.get_scheduler()
            cleaning_job = scheduler.enqueue_in(time_delta=cache_ttl,
                                                func=clear_export_cache,
                                                file_path=output_path,
                                                file_ctime=archive_ctime,
                                                logger=logger)
            logger.info(
                "The {} '{}' is exported as '{}' at '{}' "
                "and available for downloading for the next {}. "
                "Export cache cleaning job is enqueued, id '{}'".format(
                    "project" if isinstance(db_instance, Project) else 'task',
                    db_instance.name, dst_format, output_path, cache_ttl,
                    cleaning_job.id))

        return output_path
    except Exception:
        log_exception(logger)
        raise
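export() above regenerates the dataset only when the cached file is missing or older than the instance's last update, and it writes the new archive inside a TemporaryDirectory before os.replace()ing it into the cache, so downloaders always see a complete file. A condensed sketch of that freshness check plus atomic publish (the producer callback and path are hypothetical):

import os
import os.path as osp
import tempfile


def cached_export(output_path, instance_time, produce_fn):
    """Rebuild output_path only if it is missing or stale, publishing the result atomically."""
    if osp.exists(output_path) and instance_time <= osp.getmtime(output_path):
        return output_path  # cache is still fresh
    cache_dir = osp.dirname(osp.abspath(output_path))
    os.makedirs(cache_dir, exist_ok=True)
    with tempfile.TemporaryDirectory(dir=cache_dir) as temp_dir:
        temp_file = osp.join(temp_dir, "result")
        produce_fn(temp_file)               # write the export into the temp directory
        os.replace(temp_file, output_path)  # same filesystem, so this is atomic
    return output_path


# usage sketch: rebuild whenever the "instance" changed after the cached file was written
cached_export("cache/annotations.zip", instance_time=0.0,
              produce_fn=lambda p: open(p, "wb").close())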
Exemplo n.º 51
0
for subdir, dirs, files in os.walk(path):
    for file in files:
        if file[4] == '1':  # make sure it is speech only
            emotion = int(file[7])
            if emotion == 1 or emotion == 2:  # neutral / calm
                path_paste = path_out + '/0 - Neutral/'
            elif emotion == 3:  # happy
                path_paste = path_out + '/1 - Happy/'
            elif emotion == 4:  # sadness
                path_paste = path_out + '/2 - Sad/'
            elif emotion == 5:  # anger
                path_paste = path_out + '/3 - Anger/'
            elif emotion == 6:  # fear
                path_paste = path_out + '/4 - Fear/'
            elif emotion == 7:  # disgust
                path_paste = path_out + '/5 - Disgust/'
            elif emotion == 8:  # surprise
                path_paste = path_out + '/6 - Surprise/'

        # Create the destination path if it does not exist
        if not os.path.exists(path_paste):
            os.makedirs(path_paste)
        # Move the files into place
        os.replace(subdir + '/' + file, path_paste + file)

# Delete empty folders
shutil.rmtree(path)

# %%
Exemplo n.º 52
0
dates = []
folder_names = []

for k, f in enumerate(files):
    with open(f, 'rb') as o:
        tags = exifreader.process_file(o, details=False)

        if tags.get('Image DateTime'):
            dates.append([f, str(tags['Image DateTime'])])
        elif tags.get('EXIF DateTimeOriginal'):
            dates.append([f, str(tags['EXIF DateTimeOriginal'])])
        else:
            dates.append([f, str(modification_date(f))])

for _, d in dates:
    date = datetime.datetime.strptime(d, "%Y:%m:%d %H:%M:%S")
    folder_names.append('%s_%02d' % (date.year, date.month))

for u in list(set(folder_names)):
    if not os.path.exists(out_dir + u):
        os.mkdir(out_dir + u)

for d in dates:
    move(d)

for dirs in glob.glob(out_dir + '*'):
    for k, file in enumerate(glob.glob(dirs + '/*')):
        newfilename = ('%s/%s.%s' % (os.path.dirname(file), k + 1,
                                     os.path.basename(file).split('.')[1]))
        os.replace(file, newfilename)
Exemplo n.º 53
0
def store_model_weights(model,
                        checkpoint_path,
                        checkpoint_key="model",
                        strict=True):
    """
    This method can be used to prepare weights files for new models. It receives as
    input a model architecture and a checkpoint from the training script and produces
    a file with the weights ready for release.

    Examples:
        from torchvision import models as M

        # Classification
        model = M.mobilenet_v3_large(pretrained=False)
        print(store_model_weights(model, './class.pth'))

        # Quantized Classification
        model = M.quantization.mobilenet_v3_large(pretrained=False, quantize=False)
        model.fuse_model(is_qat=True)
        model.qconfig = torch.ao.quantization.get_default_qat_qconfig('qnnpack')
        _ = torch.ao.quantization.prepare_qat(model, inplace=True)
        print(store_model_weights(model, './qat.pth'))

        # Object Detection
        model = M.detection.fasterrcnn_mobilenet_v3_large_fpn(pretrained=False, pretrained_backbone=False)
        print(store_model_weights(model, './obj.pth'))

        # Segmentation
        model = M.segmentation.deeplabv3_mobilenet_v3_large(pretrained=False, pretrained_backbone=False, aux_loss=True)
        print(store_model_weights(model, './segm.pth', strict=False))

    Args:
        model (pytorch.nn.Module): The model on which the weights will be loaded for validation purposes.
        checkpoint_path (str): The path of the checkpoint we will load.
        checkpoint_key (str, optional): The key of the checkpoint where the model weights are stored.
            Default: "model".
        strict (bool): whether to strictly enforce that the keys
            in :attr:`state_dict` match the keys returned by this module's
            :meth:`~torch.nn.Module.state_dict` function. Default: ``True``

    Returns:
        output_path (str): The location where the weights are saved.
    """
    # Store the new model next to the checkpoint_path
    checkpoint_path = os.path.abspath(checkpoint_path)
    output_dir = os.path.dirname(checkpoint_path)

    # Deep copy to avoid side-effects on the model object.
    model = copy.deepcopy(model)
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # Load the weights to the model to validate that everything works
    # and remove unnecessary weights (such as auxiliaries, etc)
    if checkpoint_key == "model_ema":
        del checkpoint[checkpoint_key]["n_averaged"]
        torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
            checkpoint[checkpoint_key], "module.")
    model.load_state_dict(checkpoint[checkpoint_key], strict=strict)

    tmp_path = os.path.join(output_dir, str(model.__hash__()))
    torch.save(model.state_dict(), tmp_path)

    sha256_hash = hashlib.sha256()
    with open(tmp_path, "rb") as f:
        # Read and update hash string value in blocks of 4K
        for byte_block in iter(lambda: f.read(4096), b""):
            sha256_hash.update(byte_block)
        hh = sha256_hash.hexdigest()

    output_path = os.path.join(output_dir, "weights-" + str(hh[:8]) + ".pth")
    os.replace(tmp_path, output_path)

    return output_path
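store_model_weights() above saves the cleaned state dict under a temporary name, hashes the file in 4 KiB blocks, and then os.replace()s it to a name that embeds the first characters of the digest, so the published filename always matches its contents. The hashing-and-renaming step in isolation might look like the sketch below (pure Python, no torch; the prefix and digest length are just this example's defaults):

import hashlib
import os


def publish_with_hash(tmp_path, prefix="weights-", digest_chars=8):
    """Rename tmp_path so its final name embeds a prefix of its SHA-256 digest."""
    sha256_hash = hashlib.sha256()
    with open(tmp_path, "rb") as f:
        for byte_block in iter(lambda: f.read(4096), b""):  # hash in 4K blocks
            sha256_hash.update(byte_block)
    hh = sha256_hash.hexdigest()
    output_path = os.path.join(os.path.dirname(os.path.abspath(tmp_path)),
                               prefix + hh[:digest_chars] + ".pth")
    os.replace(tmp_path, output_path)
    return output_path


# usage sketch
with open("checkpoint.tmp", "wb") as f:
    f.write(b"fake weights")
print(publish_with_hash("checkpoint.tmp"))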
Exemplo n.º 54
0
    def test_method(self):
        os.replace(os.path.join(".", "WebhookLearning", "views"), os.path.join("..", "views"))
Exemplo n.º 55
0
cache_folders = set()
for root, directories, files in os.walk(os.getcwd()):
    for directory in directories:
        if directory == '__pycache__':
            dir_path = os.path.join(root, directory)
            if 's2protocol' in root:
                print(f'Removing: {dir_path}')
                shutil.rmtree(dir_path)

# Run pyinstaller
os.system(
    'cmd /c "pyinstaller.exe --onefile --noconsole -i=src/OverlayIcon.ico --add-data venv\Lib\site-packages\s2protocol;s2protocol --add-data src;src --add-data SCOFunctions\SC2Dictionaries\*.csv;SCOFunctions\SC2Dictionaries --add-data SCOFunctions\SC2Dictionaries\*.txt;SCOFunctions\SC2Dictionaries SCO.py"'
)

# Move SCO.exe
os.replace('dist/SCO.exe', 'SCO.exe')

# Zip
file_name = f"SC2CoopOverlay (x.x).zip"

to_zip = ['SCO.exe', 'Read me (Github).url']
to_zip.extend([
    f'Layouts/{f}' for f in os.listdir('Layouts')
    if f not in ('custom.css', 'custom.js')
])
to_zip.extend([f'Layouts/Icons/{f}' for f in os.listdir('Layouts/Icons')])
to_zip.extend(
    [f'Layouts/Commanders/{f}' for f in os.listdir('Layouts/Commanders')])
to_zip.extend([
    f'Layouts/Mutator Icons/{f}' for f in os.listdir('Layouts/Mutator Icons')
])
Exemplo n.º 56
0
def update_tabs(df,path,file,matrix,sensor,datum,interval,initials):
    df_block=pd.read_csv('G:/Shared drives/CZN_HydroGroup/Data/data_tables/block.csv')
    
    block_start_time=df.index[0]
    block_end_time=df.index[len(df.index)-1]
    site_id=file.split('_')[0]
    n=len(df['LEVEL'])-1
    process_date=date.today().strftime('%m/%d/%Y')
    quality='P'
    sensor_sn=find_sn(path,file)
    
    blockno=df_block['blockno'].max()+1
    ind1=int(df_block['index2'].max()+1)
    ind2=int(ind1+n)
    dat_type='L'
    unit='M'
    df_block=df_block.append({'blockno':blockno,
                              'site_id':site_id,
                              'start_time':block_start_time,
                              'index1':ind1,
                              'end_time':block_end_time,
                              'index2':ind2,
                              'matrix':matrix,
                              'data_type':dat_type,
                              'sensor':sensor,
                              'sensor_sn':sensor_sn,
                              'unit':unit,
                              'interval':interval,
                              'mp_datum':datum,
                              'accuracy':acc,
                              'drift':np.nan,
                              'process_initials':initials,
                              'process_date':process_date,
                              'quality':quality},ignore_index=True)
    
    df_data=pd.DataFrame()
    df_data['index']=list(range(ind1,ind2+1,1))
    df_data['blockno']=blockno
    df_data['amount']=df['LEVEL_cor'].to_list()
    
    #SC
    dat_type='C'
    unit='US'
    blockno=df_block['blockno'].max()+1
    ind1=int(df_block['index2'].max()+1)
    ind2=int(ind1+n)
    df_block=df_block.append({'blockno':blockno,
                              'site_id':site_id,
                              'start_time':block_start_time,
                              'index1':ind1,
                              'end_time':block_end_time,
                              'index2':ind2,
                              'matrix':matrix,
                              'data_type':dat_type,
                              'sensor':sensor,
                              'sensor_sn':sensor_sn,
                              'unit':unit,
                              'interval':interval,
                              'mp_datum':datum,
                              'accuracy':np.nan,
                              'drift':drift,
                              'process_initials':initials,
                              'process_date':process_date,
                              'quality':quality},ignore_index=True)
    #df_data=pd.DataFrame()
    df_app=pd.DataFrame()
    df_app['index']=list(range(ind1,ind2+1,1))
    df_app['blockno']=blockno
    df_app['amount']=df['SC_cor'].to_list()
    frames=[df_data,df_app]
    df_data=pd.concat(frames)
    
    
    #Temp    
    dat_type='T'
    unit='DC' 
    blockno=df_block['blockno'].max()+1
    ind1=int(df_block['index2'].max()+1)
    ind2=int(ind1+n)
    df_block=df_block.append({'blockno':blockno,
                              'site_id':site_id,
                              'start_time':block_start_time,
                              'index1':ind1,
                              'end_time':block_end_time,
                              'index2':ind2,
                              'matrix':matrix,
                              'data_type':dat_type,
                              'sensor':sensor,
                              'sensor_sn':sensor_sn,
                              'unit':unit,
                              'interval':interval,
                              'mp_datum':datum,
                              'accuracy':np.nan,
                              'drift':np.nan,
                              'process_initials':initials,
                              'process_date':process_date,
                              'quality':quality},ignore_index=True)
    #df_data=pd.DataFrame()
    df_app=pd.DataFrame()
    df_app['index']=list(range(ind1,ind2+1,1))
    df_app['blockno']=blockno
    df_app['amount']=df['TEMPERATURE'].to_list()
    frames=[df_data,df_app]
    df_data=pd.concat(frames)
    
    df_block.to_csv('G:/Shared drives/CZN_HydroGroup/Data/data_tables/block.csv',index=False)
    df_data=df_data.to_csv('G:/Shared drives/CZN_HydroGroup/Data/data_tables/data.csv',mode='a',header=False,index=False)
    #Move processed file to postprocess_files folder    
    df.to_csv(path+file)
    postpath='G:/Shared drives/CZN_HydroGroup/Data/postprocess_files/'
    os.replace(path+file,postpath+file)
Exemplo n.º 57
0
def main(args=None):

    cpu_number = multiprocessing.cpu_count()

    parser = argparse.ArgumentParser(description='Path of networks')
    parser.add_argument('-m', type=str, help='Multiplex')

    args = parser.parse_args(args)
    graph_path = args.m

    ########################################################################
    # Parameters
    ########################################################################

    EMBED_DIMENSION = 128
    CLOSEST_NODES = np.int64(20)
    NUM_SAMPLED = np.int64(3)
    LEARNING_RATE = np.float64(0.01)
    KL = False
    NB_CHUNK = np.int64(1)
    CHUNK_SIZE = np.int64(10)
    NUM_STEPS_1 = np.int64(100 * 10**6 / CHUNK_SIZE)
    graph_name = os.path.basename(graph_path)

    ###################################################################################
    # MULTIVERSE-M
    ###################################################################################
    r_readRDS = robjects.r['readRDS']

    print('RWR-M')
    proc = subprocess.Popen(['Rscript',  './RWR/GenerateSimMatrix.R', \
              '-n', "."+ args.m, '-o', \
              '../ResultsRWR/MatrixSimilarityMultiplex'+graph_name, '-c',str(cpu_number)])

    proc.wait()
    pid = proc.pid
    proc.kill()

    print('RWR done')

    r_DistancematrixPPI = r_readRDS('./ResultsRWR/MatrixSimilarityMultiplex' +
                                    graph_name + '.rds')

    gc.collect()

    ########################################################################
    # Processing of the network
    ########################################################################
    reverse_data_DistancematrixPPI, list_neighbours, nodes, data_DistancematrixPPI, nodes_incomponent, neighborhood, nodesstr \
     = f.netpreprocess(r_DistancematrixPPI, graph_path, CLOSEST_NODES)

    ########################################################################
    # Initialization
    ########################################################################
    embeddings = np.random.normal(0, 1, [np.size(nodes), EMBED_DIMENSION])

    ########################################################################
    # Training and saving best embeddings
    ########################################################################

    nodes = np.asarray(nodes)
    embeddings = f.train(neighborhood, nodes, list_neighbours, NUM_STEPS_1, NUM_SAMPLED, LEARNING_RATE, \
                         CLOSEST_NODES, CHUNK_SIZE, NB_CHUNK, embeddings, reverse_data_DistancematrixPPI)

    X = dict(zip(range(embeddings.shape[0]), embeddings))
    X = {str(int(nodesstr[key]) + 1): X[key] for key in X}
    np.save('embeddings_M', X)
    date = datetime.datetime.now()
    os.replace('embeddings_M.npy', './ResultsMultiVERSE/' + 'embeddings_M.npy')

    print('End')
Exemplo n.º 58
0
            if os.path.islink(filepath):
                oldsymlinks.add(filepath)
        for filename in filenames:
            filepath = os.path.join(dirpath, filename)[2:].encode(
                'ascii')  # [2:] is to strip the ./ prefix
            if not os.path.islink(filepath) and not filepath.startswith(
                    b'snapshotindex.txt') and not filepath.startswith(
                        b'raspbmirrordownloads.txt'):
                basefiles.add(filepath)

print('stage 4, moves and deletions')

for filepath in fileupdates:
    print((b'renaming ' + makenewpath(filepath) + b' to ' +
           filepath).decode('ascii'))
    os.replace(makenewpath(filepath), filepath)

for (filepath, symlinktarget) in symlinkupdates:
    print('updating symlink ' + filepath.decode('ascii') + ' -> ' +
          symlinktarget.decode('ascii'))
    os.remove(filepath)
    os.symlink(symlinktarget, filepath)

removedfiles = (basefiles | oldsymlinks) - (set(knownfiles.keys())
                                            | newsymlinks)


def isemptydir(dirpath):
    #scandir would be significantly more efficient, but needs python 3.6 or above
    #which is not reasonable to expect at this time.
    #return os.path.isdir(dirpath) and ((next(os.scandir(dirpath), None)) is None)
Exemplo n.º 59
0
    shutil.rmtree('output')
    shutil.rmtree('output_2')
    shutil.rmtree('output_3')
os.mkdir('output')
os.mkdir('output_2')
os.mkdir('output_3')

os.system('ffmpeg -i '+vidFile+' -f image2 "output/output_%06d.bmp"')
#
# FIRST CALL
#
for (path, dirnames, filenames) in os.walk('output'):
    files.extend(os.path.join(name) for name in filenames)
for file in islice(files, 2, None, 3):
    print('FIRST: '+file)
    os.replace('output/'+file,'output_2/'+file)
files.clear()
time.sleep(2)
#
# SECOND CALL
#
if secondCall:
    for (path, dirnames, filenames) in os.walk('output'):
        files.extend(os.path.join(name) for name in filenames)
    for file in islice(files, 2, None, 2):
        print('SECOND: '+file)
        os.replace('output/'+file,'output_2/'+file)

files.clear()
time.sleep(2)
#Number every 3rd (minus them)
Exemplo n.º 60
0
            elif data[begin + bnum:begin + bnum + 1] == b'\n':
                result[data[begin:bnum + begin]] = b''
                break
            elif data[begin + bnum:begin + bnum + 1] == b',':
                result[data[begin:bnum + begin]] = b''
                break
            else:
                bnum = bnum + 1

    for swap in list(result):
        data = data.replace(swap, result[swap])

    wflag = 1
else:
    if args.status:
        print('no')
        sys.exit()

if wflag:
    if args.backup:
        from shutil import copyfile
        if not args.status and existf(filename + '.bak') != 0:
            print('Backing up ' + filename + ' ...')
            copyfile(filename, filename + '.bak')

    with open(filename + '_new', 'wb') as o:
        devnull = o.write(data)
    os.replace(filename + '_new', filename)
    print('\n' + filename + ' patched\n')
else:
    print('\nNo changes made to ' + filename + '\n')