def save(self, params=None, metrics=None, remarks="", model=None):
    """Persist one iteration: pickle the model, archive the current code
    and record params/metrics in the db.

    :param params: dict of hyper-parameters for this iteration.
    :param metrics: dict of evaluation metrics for this iteration.
    :param remarks: free-form note stored with the record.
    :param model: the model object to pickle; required.
    """
    # Mutable defaults ({} in the original signature) are shared across
    # calls; normalize None to a fresh dict instead.
    params = {} if params is None else params
    metrics = {} if metrics is None else metrics
    if not model:
        # The original logged this warning but then carried on and
        # pickled ``None`` into a freshly created iteration dir.
        # Bail out early instead.
        log_message("No model given for saving. Try command" + \
            " 'gitml save -h'.")
        return
    unique_id = generate_unique_id()
    create_dir_if_not_exist(self._unique_dir(unique_id))
    model_path = self._model_path(unique_id)
    code_path = self._code_archival_path(unique_id)
    if not _path_exists(model_path):
        # Saving the model object as pickle file; the context manager
        # closes the handle (the original leaked it).
        with open(model_path, "wb") as model_file:
            model_dump(model, model_file)
    if not _path_exists(code_path):
        self._archive_code(code_path)
    # Adding state to db.
    self._create_record(unique_id=unique_id, remarks=remarks,
                        params=params, metrics=metrics)
    log_message("Iteration saved : %s" % unique_id, tag=True)
def pprint( self, dump_files_path = None, pformat_config = _PrettyFormatConfig( ) ):
    """ Dumps all loaded data to stdout or to files in a directory.

        When ``dump_files_path`` is ``None`` every table prints to stdout;
        otherwise the directory is created (mode 0o700) if missing and one
        ``<table>.txt`` file is written per table.

        NOTE(review): the ``_PrettyFormatConfig( )`` default is a single
        shared instance (evaluated once at def time) — kept for interface
        compatibility; confirm callers never mutate it. """
    # Only touch the file system when a dump directory was actually
    # requested. The original unconditionally called
    # _path_exists(dump_files_path) / mkdir, which raises when called
    # with the default of None — before the stdout branch below could run.
    if dump_files_path is not None:
        if not _path_exists( dump_files_path ):
            _os.mkdir( dump_files_path, 0o700 )
        else:
            if not _path_is_directory( dump_files_path ):
                raise IOError( "Not a directory: {0}".format( dump_files_path ) )
            if not _os.access( dump_files_path, _os.R_OK | _os.W_OK | _os.X_OK ):
                raise IOError( "Could not access directory: {0}".format( dump_files_path ) )
    tables = self._tables
    for table in tables.values( ):
        if None is dump_files_path:
            table.pprint( tables, pformat_config )
        else:
            dump_file_path = _path_join(
                dump_files_path,
                table.FILE_NAME_BASE( ) + _path_extsep + "txt" )
            with open( dump_file_path, "w" ) as dump_file:
                stream_print \
                    = _functools.partial( print, file = dump_file )
                table.pprint( tables, pformat_config, stream_print )
def setup(cls, project_path):
    """Setup task for creating db files for all models.

    :param project_path: root of the project; db files go under
        ``<project_path>/<cls.DATA_DIR>``.
    :return: the data directory path.
    """
    # Create data directory if not exist.
    data_dir = _path_join(project_path, cls.DATA_DIR)
    create_dir_if_not_exist(data_dir)
    # Create db file if not exist.
    for model_name in cls.MODELS:
        db_path = _path_join(data_dir, "%s.json" % model_name)
        if not _path_exists(db_path):
            # The original passed "utf-8" as the third positional
            # argument of builtin open(), which is `buffering` (an int)
            # and raises TypeError. An encoding is irrelevant for
            # creating an empty file, so open in plain append mode.
            open(db_path, "a").close()
    return data_dir
def restore(self):
    """Move every stashed entry back into the working path.

    Exits with a message when there is no stash, when the stash is
    empty, or after a successful restore.
    """
    if not _path_exists(self.stash_path):
        exit_with_message("No stash found.")
    stashed_entries = listdir(self.stash_path)
    if not stashed_entries:
        exit_with_message("Nothing to restore. Stash is empty.")
    for entry in stashed_entries:
        move(_path_join(self.stash_path, entry), self.path)
    exit_with_message("Stash restored.")
def load_model(self, unique_id):
    """Load the pickled model saved for iteration *unique_id*.

    :param unique_id: iteration id; surrounding whitespace is ignored.
    :return: the unpickled model object.
    :raises ValueError: if the id is blank or no model file exists for it.
    """
    unique_id = unique_id.strip()
    if not unique_id:
        raise ValueError("[GitML] Invalid id.")
    model_path = self._model_path(unique_id)
    if not _path_exists(model_path):
        error_msg = "[GitML] Failed loading model." + \
            " Invalid iteration id %s." % str(unique_id)
        raise ValueError(error_msg)
    # Context manager closes the handle even if unpickling raises
    # (the original left the file object open).
    with open(model_path, "rb") as model_file:
        return model_load(model_file)
def commit(self, unique_id):
    """Promote iteration *unique_id* to a permanent commit.

    Copies the iteration directory and its db record into the commit
    area, removes the iteration originals, then creates a git commit.

    :raises InvalidIterationException: if no such iteration exists.
    """
    source_dir = self._unique_dir(unique_id)
    if not _path_exists(source_dir):
        raise InvalidIterationException("No such iteration %s" % unique_id)
    target_dir = self._unique_commit_dir(unique_id)
    if _path_exists(target_dir):
        exit_with_message("Iteration is committed already.")
    # Copy the iteration payload into the commit area.
    _dir_copy(source_dir, target_dir)
    # Mirror the iteration's record into the commit db.
    record = self._find_by_id(unique_id)
    self._create_commit_record(record)
    # Drop the now-duplicated iteration data and record.
    _rmdir(source_dir)
    self._delete_record_by_id(unique_id)
    self.git.commit_all("Iteration %s" % unique_id)
    exit_with_message("Iteration committed : %s" % unique_id)
def reuse(self, unique_id):
    """Restore the code archived for iteration or commit *unique_id*
    into the current workspace.

    Exits with a message when the id is unknown, the archive is
    missing, or the workspace still has unstashed changes.
    """
    # Locate either an iteration or a commit by unique id.
    _object = self._iteration_or_commit(unique_id)
    if not _object:
        exit_with_message("No iterations found.")
    # Returns code path of object which can be an iteration
    # or a commit.
    code_path = self._code_archival_path(unique_id, _object)
    if not _path_exists(code_path):
        # Fixed grammar of the user-facing message
        # (was "Not able restore code...").
        exit_with_message("Not able to restore code for iteration.")
    if not self.workspace.is_empty():
        exit_with_message("Workspace is not empty. Please stash your " + \
            "changes using 'gitml stash'")
    # Copies code contents to workspace.
    _dir_copy_contents(code_path, self.project_path)
def read(self, section, filepath, ignore_missing=False):
    """
    Read `filepath` and make its contents available as `section`

    :raises ConfigError: if reading or parsing a file fails
    """
    if ignore_missing and not _path_exists(filepath):
        # Remember the path even though nothing was read.
        self._files[section] = filepath
        return
    try:
        with open(filepath, 'r') as fh:
            content = fh.read()
    except OSError as exc:
        raise errors.ConfigError(f'{filepath}: {exc.strerror}')
    cfg = self._parse(section, content, filepath)
    for sub_name, sub in cfg.items():
        for opt_name, opt_value in sub.items():
            try:
                self._set(section, sub_name, opt_name, opt_value)
            except errors.ConfigError as exc:
                raise errors.ConfigError(f'{filepath}: {exc}')
    self._files[section] = filepath
import brotlicffi as brotli except ImportError: try: import brotli except ImportError: brotli = None try: import certifi except ImportError: certifi = None else: from os.path import exists as _path_exists # The certificate may not be bundled in executable if not _path_exists(certifi.where()): certifi = None try: from Cryptodome.Cipher import AES as Cryptodome_AES except ImportError: try: from Crypto.Cipher import AES as Cryptodome_AES except ImportError: Cryptodome_AES = None else: try: # In pycrypto, mode defaults to ECB. See: # https://www.pycryptodome.org/en/latest/src/vs_pycrypto.html#:~:text=not%20have%20ECB%20as%20default%20mode Cryptodome_AES.new(b'abcdefghijklmnop') except TypeError:
"-A", "--generate-alpha-channel", action="store_true", default=False, ) clargs_parser.add_argument( "trs_file_paths", metavar="FILE", type=str, nargs="+", ) clargs = clargs_parser.parse_args() directory_path = clargs.output_directory_path if not _path_exists(directory_path): os.mkdir(directory_path, 0o700) else: if not _path_is_directory(directory_path): raise IOError("Not a directory: {0}".format(directory_path)) if not os.access(directory_path, os.R_OK | os.W_OK | os.X_OK): raise IOError( "Could not access directory: {0}".format(directory_path)) output_format = clargs.output_format for file_path in clargs.trs_file_paths: # TODO: Notify user which file is being processed. if not os.access(file_path, os.R_OK):
) clargs_parser.add_argument( "-L", "--fluff-lo-bits", action = "store_true", default = False, ) clargs_parser.add_argument( "-A", "--generate-alpha-channel", action = "store_true", default = False, ) clargs_parser.add_argument( "trs_file_paths", metavar = "FILE", type = str, nargs = "+", ) clargs = clargs_parser.parse_args( ) directory_path = clargs.output_directory_path if not _path_exists( directory_path ): os.mkdir( directory_path, 0o700 ) else: if not _path_is_directory( directory_path ): raise IOError( "Not a directory: {0}".format( directory_path ) ) if not os.access( directory_path, os.R_OK | os.W_OK | os.X_OK ): raise IOError( "Could not access directory: {0}".format( directory_path ) ) output_format = clargs.output_format for file_path in clargs.trs_file_paths:
def create(*, content_path, announce, source, torrent_path,
           init_callback, progress_callback, info_callback,
           overwrite=False, exclude=(), reuse_torrent_path=None):
    """
    Generate and write torrent file

    :param str content_path: Path to the torrent's payload
    :param str announce: Announce URL
    :param str source: Value of the ``source`` field in the torrent. This
        makes the torrent unique for each tracker to avoid cross-seeding
        issues, so it is usually the tracker's abbreviated name.
    :param str torrent_path: Path of the generated torrent file
    :param init_callback: Callable that is called once before torrent
        generation commences. It gets `content_path` as a tree where each node
        is a tuple in which the first item is the directory name and the second
        item is a sequence of `(file_name, file_size)` tuples.

        Example:

        .. code::

           ('Parent',
               ('Foo', (
                   ('Picture.jpg', 82489),
                   ('Music.mp3', 5315672),
                   ('More files', (
                       ('This.txt', 57734),
                       ('And that.txt', 184),
                       ('Also some of this.txt', 88433),
                   )),
               )),
               ('Bar', (
                   ('Yee.mp4', 288489392),
                   ('Yah.mkv', 3883247384),
               )),
           )

    :param progress_callback: Callable that is called at regular intervals
        with a :class:`CreateTorrentProgress` object as a positional argument
    :param info_callback: Callable that is called with an informational
        message
    :param bool overwrite: Whether to overwrite `torrent_path` if it exists
    :param exclude: Sequence of regular expressions that are matched against
        file system paths. Matching files are not included in the torrent.
    :param reuse_torrent_path: Path to existing torrent file to get hashed
        pieces and piece size from. If this is a directory, search it
        recursively for ``*.torrent`` files and use the first one (in natural
        sort order) that matches.

        Non-existing or otherwise unreadable paths as well as falsy values
        (e.g. ``""`` or `None`) are silently ignored.

        If this is a sequence, its items are handled as described above.

    Callbacks can cancel the torrent creation by returning `True` or any
    other truthy value.

    :raise TorrentError: if anything goes wrong

    :return: `torrent_path`, or `None` if generation was skipped (file exists
        and `overwrite` is false) or cancelled by a callback
    """
    if not announce:
        raise errors.TorrentError('Announce URL is empty')
    if not source:
        raise errors.TorrentError('Source is empty')

    torrent = None
    if overwrite or not _path_exists(torrent_path):
        # Try to get existing torrent from global cache or `reuse_torrent_path`
        torrent = _get_cached_torrent(
            content_path=content_path,
            exclude=exclude,
            metadata={
                'trackers': (announce,),
                'source': source,
            },
            reuse_torrent_path=reuse_torrent_path,
            info_callback=info_callback,
        )

        # `torrent` is `None` if we couldn't find a cached torrent.
        # `torrent` is `False` if the generation process was cancelled.
        if torrent is None:
            # Create pieces hashes
            torrent = _get_generated_torrent(
                content_path=content_path,
                announce=announce,
                source=source,
                exclude=exclude,
                progress_callback=progress_callback,
                init_callback=init_callback,
            )

    if torrent and torrent.is_ready:
        # Write generic torrent
        _store_generic_torrent(torrent)

        # Write torrent to `torrent_path`
        try:
            torrent.write(torrent_path, overwrite=True)
        except torf.TorfError as e:
            # Chain the cause so the torf traceback is preserved
            # (the original re-raise dropped it).
            raise errors.TorrentError(e) from e
        else:
            return torrent_path
def touch(path):
    """Create an empty file at *path* unless something already exists there.

    :return: *path*, unchanged, so the call can be inlined.
    """
    if _path_exists(path):
        return path
    with open(path, "w+"):
        pass
    return path
def create_dir_if_not_exist(dir_path, privilege=0o755):
    """Create *dir_path* (including parents) when it does not exist.

    :param dir_path: directory path to ensure.
    :param privilege: permission bits for newly created directories.
        ``0755`` is Python 2-only octal syntax and a SyntaxError on
        Python 3; ``0o755`` is the same value and valid on both.
    :return: *dir_path*, unchanged.
    """
    if not _path_exists(dir_path):
        makedirs(dir_path, privilege)
    return dir_path
def exists_file_dir(cls, path):
    """Return True when both the VML file and the VML directory exist
    under *path*."""
    required = (
        _path_join(path, cls.VML_FILE_NAME),
        _path_join(path, cls.VML_DIR_NAME),
    )
    return all(_path_exists(entry) for entry in required)
def remove(self):
    """Delete this object's backing file and directory, if present.

    Missing targets are skipped silently, so the call is idempotent.
    """
    for target, deleter in (
            (self._file_path, _rmfile),
            (self._dir_path, _rmdir)):
        if _path_exists(target):
            deleter(target)