Exemplo n.º 1
0
    def load(self, optimizer, step):
        """Fetch the stored layer objects for *step* and restore them into *optimizer*."""
        # Every object for this step lives under a "<step>/" prefix in the bucket.
        step_prefix = f"{step}/"
        layer_objects = self._client.list_objects(
            bucket_name=self.config.bucket_name,
            prefix=step_prefix,
            recursive=True,
        )

        logger.info("optimizer with {} is loaded".format(str(step)))
        self._load_optimizer(step, layer_objects, optimizer)
Exemplo n.º 2
0
    def save(self, optimizer, scheduler=None):
        """Persist *optimizer* state (and optionally *scheduler* state) to the bucket.

        Creates the bucket on first use, refuses to save when no step can be
        derived from the optimizer, and overwrites an existing save for the
        same step.
        """
        cfg = self.config
        client = self._client

        # Lazily create the destination bucket.
        if not client.bucket_exists(cfg.bucket_name):
            client.make_bucket(cfg.bucket_name, location=cfg.region)

        # Guard clause: without a valid step there is nothing to key the save on.
        step = self._get_step(optimizer)
        if not step:
            logger.error(
                "{} {} step({})is not exist".format(
                    cfg.optimizer_name, cfg.additional, str(step)
                )
            )
            return

        if step in cfg.metadata["optimizer"]:
            # A save for this step already exists — replace it.
            logger.info(
                "{} {} is already exist, so optimizer will be overwrited.".format(
                    cfg.optimizer_name, str(cfg.additional)
                )
            )
            self._save_with_clear(step, optimizer, overwrite=True)
        else:
            # First save for this step: register it in the metadata, then store.
            self._set_metadata(metadata=cfg.metadata, optimizer=optimizer, step=step)
            self._save_with_clear(step, optimizer)

        if scheduler:
            self._set_scheduler(metadata=cfg.metadata, scheduler=scheduler, step=step)

        logger.info("optimizer with {} is saved".format(str(step)))
Exemplo n.º 3
0
    def _check_bucket(self):
        """
        Check that the bucket exists and load its stored metadata.

        If the bucket exists but contains no ``metadata.json`` it is treated
        as corrupt: the bucket is removed and ``FileNotFoundError`` is raised.
        If the recorded endpoint differs from the current endpoint,
        ``ValueError`` is raised. On success ``self.compressor`` and
        ``self.metadata`` are populated from the stored metadata.
        """
        # NAS endpoints get the filesystem-backed client; otherwise use Minio.
        _client = (Minio(self.endpoint,
                         access_key=self.access_key,
                         secret_key=self.secret_key,
                         secure=self.secure,
                         region=self.region)
                   if not check_nas(self.endpoint) else NAS(self.endpoint))
        if _client.bucket_exists(self.bucket_name):
            try:
                _metadata = _client.get_object(self.bucket_name,
                                               "metadata.json")
            except Exception as err:
                # Narrowed from a bare `except:` so KeyboardInterrupt /
                # SystemExit are no longer swallowed; chain the original
                # error for debuggability.
                _client.remove_bucket(self.bucket_name)
                raise FileNotFoundError(
                    "metadata.json is not in bucket name {}"
                    ", So this bucket will be removed".format(
                        self.bucket_name)) from err

            metadata_dict = json.loads(_metadata.read().decode("utf-8"))
            # The stored endpoint must match the one we are talking to, or
            # object permissions may not line up.
            if self.endpoint != metadata_dict["endpoint"]:
                raise ValueError(
                    "Already created endpoint({}) doesn't current endpoint str({})"
                    " It may occurs permission denied error".format(
                        metadata_dict["endpoint"], self.endpoint))

            self.compressor = metadata_dict["compressor"]
            self.metadata = metadata_dict
        else:
            logger.info("{} {} is not exist!".format(self.optimizer_name,
                                                     str(self.additional)))
Exemplo n.º 4
0
    def load(self, model, **kwargs):
        """Load the stored weights identified by *kwargs* into *model*.

        ``kwargs`` is the metadata mapping whose hash selects the model
        folder inside the bucket. Returns whatever ``_load_model`` returns.
        """
        # `**kwargs` is always a dict, so the former
        # `if not isinstance(kwargs, dict): metadata = 0` branch was dead
        # code — use the mapping directly (behavior unchanged).
        metadata = kwargs

        # The metadata hash names the folder the layers were saved under.
        model_folder = self._hashmap_transfer(metadata)

        layers = self._client.list_objects(
            bucket_name=self.config.bucket_name,
            prefix=f"{model_folder}/",
            recursive=True,
        )

        logger.info("model with {} is loaded".format(str(metadata)))
        return self._load_model(model_folder, layers, model)
Exemplo n.º 5
0
    def _init_download(self):
        """
        Download every indexed object from the bucket using worker threads.

        Populates ``self._object_file_mapper`` with a
        {remote object path: local file path} entry for each file referenced
        by ``merged_indexer``/``merged_filetype``, then persists that mapping
        to ``self.cache_path`` — but only on the first run, when the cache
        file does not exist yet.
        """
        _client = self._create_client()
        _downloader = Downloader(
            client=_client,
            bucket=self.config.bucket_name,
            num_worker_threads=self.num_worker_threads,
        )

        # All objects referenced by the indexer plus the filetype objects.
        _remote_files = list(self.merged_indexer.values()) + list(self.merged_filetype)
        for _remote_file in _remote_files:
            if not check_nas(self.config.endpoint):
                # Remote (non-NAS) endpoint: stage each object into a temp file
                # and queue it for download.
                # NOTE(review): tempfile.mktemp is deprecated and race-prone
                # (the name can be reused before the downloader creates the
                # file); also _remote_file is passed as mktemp's *suffix*
                # argument — confirm this is intentional.
                _local_file = tempfile.mktemp(_remote_file)
                if _remote_file not in self._object_file_mapper:
                    self._object_file_mapper[_remote_file] = _local_file
                    _downloader.set_queue(
                        local_file=_local_file, remote_file=_remote_file
                    )
            else:
                # NAS endpoint: objects are directly reachable on the mounted
                # path, so just record where they live; nothing is queued.
                if _remote_file not in self._object_file_mapper:
                    self._object_file_mapper[_remote_file] = os.path.join(
                        self.config.endpoint, self.config.bucket_name, _remote_file
                    )
        # Block until every queued download has finished.
        _downloader.join_queue()

        # Sanity check: exactly one mapping per indexed object.
        # NOTE(review): assert is stripped under `python -O`; raise an
        # exception instead if this invariant must hold in production.
        assert len(self._object_file_mapper) == (len(self.merged_indexer) + len(self.merged_filetype))

        # First run only: persist the mapping so later runs can reuse it.
        if not os.path.exists(self.cache_path):
            with open(self.cache_path, "w") as f:
                json.dump(self._object_file_mapper, f)
            logger.info(
                "All {} {} datasets are downloaded done.".format(
                    self.config.dataset_name, str(self.config.additional)
                )
            )
Exemplo n.º 6
0
    def save(self, model, **kwargs):
        """Persist *model*'s layers to the bucket, keyed by the hash of *kwargs*.

        ``kwargs`` is the metadata mapping identifying this model version;
        its hash names the folder the layers are stored under. An existing
        save under the same hash is overwritten.
        """
        # Lazily create the destination bucket.
        if not self._client.bucket_exists(self.config.bucket_name):
            self._client.make_bucket(self.config.bucket_name,
                                     location=self.config.region)

        # `**kwargs` is always a dict, so the former
        # `if not isinstance(kwargs, dict): metadata = 0` branch was dead
        # code — use the mapping directly (behavior unchanged).
        metadata = kwargs

        model_folder = self._hashmap_transfer(metadata)

        if model_folder in self.config.metadata["model"]:
            # Same metadata hash already stored: replace the existing layers.
            logger.info(
                "{} {} is already exist, so model will be overwrited.".format(
                    self.config.model_name, str(self.config.additional)))
            self._save_with_clear(model_folder, model, overwrite=True)
        else:
            # First save under this hash: register it in the bucket metadata.
            self.config.metadata["model"].update({model_folder: metadata})
            self._save_with_clear(model_folder, model)

        logger.info("model with {} is saved".format(str(metadata)))