Example #1
    def _do_single_part_upload(self, backup_file):

        fqn = self._fqn(backup_file.backup_path)
        self.log.debug("Starting single part upload of %s to %s:%s",
                       backup_file, self.args.bucket_name, fqn)
        key = self.bucket.new_key(fqn)

        # All metadata fields have to be strings.
        key.update_metadata(self._dict_to_aws_meta(backup_file.serialise()))

        timing = endpoints.TransferTiming(self.log, fqn,
                                          backup_file.component.stat.size)
        key.set_contents_from_filename(backup_file.file_path,
                                       replace=False,
                                       cb=timing.progress,
                                       num_cb=timing.num_callbacks)

        self.log.debug("Finished single part upload of %s to %s:%s",
                       backup_file, self.args.bucket_name, fqn)
        return fqn
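
Several of these examples call a _dict_to_aws_meta helper that is not shown. Since S3 object metadata values must be strings, a minimal sketch of what such a helper might look like (an assumption, not the project's actual implementation) is:

    def _dict_to_aws_meta(self, data):
        # Hypothetical helper: flatten a dict into string-only key/value
        # pairs, because S3 object metadata values must be strings.
        meta = {}
        for key, value in data.items():
            if isinstance(value, dict):
                for sub_key, sub_value in value.items():
                    meta["{0}_{1}".format(key, sub_key)] = str(sub_value)
            else:
                meta[key] = str(value)
        return meta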
Example #2
    def _do_multi_part_upload(self, backup_file):

        fqn = self._fqn(backup_file.backup_path)
        self.log.debug("Starting multi part upload of %s to %s:%s",
                       backup_file, self.args.bucket_name, fqn)
        # All meta tags must be strings
        metadata = self._dict_to_aws_meta(backup_file.serialise())
        mp = self.bucket.initiate_multipart_upload(fqn, metadata=metadata)

        timing = endpoints.TransferTiming(self.log, fqn,
                                          backup_file.component.stat.size)
        chunk = None
        try:
            # Part numbers must start at 1
            for part, chunk in enumerate(
                    self._chunk_file(backup_file.file_path), 1):

                self.log.debug("Uploading part %s", part)
                try:
                    mp.upload_part_from_file(chunk,
                                             part,
                                             cb=timing.progress,
                                             num_cb=timing.num_callbacks)
                finally:
                    chunk.close()
        except Exception:
            mp.cancel_upload()
            raise

        mp.complete_upload()
        self.log.debug("Finished multi part upload of %s to %s:%s",
                       backup_file, self.args.bucket_name, fqn)
        return fqn
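
The multipart upload above relies on a _chunk_file helper that is not shown. S3 requires every part except the last to be at least 5 MB, so a plausible sketch (an assumption) is:

    import io

    def _chunk_file(self, file_path, chunk_size=5 * 1024 * 1024):
        # Hypothetical helper: yield successive file-like chunks, since
        # upload_part_from_file() expects a file-like object per part.
        # 5 MB is the S3 minimum part size for all but the last part.
        with open(file_path, "rb") as f:
            while True:
                data = f.read(chunk_size)
                if not data:
                    break
                yield io.BytesIO(data)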
Example #3
    def restore_file(self, backup_file, dest_prefix):

        src_path = os.path.join(self.args.backup_base, backup_file.backup_path)
        dest_path = os.path.join(dest_prefix, backup_file.restore_path)
        file_util.ensure_dir(os.path.dirname(dest_path))
        self.log.debug("Restoring file %s to %s", src_path, dest_path)

        with endpoints.TransferTiming(self.log, src_path,
                                      backup_file.component.stat.size):
            shutil.copy(src_path, dest_path)
        return dest_path
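
endpoints.TransferTiming is used throughout, both as a context manager (as here) and as a boto progress callback via its progress method and num_callbacks attribute. A minimal sketch of the interface these examples assume (the real class presumably also logs throughput):

    import time

    class TransferTiming(object):

        def __init__(self, log, path, size, num_callbacks=10):
            self.log = log
            self.path = path
            self.size = size
            self.num_callbacks = num_callbacks
            self.start = time.time()

        def progress(self, transferred, total=None):
            # boto calls back with (transferred, total); boto3's Callback
            # passes a single incremental byte count, hence total=None.
            self.log.debug("Transferred %s bytes for %s", transferred,
                           self.path)

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            self.log.debug("Transfer of %s (%s bytes) took %.2f seconds",
                           self.path, self.size, time.time() - self.start)
            return False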
Example #4
    def backup_file(self, backup_file):

        dest_path = os.path.join(self.args.backup_base,
                                 backup_file.backup_path)
        file_util.ensure_dir(os.path.dirname(dest_path))

        # Store the actual file
        with endpoints.TransferTiming(self.log, dest_path,
                                      backup_file.component.stat.size):
            shutil.copy(backup_file.file_path, dest_path)

        # Store the meta data
        dest_meta_path = dest_path + self._META_SUFFIX
        with open(dest_meta_path, "w") as f:
            f.write(json.dumps(backup_file.serialise()))
        return dest_path
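
file_util.ensure_dir is another helper these snippets depend on. A minimal sketch (assumed, and written to stay Python 2 compatible like the rest of the code):

    import errno
    import os

    def ensure_dir(path):
        # Create the directory if it does not already exist.
        try:
            os.makedirs(path)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise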
Example #5
    def backup_keyspace(self, ks_backup):
        fqn = self._fqn(ks_backup.backup_path)

        self.log.debug("Starting to store keyspace backup to %s:%s",
                       self.args.bucket_name, fqn)

        # The payload is zlib-compressed msgpack, not JSON.
        blob = zlib.compress(msgpack.dumps(ks_backup.serialise()))

        with endpoints.TransferTiming(self.log, fqn, len(blob)):
            self.bucket.put_object(Key=fqn,
                                   Body=blob,
                                   ContentType='application/octet-stream',
                                   **self.sse_options)

        self.log.debug("Finished storing keyspace backup to %s:%s",
                       self.args.bucket_name, fqn)
        return
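
self.sse_options is expanded into both uploads and downloads (see Examples #7, #10 and #11), which suggests SSE-C style options, since those are the server-side encryption arguments boto3 accepts on GET as well as PUT. A plausible construction (an assumption; the key variable is hypothetical):

    self.sse_options = {
        'SSECustomerAlgorithm': 'AES256',
        'SSECustomerKey': customer_key,  # hypothetical 32-byte key
    }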
Example #6
    def read_keyspace(self, path):

        key_name = path
        fqn = self._fqn(key_name)

        self.log.debug("Starting to read json from %s:%s",
                       self.args.bucket_name, fqn)

        key = self.bucket.get_key(fqn)
        if key is None:
            raise EnvironmentError(errno.ENOENT, fqn)
        timing = endpoints.TransferTiming(self.log, fqn, 0)
        data = json.loads(
            key.get_contents_as_string(cb=timing.progress,
                                       num_cb=timing.num_callbacks))
        self.log.debug("Finished reading json from %s:%s",
                       self.args.bucket_name, fqn)

        return cassandra.KeyspaceBackup.deserialise(data)
Example #7
    def read_keyspace(self, path):
        fqn = self._fqn(path)

        self.log.debug("Starting to read keyspace backup from %s:%s",
                       self.args.bucket_name, fqn)

        with endpoints.TransferTiming(self.log, fqn, 0):
            try:
                body = self.bucket.Object(fqn).get(**self.sse_options)['Body']
            except botocore.exceptions.ClientError as e:
                if e.response['Error']['Code'] == "404":
                    raise EnvironmentError(errno.ENOENT, fqn)

                raise

            # Read inside the timing block so the download itself is timed.
            data = msgpack.loads(zlib.decompress(body.read()))

        self.log.debug("Finished reading keyspace backup from %s:%s",
                       self.args.bucket_name, fqn)

        return cassandra.KeyspaceBackup.deserialise(data)
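
Examples #5 and #7 are symmetric: whatever backup_keyspace writes, read_keyspace can decode. A standalone round-trip check of the encoding (illustrative payload only):

    import msgpack
    import zlib

    payload = {"keyspace": "ks1", "tables": ["t1", "t2"]}
    blob = zlib.compress(msgpack.dumps(payload))
    # raw=False decodes map keys and values as str on modern msgpack
    assert msgpack.loads(zlib.decompress(blob), raw=False) == payload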
Example #8
    def restore_file(self, backup_file, dest_prefix):
        """
        """

        key_name = backup_file.backup_path
        fqn = self._fqn(key_name)
        dest_path = os.path.join(dest_prefix, backup_file.restore_path)
        file_util.ensure_dir(os.path.dirname(dest_path))
        self.log.debug("Starting to restore from %s:%s to %s",
                       self.args.bucket_name, fqn, dest_path)

        key = self.bucket.get_key(fqn)
        if key is None:
            raise EnvironmentError(errno.ENOENT, fqn)
        timing = endpoints.TransferTiming(self.log, fqn,
                                          backup_file.component.stat.size)
        key.get_contents_to_filename(dest_path,
                                     cb=timing.progress,
                                     num_cb=timing.num_callbacks)

        return dest_path
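
The _fqn helper that appears in nearly every example is also not shown; it presumably prefixes the key name to build the fully qualified key. A minimal sketch, assuming a configured prefix argument (the name key_prefix is hypothetical):

    def _fqn(self, key_name):
        # Hypothetical: join a configured key prefix with the key name.
        return "{0}/{1}".format(self.args.key_prefix, key_name)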
Example #9
    def backup_keyspace(self, ks_backup):

        key_name = ks_backup.backup_path
        fqn = self._fqn(key_name)

        self.log.debug("Starting to store json to %s:%s",
                       self.args.bucket_name, fqn)

        # TODO: Overwrite ?
        key = self.bucket.new_key(fqn)
        json_str = json.dumps(ks_backup.serialise())
        timing = endpoints.TransferTiming(self.log, fqn, len(json_str))
        key.set_contents_from_string(
            json_str,
            headers={'Content-Type': 'application/json'},
            cb=timing.progress,
            num_cb=timing.num_callbacks)

        self.log.debug("Finished storing json to %s:%s", self.args.bucket_name,
                       fqn)
        return
Example #10
    def backup_file(self, backup_file):

        fqn = self._fqn(backup_file.backup_path)
        self.log.debug("Starting upload of %s to %s:%s", backup_file,
                       self.args.bucket_name, fqn)

        num_retries = 5
        for attempt in range(num_retries):
            timing = endpoints.TransferTiming(self.log, fqn,
                                              backup_file.component.stat.size)

            try:
                self.bucket.upload_file(backup_file.file_path,
                                        fqn,
                                        ExtraArgs=dict(
                                            Metadata=self._dict_to_aws_meta(
                                                backup_file.serialise()),
                                            **self.sse_options),
                                        Callback=timing.progress,
                                        Config=self.transfer_config)

                break
            except boto3.exceptions.S3UploadFailedError as e:
                # retry "Request Timeout (408)" errors only, other errors
                # are retried by boto3
                if 'Request Timeout' not in str(e):
                    raise

                if attempt == num_retries - 1:
                    raise

                self.log.warning(
                    "Retrying (attempt %d) after exception %s while "
                    "uploading %s to %s:%s",
                    attempt + 1, e, backup_file, self.args.bucket_name, fqn)

        self.log.debug("Finished upload of %s to %s:%s", backup_file,
                       self.args.bucket_name, fqn)
        return fqn
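
self.transfer_config passed to upload_file and download_file is presumably a standard boto3.s3.transfer.TransferConfig controlling multipart behaviour. A plausible construction (values illustrative, not from these examples):

    from boto3.s3.transfer import TransferConfig

    self.transfer_config = TransferConfig(
        multipart_threshold=64 * 1024 * 1024,  # multipart above 64 MB
        max_concurrency=4)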
Example #11
    def restore_file(self, backup_file, dest_prefix):
        fqn = self._fqn(backup_file.backup_path)
        dest_path = os.path.join(dest_prefix, backup_file.restore_path)
        file_util.ensure_dir(os.path.dirname(dest_path))

        self.log.debug("Starting to restore from %s:%s to %s",
                       self.args.bucket_name, fqn, dest_path)

        timing = endpoints.TransferTiming(self.log, fqn,
                                          backup_file.component.stat.size)
        try:
            # Download to the computed restore destination, not the
            # path of the original source file.
            self.bucket.download_file(fqn,
                                      dest_path,
                                      ExtraArgs=self.sse_options,
                                      Callback=timing.progress,
                                      Config=self.transfer_config)
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == "404":
                raise EnvironmentError(errno.ENOENT, fqn)

            raise

        return dest_path