Example #1
 def add_stream(self, stream, package_name, headers=None):
     """
     :param stream: data
     :param package_name: path
     :param headers: backup metadata information
     :return:
     """
     tmpdir = self._create_tempdir()
     LOG.info('add stream')
     try:
         split = package_name.rsplit('/', 1)
         # create backup_basedir
         backup_basedir = "{0}/{1}".format(self.storage_path,
                                           package_name)
         self.create_dirs(backup_basedir)
         # define backup_data_name
         backup_basepath = "{0}/{1}".format(backup_basedir,
                                            split[0])
         backup_metadata = "%s/metadata" % backup_basedir
         # write backup to backup_basepath
         data_backup = utils.path_join(tmpdir, "data_backup")
         with open(data_backup, 'wb') as backup_file:
             for el in stream:
                 backup_file.write(el)
         self.put_file(data_backup, backup_basepath)
         # write backup metadata to backup_metadata
         metadata = utils.path_join(tmpdir, "metadata")
         with open(metadata, 'wb') as backup_meta:
             # json.dumps returns str; encode it for the binary file handle
             backup_meta.write(json.dumps(headers).encode('utf-8'))
         self.put_file(metadata, backup_metadata)
     finally:
         shutil.rmtree(tmpdir)
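Every example on this page leans on utils.path_join. Its exact behavior is freezer-internal, but the usage above suggests a '/' join over stringified parts. A minimal sketch under that assumption, with made-up storage_path and package_name values, shows the layout add_stream produces:

    # Hypothetical stand-in for utils.path_join, assuming it joins its
    # arguments with '/' after stringifying them.
    def path_join(*args):
        return '/'.join(str(a) for a in args)

    storage_path = "/backups"                     # assumed value
    package_name = "host_mysql/1630000000"        # assumed value
    backup_basedir = path_join(storage_path, package_name)
    print(backup_basedir)                         # /backups/host_mysql/1630000000
    print(path_join(backup_basedir, "metadata"))  # /backups/host_mysql/1630000000/metadata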
Example #2
 def find_all(self, hostname_backup_name):
     """Return all backups stored under hostname_backup_name."""
     backups = []
     backup_dir = utils.path_join(self.storage_directory,
                                  hostname_backup_name)
     self.create_dirs(backup_dir)
     timestamps = self.listdir(backup_dir)
     for timestamp in timestamps:
         increments = \
             self.listdir(utils.path_join(backup_dir, timestamp))
         backups.extend(base.Backup.parse_backups(increments, self))
     return backups
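find_all assumes a two-level layout, storage_directory/hostname_backup_name/timestamp/increment. A dependency-free sketch of the same walk using only the standard library (the helper name and layout are my assumptions, not freezer's API):

    import os

    def find_all_paths(storage_directory, hostname_backup_name):
        """Collect every increment path under <storage>/<name>/<timestamp>/."""
        backup_dir = os.path.join(storage_directory, hostname_backup_name)
        os.makedirs(backup_dir, exist_ok=True)
        paths = []
        for timestamp in os.listdir(backup_dir):
            level_dir = os.path.join(backup_dir, timestamp)
            # each entry inside a timestamp directory is one increment
            paths.extend(os.path.join(level_dir, name)
                         for name in os.listdir(level_dir))
        return paths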
Example #3
    def write_backup(self, rich_queue, backup):
        """
        Stores backup in storage
        :type rich_queue: freezer.streaming.RichQueue
        :type backup: freezer.storage.base.Backup
        """
        try:
            tmpdir = tempfile.mkdtemp()
        except Exception:
            LOG.error("Unable to create a tmp directory")
            raise

        try:
            data_meta = utils.path_join(tmpdir, "data_meta")
            LOG.info("ftp write data_meta %s" % data_meta)
            backup = backup.copy(storage=self)
            path = backup.data_path
            self.create_dirs(path.rsplit('/', 1)[0])

            with open(data_meta, mode='wb') as b_file:
                for message in rich_queue.get_messages():
                    b_file.write(message)

            self.put_file(data_meta, path)
        finally:
            shutil.rmtree(tmpdir)
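write_backup only touches rich_queue through get_messages(), so a test double needs nothing more than that. A hypothetical stand-in (FakeRichQueue is not part of freezer):

    class FakeRichQueue:
        """Hypothetical stand-in for freezer.streaming.RichQueue."""
        def __init__(self, chunks):
            self._chunks = chunks

        def get_messages(self):
            # write_backup iterates this and writes each chunk to disk
            for chunk in self._chunks:
                yield chunk

    rich_queue = FakeRichQueue([b"part-1", b"part-2"])
    assert b"".join(rich_queue.get_messages()) == b"part-1part-2"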
Example #4
    def backup_to_file_path(self, backup):
        """

        :param backup:
        :type backup: freezer.storage.base.Backup
        :return:
        """
        return utils.path_join(self._zero_backup_dir(backup), backup)
Example #5
 def _zero_backup_dir(self, backup):
     """
     :param backup:
     :type backup: freezer.storage.base.Backup
     :return:
     """
     return utils.path_join(self.storage_directory,
                            backup.hostname_backup_name,
                            backup.full_backup.timestamp)
Example #6
 def rmtree(self, path):
     files = self.ftp.listdir(path=path)
     for f in files:
         filepath = utils.path_join(path, f)
         if self._is_dir(filepath):
             self.rmtree(filepath)
         else:
             self.ftp.remove(filepath)
     self.ftp.rmdir(path)
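Despite the self.ftp name, the listdir/remove/rmdir trio matches paramiko's SFTPClient API. A standalone sketch of the same recursive delete, assuming an already-connected paramiko.SFTPClient; the _is_dir check is replaced with a stat on the listing attributes:

    import stat

    def sftp_rmtree(sftp, path):
        """Recursively delete path through a connected paramiko.SFTPClient."""
        for entry in sftp.listdir_attr(path):
            filepath = "{0}/{1}".format(path, entry.filename)
            if stat.S_ISDIR(entry.st_mode):
                sftp_rmtree(sftp, filepath)
            else:
                sftp.remove(filepath)
        sftp.rmdir(path)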
Example #7
    def upload_meta_file(self, backup, meta_file):
        """

        :param backup:
        :type backup: freezer.storage.base.Backup
        :param meta_file:
        :return:
        """
        zero_backup = self._zero_backup_dir(backup)
        to_path = utils.path_join(zero_backup, backup.tar())
        self.put_file(meta_file, to_path)
Example #8
 def download_meta_file(self, backup):
     """
     Downloads meta_data to work_dir of previous backup.
     :type backup: freezer.storage.base.Backup
     :param backup: A backup or increment. Current backup is incremental,
     that means we should download tar_meta for detection new files and
     changes. If backup.tar_meta is false, raise Exception
     :return:
     """
     utils.create_dir(self.work_dir)
     if backup.level == 0:
         return utils.path_join(self.work_dir, backup.tar())
     meta_backup = backup.full_backup.increments[backup.level - 1]
     if not meta_backup.tar_meta:
         raise ValueError('Latest backup has no tar_meta')
     to_path = utils.path_join(self.work_dir, meta_backup.tar())
     if os.path.exists(to_path):
         os.remove(to_path)
     meta_backup.storage.get_file(
         meta_backup.storage.meta_file_abs_path(meta_backup), to_path)
     return to_path
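The chain walked above is simple: a level-N increment restores on top of the tar metadata written by level N-1, and only level 0 needs none. A toy sketch of that invariant (Increment is an illustrative class, not freezer's):

    class Increment:
        """Illustrative model of one entry in full_backup.increments."""
        def __init__(self, level, tar_meta=True):
            self.level = level
            self.tar_meta = tar_meta

    increments = [Increment(0), Increment(1), Increment(2)]
    current = increments[2]
    meta_backup = increments[current.level - 1]   # level 1 feeds level 2
    assert meta_backup.tar_meta, "previous backup must carry tar_meta"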
Example #9
 def rmtree(self, path):
     LOG.info("ftp rmtree path=%s" % path)
     files = []
     self.ftp.dir(path, files.append)
     LOG.info('rm files=%s' % files)
     for f in files:
         # unix-style listing line: permissions first, file name last
         # (file names containing spaces are not handled)
         attr = f.split()[0]
         file_name = f.split()[-1]
         filepath = utils.path_join(path, file_name)
         if attr.startswith('d'):
             self.rmtree(filepath)
         else:
             self.ftp.delete(filepath)
     self.ftp.rmd(path)
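Splitting dir() lines on whitespace assumes unix-style listings and mangles names containing spaces. Where the server supports the MLSD command, ftplib returns structured facts instead. A hedged alternative sketch, assuming a connected ftplib.FTP instance and MLSD support on the server:

    def ftp_rmtree_mlsd(ftp, path):
        """Recursive delete using MLSD facts instead of parsing dir() lines."""
        for name, facts in ftp.mlsd(path):
            if facts.get("type") in ("cdir", "pdir"):   # skip '.' and '..'
                continue
            filepath = "{0}/{1}".format(path, name)
            if facts.get("type") == "dir":
                ftp_rmtree_mlsd(ftp, filepath)
            else:
                ftp.delete(filepath)
        ftp.rmd(path)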
Example #10
 def backup_blocks(self, backup):
     LOG.info("ftp backup_blocks ")
     self.init()
     # should recreate ssh for new process
     tmpdir = self._create_tempdir()
     try:
         data = utils.path_join(tmpdir, "data")
         LOG.info("backup_blocksa datadown=%s" % data)
         self.get_file(backup.data_path, data)
         with open(data, 'rb') as backup_file:
             while True:
                 chunk = backup_file.read(self.max_segment_size)
                 # read() returns b'' at EOF; comparing against '' would
                 # never match on a binary file, so test for emptiness
                 if not chunk:
                     break
                 yield chunk
     finally:
         shutil.rmtree(tmpdir)
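The read loop above (with the EOF fix applied) is the classic read-until-empty pattern; the standard library spells it more compactly with iter() and a sentinel:

    from functools import partial

    def read_chunks(path, chunk_size):
        """Yield chunk_size blocks from path until EOF."""
        with open(path, 'rb') as backup_file:
            # read() returns b'' at EOF, which stops the iterator
            for chunk in iter(partial(backup_file.read, chunk_size), b''):
                yield chunk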
Example #11
 def __init__(self,
              engine,
              hostname_backup_name,
              level_zero_timestamp,
              timestamp,
              level,
              storage=None):
     """
     :type storage: freezer.storage.physical.PhysicalStorage
     :param hostname_backup_name: name (hostname_backup_name) of backup
     :type hostname_backup_name: str
     :param timestamp: timestamp of backup (when it was executed)
     :type timestamp: int
     :param level: current incremental level of backup
     :type level: int
     :param tar_meta: Is backup has or has not an attached meta
     tar file in storage. Default = False
     :type tar_meta: bool
     :return:
     """
     self.hostname_backup_name = hostname_backup_name
     self.timestamp = timestamp
     self.level = level
     self.engine = engine
     self.storage = storage
     self.level_zero_timestamp = level_zero_timestamp
     if storage:
         self.increments_data_path = utils.path_join(
             self.storage.storage_path, "data", self.engine.name,
             self.hostname_backup_name, self.level_zero_timestamp)
         self.increments_metadata_path = utils.path_join(
             self.storage.storage_path, "metadata", self.engine.name,
             self.hostname_backup_name, self.level_zero_timestamp)
         self.data_prefix_path = utils.path_join(
             self.increments_data_path,
             "{0}_{1}".format(self.level, self.timestamp))
         self.engine_metadata_path = utils.path_join(
             self.data_prefix_path, "engine_metadata")
         self.metadata_path = utils.path_join(
             self.increments_metadata_path,
             "{0}_{1}".format(self.level, self.timestamp), "metadata")
         self.data_path = utils.path_join(self.data_prefix_path, "data")
         self.segments_path = utils.path_join(self.data_prefix_path,
                                              "segments")
Example #12
 def meta_file_abs_path(self, backup):
     zero_backup = self._zero_backup_dir(backup)
     return utils.path_join(zero_backup, backup.tar())
Example #13
    def _create_image(self, path, restore_from_timestamp):
        """
        :param path:
        :param restore_from_timestamp:
        :type restore_from_timestamp: int
        :return:
        """
        backup = self._get_backups(path, restore_from_timestamp)
        if self.storage.type == 'swift':
            swift = self.client_manager.get_swift()
            path = "{0}_segments/{1}/{2}".format(self.container, path, backup)
            stream = swift.get_object(self.container,
                                      "{}/{}".format(path, backup),
                                      resp_chunk_size=10000000)
            length = int(stream[0]["x-object-meta-length"])
            data = utils.ReSizeStream(stream[1], length, 1)
            info = stream[0]
            image = self.client_manager.create_image(
                name="restore_{}".format(path),
                container_format="bare",
                disk_format="raw",
                data=data)
            return info, image
        elif self.storage.type == 's3':
            if self.storage.get_object_prefix() != '':
                base_path = "{0}/{1}/{2}".format(
                    self.storage.get_object_prefix(), path, backup)
            else:
                base_path = "{0}/{1}".format(path, backup)
            image_file = "{0}/{1}".format(base_path, path)
            s3_object = self.storage.get_object(
                bucket_name=self.storage.get_bucket_name(), key=image_file)
            stream = utils.S3ResponseStream(data=s3_object['Body'],
                                            chunk_size=10000000)
            data = utils.ReSizeStream(stream, s3_object['ContentLength'], 1)
            metadata = "{0}/metadata".format(base_path)
            metadata_object = self.storage.get_object(
                bucket_name=self.storage.get_bucket_name(), key=metadata)
            info = json.load(metadata_object['Body'])

            image = self.client_manager.create_image(
                name="restore_{}".format(path),
                container_format="bare",
                disk_format="raw",
                data=data)
            return info, image
        elif self.storage.type == 'local':
            image_file = "{0}/{1}/{2}/{3}".format(self.container, path, backup,
                                                  path)
            metadata_file = "{0}/{1}/{2}/metadata".format(
                self.container, path, backup)
            try:
                data = open(image_file, 'rb')
            except Exception:
                msg = "Failed to open image file {}".format(image_file)
                LOG.error(msg)
                raise Exception(msg)
            with open(metadata_file, 'r') as meta_fd:
                info = json.load(meta_fd)
            image = self.client_manager.create_image(
                name="restore_{}".format(path),
                container_format="bare",
                disk_format="raw",
                data=data)
            return info, image
        elif self.storage.type == 'ssh':
            image_file = "{0}/{1}/{2}/{3}".format(self.container, path, backup,
                                                  path)
            metadata_file = "{0}/{1}/{2}/metadata".format(
                self.container, path, backup)
            try:
                data = self.storage.open(image_file, 'rb')
            except Exception:
                msg = "Failed to open remote image file {}".format(image_file)
                LOG.error(msg)
                raise Exception(msg)
            info = json.loads(self.storage.read_metadata_file(metadata_file))
            image = self.client_manager.create_image(
                name="restore_{}".format(path),
                container_format="bare",
                disk_format="raw",
                data=data)
            return info, image
        elif self.storage.type in ['ftp', 'ftps']:
            image_file = "{0}/{1}/{2}/{3}".format(self.container, path, backup,
                                                  path)
            metadata_file = "{0}/{1}/{2}/metadata".format(
                self.container, path, backup)
            try:
                tmpdir = tempfile.mkdtemp()
            except Exception:
                LOG.error("Unable to create a tmp directory")
                raise
            try:
                data_image = utils.path_join(tmpdir, "data_image")
                LOG.info('create image restore ftp storage')
                self.storage.get_file(image_file, data_image)
                data_meta = utils.path_join(tmpdir, "data_meta")
                self.storage.get_file(metadata_file, data_meta)
                data = open(data_image, 'rb')
                with open(data_meta, 'r') as meta_fd:
                    info = json.load(meta_fd)
                image = self.client_manager.create_image(
                    name="restore_{}".format(path),
                    container_format="bare",
                    disk_format="raw",
                    data=data)
                return info, image
            finally:
                shutil.rmtree(tmpdir)
        else:
            # unknown storage type: nothing to restore
            return {}
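Each branch above reads the same two side-by-side objects, the image data and its metadata; only the transport differs. The path arithmetic for the 'local' and 'ssh'/'ftp' branches, with assumed values:

    container, path, backup = "/backups", "instance-0001", "1630000000"  # assumed
    image_file = "{0}/{1}/{2}/{3}".format(container, path, backup, path)
    metadata_file = "{0}/{1}/{2}/metadata".format(container, path, backup)
    print(image_file)      # /backups/instance-0001/1630000000/instance-0001
    print(metadata_file)   # /backups/instance-0001/1630000000/metadata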
Example #14
 def metadata_path(self, engine, hostname_backup_name):
     return utils.path_join(self.storage_path, "metadata", engine.name,
                            hostname_backup_name)
Example #15
    def backup(self, backup_resource, hostname_backup_name, no_incremental,
               max_level, always_level, restart_always_level, queue_size=2):
        """
        Here we now location of all interesting artifacts like metadata
        Should return stream for storing data.
        :return: stream
        """
        prev_backup = self.storage.previous_backup(
            engine=self,
            hostname_backup_name=hostname_backup_name,
            no_incremental=no_incremental,
            max_level=max_level,
            always_level=always_level,
            restart_always_level=restart_always_level
        )

        try:
            tmpdir = tempfile.mkdtemp()
        except Exception:
            LOG.error("Unable to create a tmp directory")
            raise

        try:
            engine_meta = utils.path_join(tmpdir, "engine_meta")
            freezer_meta = utils.path_join(tmpdir, "freezer_meta")
            if prev_backup:
                prev_backup.storage.get_file(prev_backup.engine_metadata_path,
                                             engine_meta)
            timestamp = utils.DateTime.now().timestamp
            level_zero_timestamp = (prev_backup.level_zero_timestamp
                                    if prev_backup else timestamp)
            backup = base.Backup(
                engine=self,
                hostname_backup_name=hostname_backup_name,
                level_zero_timestamp=level_zero_timestamp,
                timestamp=timestamp,
                level=(prev_backup.level + 1 if prev_backup else 0)
            )

            input_queue = streaming.RichQueue(queue_size)
            read_except_queue = queue.Queue()
            write_except_queue = queue.Queue()

            read_stream = streaming.QueuedThread(
                self.backup_stream,
                input_queue,
                read_except_queue,
                kwargs={"backup_resource": backup_resource,
                        "manifest_path": engine_meta})

            write_stream = streaming.QueuedThread(
                self.storage.write_backup,
                input_queue,
                write_except_queue,
                kwargs={"backup": backup})

            read_stream.daemon = True
            write_stream.daemon = True
            read_stream.start()
            write_stream.start()
            read_stream.join()
            write_stream.join()

            # queue handling is different from SimpleQueue handling.
            def handle_except_queue(except_queue):
                if not except_queue.empty():
                    while not except_queue.empty():
                        e = except_queue.get_nowait()
                        LOG.critical('Engine error: {0}'.format(e))
                    return True
                else:
                    return False

            got_exception = None
            got_exception = (handle_except_queue(read_except_queue) or
                             got_exception)
            got_exception = (handle_except_queue(write_except_queue) or
                             got_exception)

            if got_exception:
                raise engine_exceptions.EngineException(
                    "Engine error. Failed to backup.")

            with open(freezer_meta, mode='wb') as b_file:
                # json.dumps returns str; encode it for the binary handle
                b_file.write(
                    json.dumps(self.metadata(backup_resource)).encode('utf-8'))
            self.storage.put_metadata(engine_meta, freezer_meta, backup)
        finally:
            shutil.rmtree(tmpdir)
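RichQueue and QueuedThread pair one producer thread (the engine reading data) with one consumer (the storage writing it). A minimal stand-in built only on the standard library shows the same shape; the None sentinel is my assumption, and RichQueue's real end-of-stream protocol may differ:

    import queue
    import threading

    def producer(q):
        for chunk in (b"a", b"b", b"c"):
            q.put(chunk)
        q.put(None)                          # sentinel: end of stream

    def consumer(q, out):
        while True:
            chunk = q.get()
            if chunk is None:
                break
            out.append(chunk)

    q = queue.Queue(maxsize=2)               # mirrors queue_size=2 above
    out = []
    read_stream = threading.Thread(target=producer, args=(q,), daemon=True)
    write_stream = threading.Thread(target=consumer, args=(q, out), daemon=True)
    read_stream.start()
    write_stream.start()
    read_stream.join()
    write_stream.join()
    assert b"".join(out) == b"abc"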