Example #1
 def write_engine_meta(self, manifest_path, files_meta):
     # Serialize the metadata with msgpack, compress it and
     # write it to disk
     with open(manifest_path, 'wb') as manifest_file:
         cmp_meta = compress.one_shot_compress(self.compression_algo,
                                               msgpack.dumps(files_meta))
         manifest_file.write(cmp_meta)
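
A minimal, self-contained sketch of the round trip this method performs, for illustration only: zlib stands in for freezer's compress helper (whose decompress counterpart is not shown in these examples), so the function names and compression choice below are assumptions rather than freezer's API.

import zlib

import msgpack


def write_manifest(manifest_path, files_meta):
    # Serialize with msgpack, compress, and persist, mirroring
    # write_engine_meta above (zlib stands in for the compressor).
    with open(manifest_path, 'wb') as manifest_file:
        manifest_file.write(zlib.compress(msgpack.dumps(files_meta)))


def read_manifest(manifest_path):
    # Inverse of write_manifest: read, decompress, deserialize.
    with open(manifest_path, 'rb') as manifest_file:
        return msgpack.loads(zlib.decompress(manifest_file.read()))

Both sides must agree on the algorithm, which is why write_engine_meta carries it on self.compression_algo rather than hard-coding it.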
Example #2
File: rsync.py Project: mnaser/freezer
    def get_sign_delta(self, fs_path, manifest_path, write_queue):
        """Compute the file or fs tree path signatures.

        :param fs_path: file or directory tree to back up
        :param manifest_path: path to the previous backup manifest
        :param write_queue: queue receiving the compressed backup data
        :return: None
        """

        files_meta = {
            'files': {},
            'directories': {},
            'meta': {
                'broken_links_tot': '',
                'total_files': '',
                'total_directories': '',
                'backup_size_on_disk': 0,
                'backup_size_uncompressed': 0,
                'backup_size_compressed': 0,
                'platform': sys.platform
            },
            'abs_backup_path': os.getcwd(),
            'broken_links': [],
            'rsync_struct_ver': RSYNC_DATA_STRUCT_VERSION,
            'rsync_block_size': RSYNC_BLOCK_SIZE
        }

        # Get old file meta structure or an empty dict if not available
        old_fs_meta_struct = self.get_fs_meta_struct(manifest_path)

        if os.path.isdir(fs_path):
            # If given path is a directory, change cwd to path to backup
            os.chdir(fs_path)
            for root, dirs, files in os.walk(fs_path):
                self.process_file(root, fs_path, files_meta,
                                  old_fs_meta_struct, write_queue)

                # Filter out file names matching the exclude pattern
                # and log a warning when any names are dropped.
                if self.exclude:
                    filtered = [
                        name for name in files if self.exclude not in name
                    ]
                    if len(filtered) != len(files):
                        LOG.warning('Excluding file names matching with: '
                                    '{}'.format(self.exclude))
                    files = filtered

                for name in files:
                    file_path = os.path.join(root, name)
                    self.process_file(file_path, fs_path, files_meta,
                                      old_fs_meta_struct, write_queue)
        else:
            self.process_file(fs_path, os.getcwd(), files_meta,
                              old_fs_meta_struct, write_queue)
        if old_fs_meta_struct:
            for rel_path in old_fs_meta_struct['files']:
                if not files_meta['files'].get(rel_path):
                    self.gen_struct_for_deleted_files(files_meta,
                                                      old_fs_meta_struct,
                                                      rel_path, write_queue)

        # Flush any compressed buffered data
        flushed_data = self.compressor.flush()
        if flushed_data:
            flushed_data = self.process_backup_data(flushed_data,
                                                    do_compress=False)
            files_meta['meta']['backup_size_compressed'] += len(flushed_data)
            write_queue.put(flushed_data)

        # General metrics to be uploaded to the API and/or media storage
        files_meta['meta']['broken_links_tot'] = len(
            files_meta['broken_links'])
        files_meta['meta']['total_files'] = len(files_meta['files'])
        files_meta['meta']['total_directories'] = len(
            files_meta['directories'])
        files_meta['meta']['rsync_data_struct_ver'] = RSYNC_DATA_STRUCT_VERSION
        LOG.info("Backup session metrics: {0}".format(files_meta['meta']))

        # Serialize the metadata as JSON, compress it and
        # write it to disk
        compressed_json_meta = compress.one_shot_compress(
            self.compression_algo, json.dumps(files_meta))
        with open(manifest_path, 'wb') as manifest_file:
            manifest_file.write(compressed_json_meta)

        # Put a False sentinel on the queue so the consumer on the
        # other side knows to terminate:
        write_queue.put(False)
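
The final write_queue.put(False) is a termination handshake: whoever drains the queue stops when it sees the sentinel. The real freezer consumer is not part of this example, so the drain_to_file name and wiring below are hypothetical, showing only the pattern the sentinel implies.

import queue
import threading


def drain_to_file(write_queue, backup_path):
    # Consume compressed chunks until the False sentinel arrives,
    # appending each one to the backup file.
    with open(backup_path, 'wb') as backup_file:
        while True:
            chunk = write_queue.get()
            if chunk is False:
                break
            backup_file.write(chunk)


# Minimal wiring; in freezer the producer would be get_sign_delta.
q = queue.Queue()
writer = threading.Thread(target=drain_to_file, args=(q, 'backup.data'))
writer.start()
q.put(b'compressed chunk')  # stands in for real backup data
q.put(False)                # the sentinel, as in the method above
writer.join()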
Example #3
File: rsync.py Project: openstack/freezer
    def get_sign_delta(self, fs_path, manifest_path, write_queue):
        """Compute the file or fs tree path signatures.

        :param fs_path: file or directory tree to back up
        :param manifest_path: path to the previous backup manifest
        :param write_queue: queue receiving the compressed backup data
        :return: None
        """

        files_meta = {
            'files': {},
            'directories': {},
            'meta': {
                'broken_links_tot': '',
                'total_files': '',
                'total_directories': '',
                'backup_size_on_disk': 0,
                'backup_size_uncompressed': 0,
                'backup_size_compressed': 0,
                'platform': sys.platform
            },
            'abs_backup_path': os.getcwd(),
            'broken_links': [],
            'rsync_struct_ver': RSYNC_DATA_STRUCT_VERSION,
            'rsync_block_size': RSYNC_BLOCK_SIZE}

        # Get old file meta structure or an empty dict if not available
        old_fs_meta_struct = self.get_fs_meta_struct(manifest_path)

        if os.path.isdir(fs_path):
            # If given path is a directory, change cwd to path to backup
            os.chdir(fs_path)
            for root, dirs, files in os.walk(fs_path):
                self.process_file(root, fs_path, files_meta,
                                  old_fs_meta_struct, write_queue)

                # Filter out file names matching the exclude pattern
                # and log a warning when any names are dropped.
                if self.exclude:
                    filtered = [
                        name for name in files if self.exclude not in name
                    ]
                    if len(filtered) != len(files):
                        LOG.warning('Excluding file names matching with: '
                                    '{}'.format(self.exclude))
                    files = filtered

                for name in files:
                    file_path = os.path.join(root, name)
                    self.process_file(file_path, fs_path,
                                      files_meta, old_fs_meta_struct,
                                      write_queue)
        else:
            self.process_file(fs_path, os.getcwd(), files_meta,
                              old_fs_meta_struct, write_queue)
        if old_fs_meta_struct:
            for rel_path in old_fs_meta_struct['files']:
                if not files_meta['files'].get(rel_path):
                    self.gen_struct_for_deleted_files(
                        files_meta, old_fs_meta_struct, rel_path, write_queue)

        # Flush any compressed buffered data
        flushed_data = self.compressor.flush()
        if flushed_data:
            flushed_data = self.process_backup_data(flushed_data,
                                                    do_compress=False)
            files_meta['meta']['backup_size_compressed'] += len(flushed_data)
            write_queue.put(flushed_data)

        # General metrics to be uploaded to the API and/or media storage
        files_meta['meta']['broken_links_tot'] = len(
            files_meta['broken_links'])
        files_meta['meta']['total_files'] = len(files_meta['files'])
        files_meta['meta']['total_directories'] = len(
            files_meta['directories'])
        files_meta['meta']['rsync_data_struct_ver'] = RSYNC_DATA_STRUCT_VERSION
        LOG.info("Backup session metrics: {0}".format(
            files_meta['meta']))

        # Serialize the metadata as JSON, compress it and
        # write it to disk
        compressed_json_meta = compress.one_shot_compress(
            self.compression_algo, json.dumps(files_meta))
        with open(manifest_path, 'wb') as manifest_file:
            manifest_file.write(compressed_json_meta)

        # Put a False sentinel on the queue so the consumer on the
        # other side knows to terminate:
        write_queue.put(False)
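
get_sign_delta loads the previous manifest through get_fs_meta_struct, whose body is not shown in these examples. Below is a hedged sketch of the inverse of the JSON write at the end of the method; as before, zlib is an assumption standing in for freezer's compressor, and read_fs_meta_struct is a hypothetical name.

import json
import os
import zlib


def read_fs_meta_struct(manifest_path):
    # Return the previous manifest as a dict, or an empty dict when
    # no manifest exists yet (e.g. on the first backup run).
    if not os.path.exists(manifest_path):
        return {}
    with open(manifest_path, 'rb') as manifest_file:
        return json.loads(zlib.decompress(manifest_file.read()))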