Example #1
def snapshot_create(backup_opt_dict):
    if is_windows():
        if backup_opt_dict.vssadmin:
            # Create a shadow copy.
            backup_opt_dict.shadow_path, backup_opt_dict.shadow = \
                vss_create_shadow_copy(backup_opt_dict.windows_volume)

            # Start SQL Server again once the shadow copy has been created
            if backup_opt_dict.mode == 'sqlserver':
                start_sql_server(backup_opt_dict)

    else:
        # If lvm_auto_snap is true, the volume group and volume name will
        # be extracted automatically
        if backup_opt_dict.lvm_auto_snap:
            lvm_list = get_lvm_info(
                backup_opt_dict.lvm_auto_snap)
            backup_opt_dict.lvm_volgroup = lvm_list[0]
            backup_opt_dict.lvm_srcvol = lvm_list[2]

        # Generate the lvm_snap if lvm arguments are available
        lvm_snap(backup_opt_dict)

    if is_windows() and backup_opt_dict.vssadmin:
        backup_opt_dict.path_to_backup = use_shadow(
            backup_opt_dict.path_to_backup,
            backup_opt_dict.windows_volume)
    return backup_opt_dict
Example #2
    def test_using_guess(self, mock_get_mount_from_path, mock_lvm_guess):
        mock_get_mount_from_path.return_value = '/home/somedir', 'some-snap-path'
        mock_lvm_guess.return_value = 'vg_test', 'lv_test', 'lvm_device'
        mounts = ('/dev/mapper/vg_prova-lv_prova_vol1 /home/pippo ext4 rw,relatime,data=ordered 0 0')
        mocked_open_function = mock_open(read_data=mounts)

        with patch("__builtin__.open", mocked_open_function):
            res = lvm.get_lvm_info('lvm_auto_snap_value')

        expected_result = {'volgroup': 'vg_test',
                           'snap_path': 'some-snap-path',
                           'srcvol': 'lvm_device'}
        self.assertEqual(res, expected_result)
Example #3
    def test_using_mount(self, mock_get_mount_from_path, mock_lvm_guess, mock_popen):
        mock_get_mount_from_path.return_value = '/home/somedir', 'some-snap-path'
        mock_lvm_guess.side_effect = [(None, None, None), ('vg_test', 'lv_test', 'lvm_device')]
        mounts = ('/dev/mapper/vg_prova-lv_prova_vol1 /home/pippo ext4 rw,relatime,data=ordered 0 0')
        mocked_open_function = mock_open(read_data=mounts)
        mock_process = Mock()
        mock_process.returncode = 0
        mock_popen.return_value = mock_process
        mock_process.communicate.return_value = '', ''

        with patch("__builtin__.open", mocked_open_function):
            res = lvm.get_lvm_info('lvm_auto_snap_value')

        expected_result = {'volgroup': 'vg_test',
                           'snap_path': 'some-snap-path',
                           'srcvol': 'lvm_device'}
        self.assertEqual(res, expected_result)
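The two test methods above take extra mock arguments, which implies @patch decorators and imports that the snippets omit. Below is a minimal sketch of how they could be wired up; the patch targets, the test class name and the freezer.lvm import path are assumptions, not taken from the snippets.

import unittest

from mock import Mock, mock_open, patch  # Mock/mock_open/patch are used in the test bodies above

from freezer import lvm  # assumed import path


class TestGetLvmInfo(unittest.TestCase):

    # @patch decorators are applied bottom-up, so the lowest one maps to the
    # first injected argument (mock_get_mount_from_path).
    @patch('freezer.lvm.lvm_guess')
    @patch('freezer.lvm.get_mount_from_path')
    def test_using_guess(self, mock_get_mount_from_path, mock_lvm_guess):
        pass  # body as in Example #2 above

    @patch('freezer.lvm.subprocess.Popen')
    @patch('freezer.lvm.lvm_guess')
    @patch('freezer.lvm.get_mount_from_path')
    def test_using_mount(self, mock_get_mount_from_path, mock_lvm_guess,
                         mock_popen):
        pass  # body as in Example #3 above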
Example #4
def backup_mode_fs(backup_opt_dict, time_stamp, manifest_meta_dict):
    """
    Execute the necessary tasks for file system backup mode
    """

    logging.info('[*] File System backup is being executed...')

    try:

        if is_windows():
            # Create a shadow copy.
            backup_opt_dict.shadow_path, backup_opt_dict.shadow = \
                vss_create_shadow_copy(backup_opt_dict.volume)

        else:
            # If lvm_auto_snap is true, the volume group and volume name will
            # be extracted automatically
            if backup_opt_dict.lvm_auto_snap:
                backup_opt_dict = get_lvm_info(backup_opt_dict)

            # Generate the lvm_snap if lvm arguments are available
            lvm_snap(backup_opt_dict)

        # Build the backup file name from hostname, backup name, timestamp and backup level
        file_name = add_host_name_ts_level(backup_opt_dict, time_stamp)
        meta_data_backup_file = u'tar_metadata_{0}'.format(file_name)
        backup_opt_dict.meta_data_file = meta_data_backup_file

        # Initialize a Queue for a maximum of 2 items
        tar_backup_queue = multiprocessing.Queue(maxsize=2)

        if is_windows():
            backup_opt_dict.absolute_path = backup_opt_dict.src_file
            backup_opt_dict.src_file = use_shadow(backup_opt_dict.src_file,
                                                  backup_opt_dict.volume)

        # Execute a tar gzip of the specified directory and return
        # small chunks (default 128MB), timestamp, backup, filename,
        # file chunk index and the tar meta-data file
        (backup_opt_dict, tar_command, manifest_meta_dict) = \
            gen_tar_command(opt_dict=backup_opt_dict,
                            time_stamp=time_stamp,
                            remote_manifest_meta=manifest_meta_dict)

        tar_backup_stream = multiprocessing.Process(
            target=tar_backup, args=(
                backup_opt_dict, tar_command, tar_backup_queue,))

        tar_backup_stream.daemon = True
        tar_backup_stream.start()

        add_object_stream = multiprocessing.Process(
            target=add_object, args=(
                backup_opt_dict, tar_backup_queue, file_name, time_stamp))
        add_object_stream.daemon = True
        add_object_stream.start()

        tar_backup_stream.join()
        tar_backup_queue.put({False: False})
        tar_backup_queue.close()
        add_object_stream.join()

        if add_object_stream.exitcode:
            raise Exception('failed to upload object to swift server')

        (backup_opt_dict, manifest_meta_dict, tar_meta_to_upload,
            tar_meta_prev) = gen_manifest_meta(
                backup_opt_dict, manifest_meta_dict, meta_data_backup_file)

        manifest_file = u''
        meta_data_abs_path = os.path.join(backup_opt_dict.workdir,
                                          tar_meta_prev)

        # Upload swift manifest for segments
        if backup_opt_dict.upload:
            # Request a new auth client in case the current token
            # is expired before uploading tar meta data or the swift manifest
            backup_opt_dict = get_client(backup_opt_dict)

            if not backup_opt_dict.no_incremental:
                # Upload tar incremental meta data file and remove it
                logging.info('[*] Uploading tar meta data file: {0}'.format(
                    tar_meta_to_upload))
                with open(meta_data_abs_path, 'r') as meta_fd:
                    backup_opt_dict.sw_connector.put_object(
                        backup_opt_dict.container, tar_meta_to_upload, meta_fd)
                # Removing tar meta data file, so we have only one
                # authoritative version on swift
                logging.info('[*] Removing tar meta data file: {0}'.format(
                    meta_data_abs_path))
                os.remove(meta_data_abs_path)
            # Upload manifest to swift
            manifest_upload(
                manifest_file, backup_opt_dict, file_name, manifest_meta_dict)

    finally:
        if is_windows():
            # Delete the shadow copy after the backup
            vss_delete_shadow_copy(backup_opt_dict.shadow,
                                   backup_opt_dict.volume)
        else:
            # Unmount and remove lvm snapshot volume
            lvm_snap_remove(backup_opt_dict)