Example #1
    def test_apply_async1(self):
        download = largetransfer.Download('src')
        download._run = mock.MagicMock()
        download.apply_async()
        assert download._read_fd is not None
        assert download._write_fd is not None
        assert download.output is not None
Example #2
    def test_stop(self):
        """
        Test stop method when download is not running
        """
        download = largetransfer.Download('src')
        download._kill = mock.MagicMock()
        download.stop()
        assert download._kill.call_count == 0
Example #3
    def test_terminate(self):
        """
        Test terminate method when download is not running
        """
        download = largetransfer.Download('src', 'dst')
        download._kill = mock.MagicMock()
        download.terminate()
        assert download._kill.call_count == 0, download._kill.call_count
Example #4
    def test__init__default(self):
        download = largetransfer.Download('src')
        assert download.use_pigz is True
        assert download._pool_size == largetransfer.DEFAULT_POOL_SIZE
        assert download._cb_interval == largetransfer.DEFAULT_CALLBACK_INTERVAL
        assert download._manifest is not None
        assert download._callback_thread is None
        assert download._read_fd is None
        assert download._write_fd is None
        assert download.output is None
Example #5
def download_file(context):
    sources = [os.path.join(context.remote_dir, source.name) for source in context.sources]
    context.tr = largetransfer.Download(sources, context.tmp_dir, simple=True)
    context.tr.apply_async()
    try:
        context.tr.join()
    except:
        context.error = sys.exc_info()
    context.downloaded_files = [os.path.join(context.tmp_dir, source.src)
                                for source in context.sources]
Example #6
def i_download_with_manifest(context):
    manifest_url = context.manifest.cloudfs_path
    context.tr = largetransfer.Download(manifest_url)
    context.tr.apply_async()
    tmp_dir = tempfile.mkdtemp()
    with open(os.path.join(tmp_dir, 'output'), 'wb') as f:
        while True:
            data = context.tr.output.read(4096)
            if not data:
                break
            f.write(data)
    try:
        context.tr.join()
    except:
        context.error = sys.exc_info()
    context.downloaded_files = [os.path.join(tmp_dir, 'output')]
Example #7
    def test__chunk_generator2(self, cryptotool_mock):
        file_path1, size1, md5_sum1 = make_file(name='chunk.000', size=2)
        file_path2, size2, md5_sum2 = make_file(name='chunk.001', size=2)
        file_path3, size3, md5_sum3 = make_file(name='chunk.002', size=2)
        manifest = largetransfer.Manifest()
        manifest.data = {
            "version": 2.0,
            "description": '',
            "tags": {},
            "files": [{
                'name': 'test_name',
                'streamer': 'xyz',
                'compressor': 'zyx',
                'created_at': None,
                'chunks': [
                    ('chunk.000', md5_sum1, size1),
                    ('chunk.001', md5_sum2, size2),
                    ('chunk.002', md5_sum3, size3),
                ]
            }]
        }
        manifest.cloudfs_path = 'file://' + os.path.join(tmp_dir, 'manifest.json')
        manifest.save()

        cryptotool_mock.calculate_md5_sum.return_value = '000'

        download = largetransfer.Download(manifest.cloudfs_path)

        try:
            for chunk, streamer, compressor in download._chunk_generator():
                pass
        except KeyboardInterrupt:
            assert largetransfer.__thread_error__
            assert largetransfer.__thread_error__[0] == largetransfer.MD5SumError
        except:
            assert False
        else:
            assert False
Example #8
    def test__init__custom(self):
        def progress_cb(progress):
            return

        download = largetransfer.Download('src',
                                          use_pigz=False,
                                          pool_size=1,
                                          progress_cb=progress_cb,
                                          cb_interval=10)
        assert download.use_pigz is False
        assert download._pool_size == 1
        assert download._cb_interval == 10
        assert download._progress_cb == progress_cb
        assert download._manifest is not None
        assert download._callback_thread is None
        assert download._read_fd is None
        assert download._write_fd is None
        assert download.output is None
Example #9
    def test_apply_async2(self):
        file_path1, size1, md5_sum1 = make_file(name='chunk.000', size=2)
        file_path2, size2, md5_sum2 = make_file(name='chunk.001', size=2)
        file_path3, size3, md5_sum3 = make_file(name='chunk.002', size=2)
        manifest = largetransfer.Manifest()
        manifest.data = {
            "version": 2.0,
            "description": '',
            "tags": {},
            "files": [{
                'name': 'test_name',
                'streamer': '',
                'compressor': '',
                'created_at': None,
                'chunks': [
                    ('chunk.000', md5_sum1, size1),
                    ('chunk.001', md5_sum2, size2),
                    ('chunk.002', md5_sum3, size3),
                ]
            }]
        }
        manifest.cloudfs_path = 'file://' + os.path.join(tmp_dir, 'manifest.json')
        manifest.save()

        download = largetransfer.Download(manifest.cloudfs_path)
        download.apply_async()
        size = 0
        while True:
            data = download.output.read(1024)
            if not data:
                break
            size += len(data)
        download.join()
        assert size == 2 * 3 * 1024 * 1024, size
        assert download.progress == size
Example #10
    def test__chunk_generator1(self):
        file_path1, size1, md5_sum1 = make_file(name='chunk.000', size=2)
        file_path2, size2, md5_sum2 = make_file(name='chunk.001', size=2)
        file_path3, size3, md5_sum3 = make_file(name='chunk.002', size=2)
        manifest = largetransfer.Manifest()
        manifest.data = {
            "version": 2.0,
            "description": '',
            "tags": {},
            "files": [{
                'name': 'test_name',
                'streamer': 'xyz',
                'compressor': 'zyx',
                'created_at': None,
                'chunks': [
                    ('chunk.000', md5_sum1, size1),
                    ('chunk.001', md5_sum2, size2),
                    ('chunk.002', md5_sum3, size3),
                ]
            }]
        }
        manifest.cloudfs_path = 'file://' + os.path.join(tmp_dir, 'manifest.json')
        manifest.save()

        download = largetransfer.Download(manifest.cloudfs_path)

        i = 0
        try:
            for chunk, streamer, compressor in download._chunk_generator():
                assert os.path.basename(chunk) == 'chunk.00%s' % i, chunk
                assert streamer == 'xyz'
                assert compressor == 'zyx'
                i += 1
        except KeyboardInterrupt:
            assert largetransfer.__thread_error__ is None, largetransfer.__thread_error__
Example #11
    def test_apply_async3(self):
        file_path1, size1, md5_sum1 = make_file(name='chunk.000', size=2)
        file_path2, size2, md5_sum2 = make_file(name='chunk.001', size=2)
        file_path3, size3, md5_sum3 = make_file(name='chunk.002', size=2)
        manifest = largetransfer.Manifest()
        manifest.data = {
            "version": 2.0,
            "description": '',
            "tags": {},
            "files": [{
                'name': 'test_name',
                'streamer': 'xyz',
                'compressor': 'zyx',
                'created_at': None,
                'chunks': [
                    ('chunk.000', md5_sum1, size1),
                    ('chunk.001', md5_sum2, size2),
                    ('chunk.002', md5_sum3, size3),
                ]
            }]
        }
        manifest.cloudfs_path = 'file://' + os.path.join(tmp_dir, 'manifest.json')
        manifest.save()

        download = largetransfer.Download(manifest.cloudfs_path)
        download.apply_async()
        try:
            download.join()
        except largetransfer.TransferError as e:
            assert e.args[0] == 'Exception'
            assert e.args[1] == 'Unsupported compressor: zyx'
        except:
            assert False
        else:
            assert False
Example #12
    def _ensure(self):
        # The snapshot should be applied after layout: download and extract data.
        # This could be done on an already ensured volume.
        # Example: resyncing slave data

        if not self._lvm_volume:
            # First of all, merge self config and snapshot config
            self.snap = storage2.snapshot(self.snap) if self.snap else None

            for attr in ('fstype', 'size', 'vg', 'mpoint'):
                if not getattr(self, attr, None):
                    if not self.snap or not getattr(self.snap, attr, None):
                        raise storage2.StorageError('Missing ephemeral volume attribute "%s"' % attr)
                    setattr(self, attr, getattr(self.snap, attr))
            if not (self.disk or self.disks):
                raise storage2.StorageError('Missing "disk" or "disks" attribute')

            if self.disk:
                self.disk = storage2.volume(self.disk)
                # Compatibility with storage v1
                if self.disk.device and self.disk.type == 'base':
                    if self.disk.device.startswith('/dev/sd'):
                        self.disk = storage2.volume(type='ec2_ephemeral', name='ephemeral0')
                    elif 'google' in self.disk.device:
                        self.disk = storage2.volume(type='gce_ephemeral', name='ephemeral-disk-0')

            self._lvm_volume = storage2.volume(
                type='lvm',
                pvs=[self.disk] if self.disk else self.disks,
                size=self.size + 'VG',
                vg=self.vg,
                name='data')

        self._lvm_volume.ensure()
        self.device = self._lvm_volume.device
        # To allow ensure(mkfs=True, mount=True) after the volume has passed
        # scalarizr's first initialization
        self.fscreated = self.is_fs_created()

        if self.snap:
            self.snap = storage2.snapshot(self.snap)
            # umount device to allow filesystem re-creation
            if self.mounted_to():
                self.umount()
            self.mkfs(force=True)

            tmp_mpoint = not self.mpoint
            if tmp_mpoint:
                tmp_mpoint = tempfile.mkdtemp()
                self.mpoint = tmp_mpoint

            try:
                transfer = largetransfer.Download(self.snap.path, self.mpoint + '/', simple=True)
                self.mount()
                if hasattr(self.snap, 'data_size'):
                    fs_free = coreutils.statvfs(self.mpoint)['avail']
                    if fs_free < int(self.snap.data_size):
                        raise storage2.StorageError('Not enough free space'
                                                    ' on device %s to restore snapshot.' %
                                                    self.device)

                transfer.apply_async()
                try:
                    transfer.join()
                except:
                    err = sys.exc_info()[1]
                    raise storage2.StorageError('Failed to download snapshot data. %s' % err)
            except:
                e = sys.exc_info()[1]
                raise storage2.StorageError("Snapshot restore error: %s" % e)
            finally:
                try:
                    self.umount()
                finally:
                    if tmp_mpoint:
                        self.mpoint = None
                        os.rmdir(tmp_mpoint)

            self.snap = None
Example #13
    def test_not_running(self):
        download = largetransfer.Download('src')
        assert not download.running
Example #14
    def _run(self):
        # Apply resource's meta
        mnf = cloudfs.Manifest(cloudfs_path=self.cloudfs_source)
        bak = backup.restore(**mnf.meta)

        incrementals = []
        if bak.backup_type == 'incremental':
            incrementals = [bak]
            while bak.prev_cloudfs_source:
                tmpmnf = cloudfs.Manifest(cloudfs_path=bak.prev_cloudfs_source)
                bak = backup.restore(**tmpmnf.meta)
                if bak.backup_type == 'incremental':
                    incrementals.insert(0, bak)
        self.incrementals = incrementals
        if self.incrementals:
            self.log_file = self.incrementals[-1].log_file
            self.log_pos = self.incrementals[-1].log_pos
        else:
            self.log_file = bak.log_file
            self.log_pos = bak.log_pos

        coreutils.clean_dir(__mysql__['data_dir'])

        LOG.info('Downloading the base backup (LSN: 0..%s)', bak.to_lsn)

        trn = largetransfer.Download(bak.cloudfs_source)
        trn.apply_async()

        streamer = xbstream.args(extract=True, directory=__mysql__['data_dir'])
        streamer.popen(stdin=trn.output)

        trn.join()

        LOG.info('Preparing the base backup')
        innobackupex(__mysql__['data_dir'],
                     apply_log=True,
                     redo_only=True,
                     ibbackup='xtrabackup',
                     user=__mysql__['root_user'],
                     password=__mysql__['root_password'])

        if incrementals:
            inc_dir = os.path.join(__mysql__['tmp_dir'],
                                   'xtrabackup-restore-inc')
            i = 0
            for inc in incrementals:
                try:
                    os.makedirs(inc_dir)
                    inc = backup.restore(inc)
                    LOG.info(
                        'Downloading incremental backup #%d (LSN: %s..%s)', i,
                        inc.from_lsn, inc.to_lsn)

                    trn = largetransfer.Download(inc.cloudfs_source)
                    trn.apply_async()

                    streamer = xbstream.args(extract=True, directory=inc_dir)
                    streamer.popen(stdin=trn.output)

                    trn.join()

                    LOG.info('Preparing incremental backup #%d', i)
                    innobackupex(__mysql__['data_dir'],
                                 apply_log=True,
                                 redo_only=True,
                                 incremental_dir=inc_dir,
                                 ibbackup='xtrabackup',
                                 user=__mysql__['root_user'],
                                 password=__mysql__['root_password'])
                    i += 1
                finally:
                    coreutils.remove(inc_dir)

        LOG.info('Preparing the full backup')
        innobackupex(__mysql__['data_dir'],
                     apply_log=True,
                     user=__mysql__['root_user'],
                     password=__mysql__['root_password'])
        coreutils.chown_r(__mysql__['data_dir'], 'mysql', 'mysql')

        self._mysql_init.start()
        if int(__mysql__['replication_master']):
            LOG.info("Master will reset it's binary logs, "
                     "so updating binary log position in backup manifest")
            log_file, log_pos = self._client().master_status()
            meta = mnf.meta
            meta.update({'log_file': log_file, 'log_pos': log_pos})
            mnf.meta = meta
            mnf.save()
Example #15
    def test_progress(self):
        assert_flag = multiprocessing.Value('i', 0)
        progress_list = []

        def get(*args, **kwds):
            if kwds['report_to']:
                for i in range(100):
                    kwds['report_to'](i, 0)
                    time.sleep(0.01)
                for i in range(100):
                    if i not in progress_list:
                        break
                else:
                    assert_flag.value = 1
            url = args[-2]
            dst = args[-1]
            return os.path.join(dst, os.path.basename(url))

        cloudfs_types['file'].get = mock.MagicMock(side_effect=get)
        file_path1, size1, md5_sum1 = make_file(name='chunk.000', size=2)
        file_path2, size2, md5_sum2 = make_file(name='chunk.001', size=2)
        file_path3, size3, md5_sum3 = make_file(name='chunk.002', size=2)
        manifest = largetransfer.Manifest()
        manifest.data = {
            "version": 2.0,
            "description": '',
            "tags": {},
            "files": [{
                'name': 'test_name',
                'streamer': '',
                'compressor': '',
                'created_at': None,
                'chunks': [
                    ('chunk.000', md5_sum1, size1),
                    ('chunk.001', md5_sum2, size2),
                    ('chunk.002', md5_sum3, size3),
                ]
            }]
        }
        manifest.cloudfs_path = 'file://' + os.path.join(tmp_dir, 'manifest.json')
        manifest.save()

        def _on_progress(bytes_completed, size):
            progress_list.append(bytes_completed)

        download = largetransfer.Download(manifest.cloudfs_path)
        download._tmp_dir = tmp_dir
        download._on_progress = _on_progress
        download.apply_async()
        while download.output.read(1024):
            pass
        download.join()

        assert assert_flag.value == 1
Example #16
    def test_initial_progress(self):
        download = largetransfer.Download('src')
        assert download.progress == 0
Example #17
    def test_join(self):
        """
        Test join method when download is not started
        """
        download = largetransfer.Download('src')
        download.join()
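
Taken together, the examples show a single Download lifecycle: construct with a manifest URL (or with a list of sources plus a destination directory in simple mode), start the transfer with apply_async(), optionally stream the reassembled data from the output file object, and call join() to wait for completion and surface worker errors. The minimal sketch below strings those calls together; the largetransfer import path and the manifest URL are illustrative assumptions, not taken from the examples above.

    # Minimal lifecycle sketch based on Examples #6, #9 and #11.
    # Assumptions: the import path and the manifest URL are hypothetical
    # placeholders; apply_async(), output and join() are used as shown above.
    import os
    import sys
    import tempfile

    from scalarizr.storage2.cloudfs import largetransfer  # assumed import path

    manifest_url = 'file:///path/to/manifest.json'  # hypothetical location

    download = largetransfer.Download(manifest_url)
    download.apply_async()

    # Stream the reassembled payload from download.output (Examples #6 and #9).
    tmp_dir = tempfile.mkdtemp()
    with open(os.path.join(tmp_dir, 'output'), 'wb') as f:
        while True:
            data = download.output.read(4096)
            if not data:
                break
            f.write(data)

    # join() waits for the worker processes and re-raises transfer failures
    # as largetransfer.TransferError (Example #11).
    try:
        download.join()
    except largetransfer.TransferError:
        sys.stderr.write('Download failed: %s\n' % (sys.exc_info()[1],))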