Example #1
    def _run(self):
        # ..
        LOG.debug("Creating tmpfs...")
        self._tranzit_vol.size = int(self.chunk_size *
                                     self._transfer.num_workers * 1.2)
        self._tranzit_vol.ensure(mkfs=True)
        LOG.debug("Creating tmpfs...")
        try:
            res = self._transfer.run()
            LOG.debug("self._transfer finished")

            if not self._up:
                if self._restorer:
                    LOG.debug("waiting restorer to finish...")
                    self._restorer.join()
                return res

            elif self._up:
                if res["failed"] or self._killed:
                    if self.manifest:
                        # TODO: get rid of the duplicate delete
                        self.manifest.delete()
                    return
                if self.multipart:
                    return res["multipart_result"]
                else:
                    return self.manifest
        finally:
            LOG.debug("Destroying tmpfs")
            self._tranzit_vol.destroy()
            coreutils.remove(self._tranzit_vol.mpoint)
Example #2
    def on_before_host_up(self, message):
        """
        Configure MySQL __mysql__['behavior']
        @type message: scalarizr.messaging.Message
        @param message: HostUp message
        """
        LOG.debug("on_before_host_up")

        self.generate_datadir()
        self.mysql.service.stop('Configuring MySQL')

        # On Debian/GCE we've got 'Another MySQL daemon already running with the same unix socket.'
        socket_file = mysql2_svc.my_print_defaults('mysqld').get('socket')
        if socket_file:
            coreutils.remove(socket_file)

        if 'Amazon' == linux.os['name']:
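            # on Amazon Linux, keep mysqld's pid file inside the data directory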
            self.mysql.my_cnf.pid_file = os.path.join(__mysql__['data_dir'], 'mysqld.pid')

        repl = 'master' if int(__mysql__['replication_master']) else 'slave'
        bus.fire('before_mysql_configure', replication=repl)
        if repl == 'master':
            self._init_master(message)
        else:
            self._init_slave(message)
        # Force to resave volume settings
        __mysql__['volume'] = storage2.volume(__mysql__['volume'])
        bus.fire('service_configured', service_name=__mysql__['behavior'],
                        replication=repl, preset=self.initial_preset)
Example #3
 def remove_systemd_generated_mount_unit(mpoint):
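     # systemd derives mount unit names from the mount point path, replacing
     # '/' with '-' (e.g. /mnt/storage -> mnt-storage.mount)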
     unit_name = '{}.mount'.format(mpoint.replace('/', '-')[1:])
     logger.debug('Removing systemd runtime unit %s', unit_name)
     coreutils.remove(
         '/run/systemd/generator/local-fs.target.wants/{}'.format(
             unit_name))
     coreutils.remove('/run/systemd/generator/{}'.format(unit_name))
Example #4
 def _install_download_only(self, name_version, download_dir, **kwds):
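     # apt-get expects a 'partial' subdirectory inside Dir::Cache::Archives
     # for in-progress downloads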
     partial_dir = os.path.join(download_dir, 'partial')
     if not os.path.exists(partial_dir):
         os.makedirs(partial_dir)
     cmd = 'install --download-only -o=Dir::Cache::Archives={0} {1}'.format(
             download_dir, name_version)
     self.apt_get_command(cmd, raise_exc=True)
     # remove everything that is not a .deb package
     for name in os.listdir(download_dir):
         if not name.endswith('.deb'):
             coreutils.remove(os.path.join(download_dir, name))
Example #5
 def save(self):
     if self.cloudfs_path:
         cfs = cloudfs(urlparse.urlparse(self.cloudfs_path).scheme)
         # avoid leaking the open fd and a suffix-less companion temp file
         fd, source = tempfile.mkstemp(suffix='.json')
         os.close(fd)
         self.write(source)
         try:
             cfs.put(source, self.cloudfs_path)
         finally:
             coreutils.remove(source)
     elif self.filename:
         self.write(self.filename)
Example #6
 def __init__(self, filename=None, cloudfs_path=None):
     self.reset()
     if filename:
         self.read(filename)
         self.filename = filename
     elif cloudfs_path:
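         # download the manifest into a scratch dir that is removed afterwards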
         cfs = cloudfs(urlparse.urlparse(cloudfs_path).scheme)
         target_dir = tempfile.mkdtemp()
         cfs.get(cloudfs_path, target_dir)
         try:
             self.read(os.path.join(target_dir, os.path.basename(cloudfs_path)))
             self.cloudfs_path = cloudfs_path
         finally:
             coreutils.remove(target_dir)
Example #7
    def restore_backup(self, name, backup_id):
        def dpkg_configure(raise_exc=False):
            cmd = ('dpkg', '--configure', '-a')
            return linux.system(cmd, raise_exc=raise_exc)

        packages = linux.system(("dpkg-query", "-W", "-f=${Status}|${Package}\n"))[0].strip().split('\n')
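        # packages left half-installed by an interrupted dpkg run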
        problem_packages = (package.split('|')[1] for package in packages
                            if package.split('|')[0] in ('install ok unpacked', 'install ok half-configured'))

        # delete postinst scripts of problem packages; avoid reusing 'name',
        # which is passed to restore_backup() below
        for package in problem_packages:
            postinst_path = '/var/lib/dpkg/info/{0}.postinst'.format(package)
            coreutils.remove(postinst_path)

        dpkg_configure(raise_exc=True)

        return super(AptPackageMgr, self).restore_backup(name, backup_id)
Example #8
	def _run(self):
		LOG.debug("Creating tmpfs")
		self._tranzit_vol.size = int(self.chunk_size * self._transfer.num_workers * 1.1)
		self._tranzit_vol.ensure(mkfs=True)
		try:
			res = self._transfer.run()

			if self.direction == self.DOWNLOAD:
				self._restorer.join()
			elif self.direction == self.UPLOAD:
				if self.multipart:
					return res["multipart_result"]
				else:
					return self._upload_res
		finally:
			LOG.debug("Destroying tmpfs")
			self._tranzit_vol.destroy()
			coreutils.remove(self._tranzit_vol.mpoint)
Example #9
    def restore_backup(self, name, backup_id):
        def dpkg_configure(raise_exc=False):
            cmd = ('dpkg', '--configure', '-a')
            return linux.system(cmd, raise_exc=raise_exc)

        # collect the packages dpkg failed to configure
        error_re = re.compile(r'dpkg: error processing ([^\s]+)')
        problem_packages = []
        for line in dpkg_configure()[1].splitlines():
            m = error_re.match(line)
            if m:
                problem_packages.append(m.group(1))

        # delete postinst scripts of problem packages; avoid reusing 'name',
        # which is passed to restore_backup() below
        for package in problem_packages:
            postinst_path = '/var/lib/dpkg/info/{0}.postinst'.format(package)
            coreutils.remove(postinst_path)

        dpkg_configure(raise_exc=True)

        return super(AptPackageMgr, self).restore_backup(name, backup_id)
Example #10
    def _install(self, packages_path, metadata):
        # Restore system state if we have half-configured packages before install
        def dpkg_configure(raise_exc=False):
            cmd = ('dpkg', '--configure', '-a')
            return system(cmd, raise_exc=raise_exc)

        packages = system(("dpkg-query", "-W",
                           "-f=${Status}|${Package}\n"))[0].strip().split('\n')
        problem_packages = (
            package.split('|')[1] for package in packages
            if package.split('|')[0] in ('install ok unpacked',
                                         'install ok half-configured'))

        # delete postinst scripts of problem packages
        for name in problem_packages:
            postinst_path = '/var/lib/dpkg/info/{0}.postinst'.format(name)
            coreutils.remove(postinst_path)

        dpkg_configure(raise_exc=True)

        packages = tuple(metadata.get('packages'))
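        # --force-downgrade lets dpkg install the saved versions even if they
        # are older than the ones currently installed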
        system(('dpkg', '-i', '--force-downgrade') + packages,
               cwd=packages_path,
               exc_class=DpkgError)
Example #11
 def before_rebundle(self):
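     # udev's persistent net rules pin MAC-to-interface mappings; stash them
     # in /tmp so the rebundled image does not inherit this instance's NICs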
     rulename = '70-persistent-net.rules'
     coreutils.remove('/tmp/' + rulename)
     if os.path.exists('/etc/udev/rules.d/' + rulename):
         shutil.move('/etc/udev/rules.d/' + rulename, '/tmp')
Example #12
    def _run(self):
        # Apply resource's meta
        mnf = cloudfs.Manifest(cloudfs_path=self.cloudfs_source)
        bak = backup.restore(**mnf.meta)

        incrementals = []
        if bak.backup_type == "incremental":
            incrementals = [bak]
            while bak.prev_cloudfs_source:
                tmpmnf = cloudfs.Manifest(cloudfs_path=bak.prev_cloudfs_source)
                bak = backup.restore(**tmpmnf.meta)
                if bak.backup_type == "incremental":
                    incrementals.insert(0, bak)
        self.incrementals = incrementals
        if self.incrementals:
            self.log_file = self.incrementals[-1].log_file
            self.log_pos = self.incrementals[-1].log_pos
        else:
            self.log_file = bak.log_file
            self.log_pos = bak.log_pos

        coreutils.clean_dir(__mysql__["data_dir"])

        LOG.info("Downloading the base backup (LSN: 0..%s)", bak.to_lsn)
        trn = cloudfs.LargeTransfer(
            bak.cloudfs_source,
            __mysql__["data_dir"],
            streamer=xbstream.args(extract=True, directory=__mysql__["data_dir"]),
        )
        trn.run()

        LOG.info("Preparing the base backup")
        innobackupex(
            __mysql__["data_dir"],
            apply_log=True,
            redo_only=True,
            ibbackup="xtrabackup",
            user=__mysql__["root_user"],
            password=__mysql__["root_password"],
        )

        if incrementals:
            inc_dir = os.path.join(__mysql__["tmp_dir"], "xtrabackup-restore-inc")
            i = 0
            for inc in incrementals:
                try:
                    os.makedirs(inc_dir)
                    inc = backup.restore(inc)
                    LOG.info("Downloading incremental backup #%d (LSN: %s..%s)", i, inc.from_lsn, inc.to_lsn)
                    trn = cloudfs.LargeTransfer(
                        inc.cloudfs_source, inc_dir, streamer=xbstream.args(extract=True, directory=inc_dir)
                    )

                    trn.run()  # todo: Largetransfer should support custom decompressor proc
                    LOG.info("Preparing incremental backup #%d", i)
                    innobackupex(
                        __mysql__["data_dir"],
                        apply_log=True,
                        redo_only=True,
                        incremental_dir=inc_dir,
                        ibbackup="xtrabackup",
                        user=__mysql__["root_user"],
                        password=__mysql__["root_password"],
                    )
                    i += 1
                finally:
                    coreutils.remove(inc_dir)

        LOG.info("Preparing the full backup")
        innobackupex(
            __mysql__["data_dir"], apply_log=True, user=__mysql__["root_user"], password=__mysql__["root_password"]
        )
        coreutils.chown_r(__mysql__["data_dir"], "mysql", "mysql")

        self._mysql_init.start()
        if int(__mysql__["replication_master"]):
            LOG.info("Master will reset it's binary logs, " "so updating binary log position in backup manifest")
            log_file, log_pos = self._client().master_status()
            meta = mnf.meta
            meta.update({"log_file": log_file, "log_pos": log_pos})
            mnf.meta = meta
            mnf.save()
Example #13
    def _run(self):
        # Apply resource's meta
        mnf = cloudfs.Manifest(cloudfs_path=self.cloudfs_source)
        bak = backup.restore(**mnf.meta)

        incrementals = []
        if bak.backup_type == 'incremental':
            incrementals = [bak]
            while bak.prev_cloudfs_source:
                tmpmnf = cloudfs.Manifest(cloudfs_path=bak.prev_cloudfs_source)
                bak = backup.restore(**tmpmnf.meta)
                if bak.backup_type == 'incremental':
                    incrementals.insert(0, bak)
        self.incrementals = incrementals
        if self.incrementals:
            self.log_file = self.incrementals[-1].log_file
            self.log_pos = self.incrementals[-1].log_pos
        else:
            self.log_file = bak.log_file
            self.log_pos = bak.log_pos

        coreutils.clean_dir(__mysql__['data_dir'])

        LOG.info('Downloading the base backup (LSN: 0..%s)', bak.to_lsn)

        trn = largetransfer.Download(bak.cloudfs_source)
        trn.apply_async()

        # unpack the downloaded stream with xbstream directly into the data dir
        streamer = xbstream.args(extract=True, directory=__mysql__['data_dir'])
        streamer.popen(stdin=trn.output)

        trn.join()

        LOG.info('Preparing the base backup')
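        # --redo-only applies the redo log but skips the rollback phase, so
        # that incremental backups can still be applied on top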
        innobackupex(__mysql__['data_dir'],
                     apply_log=True,
                     redo_only=True,
                     ibbackup='xtrabackup',
                     user=__mysql__['root_user'],
                     password=__mysql__['root_password'])

        if incrementals:
            inc_dir = os.path.join(__mysql__['tmp_dir'],
                                   'xtrabackup-restore-inc')
            i = 0
            for inc in incrementals:
                try:
                    os.makedirs(inc_dir)
                    inc = backup.restore(inc)
                    LOG.info(
                        'Downloading incremental backup #%d (LSN: %s..%s)', i,
                        inc.from_lsn, inc.to_lsn)

                    trn = largetransfer.Download(inc.cloudfs_source)
                    trn.apply_async()

                    streamer = xbstream.args(extract=True, directory=inc_dir)
                    streamer.popen(stdin=trn.output)

                    trn.join()

                    LOG.info('Preparing incremental backup #%d', i)
                    innobackupex(__mysql__['data_dir'],
                                 apply_log=True,
                                 redo_only=True,
                                 incremental_dir=inc_dir,
                                 ibbackup='xtrabackup',
                                 user=__mysql__['root_user'],
                                 password=__mysql__['root_password'])
                    i += 1
                finally:
                    coreutils.remove(inc_dir)

        LOG.info('Preparing the full backup')
        innobackupex(__mysql__['data_dir'],
                     apply_log=True,
                     user=__mysql__['root_user'],
                     password=__mysql__['root_password'])
        coreutils.chown_r(__mysql__['data_dir'], 'mysql', 'mysql')

        self._mysql_init.start()
        if int(__mysql__['replication_master']):
            LOG.info("Master will reset it's binary logs, "
                     "so updating binary log position in backup manifest")
            log_file, log_pos = self._client().master_status()
            meta = mnf.meta
            meta.update({'log_file': log_file, 'log_pos': log_pos})
            mnf.meta = meta
            mnf.save()