Example #1
    def on_DbMsr_NewMasterUp(self, message):
        """
        Switch replication to a new master server
        @type message: scalarizr.messaging.Message
        @param message:  DbMsr_NewMasterUp
        """
        try:
            assert message.body.has_key("db_type")
            assert message.body.has_key("local_ip")
            assert message.body.has_key("remote_ip")
            assert message.body.has_key(BEHAVIOUR)

            postgresql_data = message.body[BEHAVIOUR]

            if int(__postgresql__['replication_master']):
                LOG.debug('Skip NewMasterUp. My replication role is master')
                return

            host = message.local_ip or message.remote_ip
            LOG.info("Switching replication to a new PostgreSQL master %s", host)
            bus.fire('before_postgresql_change_master', host=host)

            LOG.debug("__postgresql__['volume']: %s", __postgresql__['volume'])

            if __postgresql__['volume'].type in ('eph', 'lvm'):
                if 'restore' in postgresql_data:
                    restore = backup.restore(**postgresql_data['restore'])
                else:
                    restore = backup.restore(
                        type='snap_postgresql',
                        volume=__postgresql__['volume'],
                        snapshot=postgresql_data[OPT_SNAPSHOT_CNF])

                if __postgresql__['volume'].type == 'eph':
                    self.postgresql.service.stop('Swapping storages to reinitialize slave')

                    LOG.info('Reinitializing Slave from the new snapshot %s',
                        restore.snapshot['id'])
                    new_vol = restore.run()

                #self.postgresql.service.start()

            self.postgresql.init_slave(STORAGE_PATH, host, __postgresql__['port'], self.root_password)
            LOG.debug("Replication switched")
            bus.fire('postgresql_change_master', host=host)

            msg_data = dict(
                db_type = BEHAVIOUR,
                status = 'ok'
            )
            self.send_message(DbMsrMessages.DBMSR_NEW_MASTER_UP_RESULT, msg_data)

        except (Exception, BaseException), e:
            LOG.exception(e)

            msg_data = dict(
                db_type = BEHAVIOUR,
                status="error",
                last_error=str(e))
            self.send_message(DbMsrMessages.DBMSR_NEW_MASTER_UP_RESULT, msg_data)
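A note on the pattern: throughout these examples backup.restore() acts as a factory. The 'type' keyword selects a concrete restore class ('snap', 'snap_mysql', 'snap_postgresql', 'xtrabackup', 'mysqldump'), the remaining keywords become that object's configuration, and a single dict or ready-made restore object is accepted as well. A minimal sketch of that contract (the registry below is hypothetical, not scalarizr's actual implementation):

    # Hypothetical sketch of the dispatch behind backup.restore();
    # the real scalarizr implementation differs in detail.
    _restore_types = {}   # e.g. {'snap': SnapRestore, 'xtrabackup': XtrabackupRestore}

    def restore(*args, **kwds):
        if args:
            arg = args[0]
            if isinstance(arg, dict):
                kwds = dict(arg)    # config dict: dispatch on its 'type' key
            else:
                return arg          # already a restore object: pass through
        cls = _restore_types[kwds.pop('type')]
        return cls(**kwds)

This is why backup.restore(**mnf.meta), backup.restore(postgresql_data['restore']) and backup.restore(inc) all appear below and all yield an object exposing run().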
Example #2
    def test_run_without_volume(self, snap_factory, vol_factory):
        rst = backup.restore(
                        type='snap',
                        snapshot={'type': 'ebs', 'id': 'snap-12345678'})

        result = rst.run()

        assert result == snap_factory.return_value.restore.return_value
Example #3
    def test_run_without_volume(self, snap_factory, vol_factory):
        rst = backup.restore(type='snap',
                             snapshot={
                                 'type': 'ebs',
                                 'id': 'snap-12345678'
                             })

        result = rst.run()

        assert result == snap_factory.return_value.restore.return_value
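Both versions of this test receive snap_factory and vol_factory as extra arguments, which indicates that mock.patch decorators were trimmed from the excerpt. A hedged reconstruction of the missing harness (the patch targets are guesses inferred from the parameter names; mock.patch injects mocks bottom-up, so the innermost decorator supplies the first extra argument):

    import mock

    @mock.patch('scalarizr.storage2.volume')     # injected as vol_factory
    @mock.patch('scalarizr.storage2.snapshot')   # injected as snap_factory
    def test_run_without_volume(snap_factory, vol_factory):
        rst = backup.restore(type='snap',
                             snapshot={'type': 'ebs', 'id': 'snap-12345678'})
        assert rst.run() == snap_factory.return_value.restore.return_value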
Example #4
	def _run(self):
		client = mysql_svc.MySQLClient(
					__mysql__['root_user'],
					__mysql__['root_password'])
		self._databases = client.list_databases()
		transfer = LargeTransfer(self._gen_src, self._gen_dst, 'upload', 
								tar_it=False, chunk_size=self.chunk_size)
		transfer.run()
		return backup.restore(type='mysqldump', 
						files=transfer.result()['completed'])
Example #5
    def test_run_with_volume(self, snap_factory, vol_factory):
        rst = backup.restore(
                        type='snap',
                        volume={'type': 'ebs', 'iops': 10, 'avail_zone': 'us-east-1c'},
                        snapshot={'type': 'ebs', 'id': 'snap-12345678'})

        result = rst.run()

        vol = vol_factory.return_value
        snap = snap_factory.return_value

        assert vol.snap == snap
        vol.ensure.assert_called_with()
        assert result == vol
        assert rst.result() == vol
Example #6
    def _run(self):
        LOG.debug("Running MySQLDumpBackup")
        client = mysql_svc.MySQLClient(__mysql__['root_user'],
                                       __mysql__['root_password'])
        self._databases = client.list_databases()

        with self._run_lock:
            if self._killed:
                raise Error("Canceled")
            src_gen = self._gen_src()
            transfer_id = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
            self.transfer = largetransfer.Upload(src_gen,
                                                 self._dst,
                                                 chunk_size=self.chunk_size,
                                                 transfer_id=transfer_id)
        self.transfer.apply_async()
        self.transfer.join()
        result = self.transfer.manifest

        def log_stderr(popen):
            LOG.debug("mysqldump log_stderr communicate")
            out, err = popen.communicate()
            LOG.debug("mysqldump log_stderr communicate done")
            if err:
                LOG.debug("mysqldump (code %s) stderr for %s: %s",
                          popen.returncode, popen.db_name, err)
            return popen.db_name, popen.returncode, err

        mysqldump_results = map(log_stderr, self._popens)

        if self._killed:
            raise Error("Canceled")

        mysqldump_errors = []
        for db_name, retcode, err in mysqldump_results:
            if retcode:
                mysqldump_errors.append('%s: "%s"' % (db_name, err))
        if mysqldump_errors:
            raise Error("Mysqldump has returned a non-zero code.\n" +
                        '\n'.join(mysqldump_errors))

        parts = transfer_result_to_backup_result(result)
        return backup.restore(type='mysqldump',
                              cloudfs_source=result.cloudfs_path,
                              parts=parts,
                              description=self.description,
                              tags=self.tags)
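The _gen_src generator and the _popens list it fills are not shown in this excerpt. A sketch of what such a producer could look like: one mysqldump process per database, each Popen tagged with its database name for the error check above, and each stdout yielded as an upload source (flags and structure are illustrative, not scalarizr's actual code):

    import subprocess

    def _gen_src(self):
        for db_name in self._databases:
            popen = subprocess.Popen(
                ['mysqldump',
                 '--user=%s' % __mysql__['root_user'],
                 '--password=%s' % __mysql__['root_password'],
                 '--databases', db_name],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            popen.db_name = db_name      # consumed by log_stderr() above
            self._popens.append(popen)   # checked for non-zero exit codes
            yield popen.stdout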
Example #7
    def _run(self):
        LOG.debug("Running MySQLDumpBackup")
        client = mysql_svc.MySQLClient(__mysql__['root_user'],
                                       __mysql__['root_password'])
        self._databases = client.list_databases()

        with self._run_lock:
            if self._killed:
                raise Error("Canceled")
            self.transfer = cloudfs.LargeTransfer(self._gen_src,
                                                  self._dst,
                                                  streamer=None,
                                                  chunk_size=self.chunk_size)
        result = self.transfer.run()
        if not result:
            raise Error("Error while transfering to cloud storage")

        def log_stderr(popen):
            LOG.debug("mysqldump log_stderr communicate")
            out, err = popen.communicate()
            LOG.debug("mysqldump log_stderr communicate done")
            if err:
                LOG.debug("mysqldump (code %s) stderr for %s: %s",
                          popen.returncode, popen.db_name, err)
            return popen.db_name, popen.returncode, err

        mysqldump_results = map(log_stderr, self._popens)

        if self._killed:
            raise Error("Canceled")

        mysqldump_errors = []
        for db_name, retcode, err in mysqldump_results:
            if retcode:
                mysqldump_errors.append('%s: "%s"' % (db_name, err))
        if mysqldump_errors:
            raise Error("Mysqldump has returned a non-zero code.\n" +
                        '\n'.join(mysqldump_errors))

        parts = transfer_result_to_backup_result(result)
        return backup.restore(type='mysqldump',
                              cloudfs_source=result.cloudfs_path,
                              parts=parts,
                              description=self.description,
                              tags=self.tags)
Example #8
 def test_tmp_volume_creation_only_snapshot(self, ec2volume, ec2snapshot, *args):
     snapshot = mock.Mock(id='vol-123456ab', type='base')
     ebs = mock.Mock(
             id='vol-12345678',
             size=1,
             zone='us-east-1a',
             **{'volume_state.return_value': 'available',
                'attachment_state.return_value': 'attaching'}
     )
     ec2volume.return_value = ebs
     ec2snapshot.return_value = snapshot
     rst = backup.restore(type='xtrabackup', snapshot=snapshot)
     mock.patch.object(rst, '_mysql_init').start()
     mock.patch.object(rst, '_start_copyback').start()
     mock.patch.object(rst, '_commit_copyback').start()
     mock.patch.object(rst, '_rollback_copyback').start()
     res = rst.run()
     ec2volume.assert_called_with(snap=snapshot, type=snapshot.type)
     ebs.destroy.assert_called_once_with()
Example #9
    def _run(self):
        LOG.debug("Running MySQLDumpBackup")
        client = mysql_svc.MySQLClient(
                    __mysql__['root_user'],
                    __mysql__['root_password'])
        self._databases = client.list_databases()

        with self._run_lock:
            if self._killed:
                raise Error("Canceled")
            self.transfer = cloudfs.LargeTransfer(self._gen_src, self._dst,
                                    streamer=None, chunk_size=self.chunk_size)
        result = self.transfer.run()
        if not result:
            raise Error("Error while transfering to cloud storage")

        def log_stderr(popen):
            LOG.debug("mysqldump log_stderr communicate")
            out, err = popen.communicate()
            LOG.debug("mysqldump log_stderr communicate done")
            if err:
                LOG.debug("mysqldump (code %s) stderr for %s: %s", popen.returncode, popen.db_name, err)
            return popen.db_name, popen.returncode, err
        mysqldump_results = map(log_stderr, self._popens)

        if self._killed:
            raise Error("Canceled")

        mysqldump_errors = []
        for db_name, retcode, err in mysqldump_results:
            if retcode:
                mysqldump_errors.append('%s: "%s"' % (db_name, err))
        if mysqldump_errors:
            raise Error("Mysqldump has returned a non-zero code.\n" + 
                        '\n'.join(mysqldump_errors))

        parts = transfer_result_to_backup_result(result)
        return backup.restore(type='mysqldump', 
                cloudfs_source=result.cloudfs_path, 
                parts=parts,
                description=self.description,
                tags=self.tags)
Example #10
 def test_copyback_start_commit(self, rename, makedirs, remove, rmtree, pglob, *args):
     def glob_returns(*args):
         if '.bak' in args[0]:
             return ['/mnt/dbstorage/mysql-misc/logbin.index.bak', '/mnt/dbstorage/mysql-misc/logbin.000001.bak']
         return ['/mnt/dbstorage/mysql-misc/logbin.index', '/mnt/dbstorage/mysql-misc/logbin.000001']
     rst = backup.restore(type='xtrabackup')
     mock.patch.object(rst, '_mysql_init').start()
     rollback = mock.patch.object(rst, '_rollback_copyback').start()
     pglob.side_effect = glob_returns
     rst.run()
     assert not rollback.call_count
     rename_calls = [mock.call(rst._data_dir, rst._data_dir+'.bak'),
                     mock.call('/mnt/dbstorage/mysql-misc/logbin.index', '/mnt/dbstorage/mysql-misc/logbin.index.bak'),
                     mock.call('/mnt/dbstorage/mysql-misc/logbin.000001', '/mnt/dbstorage/mysql-misc/logbin.000001.bak')]
     rename.assert_has_calls(rename_calls)
     makedirs.assert_called_once_with(rst._data_dir)
     rmtree.assert_called_with(rst._data_dir + '.bak')
     remove_calls = [mock.call('/mnt/dbstorage/mysql-misc/logbin.index.bak'),
                     mock.call('/mnt/dbstorage/mysql-misc/logbin.000001.bak')]
     remove.assert_has_calls(remove_calls)
Example #11
    def test_run_with_volume(self, snap_factory, vol_factory):
        rst = backup.restore(type='snap',
                             volume={
                                 'type': 'ebs',
                                 'iops': 10,
                                 'avail_zone': 'us-east-1c'
                             },
                             snapshot={
                                 'type': 'ebs',
                                 'id': 'snap-12345678'
                             })

        result = rst.run()

        vol = vol_factory.return_value
        snap = snap_factory.return_value

        assert vol.snap == snap
        vol.ensure.assert_called_with()
        assert result == vol
        assert rst.result() == vol
Example #12
 def test_copyback_start_rollback(self, rename, makedirs, remove, rmtree, pglob, *args):
     def glob_returns(*args, **kwargs):
         if '.bak' in args[0]:
             return ['/mnt/dbstorage/mysql-misc/logbin.index.bak', '/mnt/dbstorage/mysql-misc/logbin.000001.bak']
         return ['/mnt/dbstorage/mysql-misc/logbin.index', '/mnt/dbstorage/mysql-misc/logbin.000001']
     rst = backup.restore(type='xtrabackup')
     mock.patch.object(rst._mysql_init, 'start', side_effect=Exception('Test')).start()
     mock.patch.object(rst, '_commit_copyback').start()
     pglob.side_effect = glob_returns
     try:
         rst.run()
     except:
         pass
     rename_calls = [mock.call(rst._data_dir, rst._data_dir+'.bak'),
                     mock.call('/mnt/dbstorage/mysql-misc/logbin.index', '/mnt/dbstorage/mysql-misc/logbin.index.bak'),
                     mock.call('/mnt/dbstorage/mysql-misc/logbin.000001', '/mnt/dbstorage/mysql-misc/logbin.000001.bak'),
                     mock.call(rst._data_dir+'.bak', rst._data_dir),
                     mock.call('/mnt/dbstorage/mysql-misc/logbin.index.bak', '/mnt/dbstorage/mysql-misc/logbin.index'),
                     mock.call('/mnt/dbstorage/mysql-misc/logbin.000001.bak', '/mnt/dbstorage/mysql-misc/logbin.000001')
                     ]
     rename.assert_has_calls(rename_calls)
     makedirs.assert_called_once_with(rst._data_dir)
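Together with the commit test in Example #10, this rollback test pins down a rename-based two-phase copy-back: start moves the live data directory and binlogs aside as *.bak, commit deletes the .bak copies, rollback renames them back into place. A sketch consistent with the asserted calls (a reconstruction, not the shipped methods):

    import glob
    import os
    import shutil

    BINLOG_GLOB = '/mnt/dbstorage/mysql-misc/logbin*'   # per the test data

    def _start_copyback(self):
        # Move live data aside so the restored files can take its place
        os.rename(self._data_dir, self._data_dir + '.bak')
        os.makedirs(self._data_dir)
        for name in glob.glob(BINLOG_GLOB):
            if not name.endswith('.bak'):
                os.rename(name, name + '.bak')

    def _commit_copyback(self):
        # Restore succeeded: drop the safety copies
        shutil.rmtree(self._data_dir + '.bak')
        for name in glob.glob(BINLOG_GLOB + '.bak'):
            os.remove(name)

    def _rollback_copyback(self):
        # Restore failed: put the original data back
        shutil.rmtree(self._data_dir)   # assumption: discard the partial copy
        os.rename(self._data_dir + '.bak', self._data_dir)
        for name in glob.glob(BINLOG_GLOB + '.bak'):
            os.rename(name, name[:-len('.bak')])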
Example #13
    def test_run(self, innobackupex, chown_r, *args):
        rst = backup.restore(type='xtrabackup')
        mock.patch.object(rst, '_mysql_init').start()
        mock.patch.object(rst, '_start_copyback').start()
        mock.patch.object(rst, '_commit_copyback').start()
        mock.patch.object(rst, '_rollback_copyback').start()
        rst.run()
 
        rst._mysql_init.stop.assert_called_with()
        calls = innobackupex.call_args_list
        # Prepare base
        assert calls[0] == ((os.path.join(rst.backup_dir, '2012-09-15_18-06'), ),
                        dict(apply_log=True, redo_only=True, user=mock.ANY, password=mock.ANY))
        # Prepare inc
        assert calls[1] == ((os.path.join(rst.backup_dir, '2012-09-15_18-06'), ),
                        dict(incremental_dir=os.path.join(rst.backup_dir, '2012-09-16_11-54'),
                                apply_log=True, redo_only=True,
                                user=mock.ANY, password=mock.ANY))
        # Prepare full
        assert calls[2] == ((os.path.join(rst.backup_dir, '2012-09-15_18-06'), ),
                        dict(apply_log=True, user=mock.ANY, password=mock.ANY))
        chown_r.assert_called_with(rst._data_dir, 'mysql', 'mysql')
        rst._mysql_init.start.assert_called_with()
Example #14
    def on_host_init_response(self, message):
        """
        Check postgresql data in host init response
        @type message: scalarizr.messaging.Message
        @param message: HostInitResponse
        """
        log = bus.init_op.logger
        log.info('Accept Scalr configuration')

        if not message.body.has_key(BEHAVIOUR) or message.db_type != BEHAVIOUR:
            raise HandlerError(
                "HostInitResponse message for PostgreSQL behaviour must have 'postgresql' property and db_type 'postgresql'"
            )

        postgresql_data = message.postgresql.copy()

        #Extracting service configuration preset from message
        if 'preset' in postgresql_data:
            self.initial_preset = postgresql_data['preset']
            LOG.debug('Scalr sent current preset: %s' % self.initial_preset)
            del postgresql_data['preset']

        #Extracting or generating postgresql root password
        postgresql_data['%s_password' % ROOT_USER] = postgresql_data.get(
            OPT_ROOT_PASSWORD) or cryptotool.pwgen(10)
        del postgresql_data[OPT_ROOT_PASSWORD]

        #Extracting replication ssh keys from message
        root = PgUser(ROOT_USER, self.postgresql.pg_keys_dir)
        root.store_keys(postgresql_data[OPT_ROOT_SSH_PUBLIC_KEY],
                        postgresql_data[OPT_ROOT_SSH_PRIVATE_KEY])
        del postgresql_data[OPT_ROOT_SSH_PUBLIC_KEY]
        del postgresql_data[OPT_ROOT_SSH_PRIVATE_KEY]

        if postgresql_data.get('volume'):
            # New format
            postgresql_data['compat_prior_backup_restore'] = False
            postgresql_data['volume'] = storage2.volume(
                postgresql_data['volume'])

            LOG.debug("message.pg['volume']: %s", postgresql_data['volume'])
            if 'backup' in postgresql_data:
                postgresql_data['backup'] = backup.backup(
                    postgresql_data['backup'])
                LOG.debug("message.pg['backup']: %s",
                          postgresql_data['backup'])
            if 'restore' in postgresql_data:
                postgresql_data['restore'] = backup.restore(
                    postgresql_data['restore'])
                LOG.debug("message.pg['restore']: %s",
                          postgresql_data['restore'])
        else:

            # Compatibility transformation
            # - volume_config -> volume
            # - master n'th start, type=ebs - del snapshot_config
            # - snapshot_config -> restore
            # - create backup object on master 1'st start

            postgresql_data['compat_prior_backup_restore'] = True
            if postgresql_data.get(OPT_VOLUME_CNF):
                postgresql_data['volume'] = storage2.volume(
                    postgresql_data.pop(OPT_VOLUME_CNF))

            elif postgresql_data.get(OPT_SNAPSHOT_CNF):
                postgresql_data['volume'] = storage2.volume(
                    type=postgresql_data[OPT_SNAPSHOT_CNF]['type'])

            else:
                raise HandlerError(
                    'No volume config or snapshot config provided')

            if postgresql_data['volume'].device and \
                            postgresql_data['volume'].type in ('ebs', 'csvol', 'cinder', 'raid', 'gce_persistent'):
                LOG.debug(
                    "Master n'th start detected. Removing snapshot config from message"
                )
                postgresql_data.pop(OPT_SNAPSHOT_CNF, None)

            if postgresql_data.get(OPT_SNAPSHOT_CNF):
                postgresql_data['restore'] = backup.restore(
                    type='snap_postgresql',
                    snapshot=postgresql_data.pop(OPT_SNAPSHOT_CNF),
                    volume=postgresql_data['volume'])

            if int(postgresql_data['replication_master']):
                postgresql_data['backup'] = backup.backup(
                    type='snap_postgresql', volume=postgresql_data['volume'])

        self._hir_volume_growth = postgresql_data.pop('volume_growth', None)

        LOG.debug("Update postgresql config with %s", postgresql_data)
        __postgresql__.update(postgresql_data)
        __postgresql__['volume'].mpoint = __postgresql__['storage_dir']
Example #15
    def on_host_init_response(self, message):
        """
        Check redis data in host init response
        @type message: scalarizr.messaging.Message
        @param message: HostInitResponse
        """
        with bus.initialization_op as op:
            with op.phase(self._phase_redis):
                with op.step(self._step_accept_scalr_conf):

                    if not message.body.has_key(BEHAVIOUR) or message.db_type != BEHAVIOUR:
                        raise HandlerError("HostInitResponse message for %s behaviour must have '%s' property and db_type '%s'"
                                           % (BEHAVIOUR, BEHAVIOUR, BEHAVIOUR))

                    config_dir = os.path.dirname(self._volume_config_path)
                    if not os.path.exists(config_dir):
                        os.makedirs(config_dir)

                    redis_data = message.redis.copy()
                    LOG.info('Got Redis part of HostInitResponse: %s' % redis_data)

                    if 'preset' in redis_data:
                        self.initial_preset = redis_data['preset']
                        del redis_data['preset']
                        LOG.debug('Scalr sent current preset: %s' % self.initial_preset)


                    '''
                    XXX: the following line enables support for old Scalr installations;
                    use_password should be set by the postinstall script for old servers
                    '''
                    redis_data[OPT_USE_PASSWORD] = redis_data.get(OPT_USE_PASSWORD, '1')

                    ports = []
                    passwords = []
                    num_processes = 1

                    if 'ports' in redis_data and redis_data['ports']:
                        ports = map(int, redis_data['ports'])
                        del redis_data['ports']

                    if 'passwords' in redis_data and redis_data['passwords']:
                        passwords = redis_data['passwords']
                        del redis_data['passwords']

                    if 'num_processes' in redis_data and redis_data['num_processes']:
                        num_processes = int(redis_data['num_processes'])
                        del redis_data['num_processes']

                    redis_data['volume'] = storage2.volume(
                                    redis_data.pop('volume_config'))

                    if redis_data['volume'].device and \
                                            redis_data['volume'].type in ('ebs', 'csvol', 'cinder', 'raid'):
                        redis_data.pop('snapshot_config', None)

                    if redis_data.get('snapshot_config'):
                        redis_data['restore'] = backup.restore(
                                type='snap_redis',
                                snapshot=redis_data.pop('snapshot_config'),
                                volume=redis_data['volume'])

                    # Update configs
                    __redis__.update(redis_data)
                    __redis__['volume'].mpoint = __redis__['storage_dir']
                    __redis__['volume'].tags = self.redis_tags
                    if self.default_service.running:
                        self.default_service.stop('Terminating default redis instance')

                    self.redis_instances = redis.RedisInstances(self.is_replication_master, self.persistence_type, self.use_passwords)
                    ports = ports or [__redis__['defaults']['port'],]
                    passwords = passwords or [self.get_main_password(),]
                    self.redis_instances.init_processes(num_processes, ports=ports, passwords=passwords)

                    if self.use_passwords:
                        self.security_off()
Example #16
    def _run(self):
        # Apply resource's meta
        mnf = cloudfs.Manifest(cloudfs_path=self.cloudfs_source)
        bak = backup.restore(**mnf.meta)

        incrementals = []
        if bak.backup_type == "incremental":
            incrementals = [bak]
            while bak.prev_cloudfs_source:
                tmpmnf = cloudfs.Manifest(cloudfs_path=bak.prev_cloudfs_source)
                bak = backup.restore(**tmpmnf.meta)
                if bak.backup_type == "incremental":
                    incrementals.insert(0, bak)
        self.incrementals = incrementals
        if self.incrementals:
            self.log_file = self.incrementals[-1].log_file
            self.log_pos = self.incrementals[-1].log_pos
        else:
            self.log_file = bak.log_file
            self.log_pos = bak.log_pos

        coreutils.clean_dir(__mysql__["data_dir"])

        LOG.info("Downloading the base backup (LSN: 0..%s)", bak.to_lsn)
        trn = cloudfs.LargeTransfer(
            bak.cloudfs_source,
            __mysql__["data_dir"],
            streamer=xbstream.args(extract=True, directory=__mysql__["data_dir"]),
        )
        trn.run()

        LOG.info("Preparing the base backup")
        innobackupex(
            __mysql__["data_dir"],
            apply_log=True,
            redo_only=True,
            ibbackup="xtrabackup",
            user=__mysql__["root_user"],
            password=__mysql__["root_password"],
        )

        if incrementals:
            inc_dir = os.path.join(__mysql__["tmp_dir"], "xtrabackup-restore-inc")
            i = 0
            for inc in incrementals:
                try:
                    os.makedirs(inc_dir)
                    inc = backup.restore(inc)
                    LOG.info("Downloading incremental backup #%d (LSN: %s..%s)", i, inc.from_lsn, inc.to_lsn)
                    trn = cloudfs.LargeTransfer(
                        inc.cloudfs_source, inc_dir, streamer=xbstream.args(extract=True, directory=inc_dir)
                    )

                    trn.run()  # todo: Largetransfer should support custom decompressor proc
                    LOG.info("Preparing incremental backup #%d", i)
                    innobackupex(
                        __mysql__["data_dir"],
                        apply_log=True,
                        redo_only=True,
                        incremental_dir=inc_dir,
                        ibbackup="xtrabackup",
                        user=__mysql__["root_user"],
                        password=__mysql__["root_password"],
                    )
                    i += 1
                finally:
                    coreutils.remove(inc_dir)

        LOG.info("Preparing the full backup")
        innobackupex(
            __mysql__["data_dir"], apply_log=True, user=__mysql__["root_user"], password=__mysql__["root_password"]
        )
        coreutils.chown_r(__mysql__["data_dir"], "mysql", "mysql")

        self._mysql_init.start()
        if int(__mysql__["replication_master"]):
            LOG.info("Master will reset it's binary logs, " "so updating binary log position in backup manifest")
            log_file, log_pos = self._client().master_status()
            meta = mnf.meta
            meta.update({"log_file": log_file, "log_pos": log_pos})
            mnf.meta = meta
            mnf.save()
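The chain walk at the top of _run() is worth restating: starting from the newest manifest, prev_cloudfs_source links are followed back until a full backup is reached, collecting incrementals oldest-first so they can be prepared in order. The same logic as a standalone helper (for clarity only; this is not a scalarizr API):

    def incremental_chain(manifest_path):
        # Returns (base_restore, [incremental restores, oldest first])
        bak = backup.restore(**cloudfs.Manifest(cloudfs_path=manifest_path).meta)
        incrementals = []
        while bak.backup_type == 'incremental':
            incrementals.insert(0, bak)
            if not bak.prev_cloudfs_source:
                break
            bak = backup.restore(
                **cloudfs.Manifest(cloudfs_path=bak.prev_cloudfs_source).meta)
        return bak, incrementals

The prepare order then follows XtraBackup semantics: the base and every incremental are applied with redo_only=True so uncommitted transactions are not rolled back prematurely, and the final innobackupex call without redo_only completes the rollback phase.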
Example #17
    def _run(self):
        self._check_backup_type()

        kwds = {
            "stream": "xbstream",
            # Compression is broken
            #'compress': True,
            #'compress_threads': os.sysconf('SC_NPROCESSORS_ONLN'),
            "ibbackup": "xtrabackup",
            "user": __mysql__["root_user"],
            "password": __mysql__["root_password"],
        }
        if self.no_lock:
            kwds["no_lock"] = True
        if not int(__mysql__["replication_master"]):
            kwds["safe_slave_backup"] = True
            kwds["slave_info"] = True

        current_lsn = None
        if self.backup_type == "auto":
            client = self._client()
            innodb_stat = client.fetchone("SHOW INNODB STATUS")[0]
            for line in innodb_stat.splitlines():
                m = self._re_lsn_innodb_stat.search(line)
                if m:
                    current_lsn = int(m.group(1))
                    break

        if self.backup_type in ("incremental", "auto"):
            if self.prev_cloudfs_source:
                # Download manifest and get its to_lsn
                mnf = cloudfs.Manifest(cloudfs_path=self.prev_cloudfs_source)
                self.from_lsn = mnf.meta["to_lsn"]
            else:
                self._check_attr("from_lsn")
            if self.backup_type == "incremental" or (
                self.backup_type == "auto" and current_lsn and current_lsn >= self.from_lsn
            ):
                kwds.update({"incremental": True, "incremental_lsn": self.from_lsn})
        LOG.debug("self._config: %s", self._config)
        LOG.debug("kwds: %s", kwds)

        if self.backup_type == "incremental":
            LOG.info("Creating incremental xtrabackup (from LSN: %s)", self.from_lsn)
        else:
            LOG.info("Creating full xtrabackup")

        with self._xbak_init_lock:
            if self._killed:
                raise Error("Canceled")
            self._xbak = innobackupex.args(__mysql__["tmp_dir"], **kwds).popen()
            LOG.debug("Creating LargeTransfer, src=%s dst=%s", self._xbak.stdout, self.cloudfs_target)
            self._transfer = cloudfs.LargeTransfer([self._xbak.stdout], self.cloudfs_target, compressor=self.compressor)

        stderr_thread, stderr = cloudfs.readfp_thread(self._xbak.stderr)

        manifesto = self._transfer.run()
        if self._killed:
            raise Error("Canceled")
        stderr_thread.join()
        self._xbak.wait()
        stderr = stderr[0] if stderr else ""
        if self._xbak.returncode:
            raise Error(stderr)

        with self._xbak_init_lock:
            self._xbak = None
            self._transfer = None

        log_file = log_pos = to_lsn = None
        re_binlog = self._re_binlog if int(__mysql__["replication_master"]) else self._re_slave_binlog
        for line in stderr.splitlines():
            m = self._re_lsn.search(line) or self._re_lsn_51.search(line)
            if m:
                to_lsn = m.group(1)
                continue
            m = re_binlog.search(line)
            if m:
                log_file = m.group(1)
                log_pos = int(m.group(2))
                continue
            if log_file and log_pos and to_lsn:
                break

        rst = backup.restore(
            type="xtrabackup",
            backup_type=self.backup_type,
            from_lsn=self.from_lsn,
            to_lsn=to_lsn,
            cloudfs_source=manifesto.cloudfs_path,
            prev_cloudfs_source=self.prev_cloudfs_source,
            log_file=log_file,
            log_pos=log_pos,
        )

        # Update manifest
        LOG.debug("rst: %s", dict(rst))
        manifesto.meta = dict(rst)
        manifesto.save()

        LOG.info(
            "Created %s xtrabackup. (LSN: %s..%s, log_file: %s, log_pos: %s)",
            rst.backup_type,
            rst.from_lsn,
            rst.to_lsn,
            rst.log_file,
            rst.log_pos,
        )

        return rst
Example #18
	def _run(self):
		self._check_backup_type()
		if self.volume:
			self.volume = storage2.volume(self.volume)
			if self.tags:
				self.volume.tags = self.tags
			self.volume.mpoint = self.backup_dir
			self.volume.ensure(mount=True, mkfs=True)
		elif not os.path.exists(self.backup_dir):
			os.makedirs(self.backup_dir)

		kwds = {}
		if self.backup_type == 'incremental':
			from_lsn = self.from_lsn
			if not from_lsn:
				checkpoints = self._checkpoints()
				from_lsn = checkpoints['to_lsn']
			kwds.update({
				'incremental': True,
				'incremental_lsn': from_lsn
			})
		elif 'full' == self.backup_type and self.volume:
			coreutils.clean_dir(self.backup_dir)

		exc_info = None
		try:
			LOG.info('Creating %s xtrabackup', self.backup_type)
			innobackupex(self.backup_dir, 
					user=__mysql__['root_user'], 
					password=__mysql__['root_password'],
					**kwds)
			log_file, log_pos = self._binlog_info()
			chkpoints = self._checkpoints()
			to_lsn = chkpoints['to_lsn']
			from_lsn = chkpoints['from_lsn']
			snapshot = None
		except:
			exc_info = sys.exc_info()
		finally:
			if self.volume:
				try:
					self.volume.detach()
				except:
					msg = 'Failed to detach backup volume: %s'
					LOG.warn(msg, sys.exc_info()[1])
		if exc_info:
			raise exc_info[0], exc_info[1], exc_info[2]
		if self.volume:
			snapshot = self.volume.snapshot(
						self.description or 'MySQL xtrabackup', 
						self.tags)

		return backup.restore(
				type='xtrabackup', 
				log_file=log_file, 
				log_pos=log_pos,
				from_lsn=from_lsn,
				to_lsn=to_lsn,
				backup_type=self.backup_type,
				backup_dir=self.backup_dir,
				volume=self.volume.clone(),				 
				snapshot=snapshot)
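_checkpoints() is not shown here. XtraBackup writes a small xtrabackup_checkpoints file (key = value lines carrying backup_type, from_lsn and to_lsn) into the backup directory, so a plausible reading looks like this (a sketch assuming the standard file layout; the real method may locate the file differently):

    import os

    def _checkpoints(self):
        # Parse 'key = value' lines from xtrabackup_checkpoints, e.g.
        #   backup_type = full-backuped
        #   from_lsn = 0
        #   to_lsn = 1589232
        ret = {}
        path = os.path.join(self.backup_dir, 'xtrabackup_checkpoints')
        with open(path) as fp:
            for line in fp:
                if '=' in line:
                    key, value = line.split('=', 1)
                    ret[key.strip()] = value.strip()
        return ret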
Example #19
	def on_DbMsr_NewMasterUp(self, message):

		assert message.body.has_key("db_type")
		assert message.body.has_key("local_ip")
		assert message.body.has_key("remote_ip")
		assert message.body.has_key(__mysql__['behavior'])
	
		mysql2 = message.body[__mysql__['behavior']]
		
		if int(__mysql__['replication_master']):
			LOG.debug('Skip NewMasterUp. My replication role is master')
			return
		
		host = message.local_ip or message.remote_ip
		LOG.info("Switching replication to a new MySQL master %s", host)
		bus.fire('before_mysql_change_master', host=host)			

		LOG.debug("__mysql__['volume']: %s", __mysql__['volume'])
		
		if __mysql__['volume'].type in ('eph', 'lvm'):
			if 'restore' in mysql2:
				restore = backup.restore(**mysql2['restore'])
			else:
				# snap_mysql restore should update MySQL volume, and delete old one
				restore = backup.restore(
							type='snap_mysql',
							log_file=mysql2['log_file'],
							log_pos=mysql2['log_pos'],
							volume=__mysql__['volume'],
							snapshot=mysql2['snapshot_config'])
			# XXX: ugly
			if __mysql__['volume'].type == 'eph':
				self.mysql.service.stop('Swapping storages to reinitialize slave')
				'''
				LOG.info('Reinitializing Slave from the new snapshot %s (log_file: %s log_pos: %s)', 
						restore.snapshot['id'], restore.log_file, restore.log_pos)
				new_vol = restore.run()
				self.mysql.service.stop('Swapping storages to reinitialize slave')
			
				LOG.debug('Destroying old storage')
				vol = storage.volume(**__mysql__['volume'])
				vol.destroy(remove_disks=True)
				LOG.debug('Storage destroyed')

				'''
			log_file = restore.log_file
			log_pos = restore.log_pos
			restore.run()
			
			'''
			LOG.debug('Plugging new storage')
			vol = Storage.create(snapshot=snap_config.copy(), tags=self.mysql_tags)
			self._plug_storage(STORAGE_PATH, vol)
			LOG.debug('Storage plugged')

			Storage.backup_config(vol.config(), self._volume_config_path)
			Storage.backup_config(snap_config, self._snapshot_config_path)
			self.storage_vol = vol
			'''
			
			self.mysql.service.start()
		else:
			LOG.debug("Stopping slave i/o thread")
			self.root_client.stop_slave_io_thread()
			LOG.debug("Slave i/o thread stopped")
			
			LOG.debug("Retrieving current log_file and log_pos")
			status = self.root_client.slave_status()
			log_file = status['Master_Log_File']
			log_pos = status['Read_Master_Log_Pos']
			LOG.debug("Retrieved log_file=%s, log_pos=%s", log_file, log_pos)


		self._change_master(
			host=host, 
			user=__mysql__['repl_user'], 
			password=mysql2['repl_password'],
			log_file=log_file, 
			log_pos=log_pos
		)
			
		LOG.debug("Replication switched")
		bus.fire('mysql_change_master', host=host, log_file=log_file, log_pos=log_pos)
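_change_master() is not shown in this excerpt; on the slave it ultimately comes down to CHANGE MASTER TO plus START SLAVE, issued with the coordinates gathered above. A sketch of the underlying SQL (assuming a simple execute-style client; the real helper adds retry and verification logic):

    def _change_master(self, host, user, password, log_file, log_pos):
        self.root_client.execute("STOP SLAVE")
        self.root_client.execute(
            "CHANGE MASTER TO "
            "MASTER_HOST = '%s', MASTER_USER = '%s', MASTER_PASSWORD = '%s', "
            "MASTER_LOG_FILE = '%s', MASTER_LOG_POS = %s"
            % (host, user, password, log_file, log_pos))
        self.root_client.execute("START SLAVE")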
Example #20
    def _run(self):
        self._check_backup_type()

        kwds = {
            'stream': 'xbstream',
            # Compression is broken
            #'compress': True,
            #'compress_threads': os.sysconf('SC_NPROCESSORS_ONLN'),
            'ibbackup': 'xtrabackup',
            'user': __mysql__['root_user'],
            'password': __mysql__['root_password']
        }
        if self.no_lock:
            kwds['no_lock'] = True
        if not int(__mysql__['replication_master']):
            kwds['safe_slave_backup'] = True
            kwds['slave_info'] = True

        current_lsn = None
        if self.backup_type == 'auto':
            client = self._client()
            innodb_stat = client.fetchone('SHOW INNODB STATUS')[0]
            for line in innodb_stat.splitlines():
                m = self._re_lsn_innodb_stat.search(line)
                if m:
                    current_lsn = int(m.group(1))
                    break

        if self.backup_type in ('incremental', 'auto'):
            if self.prev_cloudfs_source:
                # Download manifest and get its to_lsn
                mnf = cloudfs.Manifest(cloudfs_path=self.prev_cloudfs_source)
                self.from_lsn = mnf.meta['to_lsn']
            else:
                self._check_attr('from_lsn')
            if self.backup_type == 'incremental' or \
                (self.backup_type == 'auto' and current_lsn and current_lsn >= self.from_lsn):
                kwds.update({
                    'incremental': True,
                    'incremental_lsn': self.from_lsn
                })
        LOG.debug('self._config: %s', self._config)
        LOG.debug('kwds: %s', kwds)

        if self.backup_type == 'incremental':
            LOG.info('Creating incremental xtrabackup (from LSN: %s)', self.from_lsn)
        else:
            LOG.info('Creating full xtrabackup')

        with self._xbak_init_lock:
            if self._killed:
                raise Error("Canceled")
            self._xbak = innobackupex.args(__mysql__['tmp_dir'], **kwds).popen()
            LOG.debug('Creating LargeTransfer, src=%s dst=%s', self._xbak.stdout,
                self.cloudfs_target)
            self._transfer = cloudfs.LargeTransfer(
                        [self._xbak.stdout],
                        self.cloudfs_target,
                        compressor=self.compressor)

        stderr_thread, stderr = cloudfs.readfp_thread(self._xbak.stderr)

        manifesto = self._transfer.run()
        if self._killed:
            raise Error("Canceled")
        stderr_thread.join()
        self._xbak.wait()
        stderr = stderr[0] if stderr else ''
        if self._xbak.returncode:
            raise Error(stderr)

        with self._xbak_init_lock:
            self._xbak = None
            self._transfer = None

        log_file = log_pos = to_lsn = None
        re_binlog = self._re_binlog \
                    if int(__mysql__['replication_master']) else \
                    self._re_slave_binlog
        for line in stderr.splitlines():
            m = self._re_lsn.search(line) or self._re_lsn_51.search(line)
            if m:
                to_lsn = m.group(1)
                continue
            m = re_binlog.search(line)
            if m:
                log_file = m.group(1)
                log_pos = int(m.group(2))
                continue
            if log_file and log_pos and to_lsn:
                break

        rst = backup.restore(type='xtrabackup',
                backup_type=self.backup_type,
                from_lsn=self.from_lsn,
                to_lsn=to_lsn,
                cloudfs_source=manifesto.cloudfs_path,
                prev_cloudfs_source=self.prev_cloudfs_source,
                log_file=log_file,
                log_pos=log_pos)

        # Update manifest
        LOG.debug('rst: %s', dict(rst))
        manifesto.meta = dict(rst)
        manifesto.save()

        LOG.info('Created %s xtrabackup. (LSN: %s..%s, log_file: %s, log_pos: %s)',
                rst.backup_type, rst.from_lsn, rst.to_lsn, rst.log_file, rst.log_pos)

        return rst
Example #21
    def _run(self):
        self._check_backup_type()

        kwds = {
            'stream': 'xbstream',
            # Compression is broken
            #'compress': True,
            #'compress_threads': os.sysconf('SC_NPROCESSORS_ONLN'),
            'ibbackup': 'xtrabackup',
            'user': __mysql__['root_user'],
            'password': __mysql__['root_password']
        }
        if self.no_lock:
            kwds['no_lock'] = True
        if not int(__mysql__['replication_master']):
            kwds['safe_slave_backup'] = True
            kwds['slave_info'] = True

        current_lsn = None
        if self.backup_type == 'auto':
            client = self._client()
            innodb_stat = client.fetchone('SHOW ENGINE INNODB STATUS')[0]
            #innodb_stat = client.fetchone('SHOW INNODB STATUS')[0]
            for line in innodb_stat.splitlines():
                m = self._re_lsn_innodb_stat.search(line)
                if m:
                    current_lsn = int(m.group(1))
                    break

        if self.backup_type in ('incremental', 'auto'):
            if self.prev_cloudfs_source:
                # Download manifest and get its to_lsn
                mnf = cloudfs.Manifest(cloudfs_path=self.prev_cloudfs_source)
                self.from_lsn = mnf.meta['to_lsn']
            else:
                self._check_attr('from_lsn')
            if self.backup_type == 'incremental' or \
                (self.backup_type == 'auto' and current_lsn and current_lsn >= self.from_lsn):
                kwds.update({
                    'incremental': True,
                    'incremental_lsn': self.from_lsn
                })
        LOG.debug('self._config: %s', self._config)
        LOG.debug('kwds: %s', kwds)

        if self.backup_type == 'incremental':
            LOG.info('Creating incremental xtrabackup (from LSN: %s)',
                     self.from_lsn)
        else:
            LOG.info('Creating full xtrabackup')

        with self._xbak_init_lock:
            if self._killed:
                raise Error("Canceled")
            self._xbak = innobackupex.args(__mysql__['tmp_dir'],
                                           **kwds).popen()
            gzip = self.compressor == 'gzip'
            transfer_id = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
            self._transfer = largetransfer.Upload(self._xbak.stdout,
                                                  self.cloudfs_target,
                                                  gzip=gzip,
                                                  transfer_id=transfer_id)

        stderr_thread, stderr = cloudfs.readfp_thread(self._xbak.stderr)

        self._transfer.apply_async()
        self._transfer.join()
        manifesto = self._transfer.manifest

        if self._killed:
            raise Error("Canceled")
        stderr_thread.join()
        self._xbak.wait()
        stderr = stderr[0] if stderr else ''
        if self._xbak.returncode:
            raise Error(stderr)

        with self._xbak_init_lock:
            self._xbak = None
            self._transfer = None

        log_file = log_pos = to_lsn = None
        re_binlog = self._re_binlog \
                    if int(__mysql__['replication_master']) else \
                    self._re_slave_binlog
        for line in stderr.splitlines():
            m = self._re_lsn.search(line) or self._re_lsn_51.search(line)
            if m:
                to_lsn = m.group(1)
                continue
            m = re_binlog.search(line)
            if m:
                log_file = m.group(1)
                log_pos = int(m.group(2))
                continue
            if log_file and log_pos and to_lsn:
                break

        rst = backup.restore(type='xtrabackup',
                             backup_type=self.backup_type,
                             from_lsn=self.from_lsn,
                             to_lsn=to_lsn,
                             cloudfs_source=manifesto.cloudfs_path,
                             prev_cloudfs_source=self.prev_cloudfs_source,
                             log_file=log_file,
                             log_pos=log_pos)

        # Update manifest
        LOG.debug('rst: %s', dict(rst))
        manifesto.meta = dict(rst)
        manifesto.save()

        LOG.info(
            'Created %s xtrabackup. (LSN: %s..%s, log_file: %s, log_pos: %s)',
            rst.backup_type, rst.from_lsn, rst.to_lsn, rst.log_file,
            rst.log_pos)

        return rst
Example #22
    def _run(self):
        # Apply resource's meta
        mnf = cloudfs.Manifest(cloudfs_path=self.cloudfs_source)
        bak = backup.restore(**mnf.meta)

        incrementals = []
        if bak.backup_type == 'incremental':
            incrementals = [bak]
            while bak.prev_cloudfs_source:
                tmpmnf = cloudfs.Manifest(cloudfs_path=bak.prev_cloudfs_source)
                bak = backup.restore(**tmpmnf.meta)
                if bak.backup_type == 'incremental':
                    incrementals.insert(0, bak)
        self.incrementals = incrementals
        if self.incrementals:
            self.log_file = self.incrementals[-1].log_file
            self.log_pos = self.incrementals[-1].log_pos
        else:
            self.log_file = bak.log_file
            self.log_pos = bak.log_pos

        coreutils.clean_dir(__mysql__['data_dir'])

        LOG.info('Downloading the base backup (LSN: 0..%s)', bak.to_lsn)

        trn = largetransfer.Download(bak.cloudfs_source)
        trn.apply_async()

        streamer = xbstream.args(extract=True, directory=__mysql__['data_dir'])
        streamer.popen(stdin=trn.output)

        trn.join()

        LOG.info('Preparing the base backup')
        innobackupex(__mysql__['data_dir'],
                     apply_log=True,
                     redo_only=True,
                     ibbackup='xtrabackup',
                     user=__mysql__['root_user'],
                     password=__mysql__['root_password'])

        if incrementals:
            inc_dir = os.path.join(__mysql__['tmp_dir'],
                                   'xtrabackup-restore-inc')
            i = 0
            for inc in incrementals:
                try:
                    os.makedirs(inc_dir)
                    inc = backup.restore(inc)
                    LOG.info(
                        'Downloading incremental backup #%d (LSN: %s..%s)', i,
                        inc.from_lsn, inc.to_lsn)

                    trn = largetransfer.Download(inc.cloudfs_source)
                    trn.apply_async()

                    streamer = xbstream.args(extract=True, directory=inc_dir)
                    streamer.popen(stdin=trn.output)

                    trn.join()

                    LOG.info('Preparing incremental backup #%d', i)
                    innobackupex(__mysql__['data_dir'],
                                 apply_log=True,
                                 redo_only=True,
                                 incremental_dir=inc_dir,
                                 ibbackup='xtrabackup',
                                 user=__mysql__['root_user'],
                                 password=__mysql__['root_password'])
                    i += 1
                finally:
                    coreutils.remove(inc_dir)

        LOG.info('Preparing the full backup')
        innobackupex(__mysql__['data_dir'],
                     apply_log=True,
                     user=__mysql__['root_user'],
                     password=__mysql__['root_password'])
        coreutils.chown_r(__mysql__['data_dir'], 'mysql', 'mysql')

        self._mysql_init.start()
        if int(__mysql__['replication_master']):
            LOG.info("Master will reset it's binary logs, "
                     "so updating binary log position in backup manifest")
            log_file, log_pos = self._client().master_status()
            meta = mnf.meta
            meta.update({'log_file': log_file, 'log_pos': log_pos})
            mnf.meta = meta
            mnf.save()
Example #23
def when_i_restore_full_backup(step, key):
    rst = backup.restore(world.restore[key])
    rst.run()
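This is a lettuce step: world.restore[key] must have been stashed by an earlier step, since every backup's _run() in this collection returns a ready-to-use restore object. A hypothetical companion step (the step pattern and arguments are made up):

    from lettuce import step, world

    @step(r"i make a full backup '(\w+)'")
    def when_i_make_full_backup(step, key):
        bak = backup.backup(type='xtrabackup',
                            backup_dir='/mnt/backups')   # illustrative args
        world.restore[key] = bak.run()   # run() returns a restore object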
Example #24
    def on_host_init_response(self, message):
        """
        Check mysql data in host init response
        @type message: scalarizr.messaging.Message
        @param message: HostInitResponse
        """
        LOG.debug("on_host_init_response")

        log = bus.init_op.logger
        log.info('Accept Scalr configuration')
        if not message.body.has_key(__mysql__['behavior']):
            msg = "HostInitResponse message for MySQL behavior " \
                            "must have '%s' property" % __mysql__['behavior']
            raise HandlerError(msg)


        # Apply MySQL data from HIR
        md = getattr(message, __mysql__['behavior']).copy()

        if 'preset' in md:
            self.initial_preset = md['preset']
            del md['preset']
            LOG.debug('Scalr sent current preset: %s' % self.initial_preset)

        md['compat_prior_backup_restore'] = False
        if md.get('volume'):
            # New format
            md['volume'] = storage2.volume(md['volume'])
            if 'backup' in md:
                md['backup'] = backup.backup(md['backup'])
            if 'restore' in md:
                md['restore'] = backup.restore(md['restore'])

        else:

            # Compatibility transformation
            # - volume_config -> volume
            # - master n'th start, type=ebs - del snapshot_config
            # - snapshot_config + log_file + log_pos -> restore
            # - create backup on master 1'st start

            md['compat_prior_backup_restore'] = True
            if md.get('volume_config'):
                md['volume'] = storage2.volume(
                                md.pop('volume_config'))
            else:
                md['volume'] = storage2.volume(
                                type=md['snapshot_config']['type'])

            # Initialized persistent disk have latest data.
            # Next statement prevents restore from snapshot
            if md['volume'].device and \
                                    md['volume'].type in ('ebs', 'csvol', 'cinder', 'raid'):
                md.pop('snapshot_config', None)

            if md.get('snapshot_config'):
                md['restore'] = backup.restore(
                                type='snap_mysql',
                                snapshot=md.pop('snapshot_config'),
                                volume=md['volume'],
                                log_file=md.pop('log_file'),
                                log_pos=md.pop('log_pos'))
            elif int(md['replication_master']) and \
                                    not md['volume'].device:
                md['backup'] = backup.backup(
                                type='snap_mysql',
                                volume=md['volume'])

        __mysql__.update(md)

        LOG.debug('__mysql__: %s', md)
        LOG.debug('volume in __mysql__: %s', 'volume' in __mysql__)
        LOG.debug('restore in __mysql__: %s', 'restore' in __mysql__)
        LOG.debug('backup in __mysql__: %s', 'backup' in __mysql__)

        __mysql__['volume'].mpoint = __mysql__['storage_dir']
        __mysql__['volume'].tags = self.resource_tags()
        if 'backup' in __mysql__:
            __mysql__['backup'].tags = self.resource_tags()
            __mysql__['backup'].description = self._data_bundle_description()
Example #25
    def setup(self):
        self.bak = backup.backup(type='snap_mysql')
        mock.patch.object(self.bak, '_client').start()
        self.bak._client.return_value.master_status.return_value = ('binlog.000003', '107')
 
        self.rst = backup.restore(type='snap_mysql')
Example #26
    def on_host_init_response(self, message):
        """
        Check postgresql data in host init response
        @type message: scalarizr.messaging.Message
        @param message: HostInitResponse
        """
        
        with bus.initialization_op as op:
            with op.phase(self._phase_postgresql):
                with op.step(self._step_accept_scalr_conf):
        
                    if not message.body.has_key(BEHAVIOUR) or message.db_type != BEHAVIOUR:
                        raise HandlerError("HostInitResponse message for PostgreSQL behaviour must have 'postgresql' property and db_type 'postgresql'")

                    postgresql_data = message.postgresql.copy()

                    #Extracting service configuration preset from message
                    if 'preset' in postgresql_data:
                        self.initial_preset = postgresql_data['preset']
                        LOG.debug('Scalr sent current preset: %s' % self.initial_preset)
                        del postgresql_data['preset']

                    #Extracting or generating postgresql root password
                    postgresql_data['%s_password' % ROOT_USER] = postgresql_data.get(OPT_ROOT_PASSWORD) or cryptotool.pwgen(10)
                    del postgresql_data[OPT_ROOT_PASSWORD]

                    #Extracting replication ssh keys from message
                    root = PgUser(ROOT_USER, self.postgresql.pg_keys_dir)
                    root.store_keys(postgresql_data[OPT_ROOT_SSH_PUBLIC_KEY], postgresql_data[OPT_ROOT_SSH_PRIVATE_KEY])
                    del postgresql_data[OPT_ROOT_SSH_PUBLIC_KEY]
                    del postgresql_data[OPT_ROOT_SSH_PRIVATE_KEY]


                    if postgresql_data.get('volume'):
                        # New format
                        postgresql_data['compat_prior_backup_restore'] = False
                        postgresql_data['volume'] = storage2.volume(postgresql_data['volume'])
                        LOG.debug("message.pg['volume']:", postgresql_data['volume'])
                        if 'backup' in postgresql_data:
                            postgresql_data['backup'] = backup.backup(postgresql_data['backup'])
                            LOG.debug("message.pg['backup']:", postgresql_data['backup'])
                        if 'restore' in postgresql_data:
                            postgresql_data['restore'] = backup.restore(postgresql_data['restore'])
                            LOG.debug("message.pg['restore']:", postgresql_data['restore'])
                    else:

                        # Compatibility transformation
                        # - volume_config -> volume
                        # - master n'th start, type=ebs - del snapshot_config
                        # - snapshot_config -> restore
                        # - create backup object on master 1'st start

                        postgresql_data['compat_prior_backup_restore'] = True
                        if postgresql_data.get(OPT_VOLUME_CNF):
                            postgresql_data['volume'] = storage2.volume(
                                postgresql_data.pop(OPT_VOLUME_CNF))

                        elif postgresql_data.get(OPT_SNAPSHOT_CNF):
                            postgresql_data['volume'] = storage2.volume(
                                type=postgresql_data[OPT_SNAPSHOT_CNF]['type'])

                        else:
                            raise HandlerError('No volume config or snapshot config provided')

                        if postgresql_data['volume'].device and \
                                        postgresql_data['volume'].type in ('ebs', 'csvol', 'cinder', 'raid'):
                            LOG.debug("Master n'th start detected. Removing snapshot config from message")
                            postgresql_data.pop(OPT_SNAPSHOT_CNF, None)

                        if postgresql_data.get(OPT_SNAPSHOT_CNF):
                            postgresql_data['restore'] = backup.restore(
                                type='snap_postgresql',
                                snapshot=postgresql_data.pop(OPT_SNAPSHOT_CNF),
                                volume=postgresql_data['volume'])

                        if int(postgresql_data['replication_master']):
                            postgresql_data['backup'] = backup.backup(
                                type='snap_postgresql',
                                volume=postgresql_data['volume'])

                    LOG.debug("Update postgresql config with %s", postgresql_data)
                    __postgresql__.update(postgresql_data)
                    __postgresql__['volume'].mpoint = __postgresql__['storage_dir']
                    __postgresql__['volume'].tags = self.resource_tags()
                    if 'backup' in __postgresql__:
                        __postgresql__['backup'].tags = self.resource_tags()
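For reference, a sketch of the two HostInitResponse payload shapes the code above distinguishes (all field values are hypothetical; only the key names come from the code). The new format ships ready-made volume/backup/restore sections, while the old one is rebuilt from volume_config/snapshot_config by the compatibility branch:

new_style = {
    'volume': {'type': 'ebs', 'id': 'vol-12345678'},
    'backup': {'type': 'snap_postgresql'},
    'restore': {'type': 'snap_postgresql', 'snapshot': {'id': 'snap-12345678'}},
}
old_style = {
    'volume_config': {'type': 'ebs', 'id': 'vol-12345678'},
    'snapshot_config': {'type': 'ebs', 'id': 'snap-12345678'},
}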
Example No. 28
    def on_host_init_response(self, message):
        """
        Check redis data in host init response
        @type message: scalarizr.messaging.Message
        @param message: HostInitResponse
        """
        with bus.initialization_op as op:
            with op.phase(self._phase_redis):
                with op.step(self._step_accept_scalr_conf):

                    if not message.body.has_key(BEHAVIOUR) or message.db_type != BEHAVIOUR:
                        raise HandlerError("HostInitResponse message for %s behaviour must have '%s' property and db_type '%s'"
                                           % (BEHAVIOUR, BEHAVIOUR, BEHAVIOUR))

                    config_dir = os.path.dirname(self._volume_config_path)
                    if not os.path.exists(config_dir):
                        os.makedirs(config_dir)

                    redis_data = message.redis.copy()
                    LOG.info('Got Redis part of HostInitResponse: %s' % redis_data)

                    if 'preset' in redis_data:
                        self.initial_preset = redis_data['preset']
                        del redis_data['preset']
                        LOG.debug('Scalr sent current preset: %s' % self.initial_preset)


                    # XXX: the following line enables support for old Scalr installations;
                    # use_password should be set by the postinstall script for old servers
                    redis_data[OPT_USE_PASSWORD] = redis_data.get(OPT_USE_PASSWORD, '1')

                    ports = []
                    passwords = []
                    num_processes = 1

                    if 'ports' in redis_data and redis_data['ports']:
                        ports = map(int, redis_data['ports'])
                        del redis_data['ports']

                    if 'passwords' in redis_data and redis_data['passwords']:
                        passwords = redis_data['passwords']
                        del redis_data['passwords']

                    if 'num_processes' in redis_data and redis_data['num_processes']:
                        num_processes = int(redis_data['num_processes'])
                        del redis_data['num_processes']

                    redis_data['volume'] = storage2.volume(
                                    redis_data.pop('volume_config'))

                    if redis_data['volume'].device and \
                                            redis_data['volume'].type in ('ebs', 'csvol', 'cinder', 'raid'):
                        redis_data.pop('snapshot_config', None)

                    if redis_data.get('snapshot_config'):
                        redis_data['restore'] = backup.restore(
                                type='snap_redis',
                                snapshot=redis_data.pop('snapshot_config'),
                                volume=redis_data['volume'])

                    # Update configs
                    __redis__.update(redis_data)
                    __redis__['volume'].mpoint = __redis__['storage_dir']

                    if self.default_service.running:
                        self.default_service.stop('Terminating default redis instance')

                    self.redis_instances = redis.RedisInstances(self.is_replication_master, self.persistence_type, self.use_passwords)
                    ports = ports or [redis.DEFAULT_PORT,]
                    passwords = passwords or [self.get_main_password(),]
                    self.redis_instances.init_processes(num_processes, ports=ports, passwords=passwords)

                    if self.use_passwords:
                        self.security_off()
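The three guarded del blocks in the example above amount to pop-with-default. A condensed near-equivalent, sketched here for comparison (unlike the original, it always removes the keys, even when they are present but empty):

ports = map(int, redis_data.pop('ports', None) or [])
passwords = redis_data.pop('passwords', None) or []
num_processes = int(redis_data.pop('num_processes', None) or 1)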
Example No. 29
	def on_host_init_response(self, message):
		"""
		Check mysql data in host init response
		@type message: scalarizr.messaging.Message
		@param message: HostInitResponse
		"""
		LOG.debug("on_host_init_response")
		
		with bus.initialization_op as op:
			with op.phase(self._phase_mysql):
				with op.step(self._step_accept_scalr_conf):
		
					if not message.body.has_key(__mysql__['behavior']):
						msg = "HostInitResponse message for MySQL behavior " \
								"must have '%s' property" % __mysql__['behavior']
						raise HandlerError(msg)
					

					# Apply MySQL data from HIR
					md = getattr(message, __mysql__['behavior']).copy()					

					md['compat_prior_backup_restore'] = False
					if md.get('volume'):
						# New format
						md['volume'] = storage2.volume(md['volume'])
						if 'backup' in md:
							md['backup'] = backup.backup(md['backup'])
						if 'restore' in md:
							md['restore'] = backup.restore(md['restore'])

					else:
						# Compatibility transformation
						# - volume_config -> volume
						# - master n'th start, type=ebs - del snapshot_config
						# - snapshot_config + log_file + log_pos -> restore
						# - create backup on master 1'st start
						md['compat_prior_backup_restore'] = True
						if md.get('volume_config'):
							md['volume'] = storage2.volume(
									md.pop('volume_config'))
						else:
							md['volume'] = storage2.volume(
									type=md['snapshot_config']['type'])

						if md['volume'].device and \
									md['volume'].type in ('ebs', 'raid'):
							md.pop('snapshot_config', None)

						if md.get('snapshot_config'):
							md['restore'] = backup.restore(
									type='snap_mysql', 
									snapshot=md.pop('snapshot_config'),
									volume=md['volume'],
									log_file=md.pop('log_file'),
									log_pos=md.pop('log_pos'))
						elif int(md['replication_master']) and \
									not md['volume'].device:
							md['backup'] = backup.backup(
									type='snap_mysql',
									volume=md['volume'])

					__mysql__.update(md)

					LOG.debug('__mysql__: %s', md)
					LOG.debug('volume in __mysql__: %s', 'volume' in __mysql__)
					LOG.debug('restore in __mysql__: %s', 'restore' in __mysql__)
					LOG.debug('backup in __mysql__: %s', 'backup' in __mysql__)
					
					__mysql__['volume'].mpoint = __mysql__['storage_dir']
					__mysql__['volume'].tags = self.resource_tags()
					if 'backup' in __mysql__:
						__mysql__['backup'].tags = self.resource_tags()
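A sketch of the old-format MySQL payload the compatibility branch above consumes (all values are hypothetical): unlike the PostgreSQL case, the binlog coordinates log_file/log_pos travel alongside snapshot_config and are folded into the snap_mysql restore:

old_md = {
    'replication_master': '0',
    'volume_config': {'type': 'ebs', 'id': 'vol-12345678'},
    'snapshot_config': {'type': 'ebs', 'id': 'snap-12345678'},
    'log_file': 'binlog.000003',
    'log_pos': '107',
}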
Example No. 30
def when_i_restore_incremental_backup(step, key):
    rst = world.restore[key]
    rst = backup.restore(rst)
    rst.run()
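For context, world.restore is presumably filled by an earlier step that ran a backup. A hedged sketch of such a companion step (the step text, the backup type, and the convention that run() returns a restore config are assumptions):

from lettuce import step, world

@step(u'I make backup as (\w+)')
def when_i_make_backup(step, key):
    # assumes world.restore was initialised as a dict in the terrain setup
    bak = backup.backup(type='snap_mysql')  # backup type is an assumption
    world.restore[key] = dict(bak.run())    # assumed: run() yields a restore config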
Example No. 31
    def on_host_init_response(self, message):
        """
        Check redis data in host init response
        @type message: scalarizr.messaging.Message
        @param message: HostInitResponse
        """
        log = bus.init_op.logger
        log.info('Accept Scalr configuration')

        if not message.body.has_key(BEHAVIOUR) or message.db_type != BEHAVIOUR:
            raise HandlerError(
                "HostInitResponse message for %s behaviour must have '%s' property and db_type '%s'"
                % (BEHAVIOUR, BEHAVIOUR, BEHAVIOUR))

        config_dir = os.path.dirname(self._volume_config_path)
        if not os.path.exists(config_dir):
            os.makedirs(config_dir)

        redis_data = message.redis.copy()
        LOG.info('Got Redis part of HostInitResponse: %s' % redis_data)

        if 'preset' in redis_data:
            self.initial_preset = redis_data['preset']
            del redis_data['preset']
            LOG.debug('Scalr sent current preset: %s' % self.initial_preset)
        # XXX: the following line enables support for old Scalr installations;
        # use_password should be set by the postinstall script for old servers
        redis_data["use_password"] = redis_data.get("use_password", '1')

        ports = []
        passwords = []
        num_processes = 1

        if 'ports' in redis_data and redis_data['ports']:
            ports = map(int, redis_data['ports'])
            del redis_data['ports']

        if 'passwords' in redis_data and redis_data['passwords']:
            passwords = redis_data['passwords']
            del redis_data['passwords']

        if 'num_processes' in redis_data and redis_data['num_processes']:
            num_processes = int(redis_data['num_processes'])
            del redis_data['num_processes']

        redis_data['volume'] = storage2.volume(redis_data.pop('volume_config'))

        if redis_data['volume'].device and \
                                redis_data['volume'].type in ('ebs', 'csvol', 'cinder', 'raid'):
            redis_data.pop('snapshot_config', None)

        if redis_data.get('snapshot_config'):
            redis_data['restore'] = backup.restore(
                type='snap_redis',
                snapshot=redis_data.pop('snapshot_config'),
                volume=redis_data['volume'])

        self._hir_volume_growth = redis_data.pop('volume_growth', None)

        # Update configs
        __redis__.update(redis_data)
        __redis__['volume'].mpoint = __redis__['storage_dir']
        __redis__['volume'].tags = self.redis_tags
        if self.default_service.running:
            self.default_service.stop('Terminating default redis instance')

        self.redis_instances = redis.RedisInstances()
        ports = ports or [
            __redis__['defaults']['port'],
        ]
        passwords = passwords or [
            self.get_main_password(),
        ]
        self.redis_instances.init_processes(num_processes,
                                            ports=ports,
                                            passwords=passwords)

        self._ensure_security()
Example No. 32
def when_i_restore_full_backup(step, key):
    rst = backup.restore(world.restore[key])
    rst.run()
Example No. 34
    def on_DbMsr_NewMasterUp(self, message):
        try:
            assert message.body.has_key("db_type")
            assert message.body.has_key("local_ip")
            assert message.body.has_key("remote_ip")
            assert message.body.has_key(__mysql__['behavior'])

            mysql2 = message.body[__mysql__['behavior']]

            if int(__mysql__['replication_master']):
                LOG.debug('Skip NewMasterUp. My replication role is master')
                return

            host = message.local_ip or message.remote_ip
            LOG.info("Switching replication to a new MySQL master %s", host)
            bus.fire('before_mysql_change_master', host=host)

            LOG.debug("__mysql__['volume']: %s", __mysql__['volume'])

            if __mysql__['volume'].type in ('eph', 'lvm') or __node__['platform'].name == 'idcf':
                if 'restore' in mysql2:
                    restore = backup.restore(**mysql2['restore'])
                else:
                    # snap_mysql restore should update MySQL volume, and delete old one
                    restore = backup.restore(
                                            type='snap_mysql',
                                            log_file=mysql2['log_file'],
                                            log_pos=mysql2['log_pos'],
                                            volume=__mysql__['volume'],
                                            snapshot=mysql2['snapshot_config'])
                # XXX: ugly
                old_vol = None
                if __mysql__['volume'].type == 'eph':
                    self.mysql.service.stop('Swapping storages to reinitialize slave')

                    LOG.info('Reinitializing Slave from the new snapshot %s (log_file: %s log_pos: %s)',
                                    restore.snapshot['id'], restore.log_file, restore.log_pos)
                    new_vol = restore.run()
                else:
                    if __node__['platform'].name == 'idcf':
                        self.mysql.service.stop('Detaching old Slave volume')
                        old_vol = dict(__mysql__['volume'])
                        old_vol = storage2.volume(old_vol)
                        old_vol.umount()

                    restore.run()

                log_file = restore.log_file
                log_pos = restore.log_pos

                self.mysql.service.start()

                if __node__['platform'].name == 'idcf' and old_vol:
                    LOG.info('Destroying old Slave volume')
                    old_vol.destroy(remove_disks=True)
            else:
                LOG.debug("Stopping slave i/o thread")
                self.root_client.stop_slave_io_thread()
                LOG.debug("Slave i/o thread stopped")

                LOG.debug("Retrieving current log_file and log_pos")
                status = self.root_client.slave_status()
                log_file = status['Master_Log_File']
                log_pos = status['Read_Master_Log_Pos']
                LOG.debug("Retrieved log_file=%s, log_pos=%s", log_file, log_pos)


            self._change_master(
                    host=host,
                    user=__mysql__['repl_user'],
                    password=mysql2['repl_password'],
                    log_file=log_file,
                    log_pos=log_pos,
                    timeout=120
            )

            LOG.debug("Replication switched")
            bus.fire('mysql_change_master', host=host, log_file=log_file, log_pos=log_pos)

            msg_data = dict(
                    db_type = __mysql__['behavior'],
                    status = 'ok'
            )
            self.send_message(DbMsrMessages.DBMSR_NEW_MASTER_UP_RESULT, msg_data)

        except (Exception, BaseException), e:
            LOG.exception(e)

            msg_data = dict(
                    db_type = __mysql__['behavior'],
                    status="error",
                    last_error=str(e))
            self.send_message(DbMsrMessages.DBMSR_NEW_MASTER_UP_RESULT, msg_data)
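_change_master itself is not shown here; on a stock MySQL slave the switch it performs corresponds roughly to the statements below (a sketch against a hypothetical DB-API cursor, not the handler's verified implementation):

# stop replication, repoint the slave, then resume from the new coordinates
cursor.execute("STOP SLAVE")
cursor.execute(
    "CHANGE MASTER TO MASTER_HOST=%s, MASTER_USER=%s, MASTER_PASSWORD=%s, "
    "MASTER_LOG_FILE=%s, MASTER_LOG_POS=%s",
    (host, __mysql__['repl_user'], mysql2['repl_password'], log_file, int(log_pos)))
cursor.execute("START SLAVE")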