def _create_snapshot(self):
    """Run a volume-snapshot backup of the Redis storage and return the snapshot."""
    LOG.info("Creating Redis data bundle")
    bundle = backup.backup(
        type='snap_redis',
        volume=__redis__['volume'],
        tags=self.redis_tags)
    result = bundle.run()
    return result.snapshot
def do_databundle(op):
    """Create a PostgreSQL data bundle (volume snapshot) and report it to Scalr.

    Fires the before/after bus events, snapshots the PostgreSQL storage volume,
    measures used space on STORAGE_PATH and sends DBMSR_CREATE_DATA_BUNDLE_RESULT
    with status 'ok' plus the snapshot config, or status 'error' on failure.
    Returns the restore object on success; returns None after reporting an error.
    """
    try:
        bus.fire('before_postgresql_data_bundle')
        LOG.info("Creating PostgreSQL data bundle")
        backup_obj = backup.backup(type='snap_postgresql',
                                volume=__postgresql__['volume'],
                                tags=__postgresql__['volume'].tags)
        restore = backup_obj.run()
        snap = restore.snapshot

        # Used space in megabytes, parsed from `df -P --block-size=M` output
        # (second line, third column, trailing 'M' stripped)
        used_size = int(system2(('df', '-P', '--block-size=M', STORAGE_PATH))[0].split('\n')[1].split()[2][:-1])
        bus.fire('postgresql_data_bundle', snapshot_id=snap.id)

        # Notify scalr (used_size is reported in gigabytes with 3 decimals)
        msg_data = {
            'db_type': BEHAVIOUR,
            'status': 'ok',
            'used_size' : '%.3f' % (float(used_size) / 1000,),
            BEHAVIOUR: {OPT_SNAPSHOT_CNF: dict(snap)}
        }
        __node__.messaging.send(DbMsrMessages.DBMSR_CREATE_DATA_BUNDLE_RESULT, msg_data)

        return restore

    except (Exception, BaseException), e:
        LOG.exception(e)

        # Notify Scalr about error
        __node__.messaging.send(DbMsrMessages.DBMSR_CREATE_DATA_BUNDLE_RESULT,
                dict(db_type=BEHAVIOUR,
                        status='error',
                        last_error=str(e)))
def _create_snapshot(self):
    """Run a volume-snapshot backup of the PostgreSQL storage and return the snapshot."""
    LOG.info("Creating PostgreSQL data bundle")
    bundle = backup.backup(
        type='snap_postgresql',
        volume=__postgresql__['volume'],
        tags=self.resource_tags())
    result = bundle.run()
    return result.snapshot
def do_databundle(op):
    """Create a Redis data bundle (volume snapshot) and report it to Scalr.

    Fires the before/after bus events, snapshots the Redis storage volume,
    measures used space on STORAGE_PATH and sends DBMSR_CREATE_DATA_BUNDLE_RESULT
    with status 'ok' plus the snapshot config, or status 'error' on failure.
    Returns the restore object on success; returns None after reporting an error.
    """
    try:
        bus.fire('before_%s_data_bundle' % BEHAVIOUR)
        # Creating snapshot
        LOG.info("Creating Redis data bundle")
        backup_obj = backup.backup(type='snap_redis',
                                volume=__redis__['volume'],
                                tags=__redis__['volume'].tags)  # TODO: generate the same way as in
                                                                # mysql api or use __node__
        restore = backup_obj.run()
        snap = restore.snapshot

        # Used space in megabytes from `df` (second line, third column, trailing 'M' stripped)
        used_size = int(system2(('df', '-P', '--block-size=M', STORAGE_PATH))[0].split('\n')[1].split()[2][:-1])
        bus.fire('%s_data_bundle' % BEHAVIOUR, snapshot_id=snap.id)

        # Notify scalr (used_size is reported in gigabytes with 3 decimals)
        msg_data = dict(
            db_type=BEHAVIOUR,
            used_size='%.3f' % (float(used_size) / 1000,),
            status='ok'
        )
        msg_data[BEHAVIOUR] = {'snapshot_config': dict(snap)}
        node.__node__.messaging.send(DbMsrMessages.DBMSR_CREATE_DATA_BUNDLE_RESULT, msg_data)

        return restore

    except (Exception, BaseException), e:
        LOG.exception(e)

        # Notify Scalr about error
        node.__node__.messaging.send(DbMsrMessages.DBMSR_CREATE_DATA_BUNDLE_RESULT,
                dict(db_type=BEHAVIOUR,
                        status='error',
                        last_error=str(e)))
def do_backup(op, backup_conf=None): try: purpose = '{0}-{1}'.format( __mysql__.behavior, 'master' if int(__mysql__.replication_master) == 1 else 'slave') backup = { 'type': 'mysqldump', 'cloudfs_dir': __node__.platform.scalrfs.backups('mysql'), 'description': 'MySQL backup (farm: {0} role: {1})'.format( __node__.farm_id, __node__.role_name), 'tags': build_tags(purpose, 'active') } backup.update(backup_conf or {}) if backup['type'] == 'snap_mysql': backup['description'].replace('backup', 'data bundle') backup['volume'] = __mysql__.volume bak = op.data['bak'] = backup_module.backup(**backup) try: restore = bak.run() finally: del op.data['bak'] # For Scalr < 4.5.0 if bak.type == 'mysqldump': __node__.messaging.send('DbMsr_CreateBackupResult', { 'db_type': __mysql__.behavior, 'status': 'ok', 'backup_parts': restore.parts }) else: data = { 'restore': dict(restore) } if backup["type"] == 'snap_mysql': data.update({ 'snapshot_config': dict(restore.snapshot), 'log_file': restore.log_file, 'log_pos': restore.log_pos, }) __node__.messaging.send('DbMsr_CreateDataBundleResult', { 'db_type': __mysql__.behavior, 'status': 'ok', __mysql__.behavior: data }) return dict(restore) except: # For Scalr < 4.5.0 c, e, t = sys.exc_info() msg_name = 'DbMsr_CreateBackupResult' \ if backup['type'] == 'mysqldump' else \ 'DbMsr_CreateDataBundleResult' __node__.messaging.send(msg_name, { 'db_type': __mysql__.behavior, 'status': 'error', 'last_error': str(e) }) raise c, e, t
def when_i_create_full_xtrabackup(step):
    """Lettuce step: run a full xtrabackup and stash its restore config as 'R1'."""
    __import__('scalarizr.services.mysql2')
    bak = backup.backup(type='xtrabackup', cloudfs_target=CLOUDFS_TARGET)
    world.restore = {'R1': dict(bak.run())}
def test_checkpoints(self, *args):
    """The xtrabackup_checkpoints fixture is parsed into a key/value dict."""
    fixture_dir = os.path.realpath(os.path.join(
        os.path.dirname(__file__), '..', '..',
        'fixtures', 'services', 'mysql', 'dbbackup', '2012-09-18_09-06-49'))
    bak = backup.backup(type='xtrabackup')
    mock.patch.object(bak, '_latest_backup_dir', return_value=fixture_dir).start()
    expected = {
        'backup_type': 'full-backuped',
        'to_lsn': '1597945',
        'last_lsn': '1597945',
        'from_lsn': '0'}
    assert bak._checkpoints() == expected
    bak._latest_backup_dir.assert_called_once()
def test_binlog_info(self, *args):
    """The xtrabackup_binlog_info fixture yields [binlog file, position]."""
    fixture_dir = os.path.realpath(os.path.join(
        os.path.dirname(__file__), '..', '..',
        'fixtures', 'services', 'mysql', 'dbbackup', '2012-09-18_09-06-49'))
    bak = backup.backup(type='xtrabackup')
    mock.patch.object(bak, '_latest_backup_dir', return_value=fixture_dir).start()
    assert bak._binlog_info() == ['binlog.000009', '192']
    bak._latest_backup_dir.assert_called_once()
def when_i_create_incremental_xtrabackup(step):
    """Lettuce step: run an incremental xtrabackup on top of 'R1', stash it as 'R2'."""
    __import__('scalarizr.services.mysql2')
    prev_source = world.restore['R1']['cloudfs_source']
    bak = backup.backup(
        type='xtrabackup',
        backup_type='incremental',
        prev_cloudfs_source=prev_source,
        cloudfs_target=CLOUDFS_TARGET)
    world.restore['R2'] = dict(bak.run())
def do_databundle(self, volume):
    """Snapshot the given Redis volume and return the snapshot config as a dict."""
    LOG.info("Creating Redis data bundle")
    bundle = backup.backup(type='snap_redis', volume=volume, tags=volume['tags'])
    result = bundle.run()
    return dict(result.snapshot)
def do_databundle(self, volume):
    """Snapshot the given PostgreSQL volume and return the snapshot config as a dict.

    Eph volumes are ensured (created/attached) before taking the snapshot.
    """
    LOG.info("Creating PostgreSQL data bundle")
    volume = storage2.volume(volume)
    if volume.type == 'eph':
        volume.ensure()
    bundle = backup.backup(type='snap_postgresql', volume=volume, tags=volume.tags)
    return dict(bundle.run().snapshot)
def test_run(self, vol_factory):
    """A 'snap' backup run snapshots the volume and propagates freeze-listener state."""
    volume = {'type': 'ebs'}

    def on_freeze(vol, state):
        # Keys written into *state* become attributes of the restore result
        state['custom'] = 1

    listener = mock.Mock(side_effect=on_freeze)
    bak = backup.backup(type='snap', volume=volume)
    bak.on(freeze=listener)

    rst = bak.run()

    # The listener gets the instantiated volume object, not the raw dict
    listener.assert_called_with(vol_factory.return_value, mock.ANY)
    expected_snapshot = vol_factory.return_value.snapshot.return_value
    assert rst.type == 'snap'
    assert rst.snapshot == expected_snapshot
    assert rst.custom == 1
def test_run(self, vol_factory):
    """A 'snap' backup run snapshots the volume and propagates freeze-listener state.

    The freeze listener receives (volume, state); keys it writes into *state*
    become attributes of the resulting restore object.
    """
    volume = {'type': 'ebs'}

    def ln(vol, state):
        # State mutations end up on the restore result (rst.custom below)
        state['custom'] = 1
    listener = mock.Mock(side_effect=ln)

    bak = backup.backup(
        type='snap',
        volume=volume)
    bak.on(freeze=listener)
    rst = bak.run()

    # The listener gets the instantiated volume object, not the raw dict
    listener.assert_called_with(vol_factory.return_value, mock.ANY)
    assert rst.type == 'snap'
    assert rst.snapshot == vol_factory.return_value.snapshot.return_value
    assert rst.custom == 1
def test_run_full(self, md, ex, innobackupex, *args):
    """A full xtrabackup run reports binlog position and LSN range on the restore."""
    bak = backup.backup(
        type='xtrabackup')
    self._patch_bak(bak)
    rst = bak.run()
    assert rst.type == 'xtrabackup'
    assert rst.backup_type == 'full'
    assert rst.log_file == 'binlog.000003'
    assert rst.log_pos == '107'
    assert rst.from_lsn == '0'
    assert rst.to_lsn == '53201'
    # innobackupex is invoked with the target directory and MySQL credentials
    innobackupex.assert_called_with(bak.backup_dir,
        user=mock.ANY, password=mock.ANY)
    bak._checkpoints.assert_called_with()
    bak._binlog_info.assert_called_with()
def test_run_with_volume(self, md, ex, st2vol, innobackupex, *args):
    """An incremental xtrabackup onto a volume mounts, snapshots and detaches it.

    BUGFIX: the original called ``md.asert_called_with`` (misspelled).  On a
    Mock that silently creates a child attribute instead of asserting, so the
    mkdir check never actually ran.
    """
    ebs = mock.Mock(
        id='vol-12345678',
        size=1,
        zone='us-east-1a',
        **{'volume_state.return_value': 'available',
           'attachment_state.return_value': 'attaching'}
    )
    bak = backup.backup(
        type='xtrabackup',
        backup_type='incremental',
        from_lsn='23146',
        volume=ebs)
    self._patch_bak(bak)
    st2vol.return_value = ebs

    rst = bak.run()

    st2vol.assert_called_with(ebs)
    md.assert_called_with(bak.backup_dir)
    assert ebs.mpoint == bak.backup_dir
    ebs.detach.assert_called_with()
    ebs.snapshot.assert_called_with('MySQL xtrabackup', None)
    assert rst.volume == ebs
    assert rst.snapshot
def do_databundle(op):
    """Create a Redis data bundle (volume snapshot) and report it to Scalr.

    Fires the before/after bus events, snapshots the Redis storage volume,
    measures used space on STORAGE_PATH and sends DBMSR_CREATE_DATA_BUNDLE_RESULT
    with status 'ok' plus the snapshot config, or status 'error' on failure.
    Returns the restore object on success; returns None after reporting an error.
    """
    try:
        bus.fire('before_%s_data_bundle' % BEHAVIOUR)
        # Creating snapshot
        LOG.info("Creating Redis data bundle")
        backup_obj = backup.backup(
            type='snap_redis',
            volume=__redis__['volume'],
            tags=__redis__['volume'].tags
        )
        # TODO: generate the same way as in
        # mysql api or use __node__
        restore = backup_obj.run()
        snap = restore.snapshot

        # Used space in megabytes from `df` (second line, third column, trailing 'M' stripped)
        used_size = int(
            system2(('df', '-P', '--block-size=M', STORAGE_PATH))[0].split('\n')[1].split()[2][:-1])
        bus.fire('%s_data_bundle' % BEHAVIOUR, snapshot_id=snap.id)

        # Notify scalr (used_size is reported in gigabytes with 3 decimals)
        msg_data = dict(db_type=BEHAVIOUR,
                        used_size='%.3f' % (float(used_size) / 1000, ),
                        status='ok')
        msg_data[BEHAVIOUR] = {'snapshot_config': dict(snap)}
        node.__node__.messaging.send(
            DbMsrMessages.DBMSR_CREATE_DATA_BUNDLE_RESULT,
            msg_data)
        return restore

    except (Exception, BaseException), e:
        LOG.exception(e)
        # Notify Scalr about error
        node.__node__.messaging.send(
            DbMsrMessages.DBMSR_CREATE_DATA_BUNDLE_RESULT,
            dict(db_type=BEHAVIOUR,
                 status='error',
                 last_error=str(e)))
def setup(self):
    """Build a snap_mysql backup with a stubbed MySQL client, plus a matching restore."""
    self.bak = backup.backup(type='snap_mysql')
    mock.patch.object(self.bak, '_client').start()
    client = self.bak._client.return_value
    client.master_status.return_value = ('binlog.000003', '107')
    self.rst = backup.restore(type='snap_mysql')
def on_host_init_response(self, message):
    """
    Check postgresql data in host init response

    @type message: scalarizr.messaging.Message
    @param message: HostInitResponse
    """
    with bus.initialization_op as op:
        with op.phase(self._phase_postgresql):
            with op.step(self._step_accept_scalr_conf):
                if not message.body.has_key(BEHAVIOUR) or message.db_type != BEHAVIOUR:
                    raise HandlerError("HostInitResponse message for PostgreSQL behaviour must have 'postgresql' property and db_type 'postgresql'")

                postgresql_data = message.postgresql.copy()

                # Extracting service configuration preset from message
                if 'preset' in postgresql_data:
                    self.initial_preset = postgresql_data['preset']
                    LOG.debug('Scalr sent current preset: %s' % self.initial_preset)
                    del postgresql_data['preset']

                # Extracting or generating postgresql root password
                postgresql_data['%s_password' % ROOT_USER] = postgresql_data.get(OPT_ROOT_PASSWORD) or cryptotool.pwgen(10)
                del postgresql_data[OPT_ROOT_PASSWORD]

                # Extracting replication ssh keys from message
                root = PgUser(ROOT_USER, self.postgresql.pg_keys_dir)
                root.store_keys(postgresql_data[OPT_ROOT_SSH_PUBLIC_KEY],
                        postgresql_data[OPT_ROOT_SSH_PRIVATE_KEY])
                del postgresql_data[OPT_ROOT_SSH_PUBLIC_KEY]
                del postgresql_data[OPT_ROOT_SSH_PRIVATE_KEY]

                if postgresql_data.get('volume'):
                    # New format
                    postgresql_data['compat_prior_backup_restore'] = False
                    postgresql_data['volume'] = storage2.volume(postgresql_data['volume'])
                    # BUGFIX: these LOG.debug calls originally passed the value as
                    # an extra argument without a '%s' placeholder, so it was
                    # never rendered in the log output.
                    LOG.debug("message.pg['volume']: %s", postgresql_data['volume'])
                    if 'backup' in postgresql_data:
                        postgresql_data['backup'] = backup.backup(postgresql_data['backup'])
                        LOG.debug("message.pg['backup']: %s", postgresql_data['backup'])
                    if 'restore' in postgresql_data:
                        postgresql_data['restore'] = backup.restore(postgresql_data['restore'])
                        LOG.debug("message.pg['restore']: %s", postgresql_data['restore'])
                else:
                    # Compatibility transformation
                    # - volume_config -> volume
                    # - master n'th start, type=ebs - del snapshot_config
                    # - snapshot_config -> restore
                    # - create backup object on master 1'st start
                    postgresql_data['compat_prior_backup_restore'] = True
                    if postgresql_data.get(OPT_VOLUME_CNF):
                        postgresql_data['volume'] = storage2.volume(
                                postgresql_data.pop(OPT_VOLUME_CNF))
                    elif postgresql_data.get(OPT_SNAPSHOT_CNF):
                        postgresql_data['volume'] = storage2.volume(
                                type=postgresql_data[OPT_SNAPSHOT_CNF]['type'])
                    else:
                        raise HandlerError('No volume config or snapshot config provided')

                    if postgresql_data['volume'].device and \
                            postgresql_data['volume'].type in ('ebs', 'csvol', 'cinder', 'raid'):
                        LOG.debug("Master n'th start detected. Removing snapshot config from message")
                        postgresql_data.pop(OPT_SNAPSHOT_CNF, None)

                    if postgresql_data.get(OPT_SNAPSHOT_CNF):
                        postgresql_data['restore'] = backup.restore(
                                type='snap_postgresql',
                                snapshot=postgresql_data.pop(OPT_SNAPSHOT_CNF),
                                volume=postgresql_data['volume'])

                    if int(postgresql_data['replication_master']):
                        postgresql_data['backup'] = backup.backup(
                                type='snap_postgresql',
                                volume=postgresql_data['volume'])

                LOG.debug("Update postgresql config with %s", postgresql_data)
                __postgresql__.update(postgresql_data)
                __postgresql__['volume'].mpoint = __postgresql__['storage_dir']
                __postgresql__['volume'].tags = self.resource_tags()
                if 'backup' in __postgresql__:
                    __postgresql__['backup'].tags = self.resource_tags()
def on_host_init_response(self, message):
    """
    Check postgresql data in host init response

    @type message: scalarizr.messaging.Message
    @param message: HostInitResponse
    """
    log = bus.init_op.logger
    log.info('Accept Scalr configuration')
    if not message.body.has_key(BEHAVIOUR) or message.db_type != BEHAVIOUR:
        raise HandlerError(
            "HostInitResponse message for PostgreSQL behaviour must have 'postgresql' property and db_type 'postgresql'"
        )

    postgresql_data = message.postgresql.copy()

    # Extracting service configuration preset from message
    if 'preset' in postgresql_data:
        self.initial_preset = postgresql_data['preset']
        LOG.debug('Scalr sent current preset: %s' % self.initial_preset)
        del postgresql_data['preset']

    # Extracting or generating postgresql root password
    postgresql_data['%s_password' % ROOT_USER] = postgresql_data.get(
        OPT_ROOT_PASSWORD) or cryptotool.pwgen(10)
    del postgresql_data[OPT_ROOT_PASSWORD]

    # Extracting replication ssh keys from message
    root = PgUser(ROOT_USER, self.postgresql.pg_keys_dir)
    root.store_keys(postgresql_data[OPT_ROOT_SSH_PUBLIC_KEY],
                    postgresql_data[OPT_ROOT_SSH_PRIVATE_KEY])
    del postgresql_data[OPT_ROOT_SSH_PUBLIC_KEY]
    del postgresql_data[OPT_ROOT_SSH_PRIVATE_KEY]

    if postgresql_data.get('volume'):
        # New format: Scalr sent ready-made volume/backup/restore configs
        postgresql_data['compat_prior_backup_restore'] = False
        postgresql_data['volume'] = storage2.volume(
            postgresql_data['volume'])
        LOG.debug("message.pg['volume']: %s", postgresql_data['volume'])
        if 'backup' in postgresql_data:
            postgresql_data['backup'] = backup.backup(
                postgresql_data['backup'])
            LOG.debug("message.pg['backup']: %s", postgresql_data['backup'])
        if 'restore' in postgresql_data:
            postgresql_data['restore'] = backup.restore(
                postgresql_data['restore'])
            LOG.debug("message.pg['restore']: %s", postgresql_data['restore'])
    else:
        # Compatibility transformation
        # - volume_config -> volume
        # - master n'th start, type=ebs - del snapshot_config
        # - snapshot_config -> restore
        # - create backup object on master 1'st start
        postgresql_data['compat_prior_backup_restore'] = True
        if postgresql_data.get(OPT_VOLUME_CNF):
            postgresql_data['volume'] = storage2.volume(
                postgresql_data.pop(OPT_VOLUME_CNF))
        elif postgresql_data.get(OPT_SNAPSHOT_CNF):
            postgresql_data['volume'] = storage2.volume(
                type=postgresql_data[OPT_SNAPSHOT_CNF]['type'])
        else:
            raise HandlerError(
                'No volume config or snapshot config provided')

        # A persistent volume with a device already holds the latest data:
        # drop the snapshot config so we don't restore over it
        if postgresql_data['volume'].device and \
                postgresql_data['volume'].type in ('ebs', 'csvol', 'cinder', 'raid', 'gce_persistent'):
            LOG.debug(
                "Master n'th start detected. Removing snapshot config from message"
            )
            postgresql_data.pop(OPT_SNAPSHOT_CNF, None)

        if postgresql_data.get(OPT_SNAPSHOT_CNF):
            postgresql_data['restore'] = backup.restore(
                type='snap_postgresql',
                snapshot=postgresql_data.pop(OPT_SNAPSHOT_CNF),
                volume=postgresql_data['volume'])

        if int(postgresql_data['replication_master']):
            postgresql_data['backup'] = backup.backup(
                type='snap_postgresql',
                volume=postgresql_data['volume'])

    # Requested storage growth (if any) is applied later during init
    self._hir_volume_growth = postgresql_data.pop('volume_growth', None)

    LOG.debug("Update postgresql config with %s", postgresql_data)
    __postgresql__.update(postgresql_data)
    __postgresql__['volume'].mpoint = __postgresql__['storage_dir']
def on_host_init_response(self, message):
    """
    Check mysql data in host init response

    @type message: scalarizr.messaging.Message
    @param message: HostInitResponse
    """
    LOG.debug("on_host_init_response")
    with bus.initialization_op as op:
        with op.phase(self._phase_mysql):
            with op.step(self._step_accept_scalr_conf):
                if not message.body.has_key(__mysql__['behavior']):
                    msg = "HostInitResponse message for MySQL behavior " \
                            "must have '%s' property" % __mysql__['behavior']
                    raise HandlerError(msg)

                # Apply MySQL data from HIR
                md = getattr(message, __mysql__['behavior']).copy()

                md['compat_prior_backup_restore'] = False
                if md.get('volume'):
                    # New format: Scalr sent ready-made volume/backup/restore configs
                    md['volume'] = storage2.volume(md['volume'])
                    if 'backup' in md:
                        md['backup'] = backup.backup(md['backup'])
                    if 'restore' in md:
                        md['restore'] = backup.restore(md['restore'])

                else:
                    # Compatibility transformation
                    # - volume_config -> volume
                    # - master n'th start, type=ebs - del snapshot_config
                    # - snapshot_config + log_file + log_pos -> restore
                    # - create backup on master 1'st start
                    md['compat_prior_backup_restore'] = True
                    if md.get('volume_config'):
                        md['volume'] = storage2.volume(
                                md.pop('volume_config'))
                    else:
                        md['volume'] = storage2.volume(
                                type=md['snapshot_config']['type'])

                    # A persistent volume with a device already holds the
                    # latest data: don't restore over it from a snapshot
                    if md['volume'].device and \
                            md['volume'].type in ('ebs', 'raid'):
                        md.pop('snapshot_config', None)

                    if md.get('snapshot_config'):
                        md['restore'] = backup.restore(
                                type='snap_mysql',
                                snapshot=md.pop('snapshot_config'),
                                volume=md['volume'],
                                log_file=md.pop('log_file'),
                                log_pos=md.pop('log_pos'))
                    elif int(md['replication_master']) and \
                            not md['volume'].device:
                        md['backup'] = backup.backup(
                                type='snap_mysql',
                                volume=md['volume'])

                __mysql__.update(md)

                LOG.debug('__mysql__: %s', md)
                LOG.debug('volume in __mysql__: %s', 'volume' in __mysql__)
                LOG.debug('restore in __mysql__: %s', 'restore' in __mysql__)
                LOG.debug('backup in __mysql__: %s', 'backup' in __mysql__)

                __mysql__['volume'].mpoint = __mysql__['storage_dir']
                __mysql__['volume'].tags = self.resource_tags()
                if 'backup' in __mysql__:
                    __mysql__['backup'].tags = self.resource_tags()
def do_backup(self, backup_conf):
    """Run the backup described by *backup_conf* and return the restore config as a dict."""
    job = backup_module.backup(**backup_conf)
    return dict(job.run())
def on_DbMsr_CreateDataBundle(self, message):
    """Handle a DbMsr_CreateDataBundle request: snapshot MySQL storage and reply.

    Supports both the legacy message format (no 'backup' key; a plain
    snap_mysql snapshot is taken and reported as snapshot_config/log_file/
    log_pos) and the new format (an explicit backup config whose restore is
    reported under 'restore'). On failure an error result is sent to Scalr.
    """
    LOG.debug("on_DbMsr_CreateDataBundle")
    try:
        op = operation(name=self._op_data_bundle, phases=[{
            'name': self._phase_data_bundle,
            'steps': [self._step_create_data_bundle]
        }])
        op.define()

        with op.phase(self._phase_data_bundle):
            with op.step(self._step_create_data_bundle):
                bus.fire('before_mysql_data_bundle')

                backup_info = message.body.get(__mysql__['behavior'], {})
                # Legacy Scalr sends no 'backup' key in the message body
                compat_prior_backup_restore = 'backup' not in backup_info
                if compat_prior_backup_restore:
                    bak = backup.backup(
                            type='snap_mysql',
                            volume=__mysql__['volume'])
                else:
                    bak = backup.backup(backup_info['backup'])
                restore = bak.run()

                '''
                # Creating snapshot
                snap, log_file, log_pos = self._create_snapshot(ROOT_USER, self.root_password, tags=self.mysql_tags)
                used_size = firstmatched(lambda r: r.mpoint == STORAGE_PATH, filetool.df()).used
                bus.fire('mysql_data_bundle', snapshot_id=snap.id)
                '''

                # Notify scalr
                msg_data = {
                    'db_type': __mysql__['behavior'],
                    'status': 'ok',
                    __mysql__['behavior']: {}
                }
                if compat_prior_backup_restore:
                    msg_data[__mysql__['behavior']].update({
                        'snapshot_config': dict(restore.snapshot),
                        'log_file': restore.log_file,
                        'log_pos': restore.log_pos,
                    })
                else:
                    msg_data[__mysql__['behavior']].update({
                        'restore': dict(restore)
                    })

                self.send_message(DbMsrMessages.DBMSR_CREATE_DATA_BUNDLE_RESULT, msg_data)

        op.ok()

    except (Exception, BaseException), e:
        LOG.exception(e)

        # Notify Scalr about error
        self.send_message(DbMsrMessages.DBMSR_CREATE_DATA_BUNDLE_RESULT, dict(
            db_type = __mysql__['behavior'],
            status ='error',
            last_error = str(e)
        ))
def on_DbMsr_PromoteToMaster(self, message):
    """
    Promote slave to master

    Two paths: when Scalr supplies a master volume config (and it's a
    swappable type), the slave's storage is replaced by the master's volume;
    otherwise the current storage is promoted in place and a fresh data
    bundle is taken. Either way a DBMSR_PROMOTE_TO_MASTER_RESULT message is
    sent; on error the node is reverted to read-only slave mode.
    """
    LOG.debug("on_DbMsr_PromoteToMaster")
    #assert message.body['volume_config']
    #assert message.mysql2
    mysql2 = message.body[__mysql__['behavior']]
    #assert mysql2['root_password']
    #assert mysql2['repl_password']
    #assert mysql2['stat_password']

    if int(__mysql__['replication_master']):
        LOG.warning('Cannot promote to master. Already master')
        return
    LOG.info('Starting Slave -> Master promotion')

    bus.fire('before_slave_promote_to_master')
    # Legacy message format detection (volume_config/snapshot_config keys)
    __mysql__['compat_prior_backup_restore'] = mysql2.get('volume_config') or mysql2.get('snapshot_config')
    new_vol = None
    if mysql2.get('volume_config'):
        new_vol = storage2.volume(mysql2.get('volume_config'))

    try:
        if new_vol and new_vol.type not in ('eph', 'lvm'):
            # Storage-swap path: take over the old master's volume
            if self.mysql.service.running:
                self.root_client.stop_slave()
                self.mysql.service.stop('Swapping storages to promote slave to master')

            # Unplug slave storage and plug master one
            #old_conf = self.storage_vol.detach(force=True) # ??????
            old_vol = storage2.volume(__mysql__['volume'])
            try:
                old_vol.umount()
                #master_vol = self._take_master_volume(master_vol_id)
                new_vol.mpoint = __mysql__['storage_dir']
                new_vol.ensure(mount=True)
                #new_storage_vol = self._plug_storage(STORAGE_PATH, master_storage_conf)
                # Continue if master storage is a valid MySQL storage
                if self._storage_valid():
                    # Patch configuration files
                    self.mysql.move_mysqldir_to(__mysql__['storage_dir'])
                    self.mysql._init_replication(master=True)
                    # Set read_only option
                    self.mysql.my_cnf.read_only = False
                    self.mysql.service.start()
                    # Update __mysql__['behavior'] configuration
                    __mysql__.update({
                        'replication_master': 1,
                        'root_password': mysql2['root_password'],
                        'repl_password': mysql2['repl_password'],
                        'stat_password': mysql2['stat_password'],
                        'volume': new_vol
                    })
                    # Old slave volume is no longer needed; failure here is non-fatal
                    try:
                        old_vol.destroy(remove_disks=True)
                    except:
                        LOG.warn('Failed to destroy old MySQL volume %s: %s',
                                old_vol.id, sys.exc_info()[1])
                    '''
                    updates = {
                        OPT_ROOT_PASSWORD : mysql2['root_password'],
                        OPT_REPL_PASSWORD : mysql2['repl_password'],
                        OPT_STAT_PASSWORD : mysql2['stat_password'],
                        OPT_REPLICATION_MASTER : "1"
                    }
                    self._update_config(updates)
                    Storage.backup_config(new_storage_vol.config(), self._volume_config_path)
                    '''
                    # Send message to Scalr
                    msg_data = {
                        'status': 'ok',
                        'db_type': __mysql__['behavior'],
                        __mysql__['behavior']: {}
                    }
                    if __mysql__['compat_prior_backup_restore']:
                        msg_data[__mysql__['behavior']].update({
                            'volume_config': dict(__mysql__['volume'])
                        })
                    else:
                        msg_data[__mysql__['behavior']].update({
                            'volume': dict(__mysql__['volume'])
                        })
                    self.send_message(
                            DbMsrMessages.DBMSR_PROMOTE_TO_MASTER_RESULT,
                            msg_data)
                else:
                    msg = "%s is not a valid MySQL storage" % __mysql__['data_dir']
                    raise HandlerError(msg)
            except:
                # Roll back the storage swap before propagating the error
                self.mysql.service.stop('Detaching new volume')
                new_vol.detach()
                old_vol.mount()
                raise
        else:
            # In-place promotion: keep current storage, make it writable
            # Set read_only option
            self.mysql.my_cnf.read_only = False
            self.mysql.service.start()
            self.root_client.stop_slave()
            self.root_client.reset_master()
            self.mysql.flush_logs(__mysql__['data_dir'])

            __mysql__.update({
                'replication_master': 1,
                'root_password': mysql2['root_password'],
                'repl_password': mysql2['repl_password'],
                'stat_password': mysql2['stat_password'],
            })
            '''
            updates = {
                OPT_ROOT_PASSWORD : mysql2['root_password'],
                OPT_REPL_PASSWORD : mysql2['repl_password'],
                OPT_STAT_PASSWORD : mysql2['stat_password'],
                OPT_REPLICATION_MASTER : "1"
            }
            self._update_config(updates)
            '''
            # Take a fresh data bundle for the slaves to restore from
            if mysql2.get('backup'):
                bak = backup.backup(**mysql2.get('backup'))
            else:
                bak = backup.backup(
                        type='snap_mysql',
                        volume=__mysql__['volume'])
            restore = bak.run()
            '''
            snap, log_file, log_pos = self._create_snapshot(ROOT_USER, mysql2['root_password'], tags=self.mysql_tags)
            Storage.backup_config(snap.config(), self._snapshot_config_path)
            '''
            # Send message to Scalr
            msg_data = dict(
                status="ok",
                db_type = __mysql__['behavior']
            )
            if __mysql__['compat_prior_backup_restore']:
                msg_data[__mysql__['behavior']] = {
                    'log_file': restore.log_file,
                    'log_pos': restore.log_pos,
                    'snapshot_config': dict(restore.snapshot),
                    'volume_config': dict(__mysql__['volume'])
                }
            else:
                msg_data[__mysql__['behavior']] = {
                    'restore': dict(restore),
                    'volume': dict(__mysql__['volume'])
                }
            self.send_message(DbMsrMessages.DBMSR_PROMOTE_TO_MASTER_RESULT, msg_data)

        LOG.info('Promotion completed')
        bus.fire('slave_promote_to_master')

    except (Exception, BaseException), e:
        LOG.exception(e)
        msg_data = dict(
            db_type = __mysql__['behavior'],
            status="error",
            last_error=str(e))
        self.send_message(DbMsrMessages.DBMSR_PROMOTE_TO_MASTER_RESULT, msg_data)

        # Change back read_only option
        self.mysql.my_cnf.read_only = True
        # Start MySQL
        self.mysql.service.start()
def on_DbMsr_PromoteToMaster(self, message):
    """
    Promote slave to master

    Two paths: when Scalr supplies a master volume config (and it's a
    swappable type), the slave's storage is replaced by the master's volume;
    otherwise the current storage is promoted in place and, unless
    'no_data_bundle' is requested, a fresh data bundle is taken. Either way
    a DBMSR_PROMOTE_TO_MASTER_RESULT message is sent; on error the node is
    reverted to read-only slave mode.
    """
    LOG.debug("on_DbMsr_PromoteToMaster")
    mysql2 = message.body[__mysql__['behavior']]

    if int(__mysql__['replication_master']):
        LOG.warning('Cannot promote to master. Already master')
        return
    LOG.info('Starting Slave -> Master promotion')

    bus.fire('before_slave_promote_to_master')
    # Legacy message format detection (volume_config/snapshot_config keys)
    __mysql__['compat_prior_backup_restore'] = mysql2.get('volume_config') or \
            mysql2.get('snapshot_config') or \
            message.body.get('volume_config') and \
            not mysql2.get('volume')
    new_vol = None
    if __node__['platform'].name == 'idcf':
        # IDCF platform: storage swap is not supported, always promote in place
        new_vol = None
    elif mysql2.get('volume_config'):
        new_vol = storage2.volume(mysql2.get('volume_config'))

    try:
        if new_vol and new_vol.type not in ('eph', 'lvm'):
            # Storage-swap path: take over the old master's volume
            if self.mysql.service.running:
                self.root_client.stop_slave()
                self.mysql.service.stop('Swapping storages to promote slave to master')

            # Unplug slave storage and plug master one
            old_vol = storage2.volume(__mysql__['volume'])
            try:
                if old_vol.type == 'raid':
                    old_vol.detach()
                else:
                    old_vol.umount()
                new_vol.mpoint = __mysql__['storage_dir']
                new_vol.ensure(mount=True)
                # Continue if master storage is a valid MySQL storage
                if self._storage_valid():
                    # Patch configuration files
                    self.mysql.move_mysqldir_to(__mysql__['storage_dir'])
                    self.mysql._init_replication(master=True)
                    # Set read_only option
                    #self.mysql.my_cnf.read_only = False
                    self.mysql.my_cnf.set('mysqld/sync_binlog', '1')
                    self.mysql.my_cnf.set('mysqld/innodb_flush_log_at_trx_commit', '1')
                    self.mysql.my_cnf.delete_options(['mysqld/read_only'])
                    self.mysql.service.start()
                    # Update __mysql__['behavior'] configuration
                    __mysql__.update({
                        'replication_master': 1,
                        'root_password': mysql2['root_password'],
                        'repl_password': mysql2['repl_password'],
                        'stat_password': mysql2['stat_password'],
                        'volume': new_vol
                    })
                    # Old slave volume is no longer needed; failure here is non-fatal
                    try:
                        old_vol.destroy(remove_disks=True)
                    except:
                        LOG.warn('Failed to destroy old MySQL volume %s: %s',
                                old_vol.id, sys.exc_info()[1])
                    # Send message to Scalr
                    msg_data = {
                        'status': 'ok',
                        'db_type': __mysql__['behavior'],
                        __mysql__['behavior']: {}
                    }
                    if __mysql__['compat_prior_backup_restore']:
                        msg_data[__mysql__['behavior']].update({
                            'volume_config': dict(__mysql__['volume'])
                        })
                    else:
                        msg_data[__mysql__['behavior']].update({
                            'volume': dict(__mysql__['volume'])
                        })
                    self.send_message(
                            DbMsrMessages.DBMSR_PROMOTE_TO_MASTER_RESULT,
                            msg_data)
                else:
                    msg = "%s is not a valid MySQL storage" % __mysql__['data_dir']
                    raise HandlerError(msg)
            except:
                # Roll back the storage swap before propagating the error
                self.mysql.service.stop('Detaching new volume')
                new_vol.detach()
                if old_vol.type == 'raid':
                    old_vol.ensure(mount=True)
                else:
                    old_vol.mount()
                raise
        else:
            # In-place promotion: keep current storage, make it writable
            #self.mysql.my_cnf.read_only = False
            self.mysql.my_cnf.delete_options(['mysqld/read_only'])
            #self.mysql.service.restart()
            self.mysql.service.stop()
            self.mysql.service.start()

            self.root_client.stop_slave()
            self.root_client.reset_master()
            self.mysql.flush_logs(__mysql__['data_dir'])

            __mysql__.update({
                'replication_master': 1,
                'root_password': mysql2['root_password'],
                'repl_password': mysql2['repl_password'],
                'stat_password': mysql2['stat_password'],
            })

            # Take a fresh data bundle unless explicitly skipped
            restore = None
            no_data_bundle = mysql2.get('no_data_bundle', False)
            if not no_data_bundle:
                if mysql2.get('backup'):
                    bak = backup.backup(**mysql2.get('backup'))
                else:
                    bak = backup.backup(
                            type='snap_mysql',
                            volume=__mysql__['volume'] ,
                            description=self._data_bundle_description(),
                            tags=self.resource_tags())
                restore = bak.run()

            # Send message to Scalr
            msg_data = dict(
                status="ok",
                db_type = __mysql__['behavior']
            )
            if __mysql__['compat_prior_backup_restore']:
                result = {
                    'volume_config': dict(__mysql__['volume'])
                }
                if restore:
                    result.update({
                        'snapshot_config': dict(restore.snapshot),
                        'log_file': restore.log_file,
                        'log_pos': restore.log_pos
                    })
            else:
                result = {
                    'volume': dict(__mysql__['volume'])
                }
                if restore:
                    result['restore'] = dict(restore)
            msg_data[__mysql__['behavior']] = result

            self.send_message(DbMsrMessages.DBMSR_PROMOTE_TO_MASTER_RESULT, msg_data)

        LOG.info('Promotion completed')
        bus.fire('slave_promote_to_master')

    except (Exception, BaseException), e:
        LOG.exception(e)
        msg_data = dict(
            db_type = __mysql__['behavior'],
            status="error",
            last_error=str(e))
        self.send_message(DbMsrMessages.DBMSR_PROMOTE_TO_MASTER_RESULT, msg_data)

        # Change back read_only option
        self.mysql.my_cnf.read_only = True
        # Start MySQL
        self.mysql.service.start()
def test_latest_backup_dir(self, *args):
    """_latest_backup_dir picks the newest timestamped subdirectory of backup_dir."""
    fixture_dir = os.path.realpath(os.path.join(
        os.path.dirname(__file__), '..', '..',
        'fixtures', 'services', 'mysql', 'dbbackup'))
    bak = backup.backup(type='xtrabackup')
    bak.backup_dir = fixture_dir
    expected = os.path.join(fixture_dir, '2012-09-18_09-06-49')
    assert bak._latest_backup_dir() == expected
def test_run_invalid_backup_type(self, *args):
    """Running an xtrabackup with backup_type=None should fail (expected-exception test)."""
    bad = backup.backup(type='xtrabackup', backup_type=None)
    bad.run()
def on_host_init_response(self, message):
    """
    Check mysql data in host init response

    @type message: scalarizr.messaging.Message
    @param message: HostInitResponse
    """
    LOG.debug("on_host_init_response")
    log = bus.init_op.logger
    log.info('Accept Scalr configuration')
    if not message.body.has_key(__mysql__['behavior']):
        msg = "HostInitResponse message for MySQL behavior " \
                "must have '%s' property" % __mysql__['behavior']
        raise HandlerError(msg)

    # Apply MySQL data from HIR
    md = getattr(message, __mysql__['behavior']).copy()

    # Extract service configuration preset, if Scalr sent one
    if 'preset' in md:
        self.initial_preset = md['preset']
        del md['preset']
        LOG.debug('Scalr sent current preset: %s' % self.initial_preset)

    md['compat_prior_backup_restore'] = False
    if md.get('volume'):
        # New format: Scalr sent ready-made volume/backup/restore configs
        md['volume'] = storage2.volume(md['volume'])
        if 'backup' in md:
            md['backup'] = backup.backup(md['backup'])
        if 'restore' in md:
            md['restore'] = backup.restore(md['restore'])

    else:
        # Compatibility transformation
        # - volume_config -> volume
        # - master n'th start, type=ebs - del snapshot_config
        # - snapshot_config + log_file + log_pos -> restore
        # - create backup on master 1'st start
        md['compat_prior_backup_restore'] = True
        if md.get('volume_config'):
            md['volume'] = storage2.volume(
                    md.pop('volume_config'))
        else:
            md['volume'] = storage2.volume(
                    type=md['snapshot_config']['type'])

        # Initialized persistent disk have latest data.
        # Next statement prevents restore from snapshot
        if md['volume'].device and \
                md['volume'].type in ('ebs', 'csvol', 'cinder', 'raid'):
            md.pop('snapshot_config', None)

        if md.get('snapshot_config'):
            md['restore'] = backup.restore(
                    type='snap_mysql',
                    snapshot=md.pop('snapshot_config'),
                    volume=md['volume'],
                    log_file=md.pop('log_file'),
                    log_pos=md.pop('log_pos'))
        elif int(md['replication_master']) and \
                not md['volume'].device:
            md['backup'] = backup.backup(
                    type='snap_mysql',
                    volume=md['volume'])

    __mysql__.update(md)

    LOG.debug('__mysql__: %s', md)
    LOG.debug('volume in __mysql__: %s', 'volume' in __mysql__)
    LOG.debug('restore in __mysql__: %s', 'restore' in __mysql__)
    LOG.debug('backup in __mysql__: %s', 'backup' in __mysql__)

    __mysql__['volume'].mpoint = __mysql__['storage_dir']
    __mysql__['volume'].tags = self.resource_tags()
    if 'backup' in __mysql__:
        __mysql__['backup'].tags = self.resource_tags()
        __mysql__['backup'].description = self._data_bundle_description()