def updatedb(self):
    """Invalidate the local APT package index and re-fetch it.

    Clears half-downloaded files from /var/lib/apt/lists/partial and
    every cached list file in /var/lib/apt/lists (the 'lock' file is
    kept), then runs ``apt-get update``.
    """
    # Best-effort cleanup of partially downloaded index files.
    try:
        coreutils.clean_dir('/var/lib/apt/lists/partial', recursive=False)
    except OSError:
        pass
    lists_dir = '/var/lib/apt/lists'
    for entry in os.listdir(lists_dir):
        if entry == 'lock':
            # Never delete apt's lock file.
            continue
        full_path = os.path.join(lists_dir, entry)
        if os.path.isfile(full_path):
            os.remove(full_path)
    self.apt_get_command('update')
def updatedb(self):
    """Refresh the APT package index from scratch.

    Removes cached list files under /var/lib/apt/lists (keeping the
    'lock' file) and then runs ``apt-get update``.
    """
    # Removal of partial downloads is best-effort; the directory may
    # not exist or may be busy.
    try:
        coreutils.clean_dir('/var/lib/apt/lists/partial', recursive=False)
    except OSError:
        pass
    path = '/var/lib/apt/lists'
    for name in os.listdir(path):
        filename = os.path.join(path, name)
        # Keep apt's lock file; remove every other regular file.
        if name != 'lock' and os.path.isfile(filename):
            os.remove(filename)
    self.apt_get_command('update')
def updatedb(self, **kwds):
    """Rebuild the APT package index.

    Removes cached index files from /var/lib/apt/lists (keeping the
    'lock' file) and runs ``apt-get update``.

    :keyword apt_repository: when given, only the repository's source
        list ``sources.list.d/<apt_repository>.list`` is refreshed,
        using an apt config that preserves the update-success-stamp.
    """
    try:
        coreutils.clean_dir('/var/lib/apt/lists/partial', recursive=False)
    except OSError:
        pass
    path = '/var/lib/apt/lists'
    for name in os.listdir(path):
        filename = os.path.join(path, name)
        # Keep apt's lock file; remove every other regular file.
        if name != 'lock' and os.path.isfile(filename):
            os.remove(filename)
    cmd = ''
    if kwds.get('apt_repository'):
        # --no-list-cleanup keeps other repositories' cached lists
        # intact while only the named repository's list is refreshed.
        cmd += ('--no-list-cleanup '
                '-c {0}/updclient/apt-preserve-update-success-stamp.conf '
                '-o Dir::Etc::sourcelist=sources.list.d/{1}.list '
                '-o Dir::Etc::sourceparts=- ').format(
            bus.share_path, kwds['apt_repository'])
    cmd += 'update'
    try:
        self.apt_get_command(cmd)
    except linux.LinuxError as e:
        # `as` form is valid on Python 2.6+ and forward-compatible,
        # unlike the legacy `except X, e` syntax it replaces.
        # Exit status 100 (generic apt-get failure) is deliberately
        # tolerated here; any other failure propagates.
        if e.returncode != 100:
            raise
def updatedb(self, **kwds):
    """Refresh the APT package index.

    Cleans cached index files from /var/lib/apt/lists (keeping the
    'lock' file), then runs ``apt-get update``.

    :keyword apt_repository: if set, restrict the update to
        ``sources.list.d/<apt_repository>.list`` using a configuration
        that preserves the update-success-stamp file.
    """
    try:
        coreutils.clean_dir('/var/lib/apt/lists/partial', recursive=False)
    except OSError:
        pass
    path = '/var/lib/apt/lists'
    for name in os.listdir(path):
        filename = os.path.join(path, name)
        # Keep apt's lock file; remove every other regular file.
        if name != 'lock' and os.path.isfile(filename):
            os.remove(filename)
    cmd = ''
    if kwds.get('apt_repository'):
        # --no-list-cleanup keeps other repositories' cached lists
        # intact while only the named repository's list is refreshed.
        cmd += ('--no-list-cleanup '
                '-c {0}/updclient/apt-preserve-update-success-stamp.conf '
                '-o Dir::Etc::sourcelist=sources.list.d/{1}.list '
                '-o Dir::Etc::sourceparts=- '
                ).format(bus.share_path, kwds['apt_repository'])
    cmd += 'update'
    try:
        self.apt_get_command(cmd)
    except linux.LinuxError, e:
        # Exit status 100 (generic apt-get failure) is deliberately
        # tolerated; any other failure propagates to the caller.
        if e.returncode != 100:
            raise
def _run(self):
    """Take a MySQL backup with xtrabackup (innobackupex).

    Optionally provisions and mounts a dedicated volume at
    ``self.backup_dir``, runs a full or incremental backup, and returns
    a ``backup.restore`` descriptor carrying the binlog position, the
    LSN range and (when a volume is used) a snapshot of it.
    """
    self._check_backup_type()
    if self.volume:
        # Materialize the volume config and mount it at backup_dir.
        self.volume = storage2.volume(self.volume)
        if self.tags:
            self.volume.tags = self.tags
        self.volume.mpoint = self.backup_dir
        self.volume.ensure(mount=True, mkfs=True)
    elif not os.path.exists(self.backup_dir):
        os.makedirs(self.backup_dir)
    kwds = {}
    if self.backup_type == 'incremental':
        # Incremental starts from the caller-provided LSN, or else from
        # the last checkpoint recorded by the previous backup.
        from_lsn = self.from_lsn
        if not from_lsn:
            checkpoints = self._checkpoints()
            from_lsn = checkpoints['to_lsn']
        kwds.update({
            'incremental': True,
            'incremental_lsn': from_lsn
        })
    elif 'full' == self.backup_type and self.volume:
        # A full backup onto a dedicated volume starts from an empty dir.
        coreutils.clean_dir(self.backup_dir)
    exc_info = None
    try:
        LOG.info('Creating %s xtrabackup', self.backup_type)
        innobackupex(self.backup_dir,
                     user=__mysql__['root_user'],
                     password=__mysql__['root_password'],
                     **kwds)
        log_file, log_pos = self._binlog_info()
        chkpoints = self._checkpoints()
        to_lsn = chkpoints['to_lsn']
        from_lsn = chkpoints['from_lsn']
        snapshot = None
    except:
        # Remember the failure so the volume gets detached first;
        # re-raised below with the original traceback preserved.
        exc_info = sys.exc_info()
    finally:
        if self.volume:
            try:
                self.volume.detach()
            except:
                msg = 'Failed to detach backup volume: %s'
                LOG.warn(msg, sys.exc_info()[1])
    if exc_info:
        # Python 2 three-argument raise keeps the original traceback.
        raise exc_info[0], exc_info[1], exc_info[2]
    if self.volume:
        snapshot = self.volume.snapshot(
            self.description or 'MySQL xtrabackup',
            self.tags)
    # NOTE(review): volume.clone() is called unconditionally; this
    # presumes self.volume is always truthy here -- confirm behavior
    # for the directory-only (no volume) code path.
    return backup.restore(
        type='xtrabackup',
        log_file=log_file,
        log_pos=log_pos,
        from_lsn=from_lsn,
        to_lsn=to_lsn,
        backup_type=self.backup_type,
        backup_dir=self.backup_dir,
        volume=self.volume.clone(),
        snapshot=snapshot)
def _run(self):
    """Restore MySQL data from an xtrabackup stored in cloud storage.

    Walks the manifest chain back to the base (full) backup, downloads
    and prepares the base, applies each incremental in order, then runs
    the final prepare, fixes ownership and starts MySQL. On a
    replication master the manifest's binlog position is refreshed,
    since the master resets its binary logs on start.
    """
    # Apply resource's meta
    mnf = cloudfs.Manifest(cloudfs_path=self.cloudfs_source)
    bak = backup.restore(**mnf.meta)
    incrementals = []
    if bak.backup_type == "incremental":
        incrementals = [bak]
    # Follow prev_cloudfs_source links back to the base full backup,
    # collecting incrementals oldest-first.
    while bak.prev_cloudfs_source:
        tmpmnf = cloudfs.Manifest(cloudfs_path=bak.prev_cloudfs_source)
        bak = backup.restore(**tmpmnf.meta)
        if bak.backup_type == "incremental":
            incrementals.insert(0, bak)
    self.incrementals = incrementals
    # The binlog position comes from the newest backup in the chain.
    if self.incrementals:
        self.log_file = self.incrementals[-1].log_file
        self.log_pos = self.incrementals[-1].log_pos
    else:
        self.log_file = bak.log_file
        self.log_pos = bak.log_pos
    coreutils.clean_dir(__mysql__["data_dir"])
    LOG.info("Downloading the base backup (LSN: 0..%s)", bak.to_lsn)
    trn = cloudfs.LargeTransfer(
        bak.cloudfs_source,
        __mysql__["data_dir"],
        streamer=xbstream.args(extract=True, directory=__mysql__["data_dir"]),
    )
    trn.run()
    LOG.info("Preparing the base backup")
    # --redo-only keeps the base open for applying incrementals.
    innobackupex(
        __mysql__["data_dir"],
        apply_log=True,
        redo_only=True,
        ibbackup="xtrabackup",
        user=__mysql__["root_user"],
        password=__mysql__["root_password"],
    )
    if incrementals:
        inc_dir = os.path.join(__mysql__["tmp_dir"], "xtrabackup-restore-inc")
        i = 0
        for inc in incrementals:
            try:
                os.makedirs(inc_dir)
                inc = backup.restore(inc)
                LOG.info("Downloading incremental backup #%d (LSN: %s..%s)", i,
                         inc.from_lsn, inc.to_lsn)
                trn = cloudfs.LargeTransfer(
                    inc.cloudfs_source,
                    inc_dir,
                    streamer=xbstream.args(extract=True, directory=inc_dir)
                )
                trn.run()  # todo: Largetransfer should support custom decompressor proc
                LOG.info("Preparing incremental backup #%d", i)
                innobackupex(
                    __mysql__["data_dir"],
                    apply_log=True,
                    redo_only=True,
                    incremental_dir=inc_dir,
                    ibbackup="xtrabackup",
                    user=__mysql__["root_user"],
                    password=__mysql__["root_password"],
                )
                i += 1
            finally:
                # Each incremental is staged in the same directory, so it
                # must be removed before the next iteration.
                coreutils.remove(inc_dir)
    LOG.info("Preparing the full backup")
    # Final prepare (without --redo-only) rolls back uncommitted txns.
    innobackupex(
        __mysql__["data_dir"],
        apply_log=True,
        user=__mysql__["root_user"],
        password=__mysql__["root_password"]
    )
    coreutils.chown_r(__mysql__["data_dir"],
                      "mysql", "mysql")
    self._mysql_init.start()
    if int(__mysql__["replication_master"]):
        LOG.info("Master will reset it's binary logs, "
                 "so updating binary log position in backup manifest")
        log_file, log_pos = self._client().master_status()
        meta = mnf.meta
        meta.update({"log_file": log_file, "log_pos": log_pos})
        mnf.meta = meta
        mnf.save()
def _run(self):
    """Restore MySQL data from an xtrabackup stored in cloud storage.

    Variant using ``largetransfer.Download`` piped through ``xbstream``.
    Walks the manifest chain back to the base (full) backup, downloads
    and prepares the base, applies each incremental in order, then runs
    the final prepare, fixes ownership and starts MySQL. On a
    replication master the manifest's binlog position is refreshed,
    since the master resets its binary logs on start.
    """
    # Apply resource's meta
    mnf = cloudfs.Manifest(cloudfs_path=self.cloudfs_source)
    bak = backup.restore(**mnf.meta)
    incrementals = []
    if bak.backup_type == 'incremental':
        incrementals = [bak]
    # Follow prev_cloudfs_source links back to the base full backup,
    # collecting incrementals oldest-first.
    while bak.prev_cloudfs_source:
        tmpmnf = cloudfs.Manifest(cloudfs_path=bak.prev_cloudfs_source)
        bak = backup.restore(**tmpmnf.meta)
        if bak.backup_type == 'incremental':
            incrementals.insert(0, bak)
    self.incrementals = incrementals
    # The binlog position comes from the newest backup in the chain.
    if self.incrementals:
        self.log_file = self.incrementals[-1].log_file
        self.log_pos = self.incrementals[-1].log_pos
    else:
        self.log_file = bak.log_file
        self.log_pos = bak.log_pos
    coreutils.clean_dir(__mysql__['data_dir'])
    LOG.info('Downloading the base backup (LSN: 0..%s)', bak.to_lsn)
    # Stream the download directly into xbstream for extraction.
    trn = largetransfer.Download(bak.cloudfs_source)
    trn.apply_async()
    streamer = xbstream.args(
        extract=True,
        directory=__mysql__['data_dir'])
    streamer.popen(stdin=trn.output)
    trn.join()
    LOG.info('Preparing the base backup')
    # --redo-only keeps the base open for applying incrementals.
    innobackupex(__mysql__['data_dir'],
                 apply_log=True,
                 redo_only=True,
                 ibbackup='xtrabackup',
                 user=__mysql__['root_user'],
                 password=__mysql__['root_password'])
    if incrementals:
        inc_dir = os.path.join(__mysql__['tmp_dir'],
                               'xtrabackup-restore-inc')
        i = 0
        for inc in incrementals:
            try:
                os.makedirs(inc_dir)
                inc = backup.restore(inc)
                LOG.info('Downloading incremental backup #%d (LSN: %s..%s)',
                         i, inc.from_lsn, inc.to_lsn)
                trn = largetransfer.Download(inc.cloudfs_source)
                trn.apply_async()
                streamer = xbstream.args(
                    extract=True,
                    directory=inc_dir)
                streamer.popen(stdin=trn.output)
                trn.join()
                LOG.info('Preparing incremental backup #%d', i)
                innobackupex(__mysql__['data_dir'],
                             apply_log=True,
                             redo_only=True,
                             incremental_dir=inc_dir,
                             ibbackup='xtrabackup',
                             user=__mysql__['root_user'],
                             password=__mysql__['root_password'])
                i += 1
            finally:
                # Each incremental is staged in the same directory, so it
                # must be removed before the next iteration.
                coreutils.remove(inc_dir)
    LOG.info('Preparing the full backup')
    # Final prepare (without --redo-only) rolls back uncommitted txns.
    innobackupex(__mysql__['data_dir'],
                 apply_log=True,
                 user=__mysql__['root_user'],
                 password=__mysql__['root_password'])
    coreutils.chown_r(__mysql__['data_dir'],
                      'mysql', 'mysql')
    self._mysql_init.start()
    if int(__mysql__['replication_master']):
        LOG.info("Master will reset it's binary logs, "
                 "so updating binary log position in backup manifest")
        log_file, log_pos = self._client().master_status()
        meta = mnf.meta
        meta.update({'log_file': log_file, 'log_pos': log_pos})
        mnf.meta = meta
        mnf.save()
def _init_master(self, message):
    """
    Initialize MySQL master

    @type message: scalarizr.messaging.Message
    @param message: HostUp message

    Provisions storage (or restores from a snapshot), relocates the
    MySQL data directory onto it, bootstraps the database on first
    init, configures replication as master, optionally takes a data
    bundle, and finally fills the HostUp message with connection and
    replication metadata.
    """
    LOG.info("Initializing MySQL master")
    log = bus.init_op.logger
    log.info('Create storage')
    if 'restore' in __mysql__ and \
            __mysql__['restore'].type == 'snap_mysql':
        # Storage is recreated from a snapshot by the restore itself.
        __mysql__['restore'].run()
    else:
        if __node__['platform'].name == 'idcf':
            if __mysql__['volume'].id:
                LOG.info('Cloning volume to workaround reattachment limitations of IDCF')
                __mysql__['volume'].snap = __mysql__['volume'].snapshot()
        __mysql__['volume'].ensure(mount=True, mkfs=True)
        LOG.debug('MySQL volume config after ensure: %s',
                  dict(__mysql__['volume']))
    # Distribution-default datadir is emptied before the data dir is
    # moved onto the dedicated storage.
    coreutils.clean_dir(__mysql__['defaults']['datadir'])
    self.mysql.flush_logs(__mysql__['data_dir'])
    self.mysql.move_mysqldir_to(__mysql__['storage_dir'])
    self._change_selinux_ctx()
    # storage_valid distinguishes first init (empty storage) from a
    # re-init on a volume that already holds a MySQL directory layout.
    storage_valid = self._storage_valid()
    user_creds = self.get_user_creds()
    self._fix_percona_debian_cnf()
    #datadir = mysql2_svc.my_print_defaults('mysqld').get('datadir', __mysql__['defaults']['datadir'])
    #if not storage_valid and datadir.find(__mysql__['data_dir']) == 0:
    #    # When role was created from another mysql role it contains modified my.cnf settings
    #    #self.mysql.my_cnf.datadir = '/var/lib/mysql'
    #    self.mysql.my_cnf.delete_options(['mysqld/log_bin'])
    if not storage_valid:
        '''
        if linux.os['family'] == 'RedHat':
            try:
                # Check if selinux enabled
                selinuxenabled_bin = software.which('selinuxenabled')
                if selinuxenabled_bin:
                    se_enabled = not system2((selinuxenabled_bin, ), raise_exc=False)[2]
                    if se_enabled:
                        # Set selinux context for new mysql datadir
                        semanage = mysql_svc.get_semanage()
                        linux.system('%s fcontext -a -t mysqld_db_t "%s(/.*)?"'
                                % (semanage, __mysql__['storage_dir']), shell=True)
                        # Restore selinux context
                        restorecon = software.which('restorecon')
                        linux.system('%s -R -v %s' % (restorecon, __mysql__['storage_dir']), shell=True)
            except:
                LOG.debug('Selinux context setup failed', exc_info=sys.exc_info())
        '''
        # First init: drop any inherited binlog option and bootstrap
        # the system tables in the new data dir.
        self.mysql.my_cnf.delete_options(['mysqld/log_bin'])
        linux.system(['mysql_install_db', '--user=mysql',
                      '--datadir=%s' % __mysql__['data_dir']])
    # Patch configuration
    options = {
        'bind-address': '0.0.0.0',
        'datadir': __mysql__['data_dir'],
        'log_bin': os.path.join(__mysql__['binlog_dir'], 'binlog'),
        'log-bin-index': os.path.join(__mysql__['binlog_dir'], 'binlog.index'),  # MariaDB
        'sync_binlog': '1',
        'innodb_flush_log_at_trx_commit': '1',
        'expire_logs_days': '10'
    }
    for key, value in options.items():
        self.mysql.my_cnf.set('mysqld/' + key, value)
    if not storage_valid:
        if linux.os.debian_family and os.path.exists(__mysql__['debian.cnf']):
            # Re-grant the debian-sys-maint account with the password
            # from debian.cnf so Debian's maintenance scripts work.
            self.mysql.service.start()
            debian_cnf = metaconf.Configuration('mysql')
            debian_cnf.read(__mysql__['debian.cnf'])
            sql = ("GRANT ALL PRIVILEGES ON *.* "
                   "TO 'debian-sys-maint'@'localhost' "
                   "IDENTIFIED BY '{0}'").format(debian_cnf.get('client/password'))
            linux.system(['mysql', '-u', 'root', '-e', sql])
            self.mysql.service.stop()
        coreutils.chown_r(__mysql__['data_dir'], 'mysql', 'mysql')
    if 'restore' in __mysql__ and \
            __mysql__['restore'].type == 'xtrabackup':
        # XXX: when restoring data bundle on ephemeral storage, data dir should by empty
        # but move_mysqldir_to call required to set several options in my.cnf
        coreutils.clean_dir(__mysql__['data_dir'])
    #self._change_selinux_ctx()
    log.info('Patch my.cnf configuration file')
    # Init replication
    self.mysql._init_replication(master=True)
    if 'restore' in __mysql__ and \
            __mysql__['restore'].type == 'xtrabackup':
        __mysql__['restore'].run()
    # If It's 1st init of mysql master storage
    if not storage_valid:
        if os.path.exists(__mysql__['debian.cnf']):
            log.info("Copying debian.cnf file to mysql storage")
            shutil.copy(__mysql__['debian.cnf'], __mysql__['storage_dir'])
    # If volume has mysql storage directory structure (N-th init)
    else:
        log.info('InnoDB recovery')
        self._copy_debian_cnf_back()
        if 'restore' in __mysql__ and __mysql__['restore'].type != 'xtrabackup':
            self._innodb_recovery()
    self.mysql.service.start()
    log.info('Create Scalr users')
    # Check and create mysql system users
    self.create_users(**user_creds)
    log.info('Create data bundle')
    if 'backup' in __mysql__:
        __mysql__['restore'] = __mysql__['backup'].run()
    # Update HostUp message
    log.info('Collect HostUp data')
    md = dict(
        replication_master=__mysql__['replication_master'],
        root_password=__mysql__['root_password'],
        repl_password=__mysql__['repl_password'],
        stat_password=__mysql__['stat_password'],
        master_password=__mysql__['master_password']
    )
    if __mysql__['compat_prior_backup_restore']:
        # Legacy message layout expected by older Scalr versions.
        if 'restore' in __mysql__:
            md.update(dict(
                log_file=__mysql__['restore'].log_file,
                log_pos=__mysql__['restore'].log_pos,
                snapshot_config=dict(__mysql__['restore'].snapshot)))
        elif 'log_file' in __mysql__:
            md.update(dict(
                log_file=__mysql__['log_file'],
                log_pos=__mysql__['log_pos']))
        md.update(dict(
            volume_config=dict(__mysql__['volume'])))
    else:
        md.update(dict(
            volume=dict(__mysql__['volume'])
        ))
        for key in ('backup', 'restore'):
            if key in __mysql__:
                md[key] = dict(__mysql__[key])
    message.db_type = __mysql__['behavior']
    setattr(message, __mysql__['behavior'], md)