def _change_selinux_ctx(self):
    """Relabel the new MySQL data directory for SELinux on RHEL.

    Runs ``chcon -R`` with the ``mysqld_db_t`` type on the parent of the
    configured storage dir. Best-effort: errors from chcon are ignored
    (``raise_exc=False``). No-op on non-RHEL systems or when chcon is absent.
    """
    chcon_paths = software.whereis('chcon')
    if not (disttool.is_rhel() and chcon_paths):
        return
    LOG.debug('Changing SELinux file security context for new mysql datadir')
    target_dir = os.path.dirname(__mysql__['storage_dir'])
    chcon_cmd = (chcon_paths[0], '-R',
                 '-u', 'system_u',
                 '-r', 'object_r',
                 '-t', 'mysqld_db_t',
                 target_dir)
    system2(chcon_cmd, raise_exc=False)
def restore(self, queue, volume, download_finished):
    """Restore a filesystem snapshot onto *volume*.

    Mounts the volume on a temporary mountpoint, then streams downloaded
    chunks through a decompressor (pigz when available, else gzip) into
    ``tar`` which unpacks them in place.

    :param queue: queue of downloaded chunk files consumed by concat_chunks
    :param volume: volume object exposing mount(); target of the restore
    :param download_finished: event/flag signalling no more chunks will arrive
    :raises Exception: when either the decompressor or tar exits non-zero
    """
    tmp_mpoint = mkdtemp()
    volume.mount(tmp_mpoint)
    try:
        pigz_bins = whereis('pigz')
        # CONSISTENCY FIX: use the resolved pigz path (as the snapshot
        # creation code does) instead of relying on 'pigz' being on PATH.
        cmd1 = (pigz_bins[0] if pigz_bins else 'gzip', '-d')
        cmd2 = ('tar', 'px', '-C', tmp_mpoint)
        compressor = subprocess.Popen(cmd1, stdin=subprocess.PIPE,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE, close_fds=True)
        # tar reads directly from the decompressor's stdout.
        tar = subprocess.Popen(cmd2, stdin=compressor.stdout,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE, close_fds=True)
        self.concat_chunks(queue, download_finished, compressor.stdin)
        # Closing stdin lets the decompressor (and then tar) see EOF and exit.
        compressor.stdin.close()
        r_code = compressor.wait()
        if r_code:
            raise Exception('Archiver finished with return code %s' % r_code)
        r_code = tar.wait()
        if r_code:
            raise Exception('Tar finished with return code %s' % r_code)
    finally:
        mount.umount(tmp_mpoint)
def _create(self, volume, snapshot, snap_lv, tranzit_path, complete_cb):
    """Create an eph-storage snapshot from LVM snapshot volume *snap_lv*.

    Pipeline: mount snap_lv -> tar the tree -> compress (pigz or gzip)
    -> split into chunks under *tranzit_path* (background thread)
    -> upload chunks to the snap backend (two background threads).
    Updates self._state_map[snapshot.id] to COMPLETED or FAILED.

    NOTE(review): *complete_cb* is not referenced in this body — presumably
    invoked by the caller or dead; confirm against call sites.
    """
    try:
        chunk_prefix = '%s.data' % snapshot.id
        snapshot.path = None
        snap_mpoint = mkdtemp()
        try:
            opts = []
            if volume.fstype == 'xfs':
                # XFS refuses to mount a snapshot with a duplicate UUID.
                opts += ['-o', 'nouuid,ro']
            mount.mount(snap_lv, snap_mpoint, *opts)

            tar_cmd = ['tar', 'cp', '-C', snap_mpoint, '.']
            pigz_bins = whereis('pigz')
            compress_cmd = [pigz_bins[0] if pigz_bins else 'gzip', '-5']

            self._logger.debug("Creating and compressing snapshot data.")
            tar = subprocess.Popen(tar_cmd, stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE, close_fds=True)
            compress = subprocess.Popen(compress_cmd, stdin=tar.stdout,
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                    close_fds=True)
            tar.stdout.close()  # Allow tar to receive a SIGPIPE if compress exits.

            # Splitter consumes the compressed stream into chunk files.
            split = threading.Thread(target=self._split, name='split',
                    args=(compress.stdout, tranzit_path, chunk_prefix,
                          snapshot))
            split.start()

            # Two concurrent uploaders drain the chunk files to the backend.
            uploaders = []
            for i in range(2):
                uploader = threading.Thread(name="Uploader-%s" % i,
                        target=self._uploader,
                        args=(volume.snap_backend['path'], snapshot))
                self._logger.debug("Starting uploader '%s'",
                        uploader.getName())
                uploader.start()
                uploaders.append(uploader)

            self._logger.debug('uploaders started. waiting compress')
            compress.wait()
            self._logger.debug('compress completed (code: %s). waiting split',
                    compress.returncode)
            if compress.returncode:
                raise StorageError('Compress process terminated with exit code %s. <err>: %s' % (
                        compress.returncode, compress.stderr.read()))
            split.join()
            self._logger.debug('split completed. waiting uploaders')
            for uploader in uploaders:
                uploader.join()
            self._logger.debug('uploaders completed')

            # Worker threads report failures through _inner_exc_info;
            # re-raise here with the original traceback (Python 2 syntax).
            if self._inner_exc_info:
                t, e, s = self._inner_exc_info
                raise t, e, s
        finally:
            self._return_ev.set()
            mount.umount(snap_mpoint)
            os.rmdir(snap_mpoint)
            self._lvm.remove_lv(snap_lv)
            self._inner_exc_info = None
        self._state_map[snapshot.id] = Snapshot.COMPLETED
    except (Exception, BaseException), e:
        self._state_map[snapshot.id] = Snapshot.FAILED
        self._logger.exception('Snapshot creation failed. %s' % e)
def __init__(self):
    """Locate the mysql-proxy binary, probe its version and set sock param.

    :raises initdv2.InitdError: when mysql-proxy is not installed
    """
    bins = software.whereis('mysql-proxy')
    if not bins:
        raise initdv2.InitdError("Mysql-proxy binary not found. Check your installation")
    self.bin_path = bins[0]
    # First line of `mysql-proxy -V` carries the version, e.g. "mysql-proxy 0.8.2 ..."
    first_line = system2((self.bin_path, '-V'))[0].splitlines()[0]
    version_tokens = first_line.split()[1].split('.')
    self.version = tuple(int(token) for token in version_tokens)
    self.sock = initdv2.SockParam(4040)
def __init__(self):
    """Initialize the mysql-proxy init script wrapper.

    Resolves the binary path, parses the version from ``mysql-proxy -V``
    and configures the control socket on port 4040.

    :raises initdv2.InitdError: when the mysql-proxy binary is missing
    """
    res = software.whereis('mysql-proxy')
    if res:
        self.bin_path = res[0]
    else:
        raise initdv2.InitdError(
            "Mysql-proxy binary not found. Check your installation")
    version_line = system2((self.bin_path, '-V'))[0].splitlines()[0]
    # Second whitespace-separated token is the dotted version string.
    self.version = tuple(map(int, version_line.split()[1].split('.')))
    self.sock = initdv2.SockParam(4040)
def get_mx_records(email):
    """Return the de-duplicated MX host names for *email*'s domain.

    Shells out to the ``host`` utility (``host -t mx <domain>``) and takes
    the last token of each output line, stripping the trailing dot of
    fully-qualified names. Order of the result is not guaranteed (the
    original implementation used an unordered dict for de-duplication).

    :param email: address whose domain part (after '@') is queried
    :return: list of unique MX host names
    """
    domain = email.split('@')[-1]
    out = system2('%s -t mx %s' % (whereis('host')[0], domain), shell=True)[0]
    mxs = [mx.split()[-1][:-1] if mx.endswith('.') else mx
           for mx in out.split('\n')]
    if '' in mxs:
        mxs.remove('')
    # IDIOM: replaced dead commented-out code and a hand-rolled
    # dict-based de-dupe with a set (same unordered-uniqueness contract).
    return list(set(mxs))
def restore(self, queue, volume, download_finished):
    """Restore a raw-device snapshot by decompressing chunks onto the device.

    Streams downloaded chunks through pigz/gzip -d, writing the decompressed
    output directly to ``volume.device``.

    :param queue: queue of downloaded chunk files consumed by concat_chunks
    :param volume: volume object whose .device is the restore target
    :param download_finished: event/flag signalling no more chunks will arrive
    :raises StorageError: when the decompressor exits non-zero
    """
    pigz_bins = whereis('pigz')
    # CONSISTENCY FIX: use the resolved pigz path (as the snapshot creation
    # code does) instead of relying on 'pigz' being on PATH.
    cmd = (pigz_bins[0] if pigz_bins else 'gzip', '-d')
    device_fp = open(volume.device, 'w')
    try:
        compressor = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                                      stdout=device_fp,
                                      stderr=subprocess.PIPE, close_fds=True)
        self.concat_chunks(queue, download_finished, compressor.stdin)
        # EOF on stdin lets the decompressor flush and exit.
        compressor.stdin.close()
        ret_code = compressor.wait()
    finally:
        # BUGFIX: the original leaked the device file handle.
        device_fp.close()
    if ret_code:
        raise StorageError('Snapshot decompression failed.')
def get_mx_records(email):
    """Return unique MX host names for the domain part of *email*.

    Runs ``host -t mx <domain>`` and extracts the last token of each line,
    stripping a trailing dot from fully-qualified names. The returned list
    has no guaranteed order (matching the original dict-keys behavior).

    :param email: address whose domain (text after the final '@') is queried
    :return: list of unique MX host names
    """
    out = system2('%s -t mx %s' % (whereis('host')[0], email.split('@')[-1]),
                  shell=True)[0]
    mxs = [
        mx.split()[-1][:-1] if mx.endswith('.') else mx
        for mx in out.split('\n')
    ]
    if '' in mxs:
        mxs.remove('')
    # IDIOM: dead commented-out code and the manual dict-based de-dupe
    # replaced with a set; the unordered-uniqueness contract is unchanged.
    return list(set(mxs))
def __init__(self):
    """RabbitMQ handler bootstrap.

    Verifies rabbitmqctl is installed, wires bus events and service
    lookups, reloads configuration, and on EC2 disables reporting the
    public DNS name as hostname.

    :raises HandlerError: when the rabbitmqctl binary is missing
    """
    if not software.whereis('rabbitmqctl'):
        raise HandlerError("Rabbitmqctl binary was not found. Check your installation.")

    bus.on("init", self.on_init)
    self._logger = logging.getLogger(__name__)
    self.rabbitmq = rabbitmq_svc.rabbitmq
    self.service = initdv2.lookup(BuiltinBehaviours.RABBITMQ)
    self._service_name = BEHAVIOUR
    self.on_reload()

    if self.platform.name == 'ec2':
        # Do not advertise the EC2 public DNS name as the node hostname.
        ec2_updates = dict(hostname_as_pubdns='0')
        self.cnf.update_ini('ec2', {'ec2': ec2_updates}, private=False)
def start(self, *params, **keys):
    """Launch the configured executable with prepared arguments.

    Resolves a bare executable name via ``software.whereis``, forces a
    sane LANG in the child environment, then either waits for the process
    (returning a dict with return_code and captured stdout/stderr) or
    returns the live Popen object when ``wait_for_process`` is false.

    :raises ProcessError: when the exit code is not acceptable and
        ``raise_exc`` is set
    """
    try:
        if not self._checked:
            self.check()
        if len(keys) == 1 and 'kwargs' in keys:
            keys = keys['kwargs']
        # Set locale
        if not 'env' in self.subprocess_kwds:
            # BUGFIX: copy os.environ instead of aliasing it, so the LANG
            # tweak below cannot mutate this process' real environment.
            self.subprocess_kwds['env'] = dict(os.environ)
        # Set en_US locale or C
        if not self.subprocess_kwds['env'].get('LANG'):
            default_locale = locale.getdefaultlocale()
            if default_locale == ('en_US', 'UTF-8'):
                self.subprocess_kwds['env']['LANG'] = 'en_US'
            else:
                self.subprocess_kwds['env']['LANG'] = 'C'
        cmd_args = self.prepare_args(*params, **keys)
        if not self.subprocess_kwds.get(
                'shell') and not self.executable.startswith('/'):
            # TODO: Raise error if not found
            self.executable = software.whereis(self.executable)[0]
        final_args = (self.executable, ) + tuple(cmd_args)
        self._check_streams()
        read_stdout = self.stdout == subprocess.PIPE
        read_stderr = self.stderr == subprocess.PIPE
        self.popen = subprocess.Popen(final_args, **self.subprocess_kwds)
        if self.wait_for_process:
            rcode = self.wait(self.popen, self.timeout)
            if rcode not in self.acceptable_codes and self.raise_exc:
                raise ProcessError('Process %s finished with code %s' %
                                   (self.executable, rcode))
            ret = dict(return_code=rcode)
            if read_stdout:
                ret['stdout'] = self.popen.stdout.read()
                self.logger.debug('Stdout: %s' % ret['stdout'])
            if read_stderr:
                ret['stderr'] = self.popen.stderr.read()
                self.logger.debug('Stderr: %s' % ret['stderr'])
            return ret
        else:
            return self.popen
    finally:
        # BUGFIX: the original iterated the single string
        # 'stderr, stdout, stdin' character by character, so the stream
        # kwds were never actually popped between invocations.
        for stream in ('stderr', 'stdout', 'stdin'):
            self.subprocess_kwds.pop(stream, None)
def check(self):
    """Ensure self.executable is present and runnable.

    Resolves a bare name on PATH via ``software.whereis``. When the
    executable is missing, installs ``self.package`` if one is declared,
    otherwise raises.

    :raises linux.LinuxError: when the executable is absent and no
        package attribute is set
    """
    if self.executable.startswith('/'):
        exec_path = self.executable
    else:
        candidates = software.whereis(self.executable)
        exec_path = candidates[0] if candidates else None

    if exec_path and os.access(exec_path, os.X_OK):
        return

    if self.package:
        pkgmgr.installed(self.package)
    else:
        msg = 'Executable %s is not found, you should either ' \
              'specify `package` attribute or install the software ' \
              'manually' % self.executable
        raise linux.LinuxError(msg)
def __init__(self):
    """Bootstrap the RabbitMQ handler.

    Checks rabbitmqctl availability, registers bus hooks and the init
    service, reloads config, and on EC2 clears the hostname-as-public-DNS
    flag in the node state.

    :raises HandlerError: when the rabbitmqctl binary is missing
    """
    if not software.whereis('rabbitmqctl'):
        raise HandlerError("Rabbitmqctl binary was not found. Check your installation.")

    bus.on("init", self.on_init)
    self._logger = logging.getLogger(__name__)
    self.rabbitmq = rabbitmq_svc.rabbitmq
    self.service = initdv2.lookup(BuiltinBehaviours.RABBITMQ)
    self._service_name = BEHAVIOUR
    self.on_reload()

    if self.platform.name == 'ec2':
        self._logger.debug('Setting hostname_as_pubdns to 0')
        __node__['ec2']['hostname_as_pubdns'] = 0
def start(self, *params, **keys):
    """Launch the configured executable with prepared arguments.

    Resolves a bare executable name via ``software.whereis``, forces a
    sane LANG in the child environment, then either waits for completion
    (returning a dict with return_code and captured stdout/stderr) or
    returns the live Popen object when ``wait_for_process`` is false.

    :raises ProcessError: when the exit code is not acceptable and
        ``raise_exc`` is set
    """
    try:
        if not self._checked:
            self.check()
        if len(keys) == 1 and 'kwargs' in keys:
            keys = keys['kwargs']
        # Set locale
        if not 'env' in self.subprocess_kwds:
            # BUGFIX: copy os.environ instead of aliasing it, so the LANG
            # tweak below cannot mutate this process' real environment.
            self.subprocess_kwds['env'] = dict(os.environ)
        # Set en_US locale or C
        if not self.subprocess_kwds['env'].get('LANG'):
            default_locale = locale.getdefaultlocale()
            if default_locale == ('en_US', 'UTF-8'):
                self.subprocess_kwds['env']['LANG'] = 'en_US'
            else:
                self.subprocess_kwds['env']['LANG'] = 'C'
        cmd_args = self.prepare_args(*params, **keys)
        if not self.subprocess_kwds.get('shell') and not self.executable.startswith('/'):
            # TODO: Raise error if not found
            self.executable = software.whereis(self.executable)[0]
        final_args = (self.executable,) + tuple(cmd_args)
        self._check_streams()
        read_stdout = self.stdout == subprocess.PIPE
        read_stderr = self.stderr == subprocess.PIPE
        self.popen = subprocess.Popen(final_args, **self.subprocess_kwds)
        if self.wait_for_process:
            rcode = self.wait(self.popen, self.timeout)
            if rcode not in self.acceptable_codes and self.raise_exc:
                raise ProcessError('Process %s finished with code %s' % (self.executable, rcode))
            ret = dict(return_code=rcode)
            if read_stdout:
                ret['stdout'] = self.popen.stdout.read()
                self.logger.debug('Stdout: %s' % ret['stdout'])
            if read_stderr:
                ret['stderr'] = self.popen.stderr.read()
                self.logger.debug('Stderr: %s' % ret['stderr'])
            return ret
        else:
            return self.popen
    finally:
        # BUGFIX: the original iterated the single string
        # 'stderr, stdout, stdin' character by character, so the stream
        # kwds were never actually popped between invocations.
        for stream in ('stderr', 'stdout', 'stdin'):
            self.subprocess_kwds.pop(stream, None)
def _innodb_recovery(self, storage_path=None):
    """Run mysqld_safe in bootstrap mode to perform InnoDB crash recovery.

    Starts mysqld_safe with networking, grants and slave threads disabled,
    feeding it a trivial statement so it initializes, recovers the InnoDB
    logs and exits.

    :param storage_path: MySQL storage dir; defaults to the configured
        __mysql__['storage_dir']
    """
    storage_path = storage_path or __mysql__['storage_dir']
    binlog_path = os.path.join(storage_path, mysql_svc.STORAGE_BINLOG)
    # BUGFIX: the original line ended with a stray comma, which made
    # data_dir a 1-tuple and produced a malformed '--datadir=(...)' arg.
    data_dir = os.path.join(storage_path, mysql_svc.STORAGE_DATA_DIR)
    pid_file = os.path.join(storage_path, 'mysql.pid')
    socket_file = os.path.join(storage_path, 'mysql.sock')
    mysqld_safe_bin = software.whereis('mysqld_safe')[0]

    LOG.info('Performing InnoDB recovery')
    mysqld_safe_cmd = (mysqld_safe_bin,
                       '--socket=%s' % socket_file,
                       '--pid-file=%s' % pid_file,
                       '--datadir=%s' % data_dir,
                       '--log-bin=%s' % binlog_path,
                       '--skip-networking',
                       '--skip-grant',
                       '--bootstrap',
                       '--skip-slave-start')
    system2(mysqld_safe_cmd, stdin="select 1;")
logger = logging.getLogger(__name__) class Lvm2Error(PopenError): pass if not os.path.exists('/sbin/pvs'): mgr = dynimp.package_mgr() if not mgr.installed('lvm2'): mgr.install('lvm2', mgr.candidates('lvm2')[-1]) try: PVS = whereis('pvs')[0] VGS = whereis('vgs')[0] LVS = whereis('lvs')[0] PVSCAN = whereis('pvscan')[0] PVCREATE = whereis('pvcreate')[0] VGCREATE = whereis('vgcreate')[0] LVCREATE = whereis('lvcreate')[0] LVCHANGE = whereis('lvchange')[0] VGCHANGE = whereis('vgchange')[0] VGEXTEND = whereis('vgextend')[0] VGREDUCE = whereis('vgreduce')[0] VGCFGRESTORE = whereis('vgcfgrestore')[0] PVREMOVE = whereis('pvremove')[0]
'WHERE message_name = ? ' 'ORDER BY id DESC ' 'LIMIT 1', ('HostInitResponse', ) ) xml = cur.fetchone()['message'] msg.fromxml(xml) producer = msg_service.get_producer() producer.send(Queues.CONTROL, msg) finally: cur.close() if options.report: #collecting hostname = system2(whereis('hostname'), shell=True)[0] tar_file = os.path.join(os.getcwd(), 'report-%s.tar.gz' % hostname) json_file = os.path.join(os.getcwd(), 'sysinfo-%s.json' % hostname) cnf = bus.cnf cnf.bootstrap() ini = cnf.rawini try: log_params = ini.get('handler_file', 'args') try: log_file = log_params(0) except IndexError, TypeError: raise except Exception, BaseException: log_file = '/var/log/scalarizr.log'
def on_init(self):
    """Wire bus events and, depending on scalarizr state, either install
    SELinux policy for ssh-keygen (bootstrapping, RedHat-based) or restore
    storage and verify the Scalr PostgreSQL root user (running).
    """
    #temporary fix for starting-after-rebundle issue
    if not os.path.exists(PG_SOCKET_DIR):
        os.makedirs(PG_SOCKET_DIR)
        rchown(user='******', path=PG_SOCKET_DIR)

    bus.on("host_init_response", self.on_host_init_response)
    bus.on("before_host_up", self.on_before_host_up)
    bus.on("before_reboot_start", self.on_before_reboot_start)
    bus.on("before_reboot_finish", self.on_before_reboot_finish)

    if self._cnf.state == ScalarizrState.BOOTSTRAPPING:
        self._insert_iptables_rules()

        if disttool.is_redhat_based():
            # Compile and load a custom SELinux module so ssh-keygen can
            # run; skipped when any of the SELinux toolchain binaries
            # is missing.
            checkmodule_paths = software.whereis('checkmodule')
            semodule_package_paths = software.whereis('semodule_package')
            semodule_paths = software.whereis('semodule')

            if all((checkmodule_paths, semodule_package_paths, semodule_paths)):
                filetool.write_file('/tmp/sshkeygen.te',
                                    SSH_KEYGEN_SELINUX_MODULE, logger=self._logger)

                self._logger.debug('Compiling SELinux policy for ssh-keygen')
                system2((checkmodule_paths[0], '-M', '-m', '-o',
                         '/tmp/sshkeygen.mod', '/tmp/sshkeygen.te'), logger=self._logger)

                self._logger.debug('Building SELinux package for ssh-keygen')
                system2((semodule_package_paths[0], '-o', '/tmp/sshkeygen.pp',
                         '-m', '/tmp/sshkeygen.mod'), logger=self._logger)

                self._logger.debug('Loading ssh-keygen SELinux package')
                system2((semodule_paths[0], '-i', '/tmp/sshkeygen.pp'),
                        logger=self._logger)

    if self._cnf.state == ScalarizrState.RUNNING:
        # Re-attach and mount the persistent storage after a restart.
        storage_conf = Storage.restore_config(self._volume_config_path)
        storage_conf['tags'] = self.postgres_tags
        self.storage_vol = Storage.create(storage_conf)
        if not self.storage_vol.mounted():
            self.storage_vol.mount()

        self.postgresql.service.start()
        self.accept_all_clients()

        self._logger.debug("Checking presence of Scalr's PostgreSQL root user.")
        root_password = self.root_password

        if not self.postgresql.root_user.exists():
            self._logger.debug("Scalr's PostgreSQL root user does not exist. Recreating")
            self.postgresql.root_user = self.postgresql.create_user(ROOT_USER, root_password)
        else:
            try:
                self.postgresql.root_user.check_system_password(root_password)
                self._logger.debug("Scalr's root PgSQL user is present. Password is correct.")
            except ValueError:
                self._logger.warning("Scalr's root PgSQL user was changed. Recreating.")
                self.postgresql.root_user.change_system_password(root_password)

        if self.is_replication_master:
            #ALTER ROLE cannot be executed in a read-only transaction
            self._logger.debug("Checking password for pg_role scalr.")
            if not self.postgresql.root_user.check_role_password(root_password):
                self._logger.warning("Scalr's root PgSQL role was changed. Recreating.")
                self.postgresql.root_user.change_role_password(root_password)
'LIMIT 1', ('HostInitResponse', )) raw_msg, format = cur.fetchone() if 'xml' == format: msg.fromxml(raw_msg) elif 'json' == format: msg.fromjson(raw_msg) producer = msg_service.get_producer() producer.send(Queues.CONTROL, msg) finally: cur.close() if options.report: #collecting hostname = system2(whereis('hostname'), shell=True)[0] tar_file = os.path.join(os.getcwd(), 'report-%s.tar.gz' % hostname) json_file = os.path.join(os.getcwd(), 'sysinfo-%s.json' % hostname) cnf = bus.cnf cnf.bootstrap() ini = cnf.rawini try: log_params = ini.get('handler_file', 'args') try: log_file = log_params(0) except (IndexError, TypeError): raise except Exception, BaseException: log_file = '/var/log/scalarizr.log'
def on_init(self):
    """Wire bus events and iptables rules; on bootstrapping RedHat hosts
    load an SELinux module for ssh-keygen, and on running hosts ensure the
    storage volume is mounted and the Scalr PostgreSQL root user is valid.
    """
    #temporary fix for starting-after-rebundle issue
    if not os.path.exists(PG_SOCKET_DIR):
        os.makedirs(PG_SOCKET_DIR)
        chown_r(PG_SOCKET_DIR, 'postgres')

    bus.on("host_init_response", self.on_host_init_response)
    bus.on("before_host_up", self.on_before_host_up)
    bus.on("before_reboot_start", self.on_before_reboot_start)
    self._insert_iptables_rules()

    if __node__['state'] == ScalarizrState.BOOTSTRAPPING:
        if disttool.is_redhat_based():
            # Compile and load a custom SELinux module so ssh-keygen can
            # run; skipped when any SELinux toolchain binary is missing.
            checkmodule_paths = software.whereis('checkmodule')
            semodule_package_paths = software.whereis('semodule_package')
            semodule_paths = software.whereis('semodule')

            if all((checkmodule_paths, semodule_package_paths,
                    semodule_paths)):
                with open('/tmp/sshkeygen.te', 'w') as fp:
                    fp.write(SSH_KEYGEN_SELINUX_MODULE)

                self._logger.debug(
                    'Compiling SELinux policy for ssh-keygen')
                system2((checkmodule_paths[0], '-M', '-m', '-o',
                         '/tmp/sshkeygen.mod', '/tmp/sshkeygen.te'),
                        logger=self._logger)

                self._logger.debug(
                    'Building SELinux package for ssh-keygen')
                system2((semodule_package_paths[0], '-o',
                         '/tmp/sshkeygen.pp', '-m', '/tmp/sshkeygen.mod'),
                        logger=self._logger)

                self._logger.debug('Loading ssh-keygen SELinux package')
                system2((semodule_paths[0], '-i', '/tmp/sshkeygen.pp'),
                        logger=self._logger)

    if __node__['state'] == 'running':
        # Re-attach and mount the persistent storage after a restart.
        vol = storage2.volume(__postgresql__['volume'])
        if not vol.tags:
            vol.tags = self.resource_tags()
        vol.ensure(mount=True)

        self.postgresql.service.start()
        self.accept_all_clients()

        self._logger.debug(
            "Checking presence of Scalr's PostgreSQL root user.")
        root_password = self.root_password

        if not self.postgresql.root_user.exists():
            self._logger.debug(
                "Scalr's PostgreSQL root user does not exist. Recreating")
            self.postgresql.root_user = self.postgresql.create_linux_user(
                ROOT_USER, root_password)
        else:
            try:
                self.postgresql.root_user.check_system_password(
                    root_password)
                self._logger.debug(
                    "Scalr's root PgSQL user is present. Password is correct.")
            except ValueError:
                self._logger.warning(
                    "Scalr's root PgSQL user was changed. Recreating.")
                self.postgresql.root_user.change_system_password(
                    root_password)

        if self.is_replication_master:
            #ALTER ROLE cannot be executed in a read-only transaction
            self._logger.debug("Checking password for pg_role scalr.")
            if not self.postgresql.root_user.check_role_password(
                    root_password):
                LOG.warning(
                    "Scalr's root PgSQL role was changed. Recreating.")
                self.postgresql.root_user.change_role_password(
                    root_password)
def _create(self, volume, snapshot, snap_lv, tranzit_path, complete_cb):
    """Create an eph-storage snapshot from LVM snapshot volume *snap_lv*.

    Pipeline: mount snap_lv -> tar the tree -> compress (pigz or gzip)
    -> split into chunks under *tranzit_path* (background thread)
    -> upload chunks to the snap backend (two background threads).
    Updates self._state_map[snapshot.id] to COMPLETED or FAILED.

    NOTE(review): *complete_cb* is not referenced in this body — presumably
    invoked by the caller or dead; confirm against call sites.
    """
    try:
        chunk_prefix = '%s.data' % snapshot.id
        snapshot.path = None
        snap_mpoint = mkdtemp()
        try:
            opts = []
            if volume.fstype == 'xfs':
                # XFS refuses to mount a snapshot with a duplicate UUID.
                opts += ['-o', 'nouuid,ro']
            mount.mount(snap_lv, snap_mpoint, *opts)

            tar_cmd = ['tar', 'cp', '-C', snap_mpoint, '.']
            pigz_bins = whereis('pigz')
            compress_cmd = [pigz_bins[0] if pigz_bins else 'gzip', '-5']

            self._logger.debug("Creating and compressing snapshot data.")
            tar = subprocess.Popen(tar_cmd,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   close_fds=True)
            compress = subprocess.Popen(compress_cmd,
                                        stdin=tar.stdout,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE,
                                        close_fds=True)
            tar.stdout.close(
            )  # Allow tar to receive a SIGPIPE if compress exits.

            # Splitter consumes the compressed stream into chunk files.
            split = threading.Thread(target=self._split,
                                     name='split',
                                     args=(compress.stdout, tranzit_path,
                                           chunk_prefix, snapshot))
            split.start()

            # Two concurrent uploaders drain the chunk files to the backend.
            uploaders = []
            for i in range(2):
                uploader = threading.Thread(
                    name="Uploader-%s" % i,
                    target=self._uploader,
                    args=(volume.snap_backend['path'], snapshot))
                self._logger.debug("Starting uploader '%s'",
                                   uploader.getName())
                uploader.start()
                uploaders.append(uploader)

            self._logger.debug('uploaders started. waiting compress')
            compress.wait()
            self._logger.debug(
                'compress completed (code: %s). waiting split',
                compress.returncode)
            if compress.returncode:
                raise StorageError(
                    'Compress process terminated with exit code %s. <err>: %s'
                    % (compress.returncode, compress.stderr.read()))
            split.join()
            self._logger.debug('split completed. waiting uploaders')
            for uploader in uploaders:
                uploader.join()
            self._logger.debug('uploaders completed')

            # Worker threads report failures through _inner_exc_info;
            # re-raise here with the original traceback (Python 2 syntax).
            if self._inner_exc_info:
                t, e, s = self._inner_exc_info
                raise t, e, s
        finally:
            self._return_ev.set()
            mount.umount(snap_mpoint)
            os.rmdir(snap_mpoint)
            self._lvm.remove_lv(snap_lv)
            self._inner_exc_info = None
        self._state_map[snapshot.id] = Snapshot.COMPLETED
    except (Exception, BaseException), e:
        self._state_map[snapshot.id] = Snapshot.FAILED
        self._logger.exception('Snapshot creation failed. %s' % e)