def start(self):
    """Write the script body to disk and launch it under its interpreter.

    The interpreter is checked here, and not in __init__, because scripts
    can create sequences where a previous script installs the interpreter
    for the next one.

    Side effects: creates the exec directory, writes the script body,
    opens stdout/stderr capture files for the child, spawns the process
    and records self.proc, self.pid and self.start_time.

    Raises HandlerError when the interpreter binary is missing.
    """
    if not os.path.exists(self.interpreter):
        raise HandlerError("Can't execute script '%s' cause "
                        "interpreter '%s' not found" % (self.name, self.interpreter))

    # Write script to disk, prepare execution
    exec_dir = os.path.dirname(self.exec_path)
    if not os.path.exists(exec_dir):
        os.makedirs(exec_dir)

    write_file(self.exec_path, self.body.encode('utf-8'), logger=LOG)
    os.chmod(self.exec_path, stat.S_IREAD | stat.S_IEXEC)

    stdout = open(self.stdout_path, 'w+')
    stderr = open(self.stderr_path, 'w+')
    try:
        # Start process
        self.logger.debug('Executing %s'
                '\n %s'
                '\n 1>%s'
                '\n 2>%s'
                '\n timeout: %s seconds',
                self.interpreter, self.exec_path,
                self.stdout_path, self.stderr_path, self.exec_timeout)
        self.proc = subprocess.Popen(self.exec_path, stdout=stdout,
                        stderr=stderr, close_fds=True)
        self.pid = self.proc.pid
        self.start_time = time.time()
    finally:
        # BUGFIX: the parent leaked both capture file objects on every
        # run (and on Popen failure). Popen duplicates the descriptors
        # into the child, so the parent copies can always be closed.
        stdout.close()
        stderr.close()
def add_record(self, record, replace_similar=False):
    """Append *record* to the backing file.

    When ``replace_similar`` is set, records that are similar (but not
    equal) to the new one are deleted first. A record equal to an
    existing one is never written twice.
    """
    if replace_similar:
        # Drop similar-but-different records before adding the new one
        for existing in self.records:
            if existing != record and existing.is_similar_to(record):
                self.delete_record(existing)
    if record not in self.records:
        self._logger.debug('Adding record "%s" to %s' % (str(record), self.path))
        write_file(self.path, '\n' + str(record) + '\n', 'a')
def set_cache_size(sub):
    """Set the cache-size line in the memcached config file.

    If the config exists and already contains a matching directive it is
    rewritten in place; otherwise *sub* is appended. An empty/missing
    config is left untouched.
    """
    conf = read_file(mcd_conf_path)
    if not conf:
        return
    if expression.findall(conf):
        # Directive present: substitute it in place
        write_file(mcd_conf_path, re.sub(expression, sub, conf))
    else:
        # Directive absent: append it
        write_file(mcd_conf_path, sub, mode='a')
def update(self, workdir): if not os.access(self.executable, os.X_OK): self._logger.info('Installing Git SCM...') if disttool.is_debian_based(): system2(('apt-get', '-y', 'install', 'git-core')) elif disttool.is_redhat_based(): system2(('yum', '-y', 'install', 'git')) else: raise SourceError('Cannot install Git. Unknown distribution %s' % str(disttool.linux_dist())) #if not os.path.exists(workdir): # self._logger.info('Creating destination directory') # os.makedirs(workdir) tmpdir = tempfile.mkdtemp() env = {} try: if self.private_key: pk_path = os.path.join(tmpdir, 'pk.pem') filetool.write_file(pk_path, self.private_key) os.chmod(pk_path, 0400) git_ssh_path = os.path.join(tmpdir, 'git_ssh.sh') filetool.write_file(git_ssh_path, self.ssh_tpl % pk_path) os.chmod(git_ssh_path, 0755) env.update(dict(GIT_SSH=git_ssh_path)) if os.path.exists(os.path.join(workdir, '.git')): origin_url = system2(('git', 'config', '--get', 'remote.origin.url'), cwd=workdir, raise_exc=False)[0] if origin_url.strip() != self.url.strip(): self._logger.info('%s is not origin of %s (%s is)', self.url, workdir, origin_url) self._logger.info('Remove all files in %s and checkout from %s', workdir, self.url ) shutil.rmtree(workdir) os.mkdir(workdir) out, err, ret_code = system2(('git', 'clone', self.url, workdir), env=env) else: self._logger.info('Updating directory %s (git-pull)', workdir) out, err, ret_code = system2(('git', 'pull'), env=env, cwd=workdir) else: self._logger.info('Checkout from %s', self.url) out, err, ret_code = system2(('git', 'clone', self.url, workdir), env=env) if ret_code: raise Exception('Git failed to clone repository. %s' % out) self._logger.info('Successfully deployed %s from %s', workdir, self.url) finally: shutil.rmtree(tmpdir)
def disable_requiretty(self):
    """Strip the 'Defaults requiretty' directive from /etc/sudoers.

    requiretty
        If set, sudo will only run when the user is logged in to a real
        tty, so it cannot be invoked via cron(8) or cgi-bin scripts.
        This flag is off by default on all systems but CentOS5.
    Ubuntu never sets it, so the file is left untouched there.
    """
    path = "/etc/sudoers"
    self._logger.debug("Disabling requiretty in %s" % path)
    if disttool.is_ubuntu():
        return
    contents = read_file(path)
    patched = re.sub(r"Defaults\s+requiretty", "\n", contents)
    # Rewrite only when something actually matched
    if patched != contents:
        write_file(path, patched)
def apply_public_ssh_key(self, source_path=None):
    """Append this user's public SSH key to ~/.ssh/authorized_keys.

    @param source_path: file to read the public key from; defaults to
        self.public_key_path.
    The key is appended only when not already present; ownership of the
    .ssh directory and the keys file is fixed up via rchown.
    """
    source_path = source_path or self.public_key_path
    if not os.path.exists(self.ssh_dir):
        os.makedirs(self.ssh_dir)
        rchown(self.name, self.ssh_dir)

    pub_key = read_file(source_path, logger=self._logger)
    path = os.path.join(self.ssh_dir, 'authorized_keys')
    keys = ''
    if os.path.exists(path):
        keys = read_file(path, logger=self._logger)

    if not keys or pub_key not in keys:
        write_file(path, data='\n%s %s\n' % (pub_key, self.name),
                mode='a', logger=self._logger)
        rchown(self.name, path)
def delete_record(self, record, delete_similar=False):
    """Remove *record* (and optionally similar records) from the file.

    Rewrites the backing file with the surviving records, but only when
    at least one record was actually dropped.
    """
    removed = []
    kept = []
    for existing in self.records:
        is_match = (existing == record) or \
                (delete_similar and existing.is_similar_to(record))
        if is_match:
            removed.append(str(existing))
        else:
            kept.append(str(existing))
    if removed:
        self._logger.debug('Removing records "%s" from %s' % (removed, self.path))
        write_file(self.path, '\n'.join(kept))
def create_from_snapshot(self, **kwargs):
    '''
    Restore a RAID-over-LVM volume from snapshot metadata.

    @param level: Raid level 0, 1, 5 - are valid values
    @param vg: Volume group name to restore
    @param lvm_group_cfg: Base64 encoded RAID volume group configuration
    @param disks: Volumes
    '''
    vg = kwargs['vg']
    level = kwargs['level']
    raw_vg = os.path.basename(vg)

    # Levels 1/10 build a fresh array from the disks; other levels
    # re-assemble the existing one. NOTE(review): assumes the snapshot's
    # disks already carry md metadata for non-mirrored levels — confirm.
    if int(level) in (1, 10):
        raid_pv = self._mdadm.create([vol.devname for vol in kwargs['disks']], level)
    else:
        raid_pv = self._mdadm.assemble([vol.devname for vol in kwargs['disks']])

    pv_uuid = kwargs.get('pv_uuid')
    self._create_pv(raid_pv, pv_uuid )

    # Decode the base64 VG backup and restore it; the temp backup file
    # is always removed, even if restore_vg fails.
    lvm_raw_backup = binascii.a2b_base64(kwargs['lvm_group_cfg'])
    write_file(self._lvm_backup_filename, lvm_raw_backup, logger=logger)
    try:
        self._lvm.restore_vg(vg, self._lvm_backup_filename)
    finally:
        os.unlink(self._lvm_backup_filename)

    lvinfo = self._lvm.lv_info(kwargs['device'])
    # Activate the VG, then wait for udev to create the device node
    self._lvm.change_vg(raw_vg, available=True)
    wait_until(lambda: os.path.exists(kwargs['device']), logger=self._logger)
    pv_info = self._lvm.pv_info(raid_pv)

    return RaidVolume( lvinfo.lv_path,
                    raid_pv = raid_pv,
                    vg = vg,
                    disks = kwargs['disks'],
                    level = kwargs['level'],
                    pv_uuid = pv_info.uuid,
                    lvm_group_cfg = kwargs['lvm_group_cfg'],
                    mpoint = kwargs.get('mpoint'),
                    type = self.type)
def __init__(self):
    """Ensure mdadm is installed and its udev auto-assembly rules are off.

    Installs the mdadm package on first use, comments out every
    non-comment line of 85-mdadm.rules wherever the file exists, and
    pre-compiles the regexes used to parse `mdadm --detail` output.
    """
    if not os.path.exists(MDADM_EXEC):
        if disttool.is_redhat_based():
            system2(('/usr/bin/yum', '-d0', '-y', 'install', 'mdadm', '-x', 'exim'),
                    raise_exc=False)
        else:
            mgr = dynimp.package_mgr()
            mgr.install('mdadm', mgr.candidates('mdadm')[-1])

    # BUGFIX: was ['/etc ', '/lib'] — the trailing space produced the
    # path '/etc /udev/rules.d/85-mdadm.rules', which never exists, so
    # the /etc rules file was silently never disabled.
    for location in ['/etc', '/lib']:
        path = os.path.join(location, 'udev/rules.d/85-mdadm.rules')
        if os.path.exists(path):
            rule = read_file(path)
            if rule:
                # Prefix every non-comment line with '#'
                rule = re.sub(re.compile('^([^#])', re.M), '#\\1', rule)
                write_file(path, rule)

    # Parsers for `mdadm --detail` output
    self._raid_devices_re = re.compile(r'Raid\s+Devices\s+:\s+(?P<count>\d+)')
    self._total_devices_re = re.compile(r'Total\s+Devices\s+:\s+(?P<count>\d+)')
    self._state_re = re.compile(r'State\s+:\s+(?P<state>.+)')
    self._rebuild_re = re.compile(r'Rebuild\s+Status\s+:\s+(?P<percent>\d+)%')
    self._level_re = re.compile(r'Raid Level : (?P<level>.+)')
def on_init(self, *args, **kwargs):
    # Subscribe to lifecycle events, then perform EC2 first-boot fixups:
    # hostname, root SSH login, server public key, ephemeral storage.
    bus.on("before_hello", self.on_before_hello)
    bus.on("before_host_init", self.on_before_host_init)
    bus.on("before_restart", self.on_before_restart)

    msg_service = bus.messaging_service
    producer = msg_service.get_producer()
    producer.on("before_send", self.on_before_message_send)

    # Set the hostname to this instance's public hostname
    cnf = bus.cnf
    try:
        hostname_as_pubdns = int(cnf.rawini.get('ec2', 'hostname_as_pubdns'))
    except ConfigParser.Error:
        # Option absent from config: default to enabled
        hostname_as_pubdns = True

    if hostname_as_pubdns:
        system2("hostname " + self._platform.get_public_hostname(), shell=True)

    if disttool.is_ubuntu():
        # Ubuntu cloud-init scripts may disable root ssh login;
        # force disable_root to 0 in whichever config file exists
        for path in ('/etc/ec2-init/ec2-config.cfg', '/etc/cloud/cloud.cfg'):
            if os.path.exists(path):
                c = filetool.read_file(path)
                c = re.sub(re.compile(r'^disable_root[^:=]*([:=]).*', re.M), r'disable_root\1 0', c)
                filetool.write_file(path, c)

    # Add server ssh public key to authorized_keys
    authorized_keys_path = "/root/.ssh/authorized_keys"
    if os.path.exists(authorized_keys_path):
        c = filetool.read_file(authorized_keys_path)
        ssh_key = self._platform.get_ssh_pub_key()
        idx = c.find(ssh_key)
        if idx == -1:
            # Key missing: append it (ensuring a trailing newline first)
            if c and c[-1] != '\n':
                c += '\n'
            c += ssh_key + "\n"
            self._logger.debug("Add server ssh public key to authorized_keys")
            filetool.write_file(authorized_keys_path, c)
        elif idx > 0 and c[idx-1] != '\n':
            # Key present but glued to the previous line: split it off
            c = c[0:idx] + '\n' + c[idx:]
            self._logger.warn('Adding new-line character before server SSH key in authorized_keys file')
            filetool.write_file(authorized_keys_path, c)

    # Mount ephemeral devices
    # Seen on eucalyptus:
    #   - fstab contains invalid fstype and `mount -a` fails
    mtab = Mtab()
    fstab = Fstab()
    for device in self._platform.instance_store_devices:
        if os.path.exists(device) and fstab.contains(device) and not mtab.contains(device):
            entry = fstab.find(device)[0]
            try:
                mount(device, entry.mpoint, ('-o', entry.options))
            except:
                # Best-effort: log the failure and keep mounting the rest
                self._logger.warn(sys.exc_info()[1])
globals()['_pid'] = pid = os.getpid() logger.info('[pid: %d] Starting scalarizr %s', pid, __version__) # Check for another running scalarzir if os.path.exists(PID_FILE): try: another_pid = int(read_file(PID_FILE).strip()) except ValueError: pass else: if pid != another_pid and os.path.exists('/proc/%s/status' % (another_pid,)): logger.error('Cannot start scalarizr: Another process (pid: %s) already running', another_pid) sys.exit(1) # Write PID write_file(PID_FILE, str(pid)) cnf = bus.cnf cnf.on('apply_user_data', _apply_user_data) if optparser.values.configure: do_configure() sys.exit() elif optparser.values.import_server: print "Starting import process..." print "Don't terminate Scalarizr until Scalr will create the new role" cnf.state = ScalarizrState.IMPORTING # Load Command-line configuration options and auto-configure Scalarizr values = CmdLineIni.to_kvals(optparser.values.cnf) if not values.get('server_id'):
def public_key(self):
    """Return this user's public SSH key, generating the file on demand.

    When the cached key file is absent, the public key is extracted,
    persisted and registered in authorized_keys before being read back.
    """
    if not os.path.exists(self.public_key_path):
        write_file(self.public_key_path, self.extract_public_ssh_key(),
                logger=self._logger)
        self.apply_public_ssh_key()
    return read_file(self.public_key_path, logger=self._logger)
def _set_state(self, v):
    """Persist the new state *v* to the private '.state' file and log it."""
    state_path = self.private_path('.state')
    filetool.write_file(state_path, v, logger=self._logger)
    self._logger.info('State: %s', v)
def _store_key(self, key_str, private=True):
    """Write *key_str* to the private or public key file.

    @param key_str: key material to store
    @param private: True (default) targets self.private_key_path,
        otherwise self.public_key_path.
    """
    if private:
        target = self.private_key_path
    else:
        target = self.public_key_path
    write_file(target, data=key_str, logger=self._logger)
def _write_ssh_keys_file(self, content): ret = write_file(self.authorized_keys_file, content, msg="Writing authorized keys", logger=self._logger) if not ret: raise UpdateSshAuthorizedKeysError("Unable to write ssh keys to %s" % self.authorized_keys_file) os.chmod(self.authorized_keys_file, 0600)
def create(self):
    """Create an empty backing file unless one already exists."""
    if self.exists():
        return
    write_file(self.path, '', 'w', logger=self._logger)
def on_init(self):
    # Wire up lifecycle event handlers and, depending on the current
    # scalarizr state, prepare PostgreSQL: SELinux policy for ssh-keygen
    # at bootstrap, storage + root-user verification when running.

    #temporary fix for starting-after-rebundle issue
    if not os.path.exists(PG_SOCKET_DIR):
        os.makedirs(PG_SOCKET_DIR)
        rchown(user='******', path=PG_SOCKET_DIR)

    bus.on("host_init_response", self.on_host_init_response)
    bus.on("before_host_up", self.on_before_host_up)
    bus.on("before_reboot_start", self.on_before_reboot_start)
    bus.on("before_reboot_finish", self.on_before_reboot_finish)

    if self._cnf.state == ScalarizrState.BOOTSTRAPPING:
        self._insert_iptables_rules()

        if disttool.is_redhat_based():
            # Compile and load a SELinux module allowing ssh-keygen,
            # but only when the full SELinux toolchain is available
            checkmodule_paths = software.whereis('checkmodule')
            semodule_package_paths = software.whereis('semodule_package')
            semodule_paths = software.whereis('semodule')

            if all((checkmodule_paths, semodule_package_paths, semodule_paths)):
                filetool.write_file('/tmp/sshkeygen.te', SSH_KEYGEN_SELINUX_MODULE, logger=self._logger)

                self._logger.debug('Compiling SELinux policy for ssh-keygen')
                system2((checkmodule_paths[0], '-M', '-m', '-o',
                        '/tmp/sshkeygen.mod', '/tmp/sshkeygen.te'), logger=self._logger)

                self._logger.debug('Building SELinux package for ssh-keygen')
                system2((semodule_package_paths[0], '-o', '/tmp/sshkeygen.pp',
                        '-m', '/tmp/sshkeygen.mod'), logger=self._logger)

                self._logger.debug('Loading ssh-keygen SELinux package')
                system2((semodule_paths[0], '-i', '/tmp/sshkeygen.pp'), logger=self._logger)

    if self._cnf.state == ScalarizrState.RUNNING:
        # Restore and mount the data volume, then start the service
        storage_conf = Storage.restore_config(self._volume_config_path)
        storage_conf['tags'] = self.postgres_tags
        self.storage_vol = Storage.create(storage_conf)
        if not self.storage_vol.mounted():
            self.storage_vol.mount()

        self.postgresql.service.start()
        self.accept_all_clients()

        self._logger.debug("Checking presence of Scalr's PostgreSQL root user.")
        root_password = self.root_password

        if not self.postgresql.root_user.exists():
            self._logger.debug("Scalr's PostgreSQL root user does not exist. Recreating")
            self.postgresql.root_user = self.postgresql.create_user(ROOT_USER, root_password)
        else:
            try:
                self.postgresql.root_user.check_system_password(root_password)
                self._logger.debug("Scalr's root PgSQL user is present. Password is correct.")
            except ValueError:
                # System password drifted: reset it to the expected value
                self._logger.warning("Scalr's root PgSQL user was changed. Recreating.")
                self.postgresql.root_user.change_system_password(root_password)

        if self.is_replication_master:
            #ALTER ROLE cannot be executed in a read-only transaction
            self._logger.debug("Checking password for pg_role scalr.")
            if not self.postgresql.root_user.check_role_password(root_password):
                self._logger.warning("Scalr's root PgSQL role was changed. Recreating.")
                self.postgresql.root_user.change_role_password(root_password)