def __init__(self):
    """Set up the base handler, farm firewall for memcached, and bus hooks."""
    Handler.__init__(self)
    # init script will set later
    FarmSecurityMixin.__init__(self)
    self.init_farm_security([11211])  # memcached default port
    self._logger = logging.getLogger(__name__)
    self._queryenv = bus.queryenv_service
    bus.on(init=self.on_init, start=self.on_start)
def on_init(self):
    """Subscribe to host lifecycle events and prepare the node at startup."""
    bus.on(host_init_response=self.on_host_init_response)

    # Internal messages must not trigger user scripting
    try:
        internal = (Messages.INT_SERVER_REBOOT,
                    Messages.INT_SERVER_HALT,
                    Messages.HOST_INIT_RESPONSE)
        for message in internal:
            scalarizr.handlers.script_executor.skip_events.add(message)
    except AttributeError:
        pass

    if __node__['state'] == 'running':
        scalarizr.handlers.check_supported_behaviors()

    # Mount all filesystems (failures tolerated)
    if os_dist['family'] != 'Windows':
        system2(('mount', '-a'), raise_exc=False)

    # cloud-init scripts may disable root ssh login; re-enable it
    for path in ('/etc/ec2-init/ec2-config.cfg', '/etc/cloud/cloud.cfg'):
        if not os.path.exists(path):
            continue
        with open(path, 'r') as fp:
            contents = fp.read()
        contents = re.sub(
            re.compile(r'^disable_root[^:=]*([:=]).*', re.M),
            r'disable_root\1 0', contents)
        with open(path, 'w') as fp:
            fp.write(contents)

    # Add firewall rules
    self._insert_iptables_rules()

    if __node__['state'] == 'running':
        scalarizr.handlers.sync_globals()
def on_init(self):
    """Subscribe to host/device events and prepare filesystems and firewall."""
    bus.on(host_init_response=self.on_host_init_response,
           block_device_mounted=self.on_block_device_mounted)

    # Keep internal messages out of the scripting pipeline
    try:
        for msg in (Messages.INT_SERVER_REBOOT,
                    Messages.INT_SERVER_HALT,
                    Messages.HOST_INIT_RESPONSE):
            scalarizr.handlers.script_executor.skip_events.add(msg)
    except AttributeError:
        pass

    # Mount all filesystems
    if os_dist["family"] != "Windows":
        system2(("mount", "-a"), raise_exc=False)

    # cloud-init scripts may disable root ssh login; turn that back on
    for path in ("/etc/ec2-init/ec2-config.cfg", "/etc/cloud/cloud.cfg"):
        if not os.path.exists(path):
            continue
        with open(path, "r") as fp:
            text = fp.read()
        text = re.sub(re.compile(r"^disable_root[^:=]*([:=]).*", re.M),
                      r"disable_root\1 0", text)
        with open(path, "w") as fp:
            fp.write(text)

    # Add firewall rules
    self._insert_iptables_rules()

    if __node__["state"] == "running":
        scalarizr.handlers.sync_globals()
def on_init(self, *args, **kwds):
    """Wire handler callbacks onto the message bus."""
    bus.on(host_init_response=self.on_host_init_response,
           before_host_up=self.on_before_host_up,
           reload=self.on_reload,
           start=self.on_start)
def on_init(self):
    """Subscribe lifecycle callbacks on the bus."""
    bus.on(start=self.on_start,
           before_host_up=self.on_before_host_up,
           host_init_response=self.on_host_init_response,
           before_reboot_finish=self.on_before_reboot_finish)
def on_init(self):
    """Subscribe host events; keep block-device updates out of scripting."""
    bus.on("before_host_init", self.on_before_host_init)
    bus.on("host_init_response", self.on_host_init_response)
    try:
        handlers.script_executor.skip_events.add(Messages.INT_BLOCK_DEVICE_UPDATED)
    except AttributeError:
        pass
def on_init(self):
    """Subscribe lifecycle events; reset or re-attach the RabbitMQ node."""
    bus.on("host_init_response", self.on_host_init_response)
    bus.on("before_host_up", self.on_before_host_up)
    bus.on("before_hello", self.on_before_hello)
    bus.on("rebundle_cleanup_image", self.cleanup_hosts_file)
    bus.on("before_host_down", self.on_before_host_down)

    if self.cnf.state == ScalarizrState.BOOTSTRAPPING:
        self.cleanup_hosts_file('/')
        self._logger.info('Performing initial cluster reset')
        if os.path.exists(DEFAULT_STORAGE_PATH):
            # Storage dir must belong to the rabbitmq system user
            rabbitmq_user = pwd.getpwnam("rabbitmq")
            os.chown(DEFAULT_STORAGE_PATH,
                     rabbitmq_user.pw_uid, rabbitmq_user.pw_gid)
        self.service.start()
        self.rabbitmq.stop_app()
        self.rabbitmq.reset()
        self.service.stop()
    elif self.cnf.state == ScalarizrState.RUNNING:
        # Re-attach the persistent data volume on an already-running server
        storage_conf = storage.Storage.restore_config(self._volume_config_path)
        storage_conf['tags'] = self.rabbitmq_tags
        self.storage_vol = storage.Storage.create(storage_conf)
        if not self.storage_vol.mounted():
            self.service.stop()
            self.storage_vol.mount()
            self.service.start()
def on_init(self):
    """Subscribe lifecycle events; reset or ensure the RabbitMQ volume."""
    bus.on("host_init_response", self.on_host_init_response)
    bus.on("before_host_up", self.on_before_host_up)
    bus.on("before_hello", self.on_before_hello)
    bus.on("rebundle_cleanup_image", self.cleanup_hosts_file)
    bus.on("before_host_down", self.on_before_host_down)

    if 'bootstrapping' == __node__['state']:
        self.cleanup_hosts_file('/')
        self._logger.info('Performing initial cluster reset')
        if os.path.exists(DEFAULT_STORAGE_PATH):
            # Storage dir must belong to the rabbitmq system user
            rabbitmq_user = pwd.getpwnam("rabbitmq")
            os.chown(DEFAULT_STORAGE_PATH,
                     rabbitmq_user.pw_uid, rabbitmq_user.pw_gid)
        self.service.start()
        self.rabbitmq.stop_app()
        self.rabbitmq.reset()
        self.service.stop()
    elif 'running' == __node__['state']:
        # Ensure the persistent data volume is attached and mounted
        rabbitmq_vol = __rabbitmq__['volume']
        rabbitmq_vol.tags = self.rabbitmq_tags
        if not __rabbitmq__['volume'].mounted_to():
            self.service.stop()
            rabbitmq_vol.ensure()
            self.service.start()
        __rabbitmq__['volume'] = rabbitmq_vol
def __init__(self):
    """PostgreSQL handler: service control, bus events, operation phases."""
    self._service_name = SERVICE_NAME
    ServiceCtlHandler.__init__(self, SERVICE_NAME, initdv2.lookup(SERVICE_NAME))
    bus.on("init", self.on_init)
    bus.define_events(
        'before_postgresql_data_bundle',
        'postgresql_data_bundle',
        # @param host: New master hostname
        'before_postgresql_change_master',
        # @param host: New master hostname
        'postgresql_change_master',
        'before_slave_promote_to_master',
        'slave_promote_to_master')

    self._phase_postgresql = 'Configure PostgreSQL'
    self._phase_data_bundle = self._op_data_bundle = 'PostgreSQL data bundle'
    self._phase_backup = self._op_backup = 'PostgreSQL backup'
    self._step_upload_to_cloud_storage = 'Upload data to cloud storage'
    self._step_accept_scalr_conf = 'Accept Scalr configuration'
    self._step_patch_conf = 'Patch configuration files'
    self._step_create_storage = 'Create storage'
    self._step_init_master = 'Initialize Master'
    self._step_init_slave = 'Initialize Slave'
    self._step_create_data_bundle = 'Create data bundle'
    self._step_change_replication_master = 'Change replication Master'
    self._step_collect_host_up_data = 'Collect HostUp data'

    self.on_reload()
def __init__(self):
    """PostgreSQL handler: service control, bus events, operation phases."""
    self._service_name = SERVICE_NAME
    ServiceCtlHandler.__init__(self, SERVICE_NAME, initdv2.lookup(SERVICE_NAME))
    bus.on("init", self.on_init)
    bus.define_events(
        'before_postgresql_data_bundle',
        'postgresql_data_bundle',
        # @param host: New master hostname
        'before_postgresql_change_master',
        # @param host: New master hostname
        'postgresql_change_master',
        'before_slave_promote_to_master',
        'slave_promote_to_master')

    self._phase_postgresql = 'Configure PostgreSQL'
    self._phase_data_bundle = self._op_data_bundle = 'PostgreSQL data bundle'
    self._phase_backup = self._op_backup = 'PostgreSQL backup'
    self._step_upload_to_cloud_storage = 'Upload data to cloud storage'
    self._step_accept_scalr_conf = 'Accept Scalr configuration'
    self._step_patch_conf = 'Patch configuration files'
    self._step_create_storage = 'Create storage'
    self._step_init_master = 'Initialize Master'
    self._step_init_slave = 'Initialize Slave'
    self._step_create_data_bundle = 'Create data bundle'
    self._step_change_replication_master = 'Change replication Master'
    self._step_collect_host_up_data = 'Collect HostUp data'

    self.on_reload()
def on_init(self):
    """Subscribe lifecycle events; reset or ensure the RabbitMQ volume."""
    bus.on("host_init_response", self.on_host_init_response)
    bus.on("before_host_up", self.on_before_host_up)
    bus.on("before_hello", self.on_before_hello)
    # Not every build defines this event, so probe first
    if bus.event_defined('rebundle_cleanup_image'):
        bus.on("rebundle_cleanup_image", self.cleanup_hosts_file)
    bus.on("before_host_down", self.on_before_host_down)

    if 'bootstrapping' == __node__['state']:
        self.cleanup_hosts_file('/')
        self._logger.info('Performing initial cluster reset')
        if os.path.exists(DEFAULT_STORAGE_PATH):
            # Storage dir must belong to the rabbitmq system user
            rabbitmq_user = pwd.getpwnam("rabbitmq")
            os.chown(DEFAULT_STORAGE_PATH,
                     rabbitmq_user.pw_uid, rabbitmq_user.pw_gid)
        self.service.start()
        self.rabbitmq.stop_app()
        self.rabbitmq.reset()
        self.service.stop()
    elif 'running' == __node__['state']:
        rabbitmq_vol = __rabbitmq__['volume']
        if not __rabbitmq__['volume'].mounted_to():
            self.service.stop()
            rabbitmq_vol.ensure()
            self.service.start()
        __rabbitmq__['volume'] = rabbitmq_vol
def __init__(self):
    """Memcached handler: preset provider plus farm firewall on port 11211."""
    self.preset_provider = MemcachedPresetProvider()
    preset_service.services[BEHAVIOUR] = self.preset_provider
    FarmSecurityMixin.__init__(self, [11211])  # memcached default port
    self._logger = logging.getLogger(__name__)
    self._queryenv = bus.queryenv_service
    bus.on("init", self.on_init)
def __init__(self):
    """MySQL behaviour handler: service control, presets, bus events, APIs."""
    self.mysql = mysql_svc.MySQL()
    # mariadb dont do old presets
    cnf_ctl = MysqlCnfController() if __mysql__['behavior'] in ('mysql2', 'percona') else None
    ServiceCtlHandler.__init__(self, __mysql__['behavior'], self.mysql.service, cnf_ctl)

    self.preset_provider = mysql_svc.MySQLPresetProvider()
    preset_service.services[__mysql__['behavior']] = self.preset_provider

    bus.on(init=self.on_init, reload=self.on_reload)
    bus.define_events(
        'before_mysql_data_bundle',
        'mysql_data_bundle',
        # @param host: New master hostname
        'before_mysql_change_master',
        # @param host: New master hostname
        # @param log_file: log file to start from
        # @param log_pos: log pos to start from
        # FIX: a missing comma after this literal used to concatenate it with
        # the next one into a single bogus event name, so neither
        # 'mysql_change_master' nor 'before_slave_promote_to_master' was defined
        'mysql_change_master',
        'before_slave_promote_to_master',
        'slave_promote_to_master'
    )

    self._mysql_api = mysql_api.MySQLAPI()
    self._op_api = operation_api.OperationAPI()
    self._backup_id = None
    self._data_bundle_id = None
    self.on_reload()
def on_init(self, *args, **kwds):
    """Subscribe lifecycle callbacks on the bus."""
    LOG.debug('Called on_init')
    bus.on(reload=self.on_reload,
           start=self.on_start,
           before_host_up=self.on_before_host_up,
           before_reboot_start=self.on_before_reboot_start)
def __init__(self):
    """Memcached handler: presets, farm firewall, service-control base."""
    self.preset_provider = MemcachedPresetProvider()
    FarmSecurityMixin.__init__(self)
    self.init_farm_security([11211])  # memcached default port
    ServiceCtlHandler.__init__(self, BEHAVIOUR, memcached_api.MemcachedInitScript())
    self._logger = logging.getLogger(__name__)
    self._queryenv = bus.queryenv_service
    bus.on("init", self.on_init)
def __init__(self):
    """Cassandra handler; refuses to start without a working iptables."""
    self._logger = logging.getLogger(__name__)
    self._iptables = iptables
    if not self._iptables.enabled():
        raise HandlerError('iptables is not installed. iptables is required for cassandra behaviour')
    bus.on("init", self.on_init)
def __init__(self):
    """Memcached handler: presets, init-script control, farm firewall."""
    self.preset_provider = MemcachedPresetProvider()
    preset_service.services[BEHAVIOUR] = self.preset_provider
    ServiceCtlHandler.__init__(self, SERVICE_NAME,
                               initdv2.lookup('memcached'),
                               MemcachedCnfController())
    FarmSecurityMixin.__init__(self, [11211])  # memcached default port
    self._logger = logging.getLogger(__name__)
    self._queryenv = bus.queryenv_service
    bus.on("init", self.on_init)
def __init__(self):
    """Tomcat handler: wraps the Tomcat API and its service object."""
    handlers.Handler.__init__(self)
    bus.on(init=self.on_init, start=self.on_start)
    self.api = tomcat_api.TomcatAPI()
    self.service = self.api.service
def __init__(self, num_entries = 20, send_interval = '30s'):
    """Buffering log handler flushing at most *num_entries* per interval."""
    logging.Handler.__init__(self)
    m = INTERVAL_RE.match(send_interval)
    # Interval in whole seconds; floor of 1 so the sender always has a period
    seconds = int(m.group('seconds') or 0)
    minutes = int(m.group('minutes') or 0)
    self.send_interval = (seconds + 60 * minutes) or 1
    self.num_entries = num_entries
    self._logger = logging.getLogger(__name__)
    bus.on("shutdown", self.on_shutdown)
def __init__(self):
    """Memcached handler: presets, farm firewall, service-control base."""
    self.preset_provider = MemcachedPresetProvider()
    preset_service.services[BEHAVIOUR] = self.preset_provider
    FarmSecurityMixin.__init__(self, [11211])  # memcached default port
    ServiceCtlHandler.__init__(self, BEHAVIOUR, MemcachedInitScript())
    self._logger = logging.getLogger(__name__)
    self._queryenv = bus.queryenv_service
    bus.on("init", self.on_init)
def __init__(self):
    """HAProxy handler: API instance plus reload/init subscriptions."""
    LOG.debug("HAProxyHandler __init__")
    self.api = haproxy_api.HAProxyAPI()
    self._proxies = None
    self.on_reload()
    bus.on(init=self.on_init, reload=self.on_reload)
def __init__(self, num_entries=20, send_interval='30s'):
    """Buffering log handler flushing at most *num_entries* per interval."""
    logging.Handler.__init__(self)
    m = INTERVAL_RE.match(send_interval)
    # Interval in whole seconds; floor of 1 so the sender always has a period
    seconds = int(m.group('seconds') or 0)
    minutes = int(m.group('minutes') or 0)
    self.send_interval = (seconds + 60 * minutes) or 1
    self.num_entries = num_entries
    self._logger = logging.getLogger(__name__)
    bus.on("shutdown", self.on_shutdown)
def on_init(self):
    """Subscribe events, add firewall rules, pause service while configuring."""
    bus.on(start=self.on_start,
           before_host_up=self.on_before_host_up,
           host_init_response=self.on_host_init_response)
    self._insert_iptables_rules()
    if __node__['state'] == ScalarizrState.BOOTSTRAPPING:
        self._stop_service('Configuring')
def on_init(self):
    """Wire CloudFoundry callbacks — only on the cloud-controller node."""
    if not is_cloud_controller():
        return
    bus.on(start=self.on_start,
           host_init_response=self.on_host_init_response,
           before_host_up=self.on_before_host_up,
           reload=self.on_reload)
    self._init_objects()
def init_farm_security(self, ports):
    """Enable farm firewall protection for *ports* when iptables is usable."""
    self._logger = logging.getLogger(__name__)
    self._ports = ports
    self._iptables = iptables
    if self._iptables.enabled():
        bus.on(reload=self.__on_reload)
        self.__on_reload()
        self.__insert_iptables_rules()
        self.__enabled = True
def __init__(self):
    """Chef handler: lazy state, platform handle, chef init script."""
    super(ChefHandler, self).__init__()
    bus.on(init=self.on_init)
    self._chef_data = None
    self._run_list = None
    self._with_json_attributes = None
    self._platform = bus.platform
    self._global_variables = {}
    self._init_script = initdv2.lookup('chef')
def __init__(self, ports, enabled=True):
    """Protect *ports* with farm firewall rules when iptables is available."""
    self._logger = logging.getLogger(__name__)
    self._ports = ports
    self._enabled = enabled
    self._iptables = iptables
    if not self._iptables.enabled():
        LOG.warn("iptables is not enabled. ports %s won't be protected by firewall" % (ports, ))
    else:
        bus.on('init', self.__on_init)
def __init__(self):
    """Deploy handler: log shipping, lazy script executor, phase labels."""
    self._logger = logging.getLogger(__name__)
    self._log_hdlr = DeployLogHandler()
    self._script_executor = None
    self._phase_deploy = 'Deploy'
    self._step_execute_pre_deploy_script = 'Execute pre deploy script'
    self._step_execute_post_deploy_script = 'Execute post deploy script'
    self._step_update_from_scm = 'Update from SCM'
    bus.on(init=self.on_init)
def __init__(self):
    """Lifecycle handler: declares all host lifecycle bus events."""
    super(LifeCycleHandler, self).__init__()
    self._logger = logging.getLogger(__name__)
    self._op_api = operation.OperationAPI()
    self._system_api = system_api.SystemAPI()

    bus.define_events(
        # Fires before HostInit message is sent
        # @param msg
        "before_host_init",
        # Fires after HostInit message is sent
        "host_init",
        # Fires when HostInitResponse received
        # @param msg
        "host_init_response",
        # Fires before HostUp message is sent
        # @param msg
        "before_host_up",
        # Fires after HostUp message is sent
        "host_up",
        # Fires before RebootStart message is sent
        # @param msg
        "before_reboot_start",
        # Fires after RebootStart message is sent
        "reboot_start",
        # Fires before RebootFinish message is sent
        # @param msg
        "before_reboot_finish",
        # Fires after RebootFinish message is sent
        "reboot_finish",
        # Fires before Restart message is sent
        # @param msg: Restart message
        "before_restart",
        # Fires after Restart message is sent
        "restart",
        # Fires before Hello message is sent
        # @param msg
        "before_hello",
        # Fires after Hello message is sent
        "hello",
        # Fires after HostDown message is sent
        # @param msg
        "before_host_down",
        # Fires after HostDown message is sent
        "host_down",
        #
        # Service events
        #
        # Fires when behaviour is configured
        # @param service_name: Service name. Ex: mysql
        "service_configured",
    )

    bus.on(init=self.on_init,
           start=self.on_start,
           reload=self.on_reload,
           shutdown=self.on_shutdown)
    self.on_reload()
def __init__(self):
    """Apache handler: API, preset provider, rpaf reload event."""
    Handler.__init__(self)
    self._initial_preset = None
    self._initial_v_hosts = []
    self._queryenv = bus.queryenv_service
    self.api = apache_api.ApacheAPI()
    self.preset_provider = ApachePresetProvider()
    bus.on(init=self.on_init)
    bus.define_events("apache_rpaf_reload")
def __init__(self):
    """SSH handler: locate the per-distro sshd init script."""
    self._logger = logging.getLogger(__name__)
    # Pick the service wrapper appropriate for the distribution
    if linux.os.redhat_family:
        init_script = ('/sbin/service', 'sshd')
    elif linux.os.ubuntu and linux.os['version'] >= (10, 4):
        init_script = ('/usr/sbin/service', 'ssh')
    else:
        init_script = firstmatched(os.path.exists,
                                   ('/etc/init.d/ssh', '/etc/init.d/sshd'))
    self._sshd_init = ParametrizedInitScript('sshd', init_script)
    bus.on(init=self.on_init)
def on_init(self):
    """Wire CloudFoundry callbacks (controller only) and phase labels."""
    if is_cloud_controller():
        bus.on(start=self.on_start,
               host_init_response=self.on_host_init_response,
               before_host_up=self.on_before_host_up,
               reload=self.on_reload)
        self._init_objects()
    self._phase_cloudfoundry = 'Configure CloudFoundry'
    self._step_create_storage = 'Create VCAP data storage'
    self._step_locate_nginx = 'Locate Nginx frontend'
    self._step_create_database = 'Create CloudController database'
def __init__(self):
    """SSH handler: locate the per-distro sshd init script (disttool API)."""
    self._logger = logging.getLogger(__name__)
    # Pick the service wrapper appropriate for the distribution
    if disttool.is_redhat_based():
        init_script = ('/sbin/service', 'sshd')
    elif disttool.is_ubuntu() and disttool.version_info() >= (10, 4):
        init_script = ('/usr/sbin/service', 'ssh')
    else:
        init_script = firstmatched(os.path.exists,
                                   ('/etc/init.d/ssh', '/etc/init.d/sshd'))
    self._sshd_init = ParametrizedInitScript('sshd', init_script)
    bus.on(init=self.on_init)
def __init__(self):
    """Nginx handler: service control, presets, API, upstream-reload event."""
    self._cnf = bus.cnf
    ServiceCtlHandler.__init__(self, BEHAVIOUR,
                               initdv2.lookup('nginx'),
                               NginxCnfController())
    self._logger = logging.getLogger(__name__)
    self.preset_provider = NginxPresetProvider()
    self.api = NginxAPI()
    self._terminating_servers = []
    preset_service.services[BEHAVIOUR] = self.preset_provider
    bus.define_events("nginx_upstream_reload")
    bus.on(init=self.on_init, reload=self.on_reload)
    self.on_reload()
def __init__(self):
    """RabbitMQ handler; on EC2, don't use public DNS as hostname."""
    bus.on("init", self.on_init)
    self._logger = logging.getLogger(__name__)
    self.rabbitmq = rabbitmq_svc.rabbitmq
    self.service = initdv2.lookup(BuiltinBehaviours.RABBITMQ)
    self._service_name = BEHAVIOUR
    self.on_reload()
    if 'ec2' == self.platform.name:
        self._logger.debug('Setting hostname_as_pubdns to 0')
        __ec2__ = __node__['ec2']
        __ec2__['hostname_as_pubdns'] = 0
def __init__(self):
    """RabbitMQ handler; on EC2, don't use public DNS as hostname."""
    bus.on("init", self.on_init)
    self._logger = logging.getLogger(__name__)
    self.rabbitmq = rabbitmq_svc.RabbitMQ()
    self.service = initdv2.lookup(BuiltinBehaviours.RABBITMQ)
    self._service_name = BEHAVIOUR
    self.on_reload()
    if 'ec2' == self.platform.name:
        self._logger.debug('Setting hostname_as_pubdns to 0')
        __ec2__ = __node__['ec2']
        __ec2__['hostname_as_pubdns'] = 0
def __init__(self):
    """Nginx handler (v2-flag aware): service control, presets, API."""
    self._cnf = bus.cnf
    self._nginx_v2_flag_filepath = os.path.join(bus.etc_path, "private.d/nginx_v2")
    ServiceCtlHandler.__init__(self, BEHAVIOUR,
                               initdv2.lookup('nginx'),
                               NginxCnfController())
    self._logger = logging.getLogger(__name__)
    self.preset_provider = NginxPresetProvider()
    self.api = NginxAPI()
    self.api.init_service()
    self._terminating_servers = []
    bus.define_events("nginx_upstream_reload")
    bus.on(init=self.on_init, reload=self.on_reload)
    self.on_reload()
def on_init(self, *args, **kwargs):
    """EC2 lifecycle init: clock sync, message hooks, hostname, ssh, mounts.

    FIX: the three bare ``except:`` clauses were narrowed to
    ``except Exception:`` — best-effort semantics are preserved, but
    SystemExit/KeyboardInterrupt are no longer swallowed.
    """
    bus.on("before_hello", self.on_before_hello)
    bus.on("before_host_init", self.on_before_host_init)
    bus.on("before_restart", self.on_before_restart)
    bus.on("before_reboot_finish", self.on_before_reboot_finish)

    # Best-effort clock sync; instance may have no outbound NTP
    try:
        system(('ntpdate', '-u', '0.amazon.pool.ntp.org'))
    except Exception:
        pass

    msg_service = bus.messaging_service
    producer = msg_service.get_producer()
    producer.on("before_send", self.on_before_message_send)

    if not os_dist.windows_family and not __node__.get('hostname'):
        # Set the hostname to this instance's public hostname
        try:
            hostname_as_pubdns = int(__ec2__['hostname_as_pubdns'])
        except Exception:
            hostname_as_pubdns = True
        if hostname_as_pubdns:
            pub_hostname = self._platform.get_public_hostname()
            self._logger.debug('Setting hostname to %s' % pub_hostname)
            system2("hostname " + pub_hostname, shell=True)

    if disttool.is_ubuntu():
        # Ubuntu cloud-init scripts may disable root ssh login
        for path in ('/etc/ec2-init/ec2-config.cfg', '/etc/cloud/cloud.cfg'):
            if os.path.exists(path):
                c = None
                with open(path, 'r') as fp:
                    c = fp.read()
                c = re.sub(re.compile(r'^disable_root[^:=]*([:=]).*', re.M),
                           r'disable_root\1 0', c)
                with open(path, 'w') as fp:
                    fp.write(c)

    if not linux.os.windows_family:
        # Add server ssh public key to authorized_keys
        ssh_key = self._platform.get_ssh_pub_key()
        if ssh_key:
            add_authorized_key(ssh_key)

    # Mount ephemeral devices
    # Seen on eucalyptus:
    #   - fstab contains invalid fstype and `mount -a` fails
    if self._platform.name == 'eucalyptus':
        mtab = mount.mounts()
        fstab = mount.fstab()
        for device in self._platform.instance_store_devices:
            if os.path.exists(device) and device in fstab and device not in mtab:
                entry = fstab[device]
                try:
                    mount.mount(device, entry.mpoint, '-o', entry.options)
                except Exception:
                    self._logger.warn(sys.exc_info()[1])
    else:
        if not os_dist.windows_family:
            system2('mount -a', shell=True, raise_exc=False)
def on_init(self):
    """Wire CloudFoundry callbacks (controller only) and phase labels."""
    if is_cloud_controller():
        bus.on(start=self.on_start,
               host_init_response=self.on_host_init_response,
               before_host_up=self.on_before_host_up,
               reload=self.on_reload)
        self._init_objects()
    self._phase_cloudfoundry = 'Configure CloudFoundry'
    self._step_create_storage = 'Create VCAP data storage'
    self._step_locate_nginx = 'Locate Nginx frontend'
    self._step_create_database = 'Create CloudController database'
def __init__(self):
    """RabbitMQ handler; requires rabbitmqctl, tweaks EC2 hostname config."""
    if not software.whereis('rabbitmqctl'):
        raise HandlerError("Rabbitmqctl binary was not found. Check your installation.")
    bus.on("init", self.on_init)
    self._logger = logging.getLogger(__name__)
    self.rabbitmq = rabbitmq_svc.rabbitmq
    self.service = initdv2.lookup(BuiltinBehaviours.RABBITMQ)
    self._service_name = BEHAVIOUR
    self.on_reload()
    if 'ec2' == self.platform.name:
        updates = dict(hostname_as_pubdns = '0')
        self.cnf.update_ini('ec2', {'ec2': updates}, private=False)
def __init__(self):
    """Script-execution handler: work queue, operation labels, services."""
    self.queue = Queue.Queue()
    self.in_progress = []
    bus.on(init=self.on_init, start=self.on_start, shutdown=self.on_shutdown)
    # Operations
    self._op_exec_scripts = 'Execute scripts'
    self._step_exec_tpl = "Execute '%s' in %s mode"
    # Services
    self._cnf = bus.cnf
    self._queryenv = bus.queryenv_service
    self._platform = bus.platform
def __init__(self):
    """RabbitMQ handler; requires rabbitmqctl, disables pubdns hostname on EC2."""
    if not software.whereis('rabbitmqctl'):
        raise HandlerError("Rabbitmqctl binary was not found. Check your installation.")
    bus.on("init", self.on_init)
    self._logger = logging.getLogger(__name__)
    self.rabbitmq = rabbitmq_svc.rabbitmq
    self.service = initdv2.lookup(BuiltinBehaviours.RABBITMQ)
    self._service_name = BEHAVIOUR
    self.on_reload()
    if 'ec2' == self.platform.name:
        self._logger.debug('Setting hostname_as_pubdns to 0')
        __ec2__ = __node__['ec2']
        __ec2__['hostname_as_pubdns'] = 0
def __init__(self):
    """Redis behaviour handler: API, presets, firewall range, events, steps."""
    self._redis_api = redis_api.RedisAPI()
    self.preset_provider = redis.RedisPresetProvider()
    preset_service.services[BEHAVIOUR] = self.preset_provider

    # Open the whole configured redis port range in the farm firewall
    from_port = __redis__['ports_range'][0]
    to_port = __redis__['ports_range'][-1]
    handlers.FarmSecurityMixin.__init__(
        self, ["{0}:{1}".format(from_port, to_port)])
    ServiceCtlHandler.__init__(self, SERVICE_NAME, cnf_ctl=RedisCnfController())

    bus.on("init", self.on_init)
    bus.define_events(
        'before_%s_data_bundle' % BEHAVIOUR,
        '%s_data_bundle' % BEHAVIOUR,
        # @param host: New master hostname
        'before_%s_change_master' % BEHAVIOUR,
        # @param host: New master hostname
        '%s_change_master' % BEHAVIOUR,
        'before_slave_promote_to_master',
        'slave_promote_to_master')

    self._phase_redis = 'Configure Redis'
    self._phase_data_bundle = self._op_data_bundle = 'Redis data bundle'
    self._phase_backup = self._op_backup = 'Redis backup'
    self._step_copy_database_file = 'Copy database file'
    self._step_upload_to_cloud_storage = 'Upload data to cloud storage'
    self._step_accept_scalr_conf = 'Accept Scalr configuration'
    self._step_patch_conf = 'Patch configuration files'
    self._step_create_storage = 'Create storage'
    self._step_init_master = 'Initialize Master'
    self._step_init_slave = 'Initialize Slave'
    self._step_create_data_bundle = 'Create data bundle'
    self._step_change_replication_master = 'Change replication Master'
    self._step_collect_host_up_data = 'Collect HostUp data'

    self.on_reload()

    if self._cnf.state == ScalarizrState.RUNNING:
        # Fix to enable access outside farm when use_passwords=True
        if self.use_passwords:
            self.security_off()
def on_init(self):
    """Prepare script-executor paths, log dir and the log-rotation thread.

    FIX: ``os.path.join(bus.base_path, exec_dir_prefix)`` was computed but
    its result discarded, so a relative ``exec_dir_prefix`` was never made
    absolute; the result is now assigned back to the module-level global.
    """
    global exec_dir_prefix, logs_dir, logs_truncate_over

    bus.on(host_init_response=self.on_host_init_response,
           before_host_up=self.on_before_host_up)

    # Configuration
    cnf = bus.cnf
    ini = cnf.rawini

    # TODO: completely remove ini options — exec_dir_prefix and logs_dir
    # used to be read from the ini here (see VCS history for the dead code)
    if not os.path.isabs(exec_dir_prefix):
        exec_dir_prefix = os.path.join(bus.base_path, exec_dir_prefix)

    if not os.path.exists(logs_dir):
        os.makedirs(logs_dir)

    # logs_truncate_over
    try:
        logs_truncate_over = parse_size(ini.get(self.name, 'logs_truncate_over'))
    except ConfigParser.Error:
        pass

    self.log_rotate_runnable = LogRotateRunnable()
    self.log_rotate_thread = threading.Thread(
        name='ScriptingLogRotate', target=self.log_rotate_runnable)
    self.log_rotate_thread.setDaemon(True)
def on_init(self):
    """Subscribe host events; ensure farm-role volumes on a running node."""
    bus.on(before_host_init=self.on_before_host_init,
           host_init_response=self.on_host_init_response,
           before_host_up=self.on_before_host_up)
    try:
        handlers.script_executor.skip_events.add(
            Messages.INT_BLOCK_DEVICE_UPDATED)
    except AttributeError:
        pass

    if __node__['state'] == 'running':
        volumes = self._queryenv.list_farm_role_params(
            __node__['farm_role_id']).get('params', {}).get('volumes', [])
        volumes = volumes or []  # Cast to list
        for vol in volumes:
            vol = storage2.volume(vol)
            vol.ensure(mount=bool(vol.mpoint))
def on_init(self):
    """Subscribe message hooks, skip internal messages, mount, add firewall.

    FIX: the skip-list was populated with ``map(add, ...)`` — a side-effect
    ``map`` that becomes a silent no-op on Python 3 where ``map`` is lazy;
    an explicit loop works identically on both versions.
    """
    bus.on("host_init_response", self.on_host_init_response)
    self._producer.on("before_send", self.on_before_message_send)

    # Add internal messages to scripting skip list
    try:
        for msg in (Messages.INT_SERVER_REBOOT,
                    Messages.INT_SERVER_HALT,
                    Messages.HOST_INIT_RESPONSE):
            scalarizr.handlers.script_executor.skip_events.add(msg)
    except AttributeError:
        pass

    # Mount all filesystems
    system2(('mount', '-a'), raise_exc=False)

    # Add firewall rules
    self._insert_iptables_rules()
def __init__(self, vol_type):
    """Block-device handler for volumes of the given *vol_type*."""
    super(BlockDeviceHandler, self).__init__()
    self._vol_type = vol_type
    self._volumes = []
    self.on_reload()
    bus.on(init=self.on_init, reload=self.on_reload)
    bus.define_events(
        # Fires when volume is attached to instance
        # @param device: device name, ex: /dev/sdf
        "block_device_attached",
        # Fires when volume is detached from instance
        # @param device: device name, ex: /dev/sdf
        "block_device_detached",
        # Fires when volume is mounted
        # @param device: device name, ex: /dev/sdf
        "block_device_mounted")