def on_before_host_up(self, message): """ Configure redis behaviour @type message: scalarizr.messaging.Message @param message: HostUp message """ repl = 'master' if self.is_replication_master else 'slave' message.redis = {} if self.is_replication_master: self._init_master(message) else: self._init_slave(message) __redis__['volume'].tags = self.redis_tags __redis__['volume'] = storage2.volume(__redis__['volume']) self._init_script = self.redis_instances.get_default_process() message.redis['ports'] = self.redis_instances.ports message.redis['passwords'] = self.redis_instances.passwords message.redis['num_processes'] = len(self.redis_instances.instances) message.redis['volume_config'] = dict(__redis__['volume']) bus.fire('service_configured', service_name=SERVICE_NAME, replication=repl, preset=self.initial_preset)
def on_before_host_up(self, message):
    self._logger.debug('Handling on_before_host_up message')

    self.api.init_service()

    log = bus.init_op.logger
    self._init_script.stop()

    log.info('Copy default html error pages')
    self._copy_error_pages()

    log.info('Setup proxying')
    self._logger.debug('Updating main config')
    v2_mode = bool(self._proxies) or self._get_nginx_v2_mode_flag()
    self.api._update_main_config(remove_server_section=v2_mode,
                                 reload_service=False)

    if v2_mode:
        self._logger.debug('Recreating proxying with proxies:\n%s' % self._proxies)
        self.api.recreate_proxying(self._proxies)
    else:
        # default behaviour
        roles_for_proxy = []
        if __nginx__['upstream_app_role']:
            roles_for_proxy = [__nginx__['upstream_app_role']]
        else:
            roles_for_proxy = get_all_app_roles()
        self.api.make_default_proxy(roles_for_proxy)

    bus.fire('service_configured',
             service_name=SERVICE_NAME,
             preset=self.initial_preset)
def on_before_host_up(self, message):
    self._defer_init()

    op_log = bus.init_op.logger
    self.api.stop_service("Configuring Apache Web Server")

    self.api.init_service()
    self.configure_ip_forwarding()

    if self._initial_v_hosts:
        op_log.info("Configuring VirtualHosts.")
        LOG.debug("VirtualHosts to configure: %s" % self._initial_v_hosts)

        applied_vhosts = self.api.reconfigure(self._initial_v_hosts,
                                              reload=False,
                                              rollback_on_error=False,
                                              async=False)
        if len(applied_vhosts) != len(self._initial_v_hosts):
            raise apache_api.ApacheError(
                "%s Apache VirtualHosts were assigned to server but only %s were applied." % (
                    len(self._initial_v_hosts),
                    len(applied_vhosts),
                ))

        op_log.info("%s Virtual Hosts configured." % len(applied_vhosts))

    self.api.start_service()

    bus.fire("service_configured",
             service_name=SERVICE_NAME,
             preset=self._initial_preset)
def on_DbMsr_NewMasterUp(self, message): """ Switch replication to a new master server @type message: scalarizr.messaging.Message @param message: DbMsr_NewMasterUp """ try: assert message.body.has_key("db_type") assert message.body.has_key("local_ip") assert message.body.has_key("remote_ip") assert message.body.has_key(BEHAVIOUR) postgresql_data = message.body[BEHAVIOUR] if int(__postgresql__['replication_master']): LOG.debug('Skip NewMasterUp. My replication role is master') return host = message.local_ip or message.remote_ip LOG.info("Switching replication to a new PostgreSQL master %s", host) bus.fire('before_postgresql_change_master', host=host) LOG.debug("__postgresql__['volume']: %s", __postgresql__['volume']) if __postgresql__['volume'].type in ('eph', 'lvm'): if 'restore' in postgresql_data: restore = backup.restore(**postgresql_data['restore']) else: restore = backup.restore( type='snap_postgresql', volume=__postgresql__['volume'], snapshot=postgresql_data[OPT_SNAPSHOT_CNF]) if __postgresql__['volume'].type == 'eph': self.postgresql.service.stop('Swapping storages to reinitialize slave') LOG.info('Reinitializing Slave from the new snapshot %s', restore.snapshot['id']) new_vol = restore.run() #self.postgresql.service.start() self.postgresql.init_slave(STORAGE_PATH, host, __postgresql__['port'], self.root_password) LOG.debug("Replication switched") bus.fire('postgresql_change_master', host=host) msg_data = dict( db_type = BEHAVIOUR, status = 'ok' ) self.send_message(DbMsrMessages.DBMSR_NEW_MASTER_UP_RESULT, msg_data) except (Exception, BaseException), e: LOG.exception(e) msg_data = dict( db_type = BEHAVIOUR, status="error", last_error=str(e)) self.send_message(DbMsrMessages.DBMSR_NEW_MASTER_UP_RESULT, msg_data)
def on_IntBlockDeviceUpdated(self, message): if not message.devname: return if message.action == "add": LOG.debug("udev notified me that block device %s was attached", message.devname) self.send_message( Messages.BLOCK_DEVICE_ATTACHED, {"device_name": self.get_devname(message.devname)}, broadcast=True) bus.fire("block_device_attached", device=message.devname) elif message.action == "remove": LOG.debug("udev notified me that block device %s was detached", message.devname) fstab = mount.fstab() fstab.remove(message.devname) self.send_message( Messages.BLOCK_DEVICE_DETACHED, {"device_name": self.get_devname(message.devname)}, broadcast=True) bus.fire("block_device_detached", device=message.devname)
def mount(self):
    self._check(mpoint=True)

    mounted_to = self.mounted_to()
    if mounted_to == self.mpoint:
        return
    elif mounted_to:
        LOG.debug('Umounting %s from %s', self.id, mounted_to)
        self.umount()

    if not os.path.exists(self.mpoint):
        os.makedirs(self.mpoint)

    short_args = []
    if self.mount_options:
        short_args += ['-o', ','.join(self.mount_options)]

    LOG.debug('Mounting %s to %s', self.id, self.mpoint)
    try:
        mod_mount.mount(self.device, self.mpoint, *short_args)
    except linux.LinuxError as e:
        # XXX: SCALARIZR-1974 is for complete solution
        if linux.os.ubuntu and \
                re.search(r'already mounted or /mnt/(dbstorage|pgstorage|redisstorage) busy', str(e)):
            LOG.debug(
                'upstart already mounted database storage volume {} to {}'.format(
                    self.device, self.mpoint))
        else:
            raise

    bus.fire("block_device_mounted", volume=self)
def on_before_host_up(self, message):
    self._logger.debug('Handling on_before_host_up message')

    log = bus.init_op.logger
    self._init_script.stop()

    log.info('Copy default html error pages')
    self._copy_error_pages()

    log.info('Setup proxying')
    self._logger.debug('Updating main config')
    v2_mode = bool(self._proxies) or self._get_nginx_v2_mode_flag()
    self._update_main_config(remove_server_section=v2_mode,
                             reload_service=False)

    if v2_mode:
        self._logger.debug('Recreating proxying with proxies:\n%s' % self._proxies)
        self.api.recreate_proxying(self._proxies)
    else:
        # default behaviour
        roles_for_proxy = []
        if __nginx__['upstream_app_role']:
            roles_for_proxy = [__nginx__['upstream_app_role']]
        else:
            roles_for_proxy = self.get_all_app_roles()
        self.make_default_proxy(roles_for_proxy)

    bus.fire('service_configured',
             service_name=SERVICE_NAME,
             preset=self.initial_preset)
def test_create_hook(self):
    bus.base_path = os.path.realpath(os.path.dirname(__file__) + "/../../..")
    config = ConfigParser()
    config.read(bus.base_path + "/etc/config.ini")
    bus.config = config
    init_tests()

    resources_path = os.path.realpath(os.path.dirname(__file__) + "/../../" + "resources")
    bus.base_path = resources_path
    server_id = config.get(configtool.SECT_GENERAL, configtool.OPT_SERVER_ID)

    bus.define_events("init", "test")
    handler = hooks.HooksHandler()
    bus.fire('init')
    bus.fire("test", "test_2_done", aenv="test_3_done")

    # An absolutely valid script created an empty file
    self.assertTrue(os.path.exists(resources_path + "/hooks/test_1_done"))
    # The next script created a file named after the 1st execution parameter
    self.assertTrue(os.path.exists(resources_path + "/hooks/test_2_done"))
    # The 3rd script touched a file named $aenv
    self.assertTrue(os.path.exists(resources_path + "/hooks/test_3_done"))
    # Test 4 touched a file named after server_id
    self.assertTrue(os.path.exists(resources_path + "/hooks/" + server_id))
    # Test 5 doesn't have an execution bit
    self.assertFalse(os.path.exists(resources_path + "/hooks/test_5_done"))
    # Test 6 tried to execute a script file that fails with 'Exec format error'
    self.assertFalse(os.path.exists(resources_path + "/hooks/test_6_done"))
    # Test 7 contains an execution error
    self.assertTrue(os.path.exists(resources_path + "/hooks/test_7_done"))
    # The test 8 script does not have a valid name, so it is not supposed to be executed
    self.assertFalse(os.path.exists(resources_path + "/hooks/test_8_done"))
def do_databundle(op): try: bus.fire('before_%s_data_bundle' % BEHAVIOUR) # Creating snapshot LOG.info("Creating Redis data bundle") backup_obj = backup.backup(type='snap_redis', volume=__redis__['volume'], tags=__redis__['volume'].tags) # TODO: generate the same way as in # mysql api or use __node__ restore = backup_obj.run() snap = restore.snapshot used_size = int(system2(('df', '-P', '--block-size=M', STORAGE_PATH))[0].split('\n')[1].split()[2][:-1]) bus.fire('%s_data_bundle' % BEHAVIOUR, snapshot_id=snap.id) # Notify scalr msg_data = dict( db_type=BEHAVIOUR, used_size='%.3f' % (float(used_size) / 1000,), status='ok' ) msg_data[BEHAVIOUR] = {'snapshot_config': dict(snap)} node.__node__.messaging.send(DbMsrMessages.DBMSR_CREATE_DATA_BUNDLE_RESULT, msg_data) return restore except (Exception, BaseException), e: LOG.exception(e) # Notify Scalr about error node.__node__.messaging.send(DbMsrMessages.DBMSR_CREATE_DATA_BUNDLE_RESULT, dict( db_type=BEHAVIOUR, status='error', last_error=str(e)))
def on_IntBlockDeviceUpdated(self, message): if not message.devname: return if message.action == "add": LOG.debug("udev notified me that block device %s was attached", message.devname) self.send_message( Messages.BLOCK_DEVICE_ATTACHED, {"device_name" : self.get_devname(message.devname)}, broadcast=True ) bus.fire("block_device_attached", device=message.devname) elif message.action == "remove": LOG.debug("udev notified me that block device %s was detached", message.devname) fstab = mount.fstab() fstab.remove(message.devname) self.send_message( Messages.BLOCK_DEVICE_DETACHED, {"device_name" : self.get_devname(message.devname)}, broadcast=True ) bus.fire("block_device_detached", device=message.devname)
def on_before_host_up(self, message):
    op_log = bus.init_op.logger
    self.api.stop_service("Configuring Apache Web Server")

    self.api.init_service()
    self.configure_ip_forwarding()

    if self._initial_v_hosts:
        op_log.info("Configuring VirtualHosts.")
        LOG.debug("VirtualHosts to configure: %s" % self._initial_v_hosts)

        applied_vhosts = self.api.reconfigure(self._initial_v_hosts,
                                              reload=False,
                                              rollback_on_error=False,
                                              async=False)
        if len(applied_vhosts) != len(self._initial_v_hosts):
            raise apache_api.ApacheError(
                "%s Apache VirtualHosts were assigned to server but only %s were applied." % (
                    len(self._initial_v_hosts),
                    len(applied_vhosts),
                ))

        op_log.info("%s Virtual Hosts configured." % len(applied_vhosts))

    self.api.start_service()

    bus.fire("service_configured",
             service_name=SERVICE_NAME,
             preset=self._initial_preset)
def _start_init(self):
    # Regenerate key
    new_crypto_key = cryptotool.keygen()

    # Prepare HostInit
    __node__['boot_time'] = time.time() - sysutil.uptime()
    msg = self.new_message(
        Messages.HOST_INIT,
        dict(
            seconds_since_start=float('%.2f' % (time.time() - __node__['start_time'],)),
            seconds_since_boot=float('%.2f' % (time.time() - __node__['boot_time'],)),
            #operation_id = bus.init_op.operation_id,
            crypto_key=new_crypto_key),
        broadcast=True)
    bus.fire("before_host_init", msg)

    result_msg = self.send_message(msg,
                                   new_crypto_key=new_crypto_key,
                                   handle_host_init=True)
    bus.cnf.state = ScalarizrState.INITIALIZING
    bus.fire("host_init")

    if result_msg and \
            parse_bool(result_msg.body.get('base', {}).get('reboot_after_hostinit_phase')):
        # apply setting from HostInit
        self._system_api.reboot()
        threading.Event().wait(600)
def _plug_volume(self, qe_mpoint):
    try:
        assert len(qe_mpoint.volumes), \
            'Invalid mpoint info %s. Volumes list is empty' % qe_mpoint
        qe_volume = qe_mpoint.volumes[0]
        mpoint = qe_mpoint.dir or None
        assert qe_volume.volume_id, \
            'Invalid volume info %s. volume_id should be non-empty' % qe_volume

        vol = storage2.volume(
            type=self._vol_type,
            id=qe_volume.volume_id,
            name=qe_volume.device,
            mpoint=mpoint)

        if mpoint:
            logger = bus.init_op.logger if bus.init_op else LOG
            logger.info('Ensure %s: take %s, mount to %s',
                        self._vol_type, vol.id, vol.mpoint)
            vol.ensure(mount=True, mkfs=True, fstab=True)

            bus.fire("block_device_mounted",
                     volume_id=vol.id, device=vol.device)
            self.send_message(Messages.BLOCK_DEVICE_MOUNTED,
                              {"device_name": vol.device,
                               "volume_id": vol.id,
                               "mountpoint": vol.mpoint})
    except:
        LOG.exception("Can't attach volume")
def on_before_host_up(self, message): LOG.debug("on_before_host_up") """ Configure MySQL __mysql__['behavior'] @type message: scalarizr.messaging.Message @param message: HostUp message """ self.generate_datadir() self.mysql.service.stop('Configuring MySQL') # On Debian/GCE we've got 'Another MySQL daemon already running with the same unix socket.' socket_file = mysql2_svc.my_print_defaults('mysqld').get('socket') if socket_file: coreutils.remove(socket_file) if 'Amazon' == linux.os['name']: self.mysql.my_cnf.pid_file = os.path.join(__mysql__['data_dir'], 'mysqld.pid') repl = 'master' if int(__mysql__['replication_master']) else 'slave' bus.fire('before_mysql_configure', replication=repl) if repl == 'master': self._init_master(message) else: self._init_slave(message) # Force to resave volume settings __mysql__['volume'] = storage2.volume(__mysql__['volume']) bus.fire('service_configured', service_name=__mysql__['behavior'], replication=repl, preset=self.initial_preset)
def _plug_volume(self, qe_mpoint):
    try:
        assert len(qe_mpoint.volumes), \
            'Invalid mpoint info %s. Volumes list is empty' % qe_mpoint
        qe_volume = qe_mpoint.volumes[0]
        mpoint = qe_mpoint.dir or None
        assert qe_volume.volume_id, \
            'Invalid volume info %s. volume_id should be non-empty' % qe_volume

        vol = storage2.volume(type=self._vol_type,
                              id=qe_volume.volume_id,
                              name=qe_volume.device,
                              mpoint=mpoint)

        if mpoint:
            logger = bus.init_op.logger if bus.init_op else LOG
            logger.info('Ensure %s: take %s, mount to %s',
                        self._vol_type, vol.id, vol.mpoint)
            vol.ensure(mount=True, mkfs=True, fstab=True)

            bus.fire("block_device_mounted",
                     volume_id=vol.id, device=vol.device)
            self.send_message(
                Messages.BLOCK_DEVICE_MOUNTED,
                {"device_name": vol.device,
                 "volume_id": vol.id,
                 "mountpoint": vol.mpoint})
    except:
        LOG.exception("Can't attach volume")
def do_databundle(op): try: bus.fire('before_postgresql_data_bundle') LOG.info("Creating PostgreSQL data bundle") backup_obj = backup.backup(type='snap_postgresql', volume=__postgresql__['volume'], tags=__postgresql__['volume'].tags) restore = backup_obj.run() snap = restore.snapshot used_size = int(system2(('df', '-P', '--block-size=M', STORAGE_PATH))[0].split('\n')[1].split()[2][:-1]) bus.fire('postgresql_data_bundle', snapshot_id=snap.id) # Notify scalr msg_data = { 'db_type': BEHAVIOUR, 'status': 'ok', 'used_size' : '%.3f' % (float(used_size) / 1000,), BEHAVIOUR: {OPT_SNAPSHOT_CNF: dict(snap)} } __node__.messaging.send(DbMsrMessages.DBMSR_CREATE_DATA_BUNDLE_RESULT, msg_data) return restore except (Exception, BaseException), e: LOG.exception(e) # Notify Scalr about error __node__.messaging.send(DbMsrMessages.DBMSR_CREATE_DATA_BUNDLE_RESULT, dict(db_type=BEHAVIOUR, status='error', last_error=str(e)))
def on_DbMsr_NewMasterUp(self, message): """ Switch replication to a new master server @type message: scalarizr.messaging.Message @param message: DbMsr__NewMasterUp """ if not message.body.has_key(BEHAVIOUR) or message.db_type != BEHAVIOUR: raise HandlerError( "DbMsr_NewMasterUp message for %s behaviour must have '%s' property and db_type '%s'" % BEHAVIOUR, BEHAVIOUR, BEHAVIOUR) if self.is_replication_master: LOG.debug('Skipping NewMasterUp. My replication role is master') return host = message.local_ip or message.remote_ip LOG.info("Switching replication to a new %s master %s" % (BEHAVIOUR, host)) bus.fire('before_%s_change_master' % BEHAVIOUR, host=host) self.redis_instances.init_as_slaves(self._storage_path, host) self.redis_instances.wait_for_sync() LOG.debug("Replication switched") bus.fire('%s_change_master' % BEHAVIOUR, host=host)
def on_IntServerHalt(self, message):
    Flag.set(Flag.HALT)
    msg = self.new_message(Messages.HOST_DOWN, broadcast=True)
    try:
        bus.fire("before_host_down", msg)
    finally:
        self.send_message(msg)
    bus.fire("host_down")
def on_IntServerReboot(self, message):
    # Scalarizr must detect that it was resumed after reboot
    self._set_flag(self.FLAG_REBOOT)

    # Send message
    msg = self.new_message(Messages.REBOOT_START, broadcast=True)
    try:
        bus.fire("before_reboot_start", msg)
    finally:
        self.send_message(msg)
    bus.fire("reboot_start")
def on_IntServerReboot(self, message): # Scalarizr must detect that it was resumed after reboot Flag.set(Flag.REBOOT) # Send message msg = self.new_message(Messages.REBOOT_START, broadcast=True) try: bus.fire("before_reboot_start", msg) finally: self.send_message(msg) bus.fire("reboot_start")
def _reload_service(self, reason=None):
    LOG.info("Reloading %s%s", self._service_name,
             '. (%s)' % reason if reason else '')
    try:
        self._init_script.reload()
        bus.fire(self._service_name + '_reload')
    except initdv2.InitdError, e:
        if e.code == initdv2.InitdError.NOT_RUNNING:
            LOG.debug('%s not running', self._service_name)
        else:
            raise
def on_IntServerReboot(self, message): # Scalarizr must detect that it was resumed after reboot Flag.set(Flag.REBOOT) # Send message msg = self.new_message(Messages.REBOOT_START, broadcast=True) try: bus.fire("before_reboot_start", msg) finally: #mesage now send in scripts/reboot.py #self.send_message(msg) pass bus.fire("reboot_start")
def mount(self):
    self._check(mpoint=True)

    mounted_to = self.mounted_to()
    if mounted_to == self.mpoint:
        return
    elif mounted_to:
        LOG.debug('Umounting %s from %s', self.id, mounted_to)
        self.umount()

    if not os.path.exists(self.mpoint):
        os.makedirs(self.mpoint)

    LOG.debug('Mounting %s to %s', self.id, self.mpoint)
    mod_mount.mount(self.device, self.mpoint)

    bus.fire("block_device_mounted", volume=self)
def _start_import(self):
    data = software.system_info()
    data['architecture'] = self._platform.get_architecture()
    data['server_id'] = self._cnf.rawini.get(config.SECT_GENERAL, config.OPT_SERVER_ID)

    # Send Hello
    msg = self.new_message(
        Messages.HELLO, data,
        broadcast=True)  # It's not really a broadcast, but it needs to contain broadcast message data
    msg.body['behaviour'] = self.get_ready_behaviours()

    bus.fire("before_hello", msg)
    self.send_message(msg)
    bus.fire("hello")
def on_before_host_up(self, message): """ Configure PostgreSQL behaviour @type message: scalarizr.messaging.Message @param message: HostUp message """ repl = 'master' if self.is_replication_master else 'slave' #bus.fire('before_postgresql_configure', replication=repl) if self.is_replication_master: self._init_master(message) else: self._init_slave(message) bus.fire('service_configured', service_name=SERVICE_NAME, replication=repl)
def on_before_host_up(self, message): """ Configure redis behaviour @type message: scalarizr.messaging.Message @param message: HostUp message """ repl = 'master' if self.is_replication_master else 'slave' if self.is_replication_master: self._init_master(message) else: self._init_slave(message) self._init_script = self.redis_instances.get_default_process() bus.fire('service_configured', service_name=SERVICE_NAME, replication=repl)
def _start_after_reboot(self):
    if __node__['state'] != 'running':
        self._logger.info(
            'Skipping RebootFinish firing, server state is: {}'.format(__node__['state']))
        return

    msg = self.new_message(
        Messages.REBOOT_FINISH,
        msg_body={'base': {'hostname': self._system_api.get_hostname()}},
        broadcast=True)
    bus.fire("before_reboot_finish", msg)
    self.send_message(msg)
    bus.fire("reboot_finish")
def _start_init(self):
    # Regenerate key
    new_crypto_key = cryptotool.keygen()

    # Prepare HostInit
    msg = self.new_message(Messages.HOST_INIT, dict(
        crypto_key=new_crypto_key,
        snmp_port=self._cnf.rawini.get(config.SECT_SNMP, config.OPT_PORT),
        snmp_community_name=self._cnf.rawini.get(config.SECT_SNMP, config.OPT_COMMUNITY_NAME)
    ), broadcast=True)
    bus.fire("before_host_init", msg)

    self.send_message(msg, new_crypto_key=new_crypto_key, wait_ack=True)
    bus.cnf.state = ScalarizrState.INITIALIZING
    bus.fire("host_init")
def onSIGHUP(self, *args):
    pid = os.getpid()
    self._logger.debug('Received SIGHUP (pid: %d)', pid)
    if pid != _pid:
        return

    self._logger.info('Reloading scalarizr')
    self._running = False
    bus.fire('shutdown')
    self._shutdown_services()

    self._running = True
    cnf = bus.cnf
    cnf.bootstrap(force_reload=True)
    self._init_services()
    self._start_services()
    bus.fire('reload')
def on_before_host_up(self, message): """ Configure PostgreSQL behaviour @type message: scalarizr.messaging.Message @param message: HostUp message """ repl = 'master' if self.is_replication_master else 'slave' #bus.fire('before_postgresql_configure', replication=repl) if self.is_replication_master: self._init_master(message) else: self._init_slave(message) # Force to resave volume settings __postgresql__['volume'] = storage2.volume(__postgresql__['volume']) bus.fire('service_configured', service_name=SERVICE_NAME, replication=repl, preset=self.initial_preset)
def _shutdown(self):
    self._running = False

    try:
        bus.fire("shutdown")
    except:
        self._logger.debug('Shutdown hooks exception', exc_info=sys.exc_info())

    try:
        self._logger.info("[pid: %d] Stopping scalarizr %s", os.getpid(), __version__)
        self._shutdown_services()
    except:
        self._logger.debug('Shutdown services exception', exc_info=sys.exc_info())
    finally:
        if os.path.exists(PID_FILE):
            os.remove(PID_FILE)
        self._logger.info('[pid: %d] Scalarizr terminated', os.getpid())
def create_databundle_callback(task, meta):
    if task['state'] == 'completed':
        snap = task.result
        bus.fire('%s_data_bundle' % BEHAVIOUR, snapshot_id=snap['id'])

        used_size = int(system2(('df', '-P', '--block-size=M', STORAGE_PATH))[0].split('\n')[1].split()[2][:-1])
        msg_data = {
            'db_type': BEHAVIOUR,
            'status': 'ok',
            'used_size': '%.3f' % (float(used_size) / 1000,),
            BEHAVIOUR: {OPT_SNAPSHOT_CNF: snap}
        }
        __node__.messaging.send(DbMsrMessages.DBMSR_CREATE_DATA_BUNDLE_RESULT, msg_data)
    else:
        __node__.messaging.send(DbMsrMessages.DBMSR_CREATE_DATA_BUNDLE_RESULT,
                                dict(db_type=BEHAVIOUR,
                                     status='error',
                                     last_error=str(task.exception)))
def on_before_host_up(self, message): LOG.debug("on_before_host_up") """ Configure MySQL __mysql__['behavior'] @type message: scalarizr.messaging.Message @param message: HostUp message """ self.generate_datadir() self.mysql.service.stop('Configuring MySQL') repl = 'master' if int(__mysql__['replication_master']) else 'slave' bus.fire('before_mysql_configure', replication=repl) if repl == 'master': self._init_master(message) else: self._init_slave(message) # Force to resave volume settings __mysql__['volume'] = storage2.volume(__mysql__['volume']) bus.fire('service_configured', service_name=__mysql__['behavior'], replication=repl)
def _shutdown(*args):
    logger = logging.getLogger(__name__)
    globals()["_running"] = False

    try:
        bus.fire("shutdown")
    except:
        logger.debug('Shutdown hooks exception', exc_info=sys.exc_info())

    try:
        logger.info("[pid: %d] Stopping scalarizr %s", os.getpid(), __version__)
        _shutdown_services()
    except:
        logger.debug('Shutdown services exception', exc_info=sys.exc_info())
    finally:
        if os.path.exists(PID_FILE):
            os.remove(PID_FILE)
        logger.info('[pid: %d] Scalarizr terminated', os.getpid())
def on_DbMsr_CreateDataBundle(self, message):
    try:
        op = operation(name=self._op_data_bundle, phases=[{
            'name': self._phase_data_bundle,
            'steps': [self._step_create_data_bundle]
        }])
        op.define()

        with op.phase(self._phase_data_bundle):
            with op.step(self._step_create_data_bundle):
                bus.fire('before_postgresql_data_bundle')

                snap = self._create_snapshot()
                used_size = int(system2(('df', '-P', '--block-size=M', STORAGE_PATH))[0].split('\n')[1].split()[2][:-1])
                bus.fire('postgresql_data_bundle', snapshot_id=snap.id)

                # Notify scalr
                msg_data = {
                    'db_type': BEHAVIOUR,
                    'status': 'ok',
                    'used_size': '%.3f' % (float(used_size) / 1000,),
                    BEHAVIOUR: {OPT_SNAPSHOT_CNF: dict(snap)}
                }
                self.send_message(DbMsrMessages.DBMSR_CREATE_DATA_BUNDLE_RESULT, msg_data)

        op.ok()

    except (Exception, BaseException), e:
        LOG.exception(e)

        # Notify Scalr about error
        self.send_message(DbMsrMessages.DBMSR_CREATE_DATA_BUNDLE_RESULT,
                          dict(db_type=BEHAVIOUR,
                               status='error',
                               last_error=str(e)))
def _start_init(self):
    # Regenerate key
    new_crypto_key = cryptotool.keygen()

    # Prepare HostInit
    msg = self.new_message(Messages.HOST_INIT,
                           dict(crypto_key=new_crypto_key,
                                snmp_port=self._cnf.rawini.get(config.SECT_SNMP, config.OPT_PORT),
                                snmp_community_name=self._cnf.rawini.get(config.SECT_SNMP, config.OPT_COMMUNITY_NAME)),
                           broadcast=True)
    bus.fire("before_host_init", msg)

    self.send_message(msg, new_crypto_key=new_crypto_key, wait_ack=True)
    bus.cnf.state = ScalarizrState.INITIALIZING
    bus.fire("host_init")
def on_DbMsr_NewMasterUp(self, message): """ Switch replication to a new master server @type message: scalarizr.messaging.Message @param message: DbMsr_NewMasterUp """ if not message.body.has_key(BEHAVIOUR) or message.db_type != BEHAVIOUR: raise HandlerError("DbMsr_NewMasterUp message for PostgreSQL behaviour must have 'postgresql' property and db_type 'postgresql'") postgresql_data = message.postgresql.copy() if self.is_replication_master: self._logger.debug('Skipping NewMasterUp. My replication role is master') return host = message.local_ip or message.remote_ip self._logger.info("Switching replication to a new postgresql master %s", host) bus.fire('before_postgresql_change_master', host=host) if OPT_SNAPSHOT_CNF in postgresql_data and postgresql_data[OPT_SNAPSHOT_CNF]['type'] != 'eph': snap_data = postgresql_data[OPT_SNAPSHOT_CNF] self._logger.info('Reinitializing Slave from the new snapshot %s', snap_data['id']) self.postgresql.service.stop() self._logger.debug('Destroying old storage') self.storage_vol.destroy() self._logger.debug('Storage destroyed') self._logger.debug('Plugging new storage') vol = Storage.create(snapshot=snap_data.copy(), tags=self.postgres_tags) self._plug_storage(self._storage_path, vol) self._logger.debug('Storage plugged') Storage.backup_config(vol.config(), self._volume_config_path) Storage.backup_config(snap_data, self._snapshot_config_path) self.storage_vol = vol self.postgresql.init_slave(self._storage_path, host, POSTGRESQL_DEFAULT_PORT, self.root_password) self._logger.debug("Replication switched") bus.fire('postgresql_change_master', host=host)
def _start_import(self): data = software.system_info() data["architecture"] = self._platform.get_architecture() data["server_id"] = self._cnf.rawini.get(config.SECT_GENERAL, config.OPT_SERVER_ID) # Send Hello msg = self.new_message( Messages.HELLO, data, broadcast=True # It's not really broadcast but need to contain broadcast message data ) behs = self.get_ready_behaviours() if "mysql2" in behs or "percona" in behs: # only mysql2 should be returned to Scalr try: behs.remove("mysql") except (IndexError, ValueError): pass msg.body["behaviour"] = behs bus.fire("before_hello", msg) self.send_message(msg) bus.fire("hello")
def onSIGHUP(*args):
    pid = os.getpid()
    logger = logging.getLogger(__name__)
    logger.debug('Received SIGHUP (pid: %d)', pid)
    if pid != _pid:
        return

    logger.info('Reloading scalarizr')
    signal.signal(signal.SIGCHLD, signal.SIG_IGN)
    globals()["_running"] = False
    bus.fire('shutdown')
    _shutdown_services()

    globals()["_running"] = True
    signal.signal(signal.SIGCHLD, onSIGCHILD)
    cnf = bus.cnf
    cnf.bootstrap(force_reload=True)
    _init_services()
    _start_services()
    bus.fire('reload')
def _start_init(self):
    # Regenerate key
    new_crypto_key = cryptotool.keygen()

    bus.init_op = self._op_api.create('system.init', lambda op: None)

    # Prepare HostInit
    msg = self.new_message(
        Messages.HOST_INIT,
        dict(seconds_since_start=float('%.2f' % (time.time() - __node__['start_time'],)),
             seconds_since_boot=float('%.2f' % (time.time() - metadata.boot_time(),)),
             operation_id=bus.init_op.operation_id,
             crypto_key=new_crypto_key),
        broadcast=True)
    bus.fire("before_host_init", msg)

    self.send_message(msg, new_crypto_key=new_crypto_key, wait_ack=True)
    bus.cnf.state = ScalarizrState.INITIALIZING
    bus.fire("host_init")
def on_HostInitResponse(self, message): if bus.cnf.state == ScalarizrState.RUNNING: self._logger.info("Ignoring 'HostInitResponse' message, cause state is '%s'", bus.cnf.state) return bus.initialization_op = operation(name='Initialization') try: self._define_initialization(message) bus.fire("host_init_response", message) hostup_msg = self.new_message(Messages.HOST_UP, broadcast=True) bus.fire("before_host_up", hostup_msg) if bus.scalr_version >= (2, 2, 3): self.send_message(Messages.BEFORE_HOST_UP, broadcast=True, wait_subhandler=True) self.send_message(hostup_msg) bus.cnf.state = ScalarizrState.RUNNING bus.fire("host_up") except: with bus.initialization_op as op: if not op.finished: with op.phase('Scalarizr routines'): with op.step('Scalarizr routines'): op.error() raise with bus.initialization_op as op: op.ok()
def on_HostInitResponse(self, message): if bus.cnf.state == ScalarizrState.RUNNING: self._logger.info( "Ignoring 'HostInitResponse' message, cause state is '%s'", bus.cnf.state) return self._check_control_ports() bus.initialization_op = operation(name='Initialization') try: self._define_initialization(message) bus.fire("host_init_response", message) hostup_msg = self.new_message(Messages.HOST_UP, broadcast=True) bus.fire("before_host_up", hostup_msg) if bus.scalr_version >= (2, 2, 3): self.send_message(Messages.BEFORE_HOST_UP, broadcast=True, wait_subhandler=True) self.send_message(hostup_msg) bus.cnf.state = ScalarizrState.RUNNING bus.fire("host_up") except: with bus.initialization_op as op: if not op.finished: with op.phase('Scalarizr routines'): with op.step('Scalarizr routines'): op.error() raise with bus.initialization_op as op: op.ok()
def on_DbMsr_CreateDataBundle(self, message):
    try:
        op = operation(name=self._op_data_bundle, phases=[{
            'name': self._phase_data_bundle,
            'steps': [self._step_create_data_bundle]
        }])
        op.define()

        with op.phase(self._phase_data_bundle):
            with op.step(self._step_create_data_bundle):
                bus.fire('before_%s_data_bundle' % BEHAVIOUR)

                # Creating snapshot
                snap = self._create_snapshot()
                used_size = int(system2(('df', '-P', '--block-size=M', self._storage_path))[0].split('\n')[1].split()[2][:-1])
                bus.fire('%s_data_bundle' % BEHAVIOUR, snapshot_id=snap.id)

                # Notify scalr
                msg_data = dict(
                    db_type=BEHAVIOUR,
                    used_size='%.3f' % (float(used_size) / 1000,),
                    status='ok')
                msg_data[BEHAVIOUR] = {'snapshot_config': dict(snap)}
                self.send_message(DbMsrMessages.DBMSR_CREATE_DATA_BUNDLE_RESULT, msg_data)

        op.ok()

    except (Exception, BaseException), e:
        LOG.exception(e)

        # Notify Scalr about error
        self.send_message(DbMsrMessages.DBMSR_CREATE_DATA_BUNDLE_RESULT,
                          dict(db_type=BEHAVIOUR,
                               status='error',
                               last_error=str(e)))
def _start_init(self): # Regenerage key new_crypto_key = cryptotool.keygen() bus.init_op = self._op_api.create("system.init", lambda op: None) # Prepare HostInit msg = self.new_message( Messages.HOST_INIT, dict( seconds_since_start=float("%.2f" % (time.time() - __node__["start_time"],)), seconds_since_boot=float("%.2f" % (time.time() - metadata.boot_time(),)), operation_id=bus.init_op.operation_id, crypto_key=new_crypto_key, ), broadcast=True, ) bus.fire("before_host_init", msg) self.send_message(msg, new_crypto_key=new_crypto_key, wait_ack=True) bus.cnf.state = ScalarizrState.INITIALIZING bus.fire("host_init")
def on_DbMsr_NewMasterUp(self, message): """ Switch replication to a new master server @type message: scalarizr.messaging.Message @param message: DbMsr__NewMasterUp """ if not message.body.has_key(BEHAVIOUR) or message.db_type != BEHAVIOUR: raise HandlerError("DbMsr_NewMasterUp message for %s behaviour must have '%s' property and db_type '%s'" % BEHAVIOUR, BEHAVIOUR, BEHAVIOUR) if self.is_replication_master: LOG.debug('Skipping NewMasterUp. My replication role is master') return host = message.local_ip or message.remote_ip LOG.info("Switching replication to a new %s master %s"% (BEHAVIOUR, host)) bus.fire('before_%s_change_master' % BEHAVIOUR, host=host) self.redis_instances.init_as_slaves(self._storage_path, host) self.redis_instances.wait_for_sync() LOG.debug("Replication switched") bus.fire('%s_change_master' % BEHAVIOUR, host=host)
def on_DbMsr_CreateDataBundle(self, message):
    try:
        op = operation(name=self._op_data_bundle, phases=[{
            'name': self._phase_data_bundle,
            'steps': [self._step_create_data_bundle]
        }])
        op.define()

        with op.phase(self._phase_data_bundle):
            with op.step(self._step_create_data_bundle):
                bus.fire('before_postgresql_data_bundle')

                snap = self._create_snapshot()
                used_size = int(system2(('df', '-P', '--block-size=M', STORAGE_PATH))[0].split('\n')[1].split()[2][:-1])
                bus.fire('postgresql_data_bundle', snapshot_id=snap.id)

                # Notify scalr
                msg_data = {
                    'db_type': BEHAVIOUR,
                    'status': 'ok',
                    'used_size': '%.3f' % (float(used_size) / 1000,),
                    BEHAVIOUR: {OPT_SNAPSHOT_CNF: dict(snap)}
                }
                self.send_message(DbMsrMessages.DBMSR_CREATE_DATA_BUNDLE_RESULT, msg_data)

        op.ok()

    except (Exception, BaseException), e:
        LOG.exception(e)

        # Notify Scalr about error
        self.send_message(DbMsrMessages.DBMSR_CREATE_DATA_BUNDLE_RESULT,
                          dict(db_type=BEHAVIOUR,
                               status='error',
                               last_error=str(e)))
def do_databundle(op): try: bus.fire('before_%s_data_bundle' % BEHAVIOUR) # Creating snapshot LOG.info("Creating Redis data bundle") backup_obj = backup.backup( type='snap_redis', volume=__redis__['volume'], tags=__redis__['volume'].tags ) # TODO: generate the same way as in # mysql api or use __node__ restore = backup_obj.run() snap = restore.snapshot used_size = int( system2(('df', '-P', '--block-size=M', STORAGE_PATH))[0].split('\n')[1].split()[2][:-1]) bus.fire('%s_data_bundle' % BEHAVIOUR, snapshot_id=snap.id) # Notify scalr msg_data = dict(db_type=BEHAVIOUR, used_size='%.3f' % (float(used_size) / 1000, ), status='ok') msg_data[BEHAVIOUR] = {'snapshot_config': dict(snap)} node.__node__.messaging.send( DbMsrMessages.DBMSR_CREATE_DATA_BUNDLE_RESULT, msg_data) return restore except (Exception, BaseException), e: LOG.exception(e) # Notify Scalr about error node.__node__.messaging.send( DbMsrMessages.DBMSR_CREATE_DATA_BUNDLE_RESULT, dict(db_type=BEHAVIOUR, status='error', last_error=str(e)))