def _start_import_snapshot(self, message):
    try:
        # Config values come back as strings; cast for the numeric comparisons below
        storage_size = int(cassandra.ini.get(CNF_SECTION, OPT_STORAGE_SIZE))
        snapshot_url = cassandra.ini.get(CNF_SECTION, OPT_SNAPSHOT_URL)
        filename = os.path.basename(snapshot_url)
        snap_path = os.path.join(TMP_EBS_MNTPOINT, filename)

        result = re.search('^s3://(.*?)/(.*?)$', snapshot_url)
        # If s3 link
        if result:
            bucket_name = result.group(1)
            file_name = result.group(2)

            s3_conn = cassandra.platform.new_s3_conn()
            bucket = s3_conn.get_bucket(bucket_name)
            key = bucket.get_key(file_name)
            if not key:
                raise HandlerError('File %s does not exist on bucket %s' % (file_name, bucket_name))

            # Determine snapshot size in Gb
            length = int(key.size // (1024*1024*1024) + 1)
            if length > storage_size:
                raise HandlerError('Snapshot length (%s) is bigger than storage size (%s)' % (length, storage_size))

            temp_ebs_size = length*10 if length*10 < 1000 else 1000
            tmp_ebs_devname, temp_ebs_dev = self._create_attach_mount_volume(
                    temp_ebs_size, auto_mount=False, mpoint=TMP_EBS_MNTPOINT)
            self._logger.debug('Starting download cassandra snapshot: %s', snapshot_url)
            key.get_contents_to_filename(snap_path)

        # Just usual http or ftp link
        else:
            try:
                result = urllib2.urlopen(snapshot_url)
            except urllib2.URLError:
                raise HandlerError('Cannot download snapshot. URL: %s' % snapshot_url)

            # Determine snapshot size in Gb
            try:
                length = int(int(result.info()['content-length']) // (1024*1024*1024) + 1)
            except:
                self._logger.error('Cannot determine snapshot size. URL: %s', snapshot_url)
                length = storage_size

            if length > storage_size:
                raise HandlerError('Snapshot length (%s) is bigger than storage size (%s)' % (length, storage_size))

            temp_ebs_size = length*10 if length*10 < 1000 else 1000
            tmp_ebs_devname, temp_ebs_dev = self._create_attach_mount_volume(
                    temp_ebs_size, auto_mount=False, mpoint=TMP_EBS_MNTPOINT)
            self._logger.debug('Starting download cassandra snapshot: %s', snapshot_url)

            try:
                fp = open(snap_path, 'wb')
            except (Exception, BaseException), e:
                raise HandlerError('Cannot open snapshot file %s for write: %s' % (filename, str(e)))
            else:
def _change_master(self, host, user, password, log_file, log_pos, timeout=None):
    LOG.info("Changing replication Master to server %s (log_file: %s, log_pos: %s)",
            host, log_file, log_pos)
    timeout = timeout or int(__mysql__['change_master_timeout'])

    # Changing replication master
    self.root_client.stop_slave()
    self.root_client.change_master_to(host, user, password, log_file, log_pos)

    # Starting slave
    result = self.root_client.start_slave()
    LOG.debug('Start slave returned: %s' % result)
    if result and 'ERROR' in result:
        raise HandlerError('Cannot start mysql slave: %s' % result)

    time_until = time.time() + timeout
    status = None
    while time.time() <= time_until:
        status = self.root_client.slave_status()
        if status['Slave_IO_Running'] == 'Yes' and \
                status['Slave_SQL_Running'] == 'Yes':
            break
        time.sleep(5)
    else:
        if status:
            if not status['Last_Error']:
                logfile = firstmatched(lambda p: os.path.exists(p),
                        ('/var/log/mysqld.log', '/var/log/mysql.log'))
                if logfile:
                    gotcha = '[ERROR] Slave I/O thread: '
                    size = os.path.getsize(logfile)
                    fp = open(logfile, 'r')
                    try:
                        # Scan only the last 8K of the log for the slave I/O error
                        fp.seek(max((0, size - 8192)))
                        lines = fp.read().split('\n')
                        for line in lines:
                            if gotcha in line:
                                status['Last_Error'] = line.split(gotcha)[-1]
                    finally:
                        fp.close()

            msg = "Cannot change replication Master server to '%s'. " \
                    "Slave_IO_Running: %s, Slave_SQL_Running: %s, " \
                    "Last_Errno: %s, Last_Error: '%s'" % (
                    host, status['Slave_IO_Running'], status['Slave_SQL_Running'],
                    status['Last_Errno'], status['Last_Error'])
            raise HandlerError(msg)
        else:
            raise HandlerError('Cannot change replication master to %s' % host)

    LOG.debug('Replication master is changed to host %s', host)
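# A minimal sketch of the `firstmatched` helper used above (a hypothetical
# re-implementation, assuming the scalarizr utility returns the first element
# of `seq` for which `function` is true, or `default` when nothing matches):
def firstmatched(function, seq, default=None):
    for item in seq:
        if function(item):
            return item
    return default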
def start(self):
    # Check interpreter here, and not in __init__
    # cause scripts can create sequences when previous script
    # installs interpreter for the next one
    if not os.path.exists(self.interpreter):
        raise HandlerError("Can't execute script '%s' cause "
                "interpreter '%s' not found" % (self.name, self.interpreter))

    # Write script to disk, prepare execution
    exec_dir = os.path.dirname(self.exec_path)
    if not os.path.exists(exec_dir):
        os.makedirs(exec_dir)

    with open(self.exec_path, 'w') as fp:
        fp.write(self.body.encode('utf-8'))
    os.chmod(self.exec_path, stat.S_IREAD | stat.S_IEXEC)

    stdout = open(self.stdout_path, 'w+')
    stderr = open(self.stderr_path, 'w+')

    # Start process
    self.logger.debug('Executing %s'
            '\n  %s'
            '\n  1>%s'
            '\n  2>%s'
            '\n  timeout: %s seconds',
            self.interpreter, self.exec_path, self.stdout_path,
            self.stderr_path, self.exec_timeout)
    self.proc = subprocess.Popen(self.exec_path,
            stdout=stdout, stderr=stderr, close_fds=True)
    self.pid = self.proc.pid
    self.start_time = time.time()
def on_DbMsr_NewMasterUp(self, message):
    """
    Switch replication to a new master server

    @type message: scalarizr.messaging.Message
    @param message: DbMsr__NewMasterUp
    """
    if not message.body.has_key(BEHAVIOUR) or message.db_type != BEHAVIOUR:
        # Parenthesize the format args: without the tuple, '%' binds to the
        # first BEHAVIOUR only and the extra args raise a TypeError
        raise HandlerError("DbMsr_NewMasterUp message for %s behaviour must have "
                "'%s' property and db_type '%s'" % (BEHAVIOUR, BEHAVIOUR, BEHAVIOUR))

    if self.is_replication_master:
        LOG.debug('Skipping NewMasterUp. My replication role is master')
        return

    host = message.local_ip or message.remote_ip
    LOG.info("Switching replication to a new %s master %s" % (BEHAVIOUR, host))
    bus.fire('before_%s_change_master' % BEHAVIOUR, host=host)

    self.redis_instances.init_as_slaves(self._storage_path, host)
    self.redis_instances.wait_for_sync()

    LOG.debug("Replication switched")
    bus.fire('%s_change_master' % BEHAVIOUR, host=host)
def _execute_one_script0(self, script):
    exc_info = None
    try:
        self.in_progress.append(script)
        if not script.start_time:
            script.start()
        script.wait()
    except:
        exc_info = sys.exc_info()
        if script.asynchronous:
            msg = 'Asynchronous script {0!r} error: {1}'.format(
                    script.name, str(exc_info[1]))
            LOG.warn(msg, exc_info=exc_info)
        raise
    finally:
        script_result = script.get_result()
        if exc_info:
            with open(script.stderr_path, 'w+') as stderr_log:
                stderr_log.write(str(exc_info[1]))
            script_result['stderr'] = binascii.b2a_base64(str(exc_info[1]))
            script_result['return_code'] = 1
        LOG.debug('sending exec script result message')
        self.send_message(Messages.EXEC_SCRIPT_RESULT, script_result, queue=Queues.LOG)
        self.in_progress.remove(script)

    if not exc_info \
            and script_result['return_code'] != 0 \
            and script.event_name == 'BeforeHostUp' \
            and int(__node__['base'].get('abort_init_on_script_fail', False)):
        msg = ('Script {0} exited with code {1}, '
                'and the option to abort initialization when a Blocking BeforeHostUp Script fails was enabled. '
                'Update the script, or disable the option in the Advanced Tab.').format(
                script.name, script_result['return_code'])
        raise HandlerError(msg)
def on_host_init_response(self, message):
    with bus.initialization_op as op:
        with op.phase(self._phase_rabbitmq):
            with op.step(self._step_accept_scalr_conf):
                if not message.body.has_key("rabbitmq"):
                    raise HandlerError("HostInitResponse message for RabbitMQ "
                            "behaviour must have 'rabbitmq' property")

                rabbitmq_data = message.rabbitmq.copy()

                if not rabbitmq_data['password']:
                    rabbitmq_data['password'] = cryptotool.pwgen(10)

                hostname = RABBIT_HOSTNAME_TPL % int(message.server_index)
                rabbitmq_data['server_index'] = message.server_index
                rabbitmq_data['hostname'] = hostname

                dns.ScalrHosts.set('127.0.0.1', hostname)
                with open('/etc/hostname', 'w') as f:
                    f.write(hostname)
                system2(('hostname', '-F', '/etc/hostname'))

                volume_config = rabbitmq_data.pop('volume_config')
                volume_config['mpoint'] = DEFAULT_STORAGE_PATH
                rabbitmq_data['volume'] = storage2.volume(volume_config)

                __rabbitmq__.update(rabbitmq_data)
def handle_request(self, req_message, resp_message):
    def cleanup():
        err = system2('nodetool -h localhost cleanup', shell=True)[2]
        if err:
            raise HandlerError('Cannot do cleanup: %s' % err)

    def repair(keyspace):
        err = system2('nodetool -h localhost repair %s' % keyspace, shell=True)[2]
        if err:
            raise HandlerError('Cannot do repair: %s' % err)

    keyspace_name = req_message.cassandra['keyspace']
    new_rf = req_message.cassandra['rf']
    try:
        rf = cassandra.cassandra_conf.get(
                "Storage/Keyspaces/Keyspace[@Name='" + keyspace_name + "']/ReplicationFactor")
    except NoPathError:
        raise HandlerError('Keyspace %s does not exist or configuration file is broken' % keyspace_name)

    # Compare as integers: values read from the XML config come back as strings
    if int(rf) != int(new_rf):
        cassandra.cassandra_conf.set(
                "Storage/Keyspaces/Keyspace[@Name='" + keyspace_name + "']/ReplicationFactor", new_rf)
        cassandra.write_config()
        cassandra.restart_service()
        cleanup()
        # Replication factor was increased: re-replicate the keyspace's data
        if int(rf) < int(new_rf):
            repair(keyspace_name)

    resp_message.body.update(dict(status='ok'))
def on_BeforeHostTerminate(self, *args):
    cassandra.start_service()
    # Stream this node's data to the remaining ring members before shutdown
    err = system2('nodetool -h localhost decommission', shell=True)[2]
    if err:
        raise HandlerError('Cannot decommission node: %s' % err)
    wait_until(self._is_decommissioned, timeout=300,
            error_text="Node wasn't decommissioned in a reasonable time")
    cassandra.stop_service()
def _single_backup(db_name):
    # Closure: `tmpdir`, `dumps` and `self` come from the enclosing scope
    dump_path = tmpdir + os.sep + db_name + '.sql'
    pg_args = '%s %s --no-privileges -f %s' % (PG_DUMP, db_name, dump_path)
    su_args = [SU_EXEC, '-', self.postgresql.root_user.name, '-c', pg_args]
    err = system2(su_args)[1]
    if err:
        raise HandlerError('Error while dumping database %s: %s' % (db_name, err))
    # ?
    dumps.append(dump_path)
def __init__(self):
    self._logger = logging.getLogger(__name__)
    self._iptables = iptables
    if not self._iptables.enabled():
        raise HandlerError('iptables is not installed. '
                'iptables is required for cassandra behaviour')
    bus.on("init", self.on_init)
def __init__(self):
    pl = bus.platform
    initd_script = "/etc/init.d/cassandra"
    if not os.path.exists(initd_script):
        raise HandlerError("Cannot find Cassandra init script at %s. "
                "Make sure that Cassandra is installed" % initd_script)

    pid_file = '/var/run/cassandra.pid'
    socks = [initdv2.SockParam(7000, conn_address=pl.get_private_ip(), timeout=60)]
    initdv2.ParametrizedInitScript.__init__(self, 'cassandra',
            initd_script, pid_file, socks=socks)
def extract_json_attributes(chef_data):
    """
    Extract json attributes dictionary from scalr formatted structure
    """
    try:
        json_attributes = json.loads(chef_data.get('json_attributes') or "{}")
    except ValueError, e:
        raise HandlerError(
                "Chef attributes is not a valid JSON: {0}".format(e))
    return json_attributes
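# A minimal usage sketch (hypothetical input; assumes the function returns
# the parsed dictionary, as the added `return` above does):
attrs = extract_json_attributes({'json_attributes': '{"nginx": {"worker_processes": 4}}'})
assert attrs['nginx']['worker_processes'] == 4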
def on_ConvertVolume(self, message):
    try:
        if __node__['state'] != 'running':
            raise HandlerError('scalarizr is not in "running" state')

        old_volume = storage2.volume(__mysql__['volume'])
        new_volume = storage2.volume(message.volume)

        if old_volume.type != 'eph' or new_volume.type != 'lvm':
            raise HandlerError('%s to %s conversion is unsupported.' % (
                    old_volume.type, new_volume.type))

        new_volume.ensure()
        __mysql__.update({'volume': new_volume})
    except:
        e = sys.exc_info()[1]
        LOG.error('Volume conversion failed: %s' % e)
        self.send_message(MysqlMessages.CONVERT_VOLUME_RESULT,
                dict(status='error', last_error=str(e)))
def on_before_hello(self, message):
    try:
        rabbit_version = software.rabbitmq_software_info()
    except:
        raise HandlerError("Can't find rabbitmq on this server.")

    if rabbit_version.version < (2, 7, 0):
        # version is a tuple of ints; stringify components before joining
        self._logger.error("Unsupported RabbitMQ version. Assertion failed: %s >= 2.7.0",
                '.'.join(map(str, rabbit_version.version)))
        sys.exit(1)
def _https_config_exists(self):
    config_dir = os.path.dirname(self.api.app_inc_path)
    conf_path = os.path.join(config_dir, 'https.include')

    config = None
    try:
        config = Configuration('nginx')
        config.read(conf_path)
    except (Exception, BaseException), e:
        raise HandlerError('Cannot read/parse nginx main configuration file: %s' % str(e))
def start(self):
    # Check interpreter here, and not in __init__
    # cause scripts can create sequences when previous script
    # installs interpreter for the next one
    if not os.path.exists(self.interpreter) and linux.os['family'] != 'Windows':
        raise HandlerError("Can't execute script '%s' cause "
                "interpreter '%s' not found" % (self.name, self.interpreter))

    if not self.path:
        # Write script to disk, prepare execution
        exec_dir = os.path.dirname(self.exec_path)
        if not os.path.exists(exec_dir):
            os.makedirs(exec_dir)

        with open(self.exec_path, 'w') as fp:
            fp.write(self.body.encode('utf-8'))

        if not linux.os.windows_family:
            os.chmod(self.exec_path,
                    stat.S_IREAD | stat.S_IRGRP | stat.S_IROTH |
                    stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH)

    stdout = open(self.stdout_path, 'w+')
    stderr = open(self.stderr_path, 'w+')

    if self.interpreter == 'powershell':
        command = ['powershell.exe',
                '-NoProfile', '-NonInteractive',
                '-ExecutionPolicy', 'RemoteSigned',
                '-File', self.exec_path]
    elif self.interpreter == 'cmd':
        command = ['cmd.exe', '/C', self.exec_path]
    else:
        command = []
        if self.run_as and self.run_as != 'root':
            command = ['sudo', '-u', self.run_as]
        command += [self.exec_path]
    self.logger.debug('command: %s', command)

    # Start process
    self.logger.debug('Executing %s'
            '\n  %s'
            '\n  1>%s'
            '\n  2>%s'
            '\n  timeout: %s seconds',
            self.interpreter, self.exec_path, self.stdout_path,
            self.stderr_path, self.exec_timeout)
    self.proc = subprocess.Popen(command,
            stdout=stdout, stderr=stderr,
            # close_fds can't be combined with redirected std handles on Windows
            close_fds=linux.os['family'] != 'Windows',
            env=self.environ)
    self.pid = self.proc.pid
    self.start_time = time.time()
def on_before_host_up(self, msg):
    if not self._chef_data:
        return
    log = bus.init_op.logger if bus.init_op else LOG
    try:
        # Create client configuration
        if self._chef_data.get('server_url'):
            # Delete client.pem
            if os.path.exists(CLIENT_KEY_PATH):
                os.remove(CLIENT_KEY_PATH)

            chef_client = ChefClient(
                    chef_server_url=self._chef_data['server_url'],
                    json_attributes=self._with_json_attributes,
                    node_name=self._chef_data['node_name'],
                    validator_name=self._chef_data['validator_name'],
                    validation_pem=self._chef_data['validator_key'],
                    environment=self._chef_data['environment'],
                    environment_variables=self._environ_variables,
                    log_level=self._chef_data.get('log_level', 'auto'))
            try:
                chef_client.prepare()
                self.send_message('HostUpdate', dict(chef=self._chef_data))
                chef_client.run()
            finally:
                chef_client.cleanup()

            daemonize = self._chef_data.get('daemonize')
            if daemonize and int(daemonize):
                log.info('Daemonizing chef-client')
                self.daemonize()

        elif self._chef_data.get('cookbook_url'):
            solo = ChefSolo(
                    cookbook_url=self._chef_data['cookbook_url'],
                    cookbook_url_type=self._chef_data['cookbook_url_type'],
                    json_attributes=self._with_json_attributes,
                    relative_path=self._chef_data.get('relative_path'),
                    environment=self._environ_variables,
                    ssh_private_key=self._chef_data.get('ssh_private_key'),
                    binary_path=CHEF_SOLO_BIN)
            try:
                solo.prepare()
                solo.run()
            finally:
                solo.cleanup()
        else:
            raise HandlerError('Neither chef server nor cookbook url were specified')

        msg.chef = self._chef_data
    finally:
        self._chef_data = None
def daemonize(self):
    if linux.os.windows_family:
        self._logger.info('Starting chef-client service')
        try:
            win32serviceutil.StartService(WIN_SERVICE_NAME)
        except pywintypes.error, e:
            # 1060: the specified service does not exist as an installed service
            if e.args[0] == 1060:
                err = ("Can't daemonize chef-client, "
                        "cause 'chef-client' is not a registered Windows Service.\n"
                        "Most likely you haven't selected Chef Service option in Chef installer.")
                raise HandlerError(err)
def on_RabbitMq_Reconfigure(self, message):
    try:
        if not 'running' == __node__['state']:
            raise HandlerError('Server is not in RUNNING state yet')

        if message.node_type != __rabbitmq__['node_type']:
            self._logger.info('Changing node type to %s' % message.node_type)

            disk_node = message.node_type == 'disk'
            cluster_nodes = self._get_cluster_nodes()
            nodes_to_cluster_with = []
            for node in cluster_nodes:
                nodes_to_cluster_with.append(node.hostname)
                dns.ScalrHosts.set(node.ip, node.hostname)

            if nodes_to_cluster_with or disk_node:
                self_hostname = __rabbitmq__['hostname']
                self.rabbitmq.change_node_type(self_hostname,
                        nodes_to_cluster_with, disk_node)
            else:
                # Note the trailing space inside the first fragment: without it
                # the concatenated message read 'shouldpresent'
                raise HandlerError('At least 1 disk node should '
                        'be present in cluster')

            __rabbitmq__['node_type'] = message.node_type
        else:
            raise HandlerError('Node type is already %s' % message.node_type)

        msg_body = dict(status='ok', node_type=message.node_type)
    except:
        error = str(sys.exc_info()[1])
        msg_body = dict(status='error', last_error=error)
    finally:
        self.send_message(RabbitMQMessages.RABBITMQ_RECONFIGURE_RESULT, msg_body)
def __init__(self):
    pid_file = None
    if disttool.is_redhat_based():
        pid_file = "/var/run/memcached/memcached.pid"
    elif disttool.is_debian_based():
        pid_file = "/var/run/memcached.pid"

    initd_script = '/etc/init.d/memcached'
    if not os.path.exists(initd_script):
        raise HandlerError("Cannot find Memcached init script at %s. "
                "Make sure that memcached is installed" % initd_script)

    initdv2.ParametrizedInitScript.__init__(self, 'memcached',
            initd_script, pid_file, socks=[initdv2.SockParam(11211)])
def on_host_init_response(self, message):
    log = bus.init_op.logger
    log.info('Accept Scalr configuration')
    if not message.body.has_key("rabbitmq"):
        raise HandlerError("HostInitResponse message for RabbitMQ "
                "behaviour must have 'rabbitmq' property")

    rabbitmq_data = message.rabbitmq.copy()
    if not rabbitmq_data['password']:
        rabbitmq_data['password'] = cryptotool.pwgen(10)

    self.service.stop()
    self.cleanup_hosts_file('/')

    if os.path.exists(RABBITMQ_ENV_CFG_PATH):
        os.remove(RABBITMQ_ENV_CFG_PATH)

    if not os.path.isdir(DEFAULT_STORAGE_PATH):
        os.makedirs(DEFAULT_STORAGE_PATH)

    rabbitmq_user = pwd.getpwnam("rabbitmq")
    os.chown(DEFAULT_STORAGE_PATH, rabbitmq_user.pw_uid, rabbitmq_user.pw_gid)

    self._logger.info('Performing initial cluster reset')
    hostname = rabbitmq_svc.RABBIT_HOSTNAME_TPL % int(message.server_index)
    __rabbitmq__['hostname'] = hostname
    dns.ScalrHosts.set('127.0.0.1', hostname)
    self._prepare_env_config()

    self.service.start()
    self.rabbitmq.stop_app()
    self.rabbitmq.reset()
    self.service.stop()

    # Use RABBITMQ_NODENAME instead of setting actual hostname
    #with open('/etc/hostname', 'w') as f:
    #    f.write(hostname)
    #system2(('hostname', '-F', '/etc/hostname'))

    volume_config = rabbitmq_data.pop('volume_config')
    volume_config['mpoint'] = DEFAULT_STORAGE_PATH
    rabbitmq_data['volume'] = storage2.volume(volume_config)
    __rabbitmq__.update(rabbitmq_data)
def check_runability(self):
    path_params = urlparse(self.path or '')
    if path_params.scheme in ('http', 'https'):
        if path_params.scheme == 'https' and with_httplib2:
            # we are using httplib2 for opening https url because it
            # makes ssl certificate validation and urlopen doesn't
            h = httplib2.Http()
            meta, self.body = h.request(self.path)
            if meta['status'].startswith('4') or meta['status'].startswith('5'):
                raise HandlerError("Can't download script from URL '%s'. Status code: %s"
                        % (self.path, meta['status']))
        else:
            try:
                response = urlopen(self.path)
                self.body = response.read()
            except:
                raise HandlerError("Can't download script from URL '%s'. Reason: %s"
                        % (self.path, sys.exc_info()[1]))
        self.path = None
        self.exec_path = self._generate_exec_path()

    if self.body or (self.path and not coreutils.is_binary(self.path)):
        self.interpreter = read_shebang(path=self.path, script=self.body)
        if linux.os['family'] == 'Windows' and self.body:
            # Erase first line with #!
            self.body = '\n'.join(self.body.splitlines()[1:])
    elif self.path:
        self.interpreter = self.path

    if self.interpreter == 'powershell' \
            and os.path.splitext(self.exec_path)[1] not in ('.ps1', '.psm1'):
        self.exec_path += '.ps1'
    elif self.interpreter == 'cmd' \
            and os.path.splitext(self.exec_path)[1] not in ('.cmd', '.bat'):
        self.exec_path += '.bat'

    if self.path and not os.access(self.path, os.X_OK):
        msg = 'Path {0!r} is not executable'.format(self.path)
        raise HandlerError(msg)

    if linux.os['family'] == 'Windows' and self.run_as:
        raise HandlerError("Windows can't execute scripts remotely "
                "under user other than Administrator. "
                "Script '%s', given user: '%s'" % (self.name, self.run_as))

    if not self.interpreter:
        raise HandlerError("Can't execute script '%s' cause it has no shebang.\n"
                "The first line of the script should be a shebang "
                "interpreter directive:\n"
                "#!interpreter [optional-arg]" % (self.name, ))

    if not os.path.exists(self.interpreter) and linux.os['family'] != 'Windows':
        raise HandlerError("Can't execute script '%s' cause "
                "interpreter '%s' not found" % (self.name, self.interpreter))
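# A minimal sketch of the `read_shebang` helper relied on above and in the
# Script constructor below (a hypothetical re-implementation; the real
# scalarizr utility likely handles more cases). It returns the interpreter
# named in a '#!' first line, or None when there is no shebang:
def read_shebang(path=None, script=None):
    first_line = (script or open(path).readline()).splitlines()[0].strip()
    if first_line.startswith('#!'):
        return first_line[2:].split()[0]
    return None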
def __init__(self):
    if not software.whereis('rabbitmqctl'):
        raise HandlerError("Rabbitmqctl binary was not found. Check your installation.")

    bus.on("init", self.on_init)
    self._logger = logging.getLogger(__name__)

    self.rabbitmq = rabbitmq_svc.rabbitmq
    self.service = initdv2.lookup(BuiltinBehaviours.RABBITMQ)
    self._service_name = BEHAVIOUR
    self.on_reload()

    if 'ec2' == self.platform.name:
        self._logger.debug('Setting hostname_as_pubdns to 0')
        __ec2__ = __node__['ec2']
        __ec2__['hostname_as_pubdns'] = 0
def on_RabbitMq_SetupControlPanel(self, message):
    try:
        if not 'running' == __node__['state']:
            raise HandlerError('Server is not in RUNNING state yet')
        try:
            self.service.stop()
            self.rabbitmq.enable_plugin(RABBITMQ_MGMT_PLUGIN_NAME)
        finally:
            self.service.start()

        panel_url = 'http://%s:55672/mgmt/' % self.platform.get_public_ip()
        msg_body = dict(status='ok', cpanel_url=panel_url)
    except:
        error = str(sys.exc_info()[1])
        msg_body = dict(status='error', last_error=error)
    finally:
        self.send_message(RabbitMQMessages.RABBITMQ_SETUP_CONTROL_PANEL_RESULT, msg_body)
def on_RabbitMq_SetupControlPanel(self, message):
    try:
        if not 'running' == __node__['state']:
            raise HandlerError('Server is not in RUNNING state yet')
        try:
            self.service.stop('enable plugin')
            self.rabbitmq.enable_plugin(RABBITMQ_MGMT_PLUGIN_NAME)
        finally:
            self.service.start()

        # RabbitMQ 3.x serves the management UI on 15672 instead of 55672
        rabbit_version = software.rabbitmq_software_info()
        panel_port = 55672 if rabbit_version.version <= (3, 0, 0) else 15672
        msg_body = dict(status='ok', port=panel_port)
    except:
        error = str(sys.exc_info()[1])
        msg_body = dict(status='error', last_error=error)
    finally:
        self.send_message(RabbitMQMessages.RABBITMQ_SETUP_CONTROL_PANEL_RESULT, msg_body)
def on_Mysql_CreatePmaUser(self, message):
    LOG.debug("on_Mysql_CreatePmaUser")
    assert message.pma_server_ip
    assert message.farm_role_id

    try:
        # Operation allowed only on Master server
        if not int(__mysql__['replication_master']):
            msg = 'Cannot add pma user on slave. ' \
                    'It should be a Master server'
            raise HandlerError(msg)

        pma_server_ip = message.pma_server_ip
        farm_role_id = message.farm_role_id
        pma_password = cryptotool.pwgen(20)

        LOG.info("Adding phpMyAdmin system user")
        if self.root_client.user_exists(__mysql__['pma_user'], pma_server_ip):
            LOG.info('PhpMyAdmin system user already exists. Removing user.')
            self.root_client.remove_user(__mysql__['pma_user'], pma_server_ip)
        self.root_client.create_user(__mysql__['pma_user'], pma_server_ip,
                pma_password, privileges=None)
        LOG.info('PhpMyAdmin system user successfully added')

        # Notify Scalr
        self.send_message(MysqlMessages.CREATE_PMA_USER_RESULT, dict(
                status='ok',
                pma_user=__mysql__['pma_user'],
                pma_password=pma_password,
                farm_role_id=farm_role_id,
        ))
    except (Exception, BaseException), e:
        LOG.exception(e)
        # Notify Scalr about error. Read farm_role_id from the message: the
        # local variable is unbound when we fail before assigning it
        self.send_message(MysqlMessages.CREATE_PMA_USER_RESULT, dict(
                status='error',
                last_error=str(e).strip(),
                farm_role_id=message.farm_role_id,
        ))
def rebundle(self):
    now = time.strftime('%Y%m%d%H%M%S')
    if len(self._role_name) > self.IMAGE_NAME_MAXLEN - len(now) - 1:
        image_name = self._role_name[0:16] + '--' + now
    else:
        image_name = self._role_name + "-" + now

    pl = bus.platform
    conn = pl.new_cloudstack_conn()

    try:
        root_vol = filter(lambda x: x.type == 'ROOT',
                conn.listVolumes(virtualMachineId=pl.get_instance_id()))[0]
    except IndexError:
        raise HandlerError("Can't find root volume for virtual machine %s" %
                pl.get_instance_id())

    instance = conn.listVirtualMachines(id=pl.get_instance_id())[0]

    # Create snapshot
    LOG.info('Creating ROOT volume snapshot (volume: %s)', root_vol.id)
    snap = voltool.create_snapshot(conn, root_vol.id, wait_completion=True, logger=LOG)
    LOG.info('ROOT volume snapshot created (snapshot: %s)', snap.id)

    LOG.info('Creating image')
    image = conn.createTemplate(image_name, image_name,
            self.get_os_type_id(conn),
            snapshotId=snap.id,
            passwordEnabled=instance.passwordenabled)
    LOG.info('Image created (template: %s)', image.id)
    return image.id
def prepare(self):
    if self.cookbook_url_type == 'git':
        downloader = deploy.GitSource(self.cookbook_url,
                ssh_private_key=self.ssh_private_key)
        downloader.update(self.temp_dir)
    elif self.cookbook_url_type == 'http':
        downloader = deploy.HttpSource(self.cookbook_url)
        downloader.update(self.temp_dir)
    else:
        raise HandlerError('Unknown cookbook source type: %s' % self.cookbook_url_type)

    cookbook_path = os.path.join(self.temp_dir, self.relative_path or '')
    with open(self.chef_solo_cfg_path, 'w') as f:
        f.write(SOLO_CONF_TPL.format(cookbook_path, self.temp_dir, self.log_level))

    with open(self.attrs_path, 'w') as f:
        json.dump(self.json_attributes, f)
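# A plausible shape for SOLO_CONF_TPL (an assumption -- the real template is
# defined elsewhere in the module). Its three positional slots match the
# format() call above: cookbook path, cache dir, log level:
SOLO_CONF_TPL = (
    'cookbook_path "{0}"\n'
    'file_cache_path "{1}"\n'
    'log_level :{2}\n'
)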
def tarzip_image(self, prefix, file, path):
    self._logger.info('Tarring image')
    tar_file = '%s.tar.gz' % os.path.join(path, prefix)
    outfile = open(tar_file, 'wb')
    file_path = self.get_file_path(file)

    tar_cmd = ['tar', 'c', '-S']
    if file_path:
        tar_cmd.append('-C')
        tar_cmd.append(file_path)
        tar_cmd.append(self.get_relative_filename(file))
    else:
        tar_cmd.append(file)

    # tar | gzip > tar_file
    p1 = Popen(tar_cmd, stdout=PIPE)
    p2 = Popen(['gzip'], stdin=p1.stdout, stdout=outfile)
    p2.communicate()
    outfile.close()

    if os.path.getsize(tar_file) <= 0:
        raise HandlerError('Could not tar image')
    return tar_file
def __init__(self, **kwds):
    '''
    Variant A:
    Script(name='AppPreStart', body='#!/usr/bin/python ...', asynchronous=True)

    Variant B:
    Script(id=43432234343, name='AppPreStart', pid=12145,
            interpreter='/usr/bin/python', start_time=4342424324, asynchronous=True)
    '''
    for key, value in kwds.items():
        setattr(self, key, value)

    assert self.name, '`name` required'
    assert self.exec_timeout, '`exec_timeout` required'

    if self.name and self.body:
        self.id = str(time.time())
        interpreter = read_shebang(script=self.body)
        if not interpreter:
            raise HandlerError("Can't execute script '%s' cause it has no shebang.\n"
                    "The first line of the script should be a shebang "
                    "interpreter directive:\n"
                    "#!interpreter [optional-arg]" % (self.name, ))
        self.interpreter = interpreter
    else:
        assert self.id, '`id` required'
        assert self.pid, '`pid` required'
        assert self.start_time, '`start_time` required'

    if self.interpreter:
        self.interpreter = split_strip(self.interpreter)[0]

    self.logger = logging.getLogger('%s.%s' % (__name__, self.id))
    self.exec_path = os.path.join(exec_dir_prefix + self.id, self.name)
    if self.exec_timeout:
        self.exec_timeout = int(self.exec_timeout)

    args = (self.name, self.event_name, self.role_name, self.id)
    self.stdout_path = os.path.join(logs_dir, '%s.%s.%s.%s-out.log' % args)
    self.stderr_path = os.path.join(logs_dir, '%s.%s.%s.%s-err.log' % args)
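# A minimal usage sketch of Variant A from the docstring (hypothetical values;
# assumes module-level `exec_dir_prefix` and `logs_dir` are configured and the
# class provides defaults for unset attributes such as `path` and `run_as`):
script = Script(name='AppPreStart',
        body='#!/usr/bin/python\nprint "hello"\n',
        asynchronous=True,
        exec_timeout=60,
        event_name='AppPreStart',
        role_name='app')
script.start()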