def _deploy_security_configuration(): ctx.logger.info('Deploying REST Security configuration file...') # Generating random hash salt and secret key security_configuration = { 'hash_salt': base64.b64encode(os.urandom(32)), 'secret_key': base64.b64encode(os.urandom(32)), 'encoding_alphabet': _random_alphanumeric(), 'encoding_block_size': 24, 'encoding_min_length': 5 } # Pre-creating paths so permissions fix can work correctly # in mgmtworker for path in utils.MANAGER_RESOURCES_SNAPSHOT_PATHS: utils.mkdir(path) utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, utils.MANAGER_RESOURCES_HOME) utils.sudo(['ls', '-la', '/opt/manager']) current_props = runtime_props['security_configuration'] current_props.update(security_configuration) runtime_props['security_configuration'] = current_props for key in ['admin_username', 'admin_password']: security_configuration[key] = current_props[key] fd, path = tempfile.mkstemp() os.close(fd) with open(path, 'w') as f: json.dump(security_configuration, f) rest_security_path = join(runtime_props['home_dir'], 'rest-security.conf') utils.move(path, rest_security_path) utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, rest_security_path) utils.chmod('g+r', rest_security_path)
def _create_postgres_pass_file(host, db_name, username, password):
    """Write /root/.pgpass with credentials for the given database.

    The format is the standard libpq pgpass line:
    ``host:port:db_name:username:password``.
    """
    pgpass_path = '/root/.pgpass'
    ctx.logger.info('Creating postgresql pgpass file: {0}'.format(pgpass_path))
    postgresql_default_port = 5432
    pgpass_content = '{host}:{port}:{db_name}:{user}:{password}'.format(
        host=host,
        port=postgresql_default_port,
        db_name=db_name,
        user=username,
        password=password)
    # The .pgpass file is used by mgmtworker in the snapshot workflow,
    # and needs to be under the home directory of the user who runs the
    # snapshot (currently root)
    if os.path.isfile(pgpass_path):
        ctx.logger.debug('Deleting {0} file..'.format(pgpass_path))
        os.remove(pgpass_path)
    # Stage through a temp file; delete=False so the move can keep it
    with tempfile.NamedTemporaryFile(delete=False) as temp_file:
        temp_file.write(pgpass_content)
        # Ensure the content is on disk before chmod/move
        temp_file.flush()
        # libpq refuses pgpass files that are group/world readable
        utils.chmod('0600', temp_file.name)
        utils.move(source=temp_file.name,
                   destination=pgpass_path,
                   rename_only=True)
    ctx.logger.debug(
        'Postgresql pass file {0} created'.format(pgpass_path))
def _install_rabbitmq():
    """Install and pre-configure RabbitMQ (plus Erlang) from RPMs.

    Installs the packages, deploys config/limits files, starts the broker
    once to enable plugins and create the user/permissions/SSL settings,
    then stops it again (final start happens elsewhere).
    """
    erlang_rpm_source_url = ctx.node.properties['erlang_rpm_source_url']
    rabbitmq_rpm_source_url = ctx.node.properties['rabbitmq_rpm_source_url']
    # TODO: maybe we don't need this env var
    os.putenv('RABBITMQ_FD_LIMIT',
              str(ctx.node.properties['rabbitmq_fd_limit']))
    rabbitmq_log_path = '/var/log/cloudify/rabbitmq'
    rabbitmq_username = ctx.node.properties['rabbitmq_username']
    rabbitmq_password = ctx.node.properties['rabbitmq_password']
    rabbitmq_cert_public = ctx.node.properties['rabbitmq_cert_public']
    rabbitmq_ssl_enabled = ctx.node.properties['rabbitmq_ssl_enabled']
    rabbitmq_cert_private = ctx.node.properties['rabbitmq_cert_private']
    ctx.logger.info('Installing RabbitMQ...')
    utils.set_selinux_permissive()
    utils.copy_notice('rabbitmq')
    utils.mkdir(rabbitmq_log_path)
    # Erlang must be installed before the RabbitMQ server RPM
    utils.yum_install(erlang_rpm_source_url)
    utils.yum_install(rabbitmq_rpm_source_url)
    utils.logrotate('rabbitmq')
    # Helper script used to forcibly terminate the broker
    utils.deploy_blueprint_resource(
        '{0}/kill-rabbit'.format(CONFIG_PATH),
        '/usr/local/bin/kill-rabbit')
    utils.chmod('500', '/usr/local/bin/kill-rabbit')
    utils.systemd.configure('rabbitmq')
    ctx.logger.info('Configuring File Descriptors Limit...')
    utils.deploy_blueprint_resource(
        '{0}/rabbitmq_ulimit.conf'.format(CONFIG_PATH),
        '/etc/security/limits.d/rabbitmq.conf')
    utils.systemd.systemctl('daemon-reload')
    utils.chown('rabbitmq', 'rabbitmq', rabbitmq_log_path)
    # Start the broker temporarily so plugins/users can be configured
    utils.systemd.start('cloudify-rabbitmq')
    # NOTE(review): fixed sleep before the port wait — presumably to let
    # the broker settle; confirm whether wait_for_port alone would suffice
    time.sleep(10)
    utils.wait_for_port(5672)
    ctx.logger.info('Enabling RabbitMQ Plugins...')
    # Occasional timing issues with rabbitmq starting have resulted in
    # failures when first trying to enable plugins
    utils.sudo(['rabbitmq-plugins', 'enable', 'rabbitmq_management'],
               retries=5)
    utils.sudo(['rabbitmq-plugins', 'enable', 'rabbitmq_tracing'], retries=5)
    _clear_guest_permissions_if_guest_exists()
    _create_user_and_set_permissions(rabbitmq_username, rabbitmq_password)
    _set_security(
        rabbitmq_ssl_enabled,
        rabbitmq_cert_private,
        rabbitmq_cert_public)
    # Stop again; the service is started for real later in the flow
    utils.systemd.stop('cloudify-rabbitmq', retries=5)
def _create_postgres_pass_file(host, db_name, username, password):
    """Create root's .pgpass file for the given database credentials.

    Used by the management worker during the snapshot workflow, so the
    file must live under the home directory of the user that runs the
    snapshot (currently root).
    """
    pgpass_path = '/root/.pgpass'
    ctx.logger.info('Creating postgresql pgpass file: {0}'.format(
        pgpass_path))
    postgresql_default_port = 5432
    # Standard libpq pgpass format: host:port:database:user:password
    line_template = '{host}:{port}:{db_name}:{user}:{password}'
    pgpass_content = line_template.format(
        host=host,
        port=postgresql_default_port,
        db_name=db_name,
        user=username,
        password=password
    )
    # Remove any stale file before writing the replacement
    if os.path.isfile(pgpass_path):
        ctx.logger.debug('Deleting {0} file..'.format(
            pgpass_path
        ))
        os.remove(pgpass_path)
    # Stage the content through a named temp file, tighten permissions,
    # then rename into place.
    with tempfile.NamedTemporaryFile(delete=False) as staged:
        staged.write(pgpass_content)
        staged.flush()
        utils.chmod('0600', staged.name)
        utils.move(source=staged.name,
                   destination=pgpass_path,
                   rename_only=True)
    ctx.logger.debug('Postgresql pass file {0} created'.format(
        pgpass_path))
def saveUrlImpl(self, pathPrefix, url, retries):
    """Download *url* and save it under *pathPrefix* with an extension
    chosen from the response Content-Type.

    Returns the saved pathname, or None on failure or unsupported type.

    Fixes: the original compared the Content-Type header with exact
    equality, so values carrying parameters (e.g.
    'image/jpeg; charset=utf-8') were rejected. The media type is now
    extracted before the lookup, and the if/elif chain is a mapping.
    """
    r = Network.get(url, retries=retries)
    if r is None:
        return None
    # TODO: add other judgement for http response
    if 'Content-Type' not in r.headers:
        print('Failed to save', url)
        return None
    contentType = r.headers['Content-Type']
    # Strip any parameters: 'image/jpeg; charset=utf-8' -> 'image/jpeg'
    mediaType = contentType.split(';', 1)[0].strip()
    extensions = {
        'image/jpeg': 'jpg',
        'image/png': 'png',
        'image/gif': 'gif',
        'audio/mpeg': 'mp3',
    }
    if mediaType not in extensions:
        print('Not support', contentType, 'for', url)
        return None
    pathname = '{}.{}'.format(pathPrefix, extensions[mediaType])
    with open(pathname, 'wb') as fp:
        fp.write(r.content)
    chmod(pathname)
    print('Downloaded:', pathname)
    return pathname
def saveConfig(self, config, filename):
    """saves config to filename

    Serializes the nested config mapping (Python 2 code: iterkeys/
    iteritems, unicode, octal 0600) into the custom text format:
    a version header, then per-section headers and one
    '<type> <option> : "<desc>" = <value>' line per option.
    """
    with open(filename, "wb") as f:
        # Restrict the config file to the owner (may hold credentials)
        chmod(filename, 0600)
        f.write("version: %i \n" % CONF_VERSION)
        for section in config.iterkeys():
            f.write('\n%s - "%s":\n' % (section, config[section]["desc"]))
            for option, data in config[section].iteritems():
                # 'desc'/'outline' are section metadata, not options
                if option in ("desc", "outline"):
                    continue
                if isinstance(data["value"], list):
                    # Lists are written one element per line, tab-indented
                    value = "[ \n"
                    for x in data["value"]:
                        value += "\t\t" + str(x) + ",\n"
                    value += "\t\t]\n"
                else:
                    if type(data["value"]) in (str, unicode):
                        value = data["value"] + "\n"
                    else:
                        value = str(data["value"]) + "\n"
                try:
                    f.write('\t%s %s : "%s" = %s' % (data["type"], option,
                                                     data["desc"], value))
                except UnicodeEncodeError:
                    # Fall back to explicit UTF-8 encoding for non-ASCII
                    # values (the file is opened in binary mode)
                    f.write('\t%s %s : "%s" = %s' % (data["type"], option,
                                                     data["desc"],
                                                     value.encode("utf8")))
def _install_stage():
    """Install the Cloudify Stage (UI) service and its NodeJS runtime.

    Skips installation (setting the 'skip_installation' runtime prop)
    when the stage package is absent from the manager resources package.
    """
    nodejs_source_url = ctx_properties['nodejs_tar_source_url']
    stage_source_url = ctx_properties['stage_tar_source_url']
    if not utils.resource_factory.local_resource_exists(stage_source_url):
        ctx.logger.info('Stage package not found in manager resources '
                        'package. Stage will not be installed.')
        ctx.instance.runtime_properties['skip_installation'] = 'true'
        return
    # injected as an input to the script
    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        os.environ.get('INFLUXDB_ENDPOINT_IP')
    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)
    utils.mkdir(NODEJS_DIR)
    utils.mkdir(HOME_DIR)
    utils.mkdir(LOG_DIR)
    utils.create_service_user(STAGE_USER, STAGE_GROUP, HOME_DIR)
    ctx.logger.info('Installing NodeJS...')
    nodejs = utils.download_cloudify_resource(nodejs_source_url,
                                              SERVICE_NAME)
    utils.untar(nodejs, NODEJS_DIR)
    utils.remove(nodejs)
    ctx.logger.info('Installing Cloudify Stage (UI)...')
    stage_tar = utils.download_cloudify_resource(stage_source_url,
                                                 SERVICE_NAME)
    # Detect community edition from the tarball name and record the mode
    # flag consumed by the service configuration
    if 'community' in stage_tar:
        ctx.logger.info('Community edition')
        ctx.instance.runtime_properties['community_mode'] = '-mode community'
    else:
        ctx.instance.runtime_properties['community_mode'] = ''
    utils.untar(stage_tar, HOME_DIR)
    utils.remove(stage_tar)
    ctx.logger.info('Fixing permissions...')
    utils.chown(STAGE_USER, STAGE_GROUP, HOME_DIR)
    utils.chown(STAGE_USER, STAGE_GROUP, NODEJS_DIR)
    utils.chown(STAGE_USER, STAGE_GROUP, LOG_DIR)
    # Allow snapshot restore to run as the stage user via sudo
    utils.deploy_sudo_command_script(
        'restore-snapshot.py',
        'Restore stage directories from a snapshot path',
        component=SERVICE_NAME,
        allow_as=STAGE_USER)
    utils.chmod('a+rx', '/opt/cloudify/stage/restore-snapshot.py')
    utils.sudo(['usermod', '-aG', utils.CLOUDIFY_GROUP, STAGE_USER])
    utils.logrotate(SERVICE_NAME)
    utils.systemd.configure(SERVICE_NAME)
    # Run stage's DB migrations via npm (paths are constants, so the
    # shell=True string is not attacker-controlled)
    backend_dir = join(HOME_DIR, 'backend')
    npm_path = join(NODEJS_DIR, 'bin', 'npm')
    subprocess.check_call('cd {0}; {1} run db-migrate'.format(
        backend_dir, npm_path), shell=True)
def execute_before_bootstrap():
    """Run every user-supplied pre-bootstrap script.

    Entries may be local paths or http(s) URLs; URLs are downloaded
    first, then each script is made executable and run.
    """
    scripts = ctx_properties['execute_before_bootstrap']
    for script in scripts:
        # TODO: Upon moving to Python 3, convert to urllib2.urlparse
        is_remote = ('://' in script and
                     script.split('://', 1)[0] in ('http', 'https'))
        if is_remote:
            script = utils.download_file(script)
        utils.chmod('744', script)
        utils.run(script)
def execute_before_bootstrap():
    """Execute user-configured scripts before bootstrap proceeds.

    Each entry may be a local path or an http(s) URL; URLs are
    downloaded to a local file first. Every script is chmod'ed 744
    and executed in order.
    """
    exec_paths = ctx_properties['execute_before_bootstrap']
    for path in exec_paths:
        # TODO: Upon moving to Python 3, convert to urllib2.urlparse
        if '://' in path and path.split('://', 1)[0] in ('http', 'https'):
            path = utils.download_file(path)
        utils.chmod('744', path)
        utils.run(path)
def deploy_script(script_name):
    """Render a manager-ip-setter script, install it executable, and
    (re)configure the manager-ip-setter systemd service."""
    staging_path = join(tempfile.gettempdir(), script_name)
    resource_path = join(
        'components', 'manager-ip-setter', 'scripts', script_name)
    ctx.download_resource_and_render(resource_path, staging_path)
    installed_path = join(MANAGER_IP_SETTER_DIR, script_name)
    utils.move(staging_path, installed_path)
    utils.chmod('+x', installed_path)
    utils.systemd.configure(MANAGER_IP_SETTER_SERVICE_NAME)
def configure_script(script_name, description):
    """Install a stage helper script as a sudo command and open it up.

    Deploys the script as a sudo-allowed command for the stage user,
    makes it world-readable/executable, and adds the stage user to the
    cloudify group.
    """
    utils.deploy_sudo_command_script(
        script_name,
        description,
        component=SERVICE_NAME,
        allow_as=STAGE_USER,
    )
    installed_path = '/opt/cloudify/stage/' + script_name
    utils.chmod('a+rx', installed_path)
    utils.sudo(['usermod', '-aG', utils.CLOUDIFY_GROUP, STAGE_USER])
def install_mgmtworker():
    """Install the Cloudify Management Worker from its RPM.

    Prepares directories, records broker connection details in the
    runtime properties, installs the RPM (which creates the venv), and
    fixes ownership/permissions so the cloudify user can run it.
    """
    riemann_dir = '/opt/riemann'
    management_worker_rpm_source_url = \
        ctx_properties['management_worker_rpm_source_url']
    runtime_props['rabbitmq_endpoint_ip'] = utils.get_rabbitmq_endpoint_ip()
    # Fix possible injections in json of rabbit credentials
    # See json.org for string spec
    for key in ['rabbitmq_username', 'rabbitmq_password']:
        # We will not escape newlines or other control characters,
        # we will accept them breaking
        # things noisily, e.g. on newlines and backspaces.
        # TODO: add:
        # sed 's/"/\\"/' | sed 's/\\/\\\\/' | sed s-/-\\/- | sed 's/\t/\\t/'
        runtime_props[key] = ctx_properties[key]
    utils.set_service_as_cloudify_service(runtime_props)
    ctx.logger.info('Installing Management Worker...')
    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)
    utils.mkdir(HOME_DIR)
    utils.mkdir(join(HOME_DIR, 'config'))
    utils.mkdir(join(HOME_DIR, 'work'))
    utils.mkdir(LOG_DIR)
    utils.mkdir(riemann_dir)
    mgmtworker_venv = join(HOME_DIR, 'env')
    # used to run the sanity check
    runtime_props['python_executable'] = join(mgmtworker_venv, 'bin',
                                              'python')
    # this create the mgmtworker_venv and installs the relevant
    # modules into it.
    utils.yum_install(management_worker_rpm_source_url,
                      service_name=SERVICE_NAME)
    _install_optional(mgmtworker_venv)
    # Add certificate and select port, as applicable
    runtime_props['broker_cert_path'] = utils.INTERNAL_CA_CERT_PATH
    # Use SSL port
    runtime_props['broker_port'] = AMQP_SSL_PORT
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, HOME_DIR)
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, LOG_DIR)
    # Changing perms on workdir and venv in case they are put outside homedir
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, mgmtworker_venv)
    # Prepare riemann dir. We will change the owner to riemann later, but the
    # management worker will still need access to it
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, riemann_dir)
    utils.chmod('770', riemann_dir)
    ctx.logger.info("Using broker port: {0}".format(
        ctx.instance.runtime_properties['broker_port']))
def install_mgmtworker():
    """Install the Cloudify Management Worker from its RPM (SSL variant).

    Like the sibling installer but explicitly marks the broker as
    SSL-enabled and points broker_cert_path at the internal cert.
    """
    riemann_dir = '/opt/riemann'
    management_worker_rpm_source_url = \
        ctx_properties['management_worker_rpm_source_url']
    runtime_props['rabbitmq_endpoint_ip'] = utils.get_rabbitmq_endpoint_ip()
    # Fix possible injections in json of rabbit credentials
    # See json.org for string spec
    for key in ['rabbitmq_username', 'rabbitmq_password']:
        # We will not escape newlines or other control characters,
        # we will accept them breaking
        # things noisily, e.g. on newlines and backspaces.
        # TODO: add:
        # sed 's/"/\\"/' | sed 's/\\/\\\\/' | sed s-/-\\/- | sed 's/\t/\\t/'
        runtime_props[key] = ctx_properties[key]
    runtime_props['rabbitmq_ssl_enabled'] = True
    utils.set_service_as_cloudify_service(runtime_props)
    ctx.logger.info('Installing Management Worker...')
    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)
    utils.mkdir(HOME_DIR)
    utils.mkdir(join(HOME_DIR, 'config'))
    utils.mkdir(join(HOME_DIR, 'work'))
    utils.mkdir(LOG_DIR)
    utils.mkdir(riemann_dir)
    mgmtworker_venv = join(HOME_DIR, 'env')
    # this create the mgmtworker_venv and installs the relevant
    # modules into it.
    utils.yum_install(management_worker_rpm_source_url,
                      service_name=SERVICE_NAME)
    _install_optional(mgmtworker_venv)
    # Add certificate and select port, as applicable
    runtime_props['broker_cert_path'] = utils.INTERNAL_CERT_PATH
    # Use SSL port
    runtime_props['broker_port'] = AMQP_SSL_PORT
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, HOME_DIR)
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, LOG_DIR)
    # Changing perms on workdir and venv in case they are put outside homedir
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, mgmtworker_venv)
    # Prepare riemann dir. We will change the owner to riemann later, but the
    # management worker will still need access to it
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, riemann_dir)
    utils.chmod('770', riemann_dir)
    ctx.logger.info("Using broker port: {0}".format(
        ctx.instance.runtime_properties['broker_port']))
def deploy_utils():
    """Render utils.py into the manager-ip-setter directory.

    The file is installed read/execute-only for root and the cloudify
    group (mode 550).
    """
    staging_path = join(tempfile.gettempdir(), 'utils.py')
    ctx.download_resource_and_render(
        join('components', 'utils.py'),
        staging_path,
    )
    installed_path = join(MANAGER_IP_SETTER_DIR, 'utils.py')
    utils.move(staging_path, installed_path)
    utils.chmod('550', installed_path)
    utils.chown('root', utils.CLOUDIFY_GROUP, installed_path)
def deploy_utils():
    """Render and install utils.py for the manager-ip-setter component.

    Downloads the rendered resource to a temp location, moves it into
    MANAGER_IP_SETTER_DIR, and restricts it to root + cloudify group
    (mode 550).
    """
    temp_destination = join(tempfile.gettempdir(), 'utils.py')
    ctx.download_resource_and_render(
        join('components', 'utils.py'),
        temp_destination,
    )
    utils_path = join(MANAGER_IP_SETTER_DIR, 'utils.py')
    utils.move(temp_destination, utils_path)
    utils.chmod('550', utils_path)
    utils.chown('root', utils.CLOUDIFY_GROUP, utils_path)
def _install_stage():
    """Install the Cloudify Stage (UI) service and its NodeJS runtime.

    Skips installation (setting the 'skip_installation' runtime prop)
    when the stage package is absent from the manager resources package.
    """
    nodejs_source_url = ctx_properties['nodejs_tar_source_url']
    stage_source_url = ctx_properties['stage_tar_source_url']
    if not utils.resource_factory.local_resource_exists(stage_source_url):
        ctx.logger.info('Stage package not found in manager resources '
                        'package. Stage will not be installed.')
        ctx.instance.runtime_properties['skip_installation'] = 'true'
        return
    # injected as an input to the script
    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        os.environ.get('INFLUXDB_ENDPOINT_IP')
    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)
    utils.mkdir(NODEJS_DIR)
    utils.mkdir(HOME_DIR)
    utils.mkdir(LOG_DIR)
    utils.create_service_user(STAGE_USER, STAGE_GROUP, HOME_DIR)
    ctx.logger.info('Installing NodeJS...')
    nodejs = utils.download_cloudify_resource(nodejs_source_url,
                                              SERVICE_NAME)
    utils.untar(nodejs, NODEJS_DIR)
    utils.remove(nodejs)
    ctx.logger.info('Installing Cloudify Stage (UI)...')
    stage_tar = utils.download_cloudify_resource(stage_source_url,
                                                 SERVICE_NAME)
    utils.untar(stage_tar, HOME_DIR)
    utils.remove(stage_tar)
    ctx.logger.info('Fixing permissions...')
    utils.chown(STAGE_USER, STAGE_GROUP, HOME_DIR)
    utils.chown(STAGE_USER, STAGE_GROUP, NODEJS_DIR)
    utils.chown(STAGE_USER, STAGE_GROUP, LOG_DIR)
    # Allow snapshot restore to run as the stage user via sudo
    utils.deploy_sudo_command_script(
        'restore-snapshot.py',
        'Restore stage directories from a snapshot path',
        component=SERVICE_NAME,
        allow_as=STAGE_USER)
    utils.chmod('a+rx', '/opt/cloudify/stage/restore-snapshot.py')
    utils.logrotate(SERVICE_NAME)
    utils.systemd.configure(SERVICE_NAME)
    # Run stage's DB migrations via npm (paths are constants, so the
    # shell=True string is not attacker-controlled)
    backend_dir = join(HOME_DIR, 'backend')
    npm_path = join(NODEJS_DIR, 'bin', 'npm')
    subprocess.check_call(
        'cd {0}; {1} run db-migrate'.format(backend_dir, npm_path),
        shell=True)
def _create_default_db(db_name, username, password):
    """Create the default PostgreSQL database via a helper shell script.

    Downloads create_default_db.sh to a temp path, makes it executable
    and runs it as the postgres user with the given db/user/password.
    """
    ctx.logger.info('Creating default postgresql database: {0}...'.format(
        db_name))
    ps_config_source = 'components/postgresql/config/create_default_db.sh'
    ps_config_destination = join(tempfile.gettempdir(),
                                 'create_default_db.sh')
    ctx.download_resource(source=ps_config_source,
                          destination=ps_config_destination)
    utils.chmod('+x', ps_config_destination)
    # TODO: Can't we use a rest call here? Is there such a thing?
    # NOTE(review): db/user/password are interpolated unquoted into a
    # shell command line — values containing spaces or shell
    # metacharacters would break or be interpreted; confirm inputs are
    # constrained upstream.
    utils.sudo('su - postgres -c "{cmd} {db} {user} {password}"'
               .format(cmd=ps_config_destination,
                       db=db_name,
                       user=username,
                       password=password))
def prepare_snapshot_permissions():
    """Adjust ownership/permissions so snapshots can read needed files.

    Moves root's .pgpass into the cloudify home dir with tight perms,
    and grants the cloudify group read/write over /opt/manager and the
    SSL certificates directory tree.
    """
    # TODO: See if all of this is necessary
    pgpass_location = '/root/.pgpass'
    destination = join(utils.CLOUDIFY_HOME_DIR, '.pgpass')
    # libpq requires pgpass to be private to its owner
    utils.chmod('400', pgpass_location)
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, pgpass_location)
    utils.sudo(['mv', pgpass_location, destination])
    utils.sudo(['chgrp', CLOUDIFY_GROUP, '/opt/manager'])
    utils.sudo(['chmod', 'g+rw', '/opt/manager'])
    # SSL certs dir (recursively) plus its parent must be group-writable
    utils.sudo(['chgrp', '-R', CLOUDIFY_GROUP, utils.SSL_CERTS_TARGET_DIR])
    utils.sudo(['chgrp', CLOUDIFY_GROUP, dirname(utils.SSL_CERTS_TARGET_DIR)])
    utils.sudo(['chmod', '-R', 'g+rw', utils.SSL_CERTS_TARGET_DIR])
    utils.sudo(['chmod', 'g+rw', dirname(utils.SSL_CERTS_TARGET_DIR)])
def getComplexImage(self):
    """Render this SKU's HTML to a data file and rasterize it.

    Writes the page content to 'sku-<skuid>-complex' (html) and returns
    the image produced by ImageKit from that file.
    """
    htmlContent = self.getHtml()
    htmlPath = OutputPath.getDataPath(
        'sku-{}-complex'.format(self.skuid), 'html')
    with open(htmlPath, 'w') as fp:
        fp.write(htmlContent)
    chmod(htmlPath)
    return ImageKit.fromHtml(htmlPath, pageSize=(80, 150))
def _create_default_db(db_name, username, password):
    """Create the default PostgreSQL database via a helper shell script.

    Downloads create_default_db.sh to a temp path, makes it executable
    and runs it as the postgres user with the given db/user/password.
    """
    ctx.logger.info(
        'Creating default postgresql database: {0}...'.format(db_name))
    ps_config_source = 'components/postgresql/config/create_default_db.sh'
    ps_config_destination = join(tempfile.gettempdir(),
                                 'create_default_db.sh')
    ctx.download_resource(source=ps_config_source,
                          destination=ps_config_destination)
    utils.chmod('+x', ps_config_destination)
    # TODO: Can't we use a rest call here? Is there such a thing?
    # NOTE(review): db/user/password are interpolated unquoted into a
    # shell command line — confirm the inputs are constrained upstream.
    utils.sudo('su - postgres -c "{cmd} {db} {user} {password}"'.format(
        cmd=ps_config_destination,
        db=db_name,
        user=username,
        password=password))
def prepare_snapshot_permissions():
    """Give the cloudify user/group access to files snapshots need.

    Relocates root's .pgpass to the cloudify home directory with tight
    permissions, then opens up /opt/manager and the SSL certificate
    tree to the cloudify group.
    """
    # TODO: See if all of this is necessary
    pgpass_src = '/root/.pgpass'
    pgpass_dst = join(utils.CLOUDIFY_HOME_DIR, '.pgpass')
    utils.chmod('400', pgpass_src)
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, pgpass_src)
    utils.sudo(['mv', pgpass_src, pgpass_dst])
    utils.sudo(['chgrp', CLOUDIFY_GROUP, '/opt/manager'])
    utils.sudo(['chmod', 'g+rw', '/opt/manager'])
    certs_dir = utils.SSL_CERTS_TARGET_DIR
    certs_parent = dirname(certs_dir)
    utils.sudo(['chgrp', '-R', CLOUDIFY_GROUP, certs_dir])
    utils.sudo(['chgrp', CLOUDIFY_GROUP, certs_parent])
    utils.sudo(['chmod', '-R', 'g+rw', certs_dir])
    utils.sudo(['chmod', 'g+rw', certs_parent])
def transfer(sources=list(), destination="", doc_id="", rename=True): # TODO: documentation for transfer function """ This function create the asset user path of a particular asset. :param doc_id: The asset code. :type doc_id: str :returns: str/bool -- Return the created path else False. **Example:** >>> createWorkspace ( doc_id = "prod_chr_mickey_mod_a" ) >>> '/homeworks/users/jdoe/projects/prod/chr/mickey/mod/a' """ # Check the sources type is a list if isinstance(sources, str): sources = list([sources]) files = dict() # Iterate over the file to transfer for src in sources: # Check if the source file exists if os.path.exists(src): # TODO: Make it simpler # Create the destination path basename = os.path.basename(src) filename = basename.replace(basename.split(".")[0], doc_id) # Set filename as key value for source file files[src] = os.path.join(destination, filename) else: print "Warning: %s doesn't exist" % src # Set the permission file utils.chmod(destination, 755) # Iterate over files for fil in files: dirname = os.path.dirname(files[fil]) if not os.path.exists(dirname): os.makedirs(dirname) utils.cp(fil, files[fil])
def configure_logstash():
    """Deploy Logstash configuration and enable the service at boot."""
    logstash_conf_path = '/etc/logstash/conf.d'
    runtime_properties = ctx.instance.runtime_properties
    rabbitmq_username = runtime_properties.get('rabbitmq_username')
    rabbitmq_password = runtime_properties.get('rabbitmq_password')
    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by the
    # deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')
    ctx.logger.info('Deploying Logstash configuration...')
    utils.deploy_blueprint_resource(
        '{0}/logstash.conf'.format(CONFIG_PATH),
        '{0}/logstash.conf'.format(logstash_conf_path),
        LOGSTASH_SERVICE_NAME)
    # Due to a bug in the handling of configuration files,
    # configuration files with the same name cannot be deployed.
    # Since the logrotate config file is called `logstash`,
    # we change the name of the logstash env vars config file
    # from logstash to cloudify-logstash to be consistent with
    # other service env var files.
    init_file = '/etc/init.d/logstash'
    utils.replace_in_file(
        'sysconfig/\$name',
        'sysconfig/cloudify-$name',
        init_file)
    utils.chmod('755', init_file)
    utils.chown('root', 'root', init_file)
    ctx.logger.debug('Deploying Logstash sysconfig...')
    utils.deploy_blueprint_resource(
        '{0}/cloudify-logstash'.format(CONFIG_PATH),
        '/etc/sysconfig/cloudify-logstash',
        LOGSTASH_SERVICE_NAME)
    utils.logrotate(LOGSTASH_SERVICE_NAME)
    # Enable the SysV service at boot
    utils.sudo(['/sbin/chkconfig', 'logstash', 'on'])
    utils.clean_var_log_dir(LOGSTASH_SERVICE_NAME)
def saveGetUrlImpl(self, pathname, url, force=False):
    """Fetch *url* and store the response body at *pathname*.

    Returns 1 when an existing local copy is kept (local mode, not
    forced), 0 on a successful download, and -1 when the request fails.
    """
    keep_existing = (not force and self.isLocal
                     and os.path.exists(pathname))
    if keep_existing:
        return 1
    response = Network.get(url)
    if response is None:
        return -1
    # TODO: add other judgement for http response
    with open(pathname, 'wb') as fp:
        fp.write(response.content)
    chmod(pathname)
    return 0
def _install_composer():
    """Install the Cloudify Composer service from its tarball.

    Skips installation (setting the 'skip_installation' runtime prop)
    when the composer package is absent from the manager resources
    package.
    """
    composer_source_url = ctx_properties['composer_tar_source_url']
    if not utils.resource_factory.local_resource_exists(composer_source_url):
        ctx.logger.info('Composer package not found in manager resources '
                        'package. Composer will not be installed.')
        ctx.instance.runtime_properties['skip_installation'] = 'true'
        return
    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)
    utils.mkdir(NODEJS_DIR)
    utils.mkdir(HOME_DIR)
    utils.mkdir(LOG_DIR)
    utils.create_service_user(COMPOSER_USER, COMPOSER_GROUP, HOME_DIR)
    # adding cfyuser to the composer group so that its files are r/w for
    # replication and snapshots (restart of mgmtworker necessary for change
    # to take effect)
    utils.sudo(['usermod', '-aG', COMPOSER_GROUP, utils.CLOUDIFY_USER])
    # This makes sure that the composer folders will be writable after
    # snapshot restore
    utils.sudo(['usermod', '-aG', utils.CLOUDIFY_GROUP, COMPOSER_USER])
    utils.systemd.restart('mgmtworker')
    ctx.logger.info('Installing Cloudify Composer...')
    composer_tar = utils.download_cloudify_resource(composer_source_url,
                                                    SERVICE_NAME)
    utils.untar(composer_tar, HOME_DIR)
    utils.remove(composer_tar)
    ctx.logger.info('Fixing permissions...')
    utils.chown(COMPOSER_USER, COMPOSER_GROUP, HOME_DIR)
    utils.chown(COMPOSER_USER, COMPOSER_GROUP, LOG_DIR)
    # Conf dir (and its parent) must be group-writable for snapshots
    utils.chmod('g+w', CONF_DIR)
    utils.chmod('g+w', dirname(CONF_DIR))
    utils.logrotate(SERVICE_NAME)
    utils.systemd.configure(SERVICE_NAME)
    # Run composer's DB migrations via npm (paths are constants, so the
    # shell=True string is not attacker-controlled)
    npm_path = join(NODEJS_DIR, 'bin', 'npm')
    subprocess.check_call('cd {}; {} run db-migrate'.format(
        HOME_DIR, npm_path), shell=True)
def saveImage(self, path, skuid):
    """Download the image for *skuid* and save it to *path*.

    Returns True on success, False when no image URL exists or the
    HTTP request fails. (Python 2 code: print statements.)
    """
    url = self.getImageUrl(skuid)
    if url is None:
        return False
    r = requests.get(url)
    if 200 != r.status_code:
        print 'Unable to get image data for "', skuid, '" with an error (', r.status_code, '):\n', r.text
        return False
    # Binary write of the raw response body
    with codecs.open(path, 'wb') as fp:
        fp.write(r.content)
    chmod(path)
    return True
def saveHttpDataImpl(self, pathname, url, host, force=False): if not force and self.isLocal and os.path.exists(pathname): return 1 if None == host: start = url.find('//') + 2 end = url[start:].find('/') host = url[start:start+end] url = url[start+end:] for i in range(0, 3): conn = httplib.HTTPConnection(host, timeout=10) try: conn.request("GET", url) res = conn.getresponse() if 200 != res.status: print res.status, res.reason continue data = res.read() except Exception: print 'Timeout, try it again. NO. ', i+1 # Sleep a while time.sleep(30 * i) continue finally: conn.close() fp = open(pathname, 'w') fp.write(data) fp.close() chmod(pathname) return 0 return -1
def install_consul():
    """Install the consul binary under HOME_DIR and create its config dir.

    Downloads the consul zip package, extracts it in a temp directory,
    moves the binary into place and marks it executable. The temp
    directory is always cleaned up.
    """
    consul_binary = join(HOME_DIR, 'consul')
    utils.mkdir(dirname(consul_binary))
    utils.mkdir(CONFIG_DIR)
    consul_package = \
        utils.download_cloudify_resource(ctx_properties['consul_package_url'],
                                         SERVICE_NAME)
    temp_dir = tempfile.mkdtemp()
    try:
        with zipfile.ZipFile(consul_package) as consul_archive:
            consul_archive.extractall(temp_dir)
        utils.move(join(temp_dir, 'consul'), consul_binary)
        utils.chmod('+x', consul_binary)
    finally:
        # Remove the extraction dir even if the install failed
        utils.remove(temp_dir)
def install_consul():
    """Download the consul package and install its binary under HOME_DIR."""
    consul_binary = join(HOME_DIR, 'consul')
    utils.mkdir(dirname(consul_binary))
    utils.mkdir(CONFIG_DIR)
    package_url = ctx_properties['consul_package_url']
    package_path = utils.download_cloudify_resource(package_url,
                                                    SERVICE_NAME)
    extract_dir = tempfile.mkdtemp()
    try:
        # Unzip into a scratch dir, then move just the binary into place
        with zipfile.ZipFile(package_path) as archive:
            archive.extractall(extract_dir)
        utils.move(join(extract_dir, 'consul'), consul_binary)
        utils.chmod('+x', consul_binary)
    finally:
        utils.remove(extract_dir)
def install_consul():
    """Install the consul binary at /opt/cloudify/consul/consul.

    Downloads the consul zip package, extracts it in a temp directory,
    moves the binary into place and marks it executable. The temp
    directory is always cleaned up.
    """
    consul_binary = '/opt/cloudify/consul/consul'
    consul_config_dir = '/etc/consul.d'
    utils.mkdir(dirname(consul_binary))
    utils.mkdir(consul_config_dir)
    consul_package = \
        utils.download_cloudify_resource(ctx_properties['consul_package_url'],
                                         CONSUL_SERVICE_NAME)
    temp_dir = tempfile.mkdtemp()
    try:
        with zipfile.ZipFile(consul_package) as consul_archive:
            consul_archive.extractall(temp_dir)
        utils.move(join(temp_dir, 'consul'), consul_binary)
        utils.chmod('+x', consul_binary)
    finally:
        # Remove the extraction dir even if the install failed
        utils.remove(temp_dir)
def install_consul():
    """Download the consul package and install its binary system-wide."""
    consul_binary = '/opt/cloudify/consul/consul'
    consul_config_dir = '/etc/consul.d'
    utils.mkdir(dirname(consul_binary))
    utils.mkdir(consul_config_dir)
    package_url = ctx_properties['consul_package_url']
    package_path = utils.download_cloudify_resource(package_url,
                                                    CONSUL_SERVICE_NAME)
    extract_dir = tempfile.mkdtemp()
    try:
        # Unzip into a scratch dir, then move just the binary into place
        with zipfile.ZipFile(package_path) as archive:
            archive.extractall(extract_dir)
        utils.move(join(extract_dir, 'consul'), consul_binary)
        utils.chmod('+x', consul_binary)
    finally:
        utils.remove(extract_dir)
def copy_start_script():
    """Render config_local_cfy.sh with admin credentials and install it.

    Reads the admin username/password from rest-security.conf, renders
    the script with them, and places it executable in the cloudify home
    directory. Best-effort: any failure is logged as a warning, not
    raised.
    """
    try:
        with open('/opt/manager/rest-security.conf') as security_config_file:
            security_config_content = security_config_file.read()
        security_config = json.loads(security_config_content)
        params = {
            'username': security_config['admin_username'],
            'password': security_config['admin_password']
        }
        script_name = 'config_local_cfy.sh'
        script_destination = join(utils.get_exec_tempdir(), script_name)
        ctx.download_resource_and_render(join(CONFIG_PATH, script_name),
                                         script_destination,
                                         params)
        # sudo mv because the cloudify home dir is not ours to write
        utils.sudo(['mv', script_destination,
                    join(utils.CLOUDIFY_HOME_DIR, script_name)])
        utils.chmod('+x', join(utils.CLOUDIFY_HOME_DIR, script_name))
    except Exception as ex:
        # Deliberately best-effort: a missing config script should not
        # fail the whole install
        ctx.logger.warn('Failed to deploy local cli config script. '
                        'Error: {0}'.format(ex))
def _create_postgres_pass_file(host, db_name, username, password):
    """Write /root/.pgpass with credentials for the given database.

    The format is the standard libpq pgpass line:
    ``host:port:db_name:username:password``.

    Fix: flush the temp file before chmod/move. Without the flush the
    buffered content may not be on disk yet when the file is renamed
    into place, producing an empty/truncated pgpass file (the sibling
    variants of this helper already flush).
    """
    pgpass_path = '/root/.pgpass'
    ctx.logger.info('Creating postgresql pgpass file: {0}'.format(pgpass_path))
    postgresql_default_port = 5432
    pgpass_content = '{host}:{port}:{db_name}:{user}:{password}'.format(
        host=host,
        port=postgresql_default_port,
        db_name=db_name,
        user=username,
        password=password)
    # .pgpass file used by mgmtworker in snapshot workflow,
    # and will be moved and have correct ownership assigned by the
    # management worker
    if os.path.isfile(pgpass_path):
        ctx.logger.debug('Deleting {0} file..'.format(pgpass_path))
        os.remove(pgpass_path)
    with tempfile.NamedTemporaryFile(delete=False) as temp_file:
        temp_file.write(pgpass_content)
        # Ensure content reaches disk before the file is chmod'ed/moved
        temp_file.flush()
        # libpq refuses pgpass files that are group/world readable
        utils.chmod('0600', temp_file.name)
        utils.move(source=temp_file.name,
                   destination=pgpass_path,
                   rename_only=True)
    ctx.logger.debug('Postgresql pass file {0} created'.format(pgpass_path))
def install_logstash():
    """Install Logstash from RPM and deploy its configuration.

    Records elasticsearch/rabbitmq endpoints and credentials in the
    runtime properties (consumed by the deployed config resources),
    installs the RPM, and wires up systemd override, sysconfig,
    logrotate and boot-time enablement.
    """
    logstash_unit_override = '/etc/systemd/system/logstash.service.d'
    logstash_source_url = ctx_properties['logstash_rpm_source_url']
    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']
    logstash_log_path = '/var/log/cloudify/logstash'
    logstash_conf_path = '/etc/logstash/conf.d'
    # injected as an input to the script
    ctx.instance.runtime_properties['es_endpoint_ip'] = \
        os.environ['ES_ENDPOINT_IP']
    elasticsearch_props = utils.ctx_factory.get('elasticsearch')
    ctx.instance.runtime_properties['es_endpoint_port'] = \
        elasticsearch_props['es_endpoint_port']
    rabbit_props = utils.ctx_factory.get('rabbitmq')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            rabbit_props.get('rabbitmq_endpoint_ip'))
    ctx.instance.runtime_properties['rabbitmq_username'] = \
        rabbit_props['rabbitmq_username']
    ctx.instance.runtime_properties['rabbitmq_password'] = \
        rabbit_props['rabbitmq_password']
    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by the
    # deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')
    ctx.logger.info('Installing Logstash...')
    utils.set_selinux_permissive()
    utils.copy_notice(LOGSTASH_SERVICE_NAME)
    utils.yum_install(logstash_source_url,
                      service_name=LOGSTASH_SERVICE_NAME)
    utils.mkdir(logstash_log_path)
    utils.chown('logstash', 'logstash', logstash_log_path)
    ctx.logger.info('Creating systemd unit override...')
    utils.mkdir(logstash_unit_override)
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(logstash_unit_override),
        LOGSTASH_SERVICE_NAME)
    ctx.logger.info('Deploying Logstash conf...')
    utils.deploy_blueprint_resource(
        '{0}/logstash.conf'.format(CONFIG_PATH),
        '{0}/logstash.conf'.format(logstash_conf_path),
        LOGSTASH_SERVICE_NAME)
    # Due to a bug in the handling of configuration files,
    # configuration files with the same name cannot be deployed.
    # Since the logrotate config file is called `logstash`,
    # we change the name of the logstash env vars config file
    # from logstash to cloudify-logstash to be consistent with
    # other service env var files.
    init_file = '/etc/init.d/logstash'
    utils.replace_in_file(
        'sysconfig/\$name',
        'sysconfig/cloudify-$name',
        init_file)
    utils.chmod('755', init_file)
    utils.chown('root', 'root', init_file)
    ctx.logger.info('Deploying Logstash sysconfig...')
    utils.deploy_blueprint_resource(
        '{0}/cloudify-logstash'.format(CONFIG_PATH),
        '/etc/sysconfig/cloudify-logstash',
        LOGSTASH_SERVICE_NAME)
    utils.logrotate(LOGSTASH_SERVICE_NAME)
    # Enable the SysV service at boot
    utils.sudo(['/sbin/chkconfig', 'logstash', 'on'])
    utils.clean_var_log_dir(LOGSTASH_SERVICE_NAME)
def install_logstash():
    """Install Logstash from RPM and deploy its configuration (variant).

    Records elasticsearch/rabbitmq endpoints and credentials in the
    runtime properties (consumed by the deployed config resources),
    installs the RPM, and wires up systemd override, sysconfig,
    logrotate and boot-time enablement.
    """
    logstash_unit_override = '/etc/systemd/system/logstash.service.d'
    logstash_source_url = ctx_properties['logstash_rpm_source_url']
    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']
    logstash_log_path = '/var/log/cloudify/logstash'
    logstash_conf_path = '/etc/logstash/conf.d'
    # injected as an input to the script
    ctx.instance.runtime_properties['es_endpoint_ip'] = \
        os.environ['ES_ENDPOINT_IP']
    elasticsearch_props = utils.ctx_factory.get('elasticsearch')
    ctx.instance.runtime_properties['es_endpoint_port'] = \
        elasticsearch_props['es_endpoint_port']
    rabbit_props = utils.ctx_factory.get('rabbitmq')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            rabbit_props.get('rabbitmq_endpoint_ip'))
    ctx.instance.runtime_properties['rabbitmq_username'] = \
        rabbit_props['rabbitmq_username']
    ctx.instance.runtime_properties['rabbitmq_password'] = \
        rabbit_props['rabbitmq_password']
    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by the
    # deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')
    ctx.logger.info('Installing Logstash...')
    utils.set_selinux_permissive()
    utils.copy_notice(LOGSTASH_SERVICE_NAME)
    utils.yum_install(logstash_source_url,
                      service_name=LOGSTASH_SERVICE_NAME)
    utils.mkdir(logstash_log_path)
    utils.chown('logstash', 'logstash', logstash_log_path)
    ctx.logger.debug('Creating systemd unit override...')
    utils.mkdir(logstash_unit_override)
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(logstash_unit_override),
        LOGSTASH_SERVICE_NAME)
    ctx.logger.info('Deploying Logstash configuration...')
    utils.deploy_blueprint_resource(
        '{0}/logstash.conf'.format(CONFIG_PATH),
        '{0}/logstash.conf'.format(logstash_conf_path),
        LOGSTASH_SERVICE_NAME)
    # Due to a bug in the handling of configuration files,
    # configuration files with the same name cannot be deployed.
    # Since the logrotate config file is called `logstash`,
    # we change the name of the logstash env vars config file
    # from logstash to cloudify-logstash to be consistent with
    # other service env var files.
    init_file = '/etc/init.d/logstash'
    utils.replace_in_file('sysconfig/\$name',
                          'sysconfig/cloudify-$name',
                          init_file)
    utils.chmod('755', init_file)
    utils.chown('root', 'root', init_file)
    ctx.logger.debug('Deploying Logstash sysconfig...')
    utils.deploy_blueprint_resource(
        '{0}/cloudify-logstash'.format(CONFIG_PATH),
        '/etc/sysconfig/cloudify-logstash',
        LOGSTASH_SERVICE_NAME)
    utils.logrotate(LOGSTASH_SERVICE_NAME)
    # Enable the SysV service at boot
    utils.sudo(['/sbin/chkconfig', 'logstash', 'on'])
    utils.clean_var_log_dir(LOGSTASH_SERVICE_NAME)
def create_sudoers_file_and_disable_sudo_requiretty():
    """Create the cloudify sudoers file and drop requiretty for the user.

    Without this, sudo commands issued by the non-interactive cloudify
    user would be rejected on hosts where requiretty is the default.
    """
    sudoers_file = utils.CLOUDIFY_SUDOERS_FILE
    utils.sudo(['touch', sudoers_file])
    utils.chmod('440', sudoers_file)
    cloudify_user = utils.CLOUDIFY_USER
    entry = 'Defaults:{user} !requiretty'.format(user=cloudify_user)
    description = 'Disable sudo requiretty for {0}'.format(cloudify_user)
    utils.add_entry_to_sudoers(entry, description)
def create_sudoers_file_and_disable_sudo_requiretty():
    """Create the cloudify sudoers file (mode 440) and disable
    requiretty for the cloudify user so non-interactive sudo works."""
    utils.sudo(['touch', utils.CLOUDIFY_SUDOERS_FILE])
    utils.chmod('440', utils.CLOUDIFY_SUDOERS_FILE)
    entry = 'Defaults:{user} !requiretty'.format(user=utils.CLOUDIFY_USER)
    description = 'Disable sudo requiretty for {0}'.format(utils.CLOUDIFY_USER)
    utils.add_entry_to_sudoers(entry, description)