def create_mount_volumes(self):
    """Enable mount volumes on this cluster's CloudFormation stack."""
    logger.info(
        'Enabling mount volumes for cluster {} (stack id {})'.format(
            self.dns_address, self.aws_stack_id))
    # local import: only needed when mount volumes are actually enabled
    import enable_mount_volumes
    # fabric spams to stdout, which causes problems with launch_ccm_cluster.
    # force total redirect to stderr:
    enable_mount_volumes.main(self.aws_stack_id, stdout=sys.stderr)
def create_mount_volumes(self):
    """Enable mount volumes on the CloudFormation stack identified by self.aws_stack_id."""
    fmt = 'Enabling mount volumes for cluster {} (stack id {})'
    logger.info(fmt.format(self.dns_address, self.aws_stack_id))
    # local import: the module is only needed when mount volumes are enabled
    import enable_mount_volumes
    # fabric spams to stdout, which causes problems with launch_ccm_cluster.
    # force total redirect to stderr:
    enable_mount_volumes.main(self.aws_stack_id, stdout=sys.stderr)
def _start(self, config):
    """Launch a CCM cluster described by *config* and wait for it to run.

    Returns a dict with 'id' (CCM cluster id), 'url' (https cluster URL),
    and 'auth_token' (ACS token fetched up-front).

    Raises:
        Exception: if the CCM request fails, the response lacks an id,
            stack_id, or DnsAddress, or the cluster never reaches RUNNING.
    """
    is_17_cluster = config.ccm_channel in self._DCOS_17_CHANNELS
    if is_17_cluster:
        hostrepo = 's3.amazonaws.com/downloads.mesosphere.io/dcos'
    elif config.cf_template.startswith('ee.'):
        hostrepo = 's3.amazonaws.com/downloads.mesosphere.io/dcos-enterprise'
    else:
        hostrepo = 's3-us-west-2.amazonaws.com/downloads.dcos.io/dcos'
    template_url = 'https://{}/{}/cloudformation/{}'.format(
        hostrepo, config.ccm_channel, config.cf_template)
    # an explicit template URL in the config overrides the derived one
    if config.template_url:
        template_url = config.template_url
    cluster_name = config.name_prefix + self._rand_str(8)
    payload = {
        'template_url': template_url,
        'name': cluster_name,
        'cluster_desc': config.description,
        'time': config.duration_mins,
        'private_agents': str(config.private_agents),
        'public_agents': str(config.public_agents),
        'pre_1_8_cluster': is_17_cluster,
        'adminlocation': config.admin_location,
        'cloud_provider': config.cloud_provider,
        'region': config.aws_region
    }
    logger.info('''Launching cluster:
  name={}
  agents={} private/{} public
  duration={} minutes
  mountvols={}
  permissions={}
  channel={}
  template={}'''.format(cluster_name, config.private_agents,
                        config.public_agents, config.duration_mins,
                        config.mount_volumes, config.permissions,
                        config.ccm_channel, config.cf_template))
    response = self._query_http('POST', self._CCM_PATH,
                                request_json_payload=payload)
    if not response:
        raise Exception('CCM cluster creation request failed')
    response_content = response.read().decode('utf-8')
    response_json = json.loads(response_content)
    logger.info('Launch response:\n{}'.format(
        pprint.pformat(response_json)))
    cluster_id = int(response_json.get('id', 0))
    if not cluster_id:
        raise Exception(
            'No Cluster ID returned in cluster creation response: {}'.format(
                response_content))
    stack_id = response_json.get('stack_id', '')
    if not stack_id:
        raise Exception(
            'No Stack ID returned in cluster creation response: {}'.format(
                response_content))
    cluster_info = self.wait_for_status(
        cluster_id, 'CREATING', 'RUNNING', config.start_timeout_mins)
    if not cluster_info:
        raise Exception('CCM cluster creation failed or timed out')
    dns_address = cluster_info.get('DnsAddress', '')
    if not dns_address:
        raise Exception(
            'CCM cluster_info is missing DnsAddress: {}'.format(
                cluster_info))
    logger.info('Cluster is now RUNNING: {}'.format(cluster_info))

    if config.mount_volumes:
        logger.info(
            'Enabling mount volumes for cluster {} (stack id {})'.format(
                cluster_id, stack_id))
        # fabric spams to stdout, which causes problems with launch_ccm_cluster.
        # force total redirect to stderr; restore stdout even if main() raises:
        stdout = sys.stdout
        sys.stdout = sys.stderr
        try:
            import enable_mount_volumes
            enable_mount_volumes.main(stack_id)
        finally:
            sys.stdout = stdout

    # we fetch the token once up-front because on Open clusters it must be reused.
    # given that, we may as well use the same flow across both Open and EE.
    logger.info('Fetching auth token')
    dcos_url = 'https://' + dns_address
    auth_token = dcos_login.DCOSLogin(dcos_url).get_acs_token()

    if config.permissions:
        logger.info(
            'Setting up permissions for cluster {} (stack id {})'.format(
                cluster_id, stack_id))

        def run_script(scriptname, args=None):
            # None default instead of a shared mutable [] default
            args = list(args) if args else []
            logger.info('Command: {} {}'.format(scriptname, ' '.join(args)))
            script_path = os.path.join(
                os.path.dirname(os.path.realpath(__file__)), scriptname)
            # redirect stdout to stderr:
            subprocess.check_call(
                ['bash', script_path] + args, stdout=sys.stderr)

        run_script('create_service_account.sh',
                   [dcos_url, auth_token, '--strict'])
        # Examples of what individual tests should run. See respective projects' "test.sh":
        #run_script('setup_permissions.sh', 'nobody cassandra-role'.split())
        #run_script('setup_permissions.sh', 'nobody hdfs-role'.split())
        #run_script('setup_permissions.sh', 'nobody kafka-role'.split())
        #run_script('setup_permissions.sh', 'nobody spark-role'.split())

    return {'id': cluster_id, 'url': dcos_url, 'auth_token': auth_token}
def _start(self, config):
    """Launch a CCM cluster described by *config* and wait for it to run.

    Returns a dict with 'id' (CCM cluster id), 'url' (https cluster URL),
    and 'auth_token' (ACS token fetched up-front).

    Raises:
        Exception: if the CCM request fails, the response lacks an id,
            stack_id, or DnsAddress, or the cluster never reaches RUNNING.
    """
    is_17_cluster = config.ccm_channel in self._DCOS_17_CHANNELS
    if is_17_cluster:
        hostrepo = 's3.amazonaws.com/downloads.mesosphere.io/dcos'
    elif config.cf_template.startswith('ee.'):
        hostrepo = 's3.amazonaws.com/downloads.mesosphere.io/dcos-enterprise'
    else:
        hostrepo = 's3-us-west-2.amazonaws.com/downloads.dcos.io/dcos'
    template_url = 'https://{}/{}/cloudformation/{}'.format(
        hostrepo, config.ccm_channel, config.cf_template)
    # an explicit template URL in the config overrides the derived one
    if config.template_url:
        template_url = config.template_url
    cluster_name = config.name_prefix + self._rand_str(8)
    payload = {
        'template_url': template_url,
        'name': cluster_name,
        'cluster_desc': config.description,
        'time': config.duration_mins,
        'private_agents': str(config.private_agents),
        'public_agents': str(config.public_agents),
        'pre_1_8_cluster': is_17_cluster,
        'adminlocation': config.admin_location,
        'cloud_provider': config.cloud_provider,
        'region': config.aws_region
    }
    logger.info('''Launching cluster:
  name={}
  agents={} private/{} public
  duration={} minutes
  mountvols={}
  channel={}
  template={}'''.format(cluster_name, config.private_agents,
                        config.public_agents, config.duration_mins,
                        config.mount_volumes, config.ccm_channel,
                        config.cf_template))
    response = self._query_http('POST', self._CCM_PATH,
                                request_json_payload=payload)
    if not response:
        raise Exception('CCM cluster creation request failed')
    response_content = response.read().decode('utf-8')
    response_json = json.loads(response_content)
    logger.info('Launch response:\n{}'.format(
        pprint.pformat(response_json)))
    cluster_id = int(response_json.get('id', 0))
    if not cluster_id:
        raise Exception(
            'No Cluster ID returned in cluster creation response: {}'.format(
                response_content))
    stack_id = response_json.get('stack_id', '')
    if not stack_id:
        raise Exception(
            'No Stack ID returned in cluster creation response: {}'.format(
                response_content))
    cluster_info = self.wait_for_status(
        cluster_id, 'CREATING', 'RUNNING', config.start_timeout_mins)
    if not cluster_info:
        raise Exception('CCM cluster creation failed or timed out')
    dns_address = cluster_info.get('DnsAddress', '')
    if not dns_address:
        raise Exception(
            'CCM cluster_info is missing DnsAddress: {}'.format(
                cluster_info))

    if config.mount_volumes:
        logger.info(
            'Enabling mount volumes for cluster {} (stack id {})'.format(
                cluster_id, stack_id))
        # fabric spams to stdout, which causes problems with launch_ccm_cluster.
        # force total redirect to stderr; restore stdout even if main() raises:
        stdout = sys.stdout
        sys.stdout = sys.stderr
        try:
            import enable_mount_volumes
            enable_mount_volumes.main(stack_id)
        finally:
            sys.stdout = stdout

    # we fetch the token once up-front because on Open clusters it must be reused.
    # given that, we may as well use the same flow across both Open and EE.
    logger.info('Fetching auth token')
    dcos_url = 'https://' + dns_address
    auth_token = dcos_login.DCOSLogin(dcos_url).get_acs_token()

    return {'id': cluster_id, 'url': dcos_url, 'auth_token': auth_token}
def _start(self, config):
    """Create a CCM cluster from *config*, wait for RUNNING, and log in.

    Returns a dict with 'id', 'url', and 'auth_token' for the new cluster.

    Raises:
        Exception: on a failed CCM request, a malformed creation response
            (missing id/stack_id/DnsAddress), or a startup timeout.
    """
    is_17_cluster = config.ccm_channel in self._DCOS_17_CHANNELS
    if is_17_cluster:
        hostrepo = 's3.amazonaws.com/downloads.mesosphere.io/dcos'
    elif config.cf_template.startswith('ee.'):
        hostrepo = 's3.amazonaws.com/downloads.mesosphere.io/dcos-enterprise'
    else:
        hostrepo = 's3-us-west-2.amazonaws.com/downloads.dcos.io/dcos'
    template_url = 'https://{}/{}/cloudformation/{}'.format(
        hostrepo, config.ccm_channel, config.cf_template)
    # an explicit template URL in the config overrides the derived one
    if config.template_url:
        template_url = config.template_url
    cluster_name = config.name_prefix + self._rand_str(8)
    payload = {
        'template_url': template_url,
        'name': cluster_name,
        'cluster_desc': config.description,
        'time': config.duration_mins,
        'private_agents': str(config.private_agents),
        'public_agents': str(config.public_agents),
        'pre_1_8_cluster': is_17_cluster,
        'adminlocation': config.admin_location,
        'cloud_provider': config.cloud_provider,
        'region': config.aws_region
    }
    logger.info('''Launching cluster:
  name={}
  agents={} private/{} public
  duration={} minutes
  mountvols={}
  channel={}
  template={}'''.format(
        cluster_name,
        config.private_agents, config.public_agents,
        config.duration_mins,
        config.mount_volumes,
        config.ccm_channel,
        config.cf_template))
    response = self._query_http('POST', self._CCM_PATH,
                                request_json_payload=payload)
    if not response:
        raise Exception('CCM cluster creation request failed')
    response_content = response.read().decode('utf-8')
    response_json = json.loads(response_content)
    logger.info('Launch response:\n{}'.format(pprint.pformat(response_json)))
    cluster_id = int(response_json.get('id', 0))
    if not cluster_id:
        raise Exception('No Cluster ID returned in cluster creation response: {}'.format(response_content))
    stack_id = response_json.get('stack_id', '')
    if not stack_id:
        raise Exception('No Stack ID returned in cluster creation response: {}'.format(response_content))
    cluster_info = self.wait_for_status(cluster_id, 'CREATING', 'RUNNING', config.start_timeout_mins)
    if not cluster_info:
        raise Exception('CCM cluster creation failed or timed out')
    dns_address = cluster_info.get('DnsAddress', '')
    if not dns_address:
        raise Exception('CCM cluster_info is missing DnsAddress: {}'.format(cluster_info))

    if config.mount_volumes:
        logger.info('Enabling mount volumes for cluster {} (stack id {})'.format(cluster_id, stack_id))
        # fabric spams to stdout, which causes problems with launch_ccm_cluster.
        # force total redirect to stderr; restore stdout even on failure:
        stdout = sys.stdout
        sys.stdout = sys.stderr
        try:
            import enable_mount_volumes
            enable_mount_volumes.main(stack_id)
        finally:
            sys.stdout = stdout

    # we fetch the token once up-front because on Open clusters it must be reused.
    # given that, we may as well use the same flow across both Open and EE.
    logger.info('Fetching auth token')
    dcos_url = 'https://' + dns_address
    auth_token = dcos_login.DCOSLogin(dcos_url).get_acs_token()

    return {
        'id': cluster_id,
        'url': dcos_url,
        'auth_token': auth_token
    }
def _start(self, config):
    """Create a CCM cluster from *config*, wait for RUNNING, and log in.

    Returns a dict with 'id', 'url', and 'auth_token' for the new cluster.

    Raises:
        Exception: on a failed CCM request, a malformed creation response
            (missing id/stack_id/DnsAddress), or a startup timeout.
    """
    is_17_cluster = config.ccm_channel in self._DCOS_17_CHANNELS
    if is_17_cluster:
        hostrepo = 's3.amazonaws.com/downloads.mesosphere.io/dcos'
    elif config.cf_template.startswith('ee.'):
        hostrepo = 's3.amazonaws.com/downloads.mesosphere.io/dcos-enterprise'
    else:
        hostrepo = 's3-us-west-2.amazonaws.com/downloads.dcos.io/dcos'
    template_url = 'https://{}/{}/cloudformation/{}'.format(
        hostrepo, config.ccm_channel, config.cf_template)
    # an explicit template URL in the config overrides the derived one
    if config.template_url:
        template_url = config.template_url
    cluster_name = config.name_prefix + self._rand_str(8)
    payload = {
        'template_url': template_url,
        'name': cluster_name,
        'cluster_desc': config.description,
        'time': config.duration_mins,
        'private_agents': str(config.private_agents),
        'public_agents': str(config.public_agents),
        'pre_1_8_cluster': is_17_cluster,
        'adminlocation': config.admin_location,
        'cloud_provider': config.cloud_provider,
        'region': config.aws_region
    }
    logger.info('''Launching cluster:
  name={}
  agents={} private/{} public
  duration={} minutes
  mountvols={}
  permissions={}
  channel={}
  template={}'''.format(
        cluster_name,
        config.private_agents, config.public_agents,
        config.duration_mins,
        config.mount_volumes,
        config.permissions,
        config.ccm_channel,
        config.cf_template))
    response = self._query_http('POST', self._CCM_PATH,
                                request_json_payload=payload)
    if not response:
        raise Exception('CCM cluster creation request failed')
    response_content = response.read().decode('utf-8')
    response_json = json.loads(response_content)
    logger.info('Launch response:\n{}'.format(pprint.pformat(response_json)))
    cluster_id = int(response_json.get('id', 0))
    if not cluster_id:
        raise Exception('No Cluster ID returned in cluster creation response: {}'.format(response_content))
    stack_id = response_json.get('stack_id', '')
    if not stack_id:
        raise Exception('No Stack ID returned in cluster creation response: {}'.format(response_content))
    cluster_info = self.wait_for_status(cluster_id, 'CREATING', 'RUNNING', config.start_timeout_mins)
    if not cluster_info:
        raise Exception('CCM cluster creation failed or timed out')
    dns_address = cluster_info.get('DnsAddress', '')
    if not dns_address:
        raise Exception('CCM cluster_info is missing DnsAddress: {}'.format(cluster_info))
    logger.info('Cluster is now RUNNING: {}'.format(cluster_info))

    if config.mount_volumes:
        logger.info('Enabling mount volumes for cluster {} (stack id {})'.format(cluster_id, stack_id))
        # fabric spams to stdout, which causes problems with launch_ccm_cluster.
        # force total redirect to stderr; restore stdout even if main() raises:
        stdout = sys.stdout
        sys.stdout = sys.stderr
        try:
            import enable_mount_volumes
            enable_mount_volumes.main(stack_id)
        finally:
            sys.stdout = stdout

    if config.permissions:
        logger.info('Setting up permissions for cluster {} (stack id {})'.format(cluster_id, stack_id))

        def run_script(scriptname, args=None):
            # None default instead of a shared mutable [] default
            args = list(args) if args else []
            logger.info('Command: {} {}'.format(scriptname, ' '.join(args)))
            script_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), scriptname)
            # Reassigning sys.stdout does not redirect a child process's
            # output (the child inherits the real stdout fd), so pass the
            # redirect to subprocess explicitly to keep stdout clean:
            subprocess.check_call(['bash', script_path] + args, stdout=sys.stderr)

        run_script('create_service_account.sh', ['--strict'])
        # Examples of what individual tests should run. See respective projects' "test.sh":
        #run_script('setup_permissions.sh', 'nobody cassandra-role'.split())
        #run_script('setup_permissions.sh', 'nobody hdfs-role'.split())
        #run_script('setup_permissions.sh', 'nobody kafka-role'.split())
        #run_script('setup_permissions.sh', 'nobody spark-role'.split())

    # we fetch the token once up-front because on Open clusters it must be reused.
    # given that, we may as well use the same flow across both Open and EE.
    logger.info('Fetching auth token')
    dcos_url = 'https://' + dns_address
    auth_token = dcos_login.DCOSLogin(dcos_url).get_acs_token()

    return {
        'id': cluster_id,
        'url': dcos_url,
        'auth_token': auth_token
    }