# Stdlib imports used below; `logger`, `ec2`, `execute`, and `msg` are
# assumed to be defined elsewhere in this module.
import json
from tempfile import NamedTemporaryFile


def pemfile(request):
    run_command = 'ctool' \
                  ' --provider %(cloud-option)s' \
                  ' dump_key' \
                  ' %(cluster-id)s'
    run_command = run_command % request
    logger.info('Executing: %s', run_command)
    response = execute.run(run_command)
    return response
def start_opscenter(access_logger, postvars, reservation_id):
    if postvars['opscenter-install'] == 'yes':
        ec2.tag_reservation(reservation_id, 'status',
                            'Starting opscenter...')
        start_command = 'ctool' \
                        ' --provider %(cloud-option)s' \
                        ' start' \
                        ' %(full_name)s' \
                        ' opscenter'
        start_command = start_command % postvars
        msg(access_logger, start_command)
        logger.info('Executing: %s', start_command)
        response = execute.run(start_command)
        if response.stderr:
            return response
def start(access_logger, postvars, reservation_id):
    ec2.tag_reservation(reservation_id, 'status',
                        'Starting %(product-name)s...' % postvars)
    start_command = 'ctool' \
                    ' --provider %(cloud-option)s' \
                    ' start' \
                    ' %(full_name)s' \
                    ' %(product-name)s'
    start_command = start_command % postvars
    msg(access_logger, start_command)
    logger.info('Executing: %s', start_command)
    response = execute.run(start_command)
    if response.stderr:
        return response
def start_agent(access_logger, postvars, reservation_id):
    if postvars['opscenter-install'] == 'yes':
        ec2.tag_reservation(reservation_id, 'status',
                            'Starting DataStax Agents...')
        start_command = 'ctool' \
                        ' --provider %(cloud-option)s' \
                        ' run' \
                        ' %(full_name)s' \
                        ' all' \
                        ' "sudo service datastax-agent start"'
        start_command = start_command % postvars
        msg(access_logger, start_command)
        logger.info('Executing: %s', start_command)
        response = execute.run(start_command)
        if response.stderr:
            return response
def install_opscenter(access_logger, postvars, reservation_id):
    if postvars['opscenter-install'] == 'yes':
        ec2.tag_reservation(reservation_id, 'status',
                            'Installing opscenter...')
        install_command = 'ctool' \
                          ' --provider %(cloud-option)s' \
                          ' install' \
                          ' --repo qa' \
                          ' --version_or_branch %(opscenter-version)s' \
                          ' %(full_name)s' \
                          ' opscenter'
        install_command = install_command % postvars
        msg(access_logger, install_command)
        logger.info('Executing: %s', install_command)
        response = execute.run(install_command)
        if response.stderr:
            return response
def launch(access_logger, postvars):
    # Shell commands are used here deliberately so presales can learn ctool
    # in a more relatable fashion.
    launch_command = 'ctool' \
                     ' --log-dir /portal/demo-portal/automaton_logs/' \
                     '%(clean_email)s' \
                     ' --log-file %(log_file)s' \
                     ' --provider %(cloud-option)s' \
                     ' launch' \
                     ' --instance-type %(instance-type)s' \
                     ' --platform %(platform)s' \
                     ' --tags \'%(tags)s\'' \
                     ' %(full_name)s' \
                     ' %(num_nodes)s'
    launch_command = launch_command % postvars
    msg(access_logger, launch_command)
    logger.info('Executing: %s', launch_command)
    response = execute.run(launch_command)
    if response.stderr:
        return response
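# For illustration only (not in the original module): a hypothetical
# `postvars` mapping showing the keys the launch template above interpolates.
# The keys come from the template; every value here is an invented example.
EXAMPLE_LAUNCH_POSTVARS = {
    'clean_email': 'jane_doe_example_com',  # sanitized email, used as log dir
    'log_file': 'launch.log',
    'cloud-option': 'ec2',                  # value passed to --provider
    'instance-type': 'm3.xlarge',
    'platform': 'ubuntu',
    'tags': '{"owner": "jane"}',            # JSON string, wrapped in quotes
    'full_name': 'jane-demo-cluster',
    'num_nodes': '4',
}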
def install(access_logger, postvars, reservation_id):
    ec2.tag_reservation(reservation_id, 'user', remove=True)
    ec2.tag_reservation(reservation_id, 'status',
                        'Installing %(product-name)s...' % postvars)
    postvars['spark_hadoop'] = '--spark-hadoop' \
        if 'spark-and-hadoop' in postvars else ''
    postvars['tarball'] = '--install_type tar' \
        if not postvars['dse-version'].endswith('-1') else ''
    if len(postvars['advanced_nodes']['cluster']['nodes']) == 0:
        # calculate install values; coerce num_nodes explicitly since form
        # values may arrive as strings
        num_nodes = float(postvars['num_nodes'])
        postvars['percent_analytics'] = float(postvars['hadoop-nodes']) / \
            num_nodes
        postvars['percent_search'] = float(postvars['search-nodes']) / \
            num_nodes
        postvars['percent_spark'] = float(postvars['spark-nodes']) / \
            num_nodes
        install_command = 'ctool' \
                          ' --provider %(cloud-option)s' \
                          ' install' \
                          ' --repo qa' \
                          ' --percent-analytics %(percent_analytics)s' \
                          ' --percent-search %(percent_search)s' \
                          ' --percent-spark %(percent_spark)s' \
                          ' %(spark_hadoop)s' \
                          ' %(tarball)s' \
                          ' --version_or_branch %(dse-version)s' \
                          ' --num-tokens %(num-of-tokens)s' \
                          ' %(full_name)s' \
                          ' %(product-name)s'
        install_command = install_command % postvars
        logger.info('Executing: %s', install_command)
        response = execute.run(install_command)
        msg(access_logger, install_command)
    else:
        # Text mode so the json.dumps output can be written directly.
        with NamedTemporaryFile('w+') as f:
            postvars['config_file'] = f.name
            f.write(json.dumps(postvars['advanced_nodes'], indent=4,
                               sort_keys=True))
            f.flush()
            install_command = 'ctool' \
                              ' --provider %(cloud-option)s' \
                              ' install' \
                              ' --repo qa' \
                              ' --config-file %(config_file)s' \
                              ' %(spark_hadoop)s' \
                              ' --version_or_branch %(dse-version)s' \
                              ' --num-tokens %(num-of-tokens)s' \
                              ' %(full_name)s' \
                              ' %(product-name)s'
            install_command = install_command % postvars
            logger.info('Executing: %s', install_command)
            # Rewind before reading back, otherwise read() returns ''.
            f.seek(0)
            logger.debug('With config-file: \n%s', f.read())
            response = execute.run(install_command)
            msg(access_logger, install_command)
            msg(access_logger,
                '--config-file: %s' % json.dumps(postvars['advanced_nodes']))
    if response.stderr:
        return response
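# A minimal orchestration sketch (not part of the original module), assuming
# the request handler already has `access_logger`, `postvars`, and
# `reservation_id`. It leans on the convention used throughout this file:
# each step returns a response object only when ctool wrote to stderr, so
# any truthy return value signals a failure to report upstream.
def provision_cluster(access_logger, postvars, reservation_id):
    error = launch(access_logger, postvars)
    if error:
        return error
    # One plausible ordering; the original call sequence is not shown here.
    for step in (install, install_opscenter, start,
                 start_opscenter, start_agent):
        error = step(access_logger, postvars, reservation_id)
        if error:
            return error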