def get_bucket_name(self):
    should_create = cli.InputPrompt('Create S3 bucket for file ingest (Y/n)?', 'y').show()
    if should_create == 'n':
        return None
    bucket_name = cli.InputPrompt('Ingest bucket name').show()
    return bucket_name

def do_new(self, cmd_args):
    '''Creates a new Kinesis stream spec for generating a Terraform file.
    '''
    abort_message = 'cancelling stream creation.'
    try:
        stream_name = None
        resource_name = None

        with mandatory_input(cli.InputPrompt('Kinesis stream name'),
                             max_retries=1,
                             warning_message='a Kinesis stream requires a name.',
                             failure_message=abort_message) as input_result:
            stream_name = input_result.data

        with mandatory_input(cli.InputPrompt('Terraform resource name'),
                             max_retries=1,
                             warning_message='a Kinesis stream requires a Terraform resource name.',
                             failure_message=abort_message) as input_result:
            resource_name = input_result.data

        # TODO: add mandatory_type and mandatory_format context managers,
        # factor into snap.cli module
        shard_count = cli.InputPrompt('shard count', '1').show()
        retention_period = cli.InputPrompt('data retention period in hours', '24').show()

        stream_spec = StreamSpec(stream_name, resource_name, shard_count, retention_period)
        self.stream_specs.append(stream_spec)

    except MissingInput as err:
        print(err)
        return

def create_bucket(self):
    bucket_name = None
    quota = None
    num_replicas = None

    with mandatory_input(cli.InputPrompt('bucket name'),
                         warning_message='a bucket must have a name',
                         failure_message='bad or missing bucket name') as input_result:
        bucket_name = input_result.data

    bucket_type = cli.MenuPrompt('bucket type', BUCKET_TYPE_OPTIONS).show()

    with required_input_format(INT_REGEX,
                               cli.InputPrompt('bucket RAM quota (GB)'),
                               warning_message='RAM quota must be a positive integer',
                               failure_message='bad RAM quota value') as input_result:
        quota = input_result.data

    value_test_func = lambda x: re.compile(INT_REGEX).match(x) and int(x) < 5
    with constrained_input_value(value_test_func,
                                 cli.InputPrompt('number of replicas'),
                                 warning_message='# of replicas must be less than 5',
                                 failure_message='# of replicas outside of allowed limits') as input_result:
        num_replicas = input_result.data

    return CouchbaseBucketSpec(name=bucket_name,
                               type=bucket_type,
                               ram_quota=quota,
                               num_replicas=num_replicas)

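# The snap.cli validation helpers used above (mandatory_input, required_input_format,
# constrained_input_value) are defined elsewhere. The sketch below is NOT the actual
# implementation -- it is a minimal illustration of the semantics these call sites assume:
# re-prompt up to max_retries, raise MissingInput(failure_message) when the input never
# passes, and otherwise yield an object whose .data attribute holds the accepted value.
# The InputResult name and the retry behavior are assumptions.

import re
from collections import namedtuple
from contextlib import contextmanager

InputResult = namedtuple('InputResult', 'data')  # hypothetical result wrapper


class MissingInput(Exception):
    pass


@contextmanager
def mandatory_input(prompt, max_retries=1, warning_message='', failure_message=''):
    '''Re-show <prompt> until it returns a non-empty value or retries run out.'''
    data = prompt.show()
    retries = 0
    while not data and retries < max_retries:
        print(warning_message)
        data = prompt.show()
        retries += 1
    if not data:
        raise MissingInput(failure_message)
    yield InputResult(data)


@contextmanager
def constrained_input_value(value_test_func, prompt, max_retries=1,
                            warning_message='', failure_message=''):
    '''Accept the prompt's value only if value_test_func(value) is truthy.'''
    data = prompt.show()
    retries = 0
    while (not data or not value_test_func(data)) and retries < max_retries:
        print(warning_message)
        data = prompt.show()
        retries += 1
    if not data or not value_test_func(data):
        raise MissingInput(failure_message)
    yield InputResult(data)


def required_input_format(regex, prompt, **kwargs):
    '''Format checking expressed as a constraint on the raw input string.'''
    return constrained_input_value(lambda value: re.compile(regex).match(value), prompt, **kwargs)
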
def get_couchbase_cluster_size(self):
    should_create = cli.InputPrompt('Create Couchbase cluster (Y/n)?', 'y').show()
    if should_create == 'n':
        return 0
    cluster_size = cli.InputPrompt('Please enter the desired Couchbase cluster size').show()
    return int(cluster_size)

def get_es_cluster_size(self):
    should_create = cli.InputPrompt('Create Elasticsearch cluster (Y/n)?', 'y').show()
    if should_create == 'n':
        return 0
    cluster_size = cli.InputPrompt('Please enter the desired Elasticsearch cluster size').show()
    return int(cluster_size)

def do_save(self, cmd_args):
    '''Saves all created Kinesis stream specs to the designated output .tf file.
    '''
    if not len(self.stream_specs):
        should_save = cli.InputPrompt('No Kinesis streams defined. Are you sure (Y/n)?', 'y').show()
        if should_save != 'y':
            print('\nCancelling save.\n')
            return

    if os.path.isfile(self.output_file):
        print('designated output file already exists.')
        return

    if not len(self.stream_specs):
        output_data = '# Intentionally empty file. No Kinesis streams defined for project %s.' % self.project_name
    else:
        j2env = jinja2.Environment()
        template_mgr = common.JinjaTemplateManager(j2env)
        streamdef_template = j2env.from_string(KINESIS_STREAM_TEMPLATE)
        output_data = streamdef_template.render(streams=self.stream_specs)

    with open(self.output_file, 'w') as f:
        f.write(output_data)

    print('\nSaved Terraform resources to output file %s.\n' % self.output_file)

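# KINESIS_STREAM_TEMPLATE is a module-level constant that is not shown in this excerpt.
# For illustration only, a Jinja2 template of roughly the following shape would satisfy the
# render(streams=...) call above; the StreamSpec attribute names used here (name,
# resource_name, shard_count, retention_period) are assumptions based on the constructor
# arguments passed in do_new, not the template actually shipped with the project.
EXAMPLE_KINESIS_STREAM_TEMPLATE = '''
{% for stream in streams %}
resource "aws_kinesis_stream" "{{ stream.resource_name }}" {
  name             = "{{ stream.name }}"
  shard_count      = {{ stream.shard_count }}
  retention_period = {{ stream.retention_period }}
}
{% endfor %}
'''
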
def configure_datasource(self, cmd_args):
    '''Configures a new lookup datasource.'''
    if not self.project_home_contains_python_source():
        print('No python source files in project home directory "%s".' % self.project_home)
        return None

    if not self.datasource_module:
        print('The lookup datasource module has not been configured for this project.')
        should_set = cli.InputPrompt('Set it now (Y/n)?', 'y').show()
        if should_set == 'y':
            self.do_globals({'update': True})
        if not self.datasource_module:
            print('project datasource module not updated.')
            return

    source_module_name = self.datasource_module
    print('scanning python module %s for datasources...' % source_module_name)
    class_options = generate_class_options_from_module(source_module_name)
    if not len(class_options):
        print('\nThe python module "%s" contains no eligible types. Please check your code.\n' % source_module_name)
        return

    class_name = cli.MenuPrompt('Datasource name', class_options).show()
    if not class_name:
        return
    print('\nUsing datasource class %s from module %s.\n' % (class_name, source_module_name))

    datasource_label = cli.InputPrompt('Enter an alias for this datasource').show()
    if not datasource_label:
        return

    should_create = cli.InputPrompt('Register datasource "%s" using class %s (Y/n)?' % (datasource_label, class_name), 'y').show()
    if should_create == 'y':
        return DatasourceSpec(name=datasource_label, klass=class_name)

def do_new(self, cmd_args):
    '''Usage: new (cluster | bucket | node)
    '''
    if cmd_args['cluster']:
        admin_username = self.get_admin_username()
        admin_password = self.get_admin_password()
        cluster_ram_quota = self.get_cluster_ram_quota()

        self.cluster_config['admin_username'] = admin_username
        self.cluster_config['admin_password'] = admin_password
        self.cluster_config['cluster_ram_quota'] = cluster_ram_quota
        print('\n+++ Couchbase cluster settings specified.\n')

        should_add = cli.InputPrompt('Add bucket to cluster spec (Y/n)?').show()
        if should_add == 'n':
            return
        while True:
            bucket_spec = self.create_bucket()
            if bucket_spec:
                self.cluster_config['buckets'].append(bucket_spec)
                print('\n+++ added bucket "%s" to cluster config\n' % bucket_spec.name)
            should_continue = cli.InputPrompt('Create another bucket (Y/n)?', 'y').show()
            if should_continue == 'n':
                break
        return

    elif cmd_args['bucket']:
        # create_bucket() is called inside the loop so that each pass adds a new bucket spec
        while True:
            bucket_spec = self.create_bucket()
            if bucket_spec:
                self.cluster_config['buckets'].append(bucket_spec)
                print('\n+++ added bucket "%s" to cluster config\n' % bucket_spec.name)
            should_continue = cli.InputPrompt('Create another bucket (Y/n)?', 'y').show()
            if should_continue == 'n':
                break

def get_new_project_setting(self, setting_name, current_value=None):
    value = None
    if setting_name == 'project_home':
        value = cli.InputPrompt(setting_name, current_value).show()
    elif setting_name == 'datasource_module':
        value = cli.MenuPrompt('Datasource module',
                               generate_module_options_from_directory(os.getcwd())).show()
    elif setting_name == 'service_module':
        value = cli.MenuPrompt('Service module',
                               generate_module_options_from_directory(os.getcwd())).show()
    return value

def do_save(self, cmd_args):
    '''Save all datamaps to a file.
    '''
    if not len(self.map_specs):
        print('\nNo datamaps registered. Create one or more datamaps first.\n')
        return

    output_filename = cli.InputPrompt('output filename').show()
    global_tbl = named_tuple_array_to_dict(self.globals, key_name='name', value_name='value')
    template_data = {
        'project_home': global_tbl['project_home'],
        'datasource_module': global_tbl['datasource_module'],
        'service_module': global_tbl['service_module'],
        'datasources': self.datasource_specs,
        'map_specs': self.map_specs
    }

    j2env = jinja2.Environment()
    template_mgr = common.JinjaTemplateManager(j2env)
    map_template = j2env.from_string(MAP_CONFIG_TEMPLATE)
    output_data = map_template.render(**template_data)

    should_save = cli.InputPrompt('Save this datamap configuration to %s (Y/n)?' % output_filename, 'y').show()
    if should_save == 'y':
        with open(output_filename, 'w') as f:
            f.write(output_data)
        print('\nSaved datamap config to output file %s.\n' % output_filename)

def do_delete(self, cmd_args):
    '''Usage: delete (cluster | bucket)
    '''
    if cmd_args['cluster']:
        print('### This action will delete your cluster config and all bucket configurations.')
        should_clear = cli.InputPrompt('Are you sure (y/N)?', 'n').show()
        if should_clear == 'y':
            self.cluster_config = {}

    elif cmd_args['bucket']:
        if not len(self.cluster_config['buckets']):
            print('\nNo bucket specs created.')
            return
        options = self.generate_bucket_options()
        bucket_name = cli.MenuPrompt('Bucket to delete', options).show()
        print('### This action will delete the bucket config "%s".' % bucket_name)
        should_delete = cli.InputPrompt('Are you sure (y/N)?', 'n').show()
        if should_delete == 'y':
            index = self.get_bucket_index_by_name(bucket_name)
            self.cluster_config['buckets'].pop(index)

def configure_datasource(self, cmd_args):
    '''Configures a new lookup datasource.'''
    if not self.project_home_contains_python_source():
        print('No python source files in project home directory "%s".' % self.globals['project_home'])
        return None

    # TODO: parameterize source dir for modules
    source_module = cli.MenuPrompt('datasource_module', generate_module_options(os.getcwd())).show()
    if source_module:
        source_name = cli.InputPrompt('Datasource name').show()
        if source_name:
            self.datasources[source_name] = source_module

def do_save(self, cmd_args):
    '''Usage:
        save [cluster | buckets | nodes] as (script | playbook)
        save config
    '''
    if not self.cluster_config:
        print('\n### No cluster configuration has been created.')
        return

    if not len(self.cluster_config['buckets']):
        print('A working couchbase cluster must have at least one bucket.')
        should_create = cli.InputPrompt('Create bucket (Y/n)?', 'y').show()
        if should_create == 'n':
            return
        bucket_spec = self.create_bucket()
        if bucket_spec:
            self.cluster_config['buckets'].append(bucket_spec)
        else:
            return

    j2env = jinja2.Environment()
    if cmd_args['config']:
        print('\n### Placeholder for save-to-YAML functionality\n')
        return

    if cmd_args['script']:
        template_string = None
        if cmd_args['cluster']:
            template_string = templates.COUCHBASE_SETUP_SHELL_SCRIPT
            script_filename = self.generate_cluster_script_filename()
        elif cmd_args['buckets']:
            template_string = '#!/bin/bash\n%s' % templates.COUCHBASE_BUCKETS_ONLY_SHELL_SCRIPT
            script_filename = self.generate_bucket_script_filename()

        script_template = j2env.from_string(template_string)
        with open(script_filename, 'w+') as f:
            f.write(script_template.render(cluster_spec=self.cluster_config))
        print('generated script %s.' % script_filename)

    if cmd_args['playbook']:
        print('\n### Placeholder for save-to-Ansible-playbook functionality\n')

def do_save(self, cmd_args):
    '''Saves the current configuration to the designated output file.
    '''
    if os.path.isfile(self.output_file):
        prompt_text = 'output file "%s" already exists. Overwrite (Y/n)?' % self.output_file
        should_overwrite = cli.InputPrompt(prompt_text, 'y').show()
        if should_overwrite == 'n':
            return

    j2env = jinja2.Environment()
    template_mgr = common.JinjaTemplateManager(j2env)
    var_template = j2env.from_string(PROJECT_VAR_TEMPLATE)
    output_data = var_template.render(project_vars=self.project_var_specs)

    with open(self.output_file, 'w') as f:
        f.write(output_data)

    print('\n+++ Terraform project vars written to %s.\n' % self.output_file)

    input_lines.append(result)

    if len(input_lines):
        return '\n'.join(input_lines)
    return None


parameter_create_sequence = {
    'marquee': '''
    +++ Add init parameter
    ''',
    'steps': [
        {
            'type': 'gate',
            'prompt': cli.InputPrompt('add an init parameter (Y/n)?', 'y'),
            'evaluator': lambda response: response.lower() == 'y',
        },
        {
            'type': 'static_prompt',
            'field_name': 'value',
            'prompt': cli.InputPrompt('parameter value'),
            'required': True
        }
    ]
}

service_object_param_sequence = {
    'marquee': '''
    +++ Add init parameter to service object
    ''',

def do_new(self, cmd_args):
    '''Create a new datamap or map-related object.

    Usage:
        new (map | datasource)
    '''
    if not self.project_home:
        print('\nTo create a new map or datasource, you must specify a valid project home directory.')
        should_update_globals = cli.InputPrompt('Run globals command now (Y/n)?', 'y').show()
        if should_update_globals == 'y':
            self.do_globals({'update': True})
            if not self.project_home:
                print('project home not updated.')
                return
            print('Returning to "new map" command...')
        else:
            return

    if not self.datasource_module:
        print('\nTo create a new map or datasource, you must specify the Python module containing your lookup-datasource class.')
        should_update_globals = cli.InputPrompt('Configure now (Y/n)?', 'y').show()
        if should_update_globals == 'y':
            self.do_globals({'update': True})
            if not self.datasource_module:
                print('datasource module not set.')
                return
            print('Returning to "new map" command...')
        else:
            return

    if cmd_args['datasource']:
        source_spec = self.configure_datasource(cmd_args)
        if source_spec:
            self.datasource_specs.append(source_spec)

    elif cmd_args['map']:
        if self.initial_datafile:
            print('\nThis mkmap CLI session was initialized with datafile %s.\n' % self.initial_datafile)
            should_generate_from_datafile = cli.InputPrompt('Use this datafile to generate a map (Y/n)?', 'y').show()
            if should_generate_from_datafile == 'y':
                separator_char = cli.InputPrompt('separator character used in this file').show()
                if not len(separator_char):  # in case whitespace input (which is valid) is stripped
                    print('Cannot create a CSV map without specifying a separator character.')
                    return
                print('Will use separator character: ' + separator_char)

                should_create_source = False
                if not self.get_current_project_setting('datasource_module'):
                    should_create_source = True
                    print('\nPlease set the lookup source to a valid Python module containing at least one Datasource class.\n')
                    datasource_module = cli.MenuPrompt(
                        'Datasource module',
                        generate_module_options_from_directory(os.getcwd())).show()
                    if not datasource_module:
                        print('Cannot continue without setting the project-wide datasource module.\n')
                        return
                    self.update_project_setting('datasource_module', datasource_module)

                if not len(self.datasource_specs):
                    create_response = cli.InputPrompt('No datasources registered. Register one now (Y/n)?', 'y').show()
                    if create_response == 'y':
                        should_create_source = True
                    else:
                        print('Cannot create a map without a datasource.')
                        return

                if should_create_source:
                    datasource_spec = self.configure_datasource(cmd_args)
                    if not datasource_spec:
                        print('Cannot create a map without a datasource.')
                        return
                    print('\nRegistered new datasource %s: %s.\n' % (datasource_spec.name, datasource_spec.klass))
                    self.datasource_specs.append(datasource_spec)

                datasource_options = self.generate_datasource_options()
                selected_source = cli.MenuPrompt('Select a datasource', datasource_options).show()
                if not selected_source:
                    print('Cannot create a map without a datasource.')
                    return
                print('\nSelected datasource "%s" for this datamap.\n' % selected_source)

                map_name = cli.InputPrompt('Please enter a name for this datamap').show()
                if not map_name:
                    return

                print('Scanning initial file with separator character: %s' % separator_char)
                mapspec = create_default_map_from_csv_file(self.initial_datafile,
                                                           map_name,
                                                           selected_source,
                                                           separator_char)
                if not mapspec:
                    return

                confirm = cli.InputPrompt('Create datamap "%s" (Y/n)?' % map_name, 'y').show()
                if confirm == 'y':
                    print('\nDatamap "%s" created.\n' % map_name)
                    self.map_specs.append(mapspec)
                    # set this to None so that we don't visit this path again
                    self.initial_datafile = None
            else:
                self.initial_datafile = None
        else:
            map_name = cli.InputPrompt('alias for this new map').show()
            if not map_name:
                return
            mapspec = self.configure_map(map_name)
            if not mapspec:
                return
            self.map_specs.append(mapspec)

def process_create_sequence(self, init_context=None, **sequence):
    print(sequence['marquee'])
    context = {}
    if init_context:
        context.update(init_context)
    if sequence.get('inputs'):
        context.update(sequence['inputs'])

    for step in sequence['steps']:
        step_type = step['type']
        if step_type not in UISEQUENCE_STEP_TYPES:
            raise Exception('found a UISequence step of an unsupported type [%s].' % step_type)

        if step_type == 'gate':
            # fields: type, prompt, evaluator
            print('placeholder for gate-type sequence step')

        elif step_type == 'sequence_select':
            prompt = step['prompt']
            user_choice = prompt.show()
            if not user_choice:
                return None
            if not step['conditions'].get(user_choice):
                raise Exception('No sequence registered in this step for user selection "%s".' % user_choice)
            next_sequence = step['conditions'][user_choice]['sequence']
            sequence_output = self.create(**next_sequence)
            if sequence_output:
                context[step['field_name']] = sequence_output

        elif step_type == 'dyn_prompt':
            prompt_create_func = step['prompt_creator']
            answer = prompt_create_func(self.configuration).show()
            context[step['field_name']] = answer

        elif step_type == 'static_prompt':
            # follow the prompt -- but override the one in the UI sequence
            # if one was passed to us in our constructor
            prompt = self.create_prompts.get(step['field_name'], step['prompt'])
            answer = prompt.show()
            if not answer and step['required'] == True:
                return None
            if not answer and 'default' in step:  # step is a dict, so test for the key (hasattr() would always fail)
                answer = step['default']
            context[step['field_name']] = answer

        elif step_type == 'static_sequence_trigger':
            next_sequence = step['sequence']
            is_repeating_step = step.get('repeat', False)
            while True:
                sequence_output = self.create(**next_sequence)
                if not sequence_output:
                    break
                if is_repeating_step:
                    if not context.get(step['field_name']):
                        context[step['field_name']] = []
                    context[step['field_name']].append(sequence_output)
                else:
                    context[step['field_name']] = sequence_output
                if is_repeating_step:
                    repeat_prompt = step.get('repeat_prompt',
                                             cli.InputPrompt('create another (Y/n)', 'y'))
                    should_repeat = repeat_prompt.show().lower()
                    if should_repeat == 'n':
                        break
                else:
                    break

        elif step_type == 'dyn_sequence_trigger':
            if 'selector_func' not in step:
                raise Exception('a step of type "dyn_sequence_trigger" must specify a selector_func field.')
            prompt = step['prompt_type']
            selector_func = step['selector_func']
            menudata = self.edit_menus.get(step['field_name']) or step.get('menu_data')
            if menudata is None:
                raise Exception('a step of type "dyn_sequence_trigger" must have a menu_data field OR pass it in the UISequence constructor.')
            # skip if there are no entries to select
            if not len(menudata):
                continue
            prompt_text = step['prompt_text'].format(**context)
            args = [prompt_text, menudata]
            # retrieve user input
            selection = prompt(*args).show()
            if not selection:
                continue
            # dynamic dispatch; use user input to determine the next sequence
            # (selector_func() MUST return a live UISequence dictionary)
            next_sequence = selector_func(selection)
            if not next_sequence:
                continue
            sequence_output = self.create(**next_sequence)
            if sequence_output:
                context[step['field_name']] = sequence_output

    return context

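# The UISequence dictionaries consumed by process_create_sequence (for example,
# parameter_create_sequence above) follow a simple shape: a 'marquee' banner plus a list of
# typed steps. The sketch below is a hypothetical sequence showing how 'static_prompt' steps
# map user answers into the returned context dict; the example_sequence name, the field
# names, and the `builder` object in the usage comment are illustrative assumptions, not
# part of the project.
example_sequence = {
    'marquee': '+++ Add tag',
    'steps': [
        {
            'type': 'static_prompt',              # shows the prompt once, stores the answer under field_name
            'field_name': 'tag_name',
            'prompt': cli.InputPrompt('tag name'),
            'required': True                      # an empty answer aborts the whole sequence (returns None)
        },
        {
            'type': 'static_prompt',
            'field_name': 'tag_value',
            'prompt': cli.InputPrompt('tag value', 'none'),
            'required': False,
            'default': 'none'                     # used when the prompt yields no answer
        }
    ]
}

# Assuming `builder` is an instance of the class that owns process_create_sequence:
#
#   context = builder.process_create_sequence(**example_sequence)
#   # context == {'tag_name': <user input>, 'tag_value': <user input or 'none'>}
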
def get_cluster_ram_quota(self):
    with required_input_format(INT_REGEX,
                               cli.InputPrompt('cluster RAM quota (GB)'),
                               warning_message='cluster RAM quota must be an integer',
                               failure_message='bad or missing cluster RAM quota') as input_result:
        return input_result.data

def get_admin_password(self):
    with mandatory_input(cli.InputPrompt('admin password'),
                         warning_message='you must provide an admin password',
                         failure_message='no admin password provided.') as input_result:
        return input_result.data

def get_admin_username(self):
    with mandatory_input(cli.InputPrompt('admin username'),
                         max_retries=1,
                         warning_message='you must provide the admin user name for the cluster',
                         failure_message='cannot create a cluster without a valid admin username') as input_result:
        return input_result.data