def init(blueprint_path, inputs, install_plugins_):
    """Initialize a local working environment for *blueprint_path*.

    Wipes any previous local storage, bootstraps the CLI configuration
    when missing, then parses and stores the blueprint.
    """
    # Start from a clean slate: drop any prior local storage.
    if os.path.isdir(_storage_dir()):
        shutil.rmtree(_storage_dir())
    if not utils.is_initialized():
        aria.init(reset_config=False, skip_logging=True)
    try:
        common.initialize_blueprint(
            blueprint_path=blueprint_path,
            name=_NAME,
            inputs=inputs,
            storage=_storage(),
            install_plugins=install_plugins_,
            resolver=utils.get_import_resolver()
        )
    except ImportError as e:
        # Missing plugin modules surface as ImportError — attach the
        # commands that would install them before re-raising.
        e.possible_solutions = [
            "Run 'aria init --install-plugins -p {0}'"
            .format(blueprint_path),
            "Run 'aria install-plugins -p {0}'"
            .format(blueprint_path)
        ]
        raise
    logger.get_logger().info(
        "Initiated {0}\nIf you make changes to the "
        "blueprint, "
        "Run 'aria init -p {0}' "
        "again to apply them"
        .format(blueprint_path))
def init(reset_config, skip_logging=False):
    """Initialize the current directory as a Cloudify working directory.

    Refuses to re-initialize unless *reset_config* is set, in which case
    the existing settings directory is removed first.
    """
    settings_file = os.path.join(
        utils.get_cwd(),
        constants.CLOUDIFY_WD_SETTINGS_DIRECTORY_NAME,
        constants.CLOUDIFY_WD_SETTINGS_FILE_NAME)
    if os.path.exists(settings_file):
        if not reset_config:
            msg = 'Current directory is already initialized'
            error = exceptions.CloudifyCliError(msg)
            error.possible_solutions = [
                "Run 'cfy init -r' to force re-initialization "
                "(might overwrite existing "
                "configuration files if exist) "
            ]
            raise error
        # Forced re-init: remove the whole settings directory.
        shutil.rmtree(os.path.join(
            utils.get_cwd(),
            constants.CLOUDIFY_WD_SETTINGS_DIRECTORY_NAME))
    settings = utils.CloudifyWorkingDirectorySettings()
    utils.dump_cloudify_working_dir_settings(settings)
    utils.dump_configuration_file()
    configure_loggers()
    if not skip_logging:
        get_logger().info('Initialization completed successfully')
def install_blueprint_plugins(blueprint_path):
    """Install the plugins a blueprint depends on into the current virtualenv.

    Collects the blueprint's plugin requirements and feeds them to pip.

    :param blueprint_path: path to the blueprint file
    :raises exceptions.AriaCliError: when not running inside a virtualenv
    """
    requirements = create_requirements(
        blueprint_path=blueprint_path
    )
    if not requirements:
        logger.get_logger().debug('There are no plugins to install..')
        return
    # validate we are inside a virtual env
    if not utils.is_virtual_env():
        raise exceptions.AriaCliError(
            'You must be running inside a '
            'virtualenv to install blueprint plugins')
    runner = futures.aria_side_utils.LocalCommandRunner(
        logger.get_logger())
    # dump the requirements to a file and let pip install it.
    # this will utilize pip's mechanism of cleanup in case an
    # installation fails.
    fd, tmp_path = tempfile.mkstemp(suffix='.txt', prefix='requirements_')
    # BUG FIX: mkstemp returns an *open* file descriptor; the original
    # discarded it with [1] and leaked it. Close it before writing by path.
    os.close(fd)
    utils.dump_to_file(collection=requirements, file_path=tmp_path)
    runner.run(command='pip install -r {0}'.format(tmp_path),
               stdout_pipe=False)
def instances(node_id):
    """Print the local environment's node instances as pretty JSON.

    When *node_id* is given, only instances belonging to that node are
    shown; an unknown id raises AriaCliError.
    """
    env = _load_env()
    node_instances = env.storage.get_node_instances()
    if node_id:
        node_instances = [
            instance for instance in node_instances
            if instance.node_id == node_id
        ]
        if not node_instances:
            raise exceptions.AriaCliError('No node with id: {0}'
                                          .format(node_id))
    logger.get_logger().info(
        json.dumps(node_instances, sort_keys=True, indent=2))
def recommend(possible_solutions):
    """Log each entry of *possible_solutions* as a suggested fix."""
    from aria_cli.logger import get_logger
    log = get_logger()
    log.info('Possible solutions:')
    for hint in possible_solutions:
        log.info(' - {0}'.format(hint))
def validate(blueprint_path=None):
    """Parse the blueprint at *blueprint_path* to validate it.

    :param blueprint_path: a path string or an open file object
    :return: the DSL parser's result
    :raises Exception: when the blueprint fails DSL parsing
    """
    try:
        # Accept either a path or an already-open file object
        # (py2 `file` builtin — this codebase targets Python 2).
        return futures.aria_dsl_parser.parse_from_path(
            str(blueprint_path)
            if not isinstance(blueprint_path, file)
            else blueprint_path.name)
    except futures.aria_dsl_exceptions.DSLParsingException as e:
        _logger = logger.get_logger()
        _logger.error(str(e))
        # BUG FIX: the original passed lazy-logging style args
        # ("... %s", str(e)) to Exception(), so the exception carried a
        # tuple instead of a formatted message. Format it explicitly.
        raise Exception("Failed to validate blueprint. {0}".format(e))
def install_blueprint_plugins(blueprint_path):
    """Install the plugins a blueprint depends on into the current virtualenv.

    :param blueprint_path: path to the blueprint file
    :raises exceptions.CloudifyCliError: when not running inside a virtualenv
    """
    requirements = create_requirements(blueprint_path=blueprint_path)
    if not requirements:
        get_logger().debug('There are no plugins to install..')
        return
    # validate we are inside a virtual env
    if not utils.is_virtual_env():
        raise exceptions.CloudifyCliError(
            'You must be running inside a '
            'virtualenv to install blueprint plugins')
    runner = LocalCommandRunner(get_logger())
    # dump the requirements to a file and let pip install it.
    # this will utilize pip's mechanism of cleanup in case an
    # installation fails.
    fd, tmp_path = tempfile.mkstemp(suffix='.txt', prefix='requirements_')
    # BUG FIX: mkstemp returns an *open* file descriptor; the original
    # discarded it with [1] and leaked it. Close it before writing by path.
    os.close(fd)
    utils.dump_to_file(collection=requirements, file_path=tmp_path)
    runner.run(command='pip install -r {0}'.format(tmp_path),
               stdout_pipe=False)
def validate(blueprint_path):
    """Validate a blueprint file by running it through the DSL parser."""
    logger = get_logger()
    logger.info(
        messages.VALIDATING_BLUEPRINT.format(blueprint_path.name))
    try:
        # resolver lookup stays inside the try, as in the original flow
        resolver = utils.get_import_resolver()
        parse_from_path(dsl_file_path=blueprint_path.name,
                        resolver=resolver)
    except DSLParsingException as ex:
        raise CloudifyCliError(
            messages.VALIDATING_BLUEPRINT_FAILED
            .format(blueprint_path.name, str(ex)))
    logger.info(messages.VALIDATING_BLUEPRINT_SUCCEEDED)
def instances(node_id):
    """Print the local environment's node instances as pretty JSON.

    When *node_id* is given, only instances belonging to that node are
    shown; an unknown id raises CloudifyCliError.
    """
    logger = get_logger()
    env = _load_env()
    all_instances = env.storage.get_node_instances()
    if node_id:
        all_instances = [
            instance for instance in all_instances
            if instance.node_id == node_id
        ]
        if not all_instances:
            raise exceptions.CloudifyCliError('No node with id: {0}'
                                              .format(node_id))
    logger.info(json.dumps(all_instances, sort_keys=True, indent=2))
def create_requirements(blueprint_path, output):
    """Generate a pip requirements listing for a blueprint's plugins.

    If *output* is given the listing is written there (the path must not
    already exist); otherwise each requirement is echoed to stdout so
    the output can be piped straight into pip.
    """
    if output and os.path.exists(output):
        raise exceptions.AriaCliError('output path already exists : {0}'
                                      .format(output))
    requirements = common.create_requirements(
        blueprint_path=blueprint_path
    )
    if not output:
        # we don't want to use just lgr
        # since we want this output to be prefix free.
        # this will make it possible to pipe the
        # output directly to pip
        for requirement in requirements:
            print(requirement)
            logger.get_logger().info(requirement)
        return
    utils.dump_to_file(requirements, output)
    logger.get_logger().info(
        'Requirements created successfully --> {0}'
        .format(output))
def init(blueprint_path, inputs, install_plugins_):
    """Initialize a local working environment for *blueprint_path*.

    Clears any previous local storage, bootstraps the CLI configuration
    when missing, then parses and stores the blueprint.
    """
    if os.path.isdir(_storage_dir()):
        shutil.rmtree(_storage_dir())
    if not utils.is_initialized():
        aria_init.init(reset_config=False, skip_logging=True)
    try:
        common.initialize_blueprint(
            blueprint_path=blueprint_path,
            name=_NAME,
            inputs=inputs,
            storage=_storage(),
            install_plugins=install_plugins_,
            resolver=utils.get_import_resolver()
        )
    except ImportError as e:
        # import error indicates
        # some plugin modules are missing
        # TODO - consider adding an error code to
        # TODO - all of our exceptions. so that we
        # TODO - easily identify them here
        e.possible_solutions = [
            "Run 'aria init --install-plugins -p {0}'"
            .format(blueprint_path),
            "Run 'aria install-plugins -p {0}'"
            .format(blueprint_path)
        ]
        raise
    get_logger().info("Initiated {0}\nIf you make changes to the "
                      "blueprint, "
                      "run 'aria init -p {0}' "
                      "again to apply them"
                      .format(blueprint_path))
def new_excepthook(tpe, value, tb):
    """Custom ``sys.excepthook`` that pretty-prints CLI errors.

    Shows the full traceback only in verbose mode, appends any
    server-side traceback, and recommends possible solutions when the
    exception carries them.
    """
    from aria_cli.logger import get_logger
    logger = get_logger()
    prefix = None
    server_traceback = None
    if issubclass(tpe, CloudifyClientError):
        # this means we made a server call and it failed.
        # we should include this information in the error
        server_traceback = value.server_traceback
        prefix = 'An error occurred on the server'
    # both suppressed and bootstrap errors skip the message line
    output_message = not issubclass(
        tpe, (SuppressedCloudifyCliError, CloudifyBootstrapError))
    if verbose_output:
        # print traceback if verbose
        s_traceback = StringIO.StringIO()
        traceback.print_exception(
            etype=tpe,
            value=value,
            tb=tb,
            file=s_traceback)
        logger.error(s_traceback.getvalue())
        if server_traceback:
            logger.error('Server Traceback (most recent call last):')
            # No need for print_tb since this exception
            # is already formatted by the server
            logger.error(server_traceback)
    if output_message and not verbose_output:
        # non-verbose: emit only the message — print_exception already
        # includes it when the traceback is shown
        if prefix:
            logger.error('{0}: {1}'.format(prefix, value))
        else:
            logger.error(value)
    if hasattr(value, 'possible_solutions'):
        recommend(getattr(value, 'possible_solutions'))
def execute(workflow_id, parameters, allow_custom_parameters, task_retries,
            task_retry_interval, task_thread_pool_size):
    """Run a workflow in the local environment and print its JSON result."""
    logger = get_logger()
    parameters = utils.inputs_to_dict(parameters, 'parameters')
    env = _load_env()
    result = env.execute(
        workflow=workflow_id,
        parameters=parameters,
        allow_custom_parameters=allow_custom_parameters,
        task_retries=task_retries,
        task_retry_interval=task_retry_interval,
        task_thread_pool_size=task_thread_pool_size)
    # workflows with no return value produce no output line
    if result is not None:
        logger.info(json.dumps(result, sort_keys=True, indent=2))
def new_excepthook(tpe, value, tb):
    """Custom ``sys.excepthook`` used by the CLI to report failures.

    Verbose mode prints the whole traceback (plus any server-side
    traceback); otherwise only the error message is logged. Possible
    solutions attached to the exception are recommended at the end.
    """
    from aria_cli.logger import get_logger
    logger = get_logger()
    prefix = None
    server_traceback = None
    if issubclass(tpe, CloudifyClientError):
        # this means we made a server call and it failed.
        # we should include this information in the error
        server_traceback = value.server_traceback
        prefix = 'An error occurred on the server'
    # suppressed and bootstrap errors both omit the message line
    output_message = not issubclass(
        tpe, (SuppressedCloudifyCliError, CloudifyBootstrapError))
    if verbose_output:
        # print traceback if verbose
        s_traceback = StringIO.StringIO()
        traceback.print_exception(etype=tpe,
                                  value=value,
                                  tb=tb,
                                  file=s_traceback)
        logger.error(s_traceback.getvalue())
        if server_traceback:
            logger.error('Server Traceback (most recent call last):')
            # No need for print_tb since this exception
            # is already formatted by the server
            logger.error(server_traceback)
    if output_message and not verbose_output:
        # non-verbose: just the message — print_exception already
        # covers it when the traceback is shown
        if prefix:
            logger.error('{0}: {1}'.format(prefix, value))
        else:
            logger.error(value)
    if hasattr(value, 'possible_solutions'):
        recommend(getattr(value, 'possible_solutions'))
def create_requirements(blueprint_path, output):
    """Generate a pip requirements listing for a blueprint's plugins.

    If *output* is given the listing is written there (the path must not
    already exist); otherwise each requirement is echoed to stdout so
    the output can be piped straight into pip.
    """
    logger = get_logger()
    if output and os.path.exists(output):
        raise exceptions.CloudifyCliError('output path already exists : {0}'
                                          .format(output))
    requirements = common.create_requirements(
        blueprint_path=blueprint_path
    )
    if not output:
        # we don't want to use just lgr
        # since we want this output to be prefix free.
        # this will make it possible to pipe the
        # output directly to pip
        for requirement in requirements:
            print(requirement)
            logger.info(requirement)
        return
    utils.dump_to_file(requirements, output)
    logger.info('Requirements created successfully --> {0}'
                .format(output))
def outputs():
    """Print the local deployment's outputs as pretty JSON."""
    logger = get_logger()
    env = _load_env()
    # env.outputs() may be None — fall back to an empty mapping
    logger.info(
        json.dumps(env.outputs() or {}, sort_keys=True, indent=2))