def install_blueprint_plugins(blueprint_path):
    """Install the plugins a blueprint requires into the current virtualenv.

    :param blueprint_path: path to the blueprint whose plugin requirements
        should be collected (via ``create_requirements``) and pip-installed.
    :raises CloudifyCliError: if not running inside a virtualenv.
    """
    requirements = create_requirements(blueprint_path=blueprint_path)
    if not requirements:
        get_logger().debug('There are no plugins to install..')
        return
    # validate we are inside a virtual env
    if not utils.is_virtual_env():
        raise exceptions.CloudifyCliError(
            'You must be running inside a '
            'virtualenv to install blueprint plugins')
    runner = LocalCommandRunner(get_logger())
    # dump the requirements to a file and let pip install it.
    # this utilizes pip's cleanup mechanism in case an installation fails.
    # NOTE: mkstemp (instead of an open NamedTemporaryFile with delete=True)
    # lets pip reopen the file by name on every platform — Windows forbids
    # a second open of a delete-on-close temporary file.
    fd, requirements_path = tempfile.mkstemp(suffix='.txt',
                                             prefix='requirements_')
    os.close(fd)  # don't leak the descriptor; we only need the path
    try:
        utils.dump_to_file(collection=requirements,
                           file_path=requirements_path)
        runner.run(command='pip install -r {0}'.format(requirements_path),
                   stdout_pipe=False)
    finally:
        # remove the temp file even if pip fails
        os.remove(requirements_path)
def install_blueprint_plugins(blueprint_path):
    """Install the plugins a blueprint requires into the current virtualenv.

    :param blueprint_path: path to the blueprint whose plugin requirements
        should be collected (via ``create_requirements``) and pip-installed.
    :raises CloudifyCliError: if not running inside a virtualenv.
    """
    requirements = create_requirements(blueprint_path=blueprint_path)
    if requirements:
        # validate we are inside a virtual env
        if not utils.is_virtual_env():
            raise exceptions.CloudifyCliError(
                'You must be running inside a '
                'virtualenv to install blueprint plugins')
        runner = LocalCommandRunner(get_logger())
        # dump the requirements to a file and let pip install it.
        # this utilizes pip's cleanup mechanism in case an
        # installation fails.
        # BUG FIX: the original discarded the file descriptor returned by
        # mkstemp ("mkstemp(...)[1]"), leaking an open fd per call.
        fd, tmp_path = tempfile.mkstemp(suffix='.txt', prefix='requirements_')
        os.close(fd)
        utils.dump_to_file(collection=requirements, file_path=tmp_path)
        # run pip through the current interpreter so the plugins land in
        # this exact environment
        command_parts = [sys.executable, '-m', 'pip', 'install',
                         '-r', tmp_path]
        runner.run(command=' '.join(command_parts), stdout_pipe=False)
    else:
        get_logger().debug('There are no plugins to install')
def init(reset_config, skip_logging=False):
    """Initialize a Cloudify working directory in the CWD.

    :param reset_config: when True, wipe and recreate an existing
        working-directory configuration instead of failing.
    :param skip_logging: when True, suppress the final success message.
    :raises CloudifyCliError: if already initialized and reset_config
        is False.
    """
    settings_dir = os.path.join(
        utils.get_cwd(), constants.CLOUDIFY_WD_SETTINGS_DIRECTORY_NAME)
    settings_file = os.path.join(
        settings_dir, constants.CLOUDIFY_WD_SETTINGS_FILE_NAME)
    if os.path.exists(settings_file):
        if not reset_config:
            error = exceptions.CloudifyCliError(
                'Current directory is already initialized')
            error.possible_solutions = [
                "Run 'cfy init -r' to force re-initialization "
                "(might overwrite existing "
                "configuration files if exist) "
            ]
            raise error
        # re-initialization requested: drop the old settings directory
        shutil.rmtree(settings_dir)
    utils.dump_cloudify_working_dir_settings(
        utils.CloudifyWorkingDirectorySettings())
    utils.dump_configuration_file()
    configure_loggers()
    if not skip_logging:
        get_logger().info('Initialization completed successfully')
def init(blueprint_path, inputs, install_plugins_):
    """Initialize a local blueprint environment, replacing any previous one.

    :param blueprint_path: path to the blueprint to initialize.
    :param inputs: blueprint inputs.
    :param install_plugins_: whether to install the blueprint's plugins.
    """
    storage_dir = _storage_dir()
    if os.path.isdir(storage_dir):
        shutil.rmtree(storage_dir)
    try:
        common.initialize_blueprint(blueprint_path=blueprint_path,
                                    name=_NAME,
                                    inputs=inputs,
                                    storage=_storage(),
                                    install_plugins=install_plugins_)
    except ImportError as e:
        # an ImportError here indicates some plugin modules are missing
        # TODO - consider adding an error code to
        # TODO - all of our exceptions. so that we
        # TODO - easily identify them here
        e.possible_solutions = [
            "Run 'cfy local init --install-plugins -p {0}'".format(
                blueprint_path),
            "Run 'cfy local install-plugins -p {0}'".format(blueprint_path)
        ]
        raise
    get_logger().info("Initiated {0}\nIf you make changes to the "
                      "blueprint, "
                      "run 'cfy local init -p {0}' "
                      "again to apply them".format(blueprint_path))
def init(blueprint_path, inputs, install_plugins_):
    """Initialize a local blueprint environment, replacing any previous one.

    :param blueprint_path: path to the blueprint to initialize.
    :param inputs: blueprint inputs.
    :param install_plugins_: whether to install the blueprint's plugins.
    """
    if os.path.isdir(_storage_dir()):
        shutil.rmtree(_storage_dir())
    try:
        common.initialize_blueprint(
            blueprint_path=blueprint_path,
            name=_NAME,
            inputs=inputs,
            storage=_storage(),
            install_plugins=install_plugins_,
        )
    except ImportError as e:
        # missing plugin modules surface as ImportError
        # TODO - consider adding an error code to
        # TODO - all of our exceptions. so that we
        # TODO - easily identify them here
        hint = "Run 'cfy local init --install-plugins -p {0}'"
        alt_hint = "Run 'cfy local install-plugins -p {0}'"
        e.possible_solutions = [hint.format(blueprint_path),
                                alt_hint.format(blueprint_path)]
        raise
    get_logger().info("Initiated {0}\nIf you make changes to the "
                      "blueprint, "
                      "run 'cfy local init -p {0}' "
                      "again to apply them".format(blueprint_path))
def init(blueprint_path, inputs, install_plugins):
    """Initialize a local blueprint environment (bootstrapping the CLI
    working directory first if needed).

    :param blueprint_path: path to the blueprint to initialize.
    :param inputs: blueprint inputs.
    :param install_plugins: whether to install the blueprint's plugins.
    """
    storage_dir = _storage_dir()
    if os.path.isdir(storage_dir):
        shutil.rmtree(storage_dir)
    # make sure the CLI itself is initialized before touching the blueprint
    if not utils.is_initialized():
        cfy_init(reset_config=False, skip_logging=True)
    try:
        common.initialize_blueprint(
            blueprint_path=blueprint_path,
            name=_NAME,
            inputs=inputs,
            storage=_storage(),
            install_plugins=install_plugins,
            resolver=utils.get_import_resolver(),
        )
    except ImportError as e:
        # missing plugin modules surface as ImportError
        # TODO - consider adding an error code to
        # TODO - all of our exceptions. so that we
        # TODO - easily identify them here
        e.possible_solutions = [
            "Run `cfy local init --install-plugins -p {0}`".format(
                blueprint_path),
            "Run `cfy local install-plugins -p {0}`".format(blueprint_path),
        ]
        raise
    get_logger().info("Initiated {0}\nIf you make changes to the "
                      "blueprint, run `cfy local init -p {0}` "
                      "again to apply them".format(blueprint_path))
def init(provider, reset_config):
    """Initialize a Cloudify working directory, delegating to the
    provider-specific flow when a provider is given.

    :param provider: optional provider name; if set, provider_init handles
        everything.
    :param reset_config: when True, wipe and recreate an existing
        configuration instead of failing.
    :raises CloudifyCliError: if already initialized and reset_config
        is False.
    """
    if provider is not None:
        return provider_common.provider_init(provider, reset_config)
    settings_dir = os.path.join(
        utils.get_cwd(), constants.CLOUDIFY_WD_SETTINGS_DIRECTORY_NAME)
    if os.path.exists(os.path.join(
            settings_dir, constants.CLOUDIFY_WD_SETTINGS_FILE_NAME)):
        if not reset_config:
            error = exceptions.CloudifyCliError(
                'Current directory is already initialized')
            error.possible_solutions = [
                "Run 'cfy init -r' to force re-initialization "
                "(might overwrite existing "
                "configuration files if exist) "
            ]
            raise error
        # re-initialization requested: drop the old settings directory
        shutil.rmtree(settings_dir)
    utils.dump_cloudify_working_dir_settings(
        utils.CloudifyWorkingDirectorySettings())
    utils.dump_configuration_file()
    configure_loggers()
    get_logger().info('Initialization completed successfully')
def validate(plugin_path):
    """Validate that a plugin archive is a tar.gz containing a readable
    package.json at its root directory.

    :param plugin_path: an open file object for the plugin archive
        (``.name`` gives its path).
    :raises CloudifyCliError: if the archive type is unsupported, or
        package.json is missing or unreadable.
    """
    logger = get_logger()
    logger.info(
        messages.VALIDATING_PLUGIN.format(plugin_path.name))
    if not tarfile.is_tarfile(plugin_path.name):
        raise CloudifyCliError('Archive {0} is of an unsupported archive type.'
                               ' Only tar.gz is allowed'
                               .format(plugin_path.name))
    with tarfile.open(plugin_path.name, 'r') as tar:
        tar_members = tar.getmembers()
        # package.json is expected inside the archive's top-level directory
        package_json_path = '{0}/package.json'.format(tar_members[0].name)
        try:
            package_member = tar.getmember(package_json_path)
        except KeyError:
            raise CloudifyCliError(messages.VALIDATING_PLUGIN_FAILED
                                   .format(plugin_path, 'package.json was not '
                                                        'found in archive'))
        try:
            tar.extractfile(package_member).read()
        # BUG FIX: the bare "except:" also swallowed SystemExit and
        # KeyboardInterrupt; Exception still covers every read failure
        # (including extractfile returning None for a non-file member).
        except Exception:
            raise CloudifyCliError(messages.VALIDATING_PLUGIN_FAILED
                                   .format(plugin_path, 'unable to read '
                                                        'package.json'))
    logger.info(messages.VALIDATING_PLUGIN_SUCCEEDED)
def get(node_instance_id):
    """Print a node instance's details and runtime properties.

    :param node_instance_id: id of the node instance to retrieve.
    :raises CloudifyCliError: if the node instance does not exist (the
        REST 404 is translated to a CLI error).
    """
    logger = get_logger()
    rest_host = utils.get_rest_host()
    client = utils.get_rest_client(rest_host)
    logger.info('Retrieving node instance with ID: \'{0}\' [manager={1}]'
                .format(node_instance_id, rest_host))
    try:
        node_instance = client.node_instances.get(node_instance_id)
    except CloudifyClientError as e:
        if e.status_code != 404:
            raise
        # BUG FIX: the original raised the message with a bare '{0}'
        # placeholder — .format(node_instance_id) was never called.
        raise CloudifyCliError(
            'Node instance {0} not found'.format(node_instance_id))
    columns = ['id', 'deployment_id', 'host_id', 'node_id', 'state']
    pt = utils.table(columns, [node_instance])
    pt.max_width = 50
    utils.print_table('Instance:', pt)
    # print node instance runtime properties
    logger.info('Instance runtime properties:')
    for prop_name, prop_value in utils.decode_dict(
            node_instance.runtime_properties).iteritems():
        logger.info('\t{0}: {1}'.format(prop_name, prop_value))
    logger.info('')
def use(management_ip, provider, rest_port):
    """Point the CLI at a management server, verifying it responds first.

    :param management_ip: address of the management server.
    :param provider: whether this working dir is provider-configured.
    :param rest_port: REST API port to use.
    :raises CloudifyCliError: if the server does not respond.
    """
    logger = get_logger()
    # first check this server is available.
    client = utils.get_rest_client(manager_ip=management_ip,
                                   rest_port=rest_port)
    try:
        status_result = client.manager.get_status()
    except CloudifyClientError:
        status_result = None
    if not status_result:
        raise CloudifyCliError(
            "Can't use management server {0}: No response.".format(
                management_ip))
    # check if cloudify was initialized.
    if not utils.is_initialized():
        utils.dump_cloudify_working_dir_settings()
        utils.dump_configuration_file()
    # best effort: fetch the provider context from the server
    try:
        response = utils.get_rest_client(management_ip).manager.get_context()
        provider_name = response['name']
        provider_context = response['context']
    except CloudifyClientError:
        provider_name = None
        provider_context = None
    with utils.update_wd_settings() as wd_settings:
        wd_settings.set_management_server(management_ip)
        wd_settings.set_provider_context(provider_context)
        wd_settings.set_provider(provider_name)
        wd_settings.set_rest_port(rest_port)
        wd_settings.set_is_provider_config(provider)
    logger.info('Using management server {0} with port {1}'.format(
        management_ip, rest_port))
def cancelByExecutionId(execution_id, force, wait, timeout=900):
    """Cancel an execution on the management server, optionally waiting
    for the cancellation to complete.

    :param execution_id: id of the execution to cancel.
    :param force: if True, issue a force-cancel.
    :param wait: if True, block until the cancellation finishes.
    :param timeout: seconds to wait for the cancellation.
    :raises SuppressedCloudifyCliError: if cancellation fails or times out.
    """
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    client = utils.get_rest_client(management_ip)
    logger.info('{0}Cancelling execution {1} on management server {2}'.format(
        'Force-' if force else '', execution_id, management_ip))
    execution = client.executions.cancel(execution_id, force)
    logger.info(
        'A cancel request for execution {0} has been sent to management '
        "server {1}. To track the execution's status, use:\n"
        "cfy executions get -e {0}".format(execution_id, management_ip))
    if wait:
        try:
            logger.info(
                'Waiting for execution {0} to finish being cancelled'.format(
                    execution_id))
            execution = wait_for_cancel(client, execution, timeout=timeout)
            if execution.error:
                # BUG FIX: the original used placeholder {2} with only two
                # format arguments, raising IndexError instead of logging.
                logger.info("Cancellation of execution '{0}' "
                            "failed. [error={1}]".format(
                                execution_id, execution.error))
                raise SuppressedCloudifyCliError()
            else:
                logger.info(
                    "Finished cancelling execution '{0}'".format(execution_id))
        # 'as e' replaces the Python-2-only 'except X, e' syntax
        except ExecutionTimeoutError as e:
            logger.info("Cancellation of execution '{0}' timed out. ".format(
                e.execution_id))
            raise SuppressedCloudifyCliError()
def cancelByExecutionId(execution_id, force, wait, timeout=900):
    """Cancel an execution on the management server, optionally waiting
    for the cancellation to complete.

    :param execution_id: id of the execution to cancel.
    :param force: if True, issue a force-cancel.
    :param wait: if True, block until the cancellation finishes.
    :param timeout: seconds to wait for the cancellation.
    :raises SuppressedCloudifyCliError: if cancellation fails or times out.
    """
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    client = utils.get_rest_client(management_ip)
    logger.info(
        '{0}Cancelling execution {1} on management server {2}'
        .format('Force-' if force else '', execution_id, management_ip))
    execution = client.executions.cancel(execution_id, force)
    logger.info(
        'A cancel request for execution {0} has been sent to management '
        "server {1}. To track the execution's status, use:\n"
        "cfy executions get -e {0}"
        .format(execution_id, management_ip))
    if wait:
        try:
            logger.info('Waiting for execution {0} to finish being cancelled'
                        .format(execution_id))
            execution = wait_for_cancel(client, execution, timeout=timeout)
            if execution.error:
                # BUG FIX: the original used placeholder {2} with only two
                # format arguments, raising IndexError instead of logging.
                logger.info("Cancellation of execution '{0}' "
                            "failed. [error={1}]"
                            .format(execution_id, execution.error))
                raise SuppressedCloudifyCliError()
            else:
                logger.info("Finished cancelling execution '{0}'"
                            .format(execution_id))
        # 'as e' replaces the Python-2-only 'except X, e' syntax
        except ExecutionTimeoutError as e:
            logger.info("Cancellation of execution '{0}' timed out. "
                        .format(e.execution_id))
            raise SuppressedCloudifyCliError()
def recover(force, task_retries, task_retry_interval, task_thread_pool_size):
    """Recover the manager deployment from its stored dump.

    :param force: must be True; guards against accidental recovery.
    :param task_retries: workflow task retry count.
    :param task_retry_interval: seconds between task retries.
    :param task_thread_pool_size: workflow thread pool size.
    :raises CloudifyCliError: if force was not given.
    :raises RuntimeError: if the manager private key cannot be located.
    """
    logger = get_logger()
    if not force:
        raise exceptions.CloudifyCliError(
            "This action requires additional "
            "confirmation. Add the '-f' or '--force' "
            "flags to your command if you are certain "
            "this command should be executed.")
    # without the env var override, the key must exist at the path stored
    # in the local context
    if CLOUDIFY_MANAGER_PK_PATH_ENVAR not in os.environ:
        key_file = os.path.expanduser(utils.get_management_key())
        if not os.path.isfile(key_file):
            raise RuntimeError("Can't find manager private key file. Set the "
                               "path to it using the {0} environment variable"
                               .format(CLOUDIFY_MANAGER_PK_PATH_ENVAR))
    logger.info('Recovering manager deployment')
    settings = utils.load_cloudify_working_dir_settings()
    provider_context = settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment'))
    bs.recover(task_retries=task_retries,
               task_retry_interval=task_retry_interval,
               task_thread_pool_size=task_thread_pool_size)
    logger.info('Successfully recovered manager deployment')
def ls(blueprint_id, sort_by=None, descending=False):
    """List deployments, optionally restricted to one blueprint.

    :param blueprint_id: if given, only this blueprint's deployments are
        shown.
    :param sort_by: field to sort by.
    :param descending: sort direction.
    """
    logger = get_logger()
    rest_host = utils.get_rest_host()
    client = utils.get_rest_client(rest_host)
    if blueprint_id:
        logger.info("Listing deployments for blueprint: "
                    "'{0}'... [manager={1}]"
                    .format(blueprint_id, rest_host))
    else:
        logger.info('Listing all deployments...[manager={0}]'
                    .format(rest_host))
    deployments = client.deployments.list(
        sort=sort_by, is_descending=descending)
    if blueprint_id:
        # the REST call returned everything; narrow down client-side
        deployments = filter(
            lambda d: d['blueprint_id'] == blueprint_id, deployments)
    utils.print_table(
        'Deployments:',
        utils.table(['id', 'blueprint_id', 'created_at', 'updated_at'],
                    deployments))
def publish_archive(archive_location, blueprint_filename, blueprint_id):
    """Upload a blueprint archive (URL or local path) to the manager.

    :param archive_location: URL or filesystem path of the archive.
    :param blueprint_filename: name of the blueprint file inside the archive.
    :param blueprint_id: id to assign to the published blueprint.
    :raises CloudifyCliError: on unsupported archive type or invalid
        location.
    """
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    # reject anything that is not one of the supported archive suffixes
    supported = any(
        archive_location.endswith('.{0}'.format(t))
        for t in SUPPORTED_ARCHIVE_TYPES)
    if not supported:
        raise CloudifyCliError(
            "Can't publish archive {0} - it's of an unsupported archive type. "
            "Supported archive types: {1}".format(archive_location,
                                                  SUPPORTED_ARCHIVE_TYPES))
    archive_location_type = 'URL'
    if not urlparse.urlparse(archive_location).scheme:
        # archive_location is not a URL - validate it's a file path
        if not os.path.isfile(archive_location):
            raise CloudifyCliError(
                "Can't publish archive {0} - it's not a valid URL nor a path "
                "to an archive file".format(archive_location))
        archive_location_type = 'path'
        archive_location = os.path.expanduser(archive_location)
    logger.info('Publishing blueprint archive from {0} {1} to management '
                'server {2}'.format(archive_location_type,
                                    archive_location,
                                    management_ip))
    client = utils.get_rest_client(management_ip)
    blueprint = client.blueprints.publish_archive(archive_location,
                                                  blueprint_id,
                                                  blueprint_filename)
    logger.info("Published blueprint archive, blueprint's id is: {0}".format(
        blueprint.id))
def recover(force, task_retries, task_retry_interval, task_thread_pool_size):
    """Recover the manager deployment, resolving the manager private key
    either from the environment or from the local working-dir context.

    :param force: must be True; guards against accidental recovery.
    :param task_retries: workflow task retry count.
    :param task_retry_interval: seconds between task retries.
    :param task_thread_pool_size: workflow thread pool size.
    :raises CloudifyCliError: if force was not given.
    :raises CloudifyValidationError: if the key file cannot be resolved.
    """
    logger = get_logger()
    if not force:
        msg = ("This action requires additional "
               "confirmation. Add the '-f' or '--force' "
               "flags to your command if you are certain "
               "this command should be executed.")
        raise exceptions.CloudifyCliError(msg)
    if CLOUDIFY_MANAGER_PK_PATH_ENVAR in os.environ:
        # user defined the key file path inside an env variable.
        # validate the existence of the keyfile because it will later be
        # used in a fabric task to ssh to the manager
        key_path = os.path.expanduser(os.environ[
            CLOUDIFY_MANAGER_PK_PATH_ENVAR])
        if not os.path.isfile(key_path):
            raise exceptions.CloudifyValidationError(
                "Cannot perform recovery. manager private key file "
                "defined in {0} environment variable does not "
                "exist: {1}".format(CLOUDIFY_MANAGER_PK_PATH_ENVAR, key_path)
            )
    else:
        # try retrieving the key file from the local context
        try:
            key_path = os.path.expanduser(utils.get_management_key())
            if not os.path.isfile(key_path):
                # manager key file path exists in context but does not exist
                # in the file system. fail now.
                raise exceptions.CloudifyValidationError(
                    "Cannot perform recovery. manager key file does not "
                    "exist: {0}. Set the manager private key path via the {1} "
                    "environment variable"
                    .format(key_path, CLOUDIFY_MANAGER_PK_PATH_ENVAR)
                )
            # in this case, the recovery is executed from the same directory
            # that the bootstrap was executed from. we should not have
            # problems
        except exceptions.CloudifyCliError:
            # manager key file path does not exist in the context. this
            # means the recovery is executed from a different directory than
            # the bootstrap one. in this case the user must set the
            # environment variable to continue.
            # BUG FIX: the original source had this string literal broken
            # across a physical line ("not found. <newline> Set "), which is
            # a syntax error; reconstructed as a single literal.
            raise exceptions.CloudifyValidationError(
                "Cannot perform recovery. manager key file not found. Set "
                "the manager private key path via the {0} environment "
                "variable".format(CLOUDIFY_MANAGER_PK_PATH_ENVAR)
            )
    logger.info('Recovering manager deployment')
    settings = utils.load_cloudify_working_dir_settings()
    provider_context = settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment'))
    bs.recover(task_retries=task_retries,
               task_retry_interval=task_retry_interval,
               task_thread_pool_size=task_thread_pool_size)
    logger.info('Successfully recovered manager deployment')
def use(management_ip, rest_port):
    """Point the CLI at a management server, verifying it responds first.

    :param management_ip: address of the management server.
    :param rest_port: REST API port to use.
    :raises CloudifyCliError: if the server does not respond.
    """
    logger = get_logger()
    # first check this server is available.
    client = utils.get_rest_client(
        manager_ip=management_ip, rest_port=rest_port)
    try:
        status_result = client.manager.get_status()
    except CloudifyClientError:
        status_result = None
    if not status_result:
        raise CloudifyCliError("Can't use management server {0}: No response."
                               .format(management_ip))
    # check if cloudify was initialized.
    if not utils.is_initialized():
        utils.dump_cloudify_working_dir_settings()
        utils.dump_configuration_file()
    # best effort: fetch the provider context from the server
    try:
        response = utils.get_rest_client(
            management_ip).manager.get_context()
        provider_context = response['context']
    except CloudifyClientError:
        provider_context = None
    with utils.update_wd_settings() as wd_settings:
        wd_settings.set_management_server(management_ip)
        wd_settings.set_provider_context(provider_context)
        wd_settings.set_rest_port(rest_port)
    logger.info('Using management server {0} with port {1}'
                .format(management_ip, rest_port))
    # delete the previous manager deployment if exists.
    bs.delete_workdir()
def get(execution_id):
    """Print an execution's details and parameters.

    :param execution_id: id of the execution to retrieve.
    :raises CloudifyCliError: if the execution does not exist (the REST
        404 is translated to a CLI error).
    """
    logger = get_logger()
    rest_host = utils.get_rest_host()
    client = utils.get_rest_client(rest_host)
    try:
        logger.info('Getting execution: '
                    '\'{0}\' [manager={1}]'
                    .format(execution_id, rest_host))
        execution = client.executions.get(execution_id)
    except exceptions.CloudifyClientError as e:
        if e.status_code != 404:
            raise
        raise CloudifyCliError('Execution {0} not found'.format(execution_id))
    columns = ['id', 'workflow_id', 'status', 'deployment_id',
               'created_at', 'error']
    pt = utils.table(columns, [execution])
    pt.max_width = 50
    utils.print_table('Executions:', pt)
    # print execution parameters
    logger.info('Execution Parameters:')
    for param_name, param_value in utils.decode_dict(
            execution.parameters).iteritems():
        logger.info('\t{0}: \t{1}'.format(param_name, param_value))
    cancelling_states = (execution.CANCELLING, execution.FORCE_CANCELLING)
    if execution.status in cancelling_states:
        logger.info(_STATUS_CANCELING_MESSAGE)
    logger.info('')
def _get_provider_name_and_context(mgmt_ip):
    """Resolve the provider name and context for a management server.

    Tries the server's own context first, then falls back to the local
    working-directory settings (only when they target the same server).

    :param mgmt_ip: address of the management server.
    :return: (provider_name, provider_context) tuple.
    :raises RuntimeError: if the context cannot be resolved either way.
    """
    logger = get_logger()
    # trying to retrieve provider context from server
    try:
        response = utils.get_rest_client(mgmt_ip).manager.get_context()
        return response['name'], response['context']
    except rest_exception.CloudifyClientError as e:
        logger.warn('Failed to get provider context from server: {0}'.format(
            str(e)))
    # using the local provider context instead (if it's relevant for the
    # target server)
    cosmo_wd_settings = utils.load_cloudify_working_dir_settings()
    local_context = cosmo_wd_settings.get_provider_context()
    if local_context:
        if cosmo_wd_settings.get_management_server() == mgmt_ip:
            return utils.get_provider(), local_context
        # the local provider context data is for a different server
        msg = "Failed to get provider context from target server"
    else:
        msg = "Provider context is not set in working directory settings (" \
              "The provider is used during the bootstrap and teardown " \
              "process. This probably means that the manager was started " \
              "manually, without the bootstrap command therefore calling " \
              "teardown is not supported)."
    raise RuntimeError(msg)
def __init__(self, provider_config, is_verbose_output):
    """Store the provider configuration and wire up logging verbosity.

    :param provider_config: provider configuration mapping.
    :param is_verbose_output: whether verbose CLI output is enabled.
    """
    cli.set_global_verbosity_level(is_verbose_output)
    self.provider_config = provider_config
    self.is_verbose_output = is_verbose_output
    # resources are torn down on failure unless this flag is flipped on
    self.keep_up_on_failure = False
    self.logger = get_logger()
def provider_init(provider, reset_config):
    """Initialize (or re-initialize) a provider-based working directory.

    :param provider: provider name to initialize with.
    :param reset_config: when True, reset an existing configuration
        instead of failing.
    :raises CloudifyCliError: if already initialized and reset_config
        is False.
    """
    logger = get_logger()
    provider_deprecation_notice()
    settings_file = os.path.join(
        utils.get_cwd(),
        constants.CLOUDIFY_WD_SETTINGS_DIRECTORY_NAME,
        constants.CLOUDIFY_WD_SETTINGS_FILE_NAME)
    if os.path.exists(settings_file):
        if not reset_config:
            raise exceptions.CloudifyCliError(
                'Current directory is already initialized. '
                'Use the "-r" flag to force '
                'reinitialization (might overwrite '
                'provider configuration files if exist).')
        # resetting provider configuration
        logger.debug('resetting configuration...')
        _provider_init(provider, reset_config)
        logger.info("Configuration reset complete")
        return
    logger.info("Initializing Cloudify")
    provider_module_name = _provider_init(provider, reset_config)
    settings = utils.CloudifyWorkingDirectorySettings()
    settings.set_provider(provider_module_name)
    settings.set_is_provider_config(True)
    utils.dump_cloudify_working_dir_settings(settings)
    utils.dump_configuration_file()
    logger.info("Initialization complete")
def publish_archive(archive_location, blueprint_filename, blueprint_id):
    """Upload a blueprint archive (URL or local path) to the manager.

    :param archive_location: URL or filesystem path of the archive.
    :param blueprint_filename: name of the blueprint file inside the archive.
    :param blueprint_id: id to assign to the published blueprint.
    :raises CloudifyCliError: on unsupported archive type or invalid
        location.
    """
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    # reject anything that is not one of the supported archive suffixes
    if not any(archive_location.endswith('.{0}'.format(t))
               for t in SUPPORTED_ARCHIVE_TYPES):
        raise CloudifyCliError(
            "Can't publish archive {0} - it's of an unsupported archive type. "
            "Supported archive types: {1}".format(archive_location,
                                                  SUPPORTED_ARCHIVE_TYPES))
    archive_location_type = 'URL'
    if not urlparse.urlparse(archive_location).scheme:
        # archive_location is not a URL - validate it's a file path
        if not os.path.isfile(archive_location):
            raise CloudifyCliError(
                "Can't publish archive {0} - it's not a valid URL nor a path "
                "to an archive file".format(archive_location))
        archive_location_type = 'path'
        archive_location = os.path.expanduser(archive_location)
    logger.info('Publishing blueprint archive from {0} {1} to management '
                'server {2}'
                .format(archive_location_type,
                        archive_location,
                        management_ip))
    client = utils.get_rest_client(management_ip)
    blueprint = client.blueprints.publish_archive(
        archive_location, blueprint_id, blueprint_filename)
    logger.info("Published blueprint archive, blueprint's id is: {0}"
                .format(blueprint.id))
def ls(execution_id, include_logs):
    """Fetch and print all events of an execution.

    :param execution_id: id of the execution whose events are listed.
    :param include_logs: whether log entries are included with the events.
    :raises CloudifyCliError: if the execution does not exist (the REST
        404 is translated to a CLI error).
    """
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    logger.info("Getting events from management server {0} for "
                "execution id '{1}' "
                "[include_logs={2}]".format(management_ip,
                                            execution_id,
                                            include_logs))
    client = utils.get_rest_client(management_ip)
    try:
        execution_events = ExecutionEventsFetcher(
            client, execution_id, include_logs=include_logs)
        events = execution_events.fetch_all()
        events_logger = get_events_logger()
        events_logger(events)
        logger.info('\nTotal events: {0}'.format(len(events)))
    # FIX: 'as e' replaces the Python-2-only 'except X, e' syntax so this
    # module can also be parsed by Python 3.
    except CloudifyClientError as e:
        if e.status_code != 404:
            raise
        msg = ("Execution '{0}' not found on management server"
               .format(execution_id))
        raise CloudifyCliError(msg)
def upload_plugin(plugin_path, rest_client, validate):
    """Validate a plugin archive and upload it via the REST client.

    :param plugin_path: an open file object for the plugin archive.
    :param rest_client: REST client connected to the manager.
    :param validate: callable that validates the archive before upload.
    """
    logger = get_logger()
    # validation raises before anything hits the wire
    validate(plugin_path)
    logger.info('Uploading plugin {0} to management server {1}'
                .format(plugin_path.name, rest_client.host))
    uploaded = rest_client.plugins.upload(plugin_path.name)
    logger.info("Plugin uploaded. The plugin's id is {0}".format(uploaded.id))
def ls(deployment_id, include_system_workflows, sort_by=None,
       descending=False):
    """List executions, optionally restricted to one deployment.

    :param deployment_id: if given, only this deployment's executions
        are listed.
    :param include_system_workflows: include system workflow executions.
    :param sort_by: field to sort by.
    :param descending: sort direction.
    :raises CloudifyCliError: if the deployment does not exist.
    """
    logger = get_logger()
    rest_host = utils.get_rest_host()
    client = utils.get_rest_client(rest_host)
    try:
        if deployment_id:
            logger.info('Listing executions for deployment: \'{0}\' '
                        '[manager={1}]'.format(deployment_id, rest_host))
        else:
            logger.info(
                'Listing all executions: [manager={0}]'.format(
                    rest_host))
        executions = client.executions.list(
            deployment_id=deployment_id,
            include_system_workflows=include_system_workflows,
            sort=sort_by,
            is_descending=descending)
    except exceptions.CloudifyClientError as e:
        if e.status_code != 404:
            raise
        raise CloudifyCliError('Deployment {0} does not exist'.format(
            deployment_id))
    utils.print_table(
        'Executions:',
        utils.table(['id', 'workflow_id', 'deployment_id', 'status',
                     'created_at'],
                    executions))
    # warn when anything listed is mid-cancellation
    if any(e.status in (e.CANCELLING, e.FORCE_CANCELLING)
           for e in executions):
        logger.info(_STATUS_CANCELING_MESSAGE)
def get(blueprint_id):
    """Print a blueprint's details, description, and its deployments.

    :param blueprint_id: id of the blueprint to retrieve.
    """
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    client = utils.get_rest_client(management_ip)
    logger.info('Getting blueprint: '
                '\'{0}\' [manager={1}]'
                .format(blueprint_id, management_ip))
    blueprint = client.blueprints.get(blueprint_id)
    deployments = client.deployments.list(_include=['id'],
                                          blueprint_id=blueprint_id)
    # synthesize a deployment-count column for the table
    blueprint['#deployments'] = len(deployments)
    pt = utils.table(
        ['id', 'main_file_name', 'created_at', 'updated_at', '#deployments'],
        [blueprint])
    pt.max_width = 50
    utils.print_table('Blueprint:', pt)
    logger.info('Description:')
    description = blueprint['description']
    logger.info('{0}\n'.format(description
                               if description is not None else ''))
    logger.info('Existing deployments:')
    logger.info('{0}\n'.format(json.dumps([d['id'] for d in deployments])))
def upload_plugin(plugin_path, management_ip, rest_client, validate):
    """Validate a plugin archive and upload it to the management server.

    :param plugin_path: an open file object for the plugin archive.
    :param management_ip: address of the management server (for logging).
    :param rest_client: REST client connected to the manager.
    :param validate: callable that validates the archive before upload.
    """
    logger = get_logger()
    # validation raises before anything hits the wire
    validate(plugin_path)
    logger.info(messages.UPLOADING_PLUGIN
                .format(plugin_path.name, management_ip))
    uploaded = rest_client.plugins.upload(plugin_path.name)
    logger.info(messages.UPLOADING_PLUGIN_SUCCEEDED.format(uploaded.id))
def delete(blueprint_id):
    """Delete a blueprint from the management server.

    :param blueprint_id: id of the blueprint to delete.
    """
    logger = get_logger()
    server_ip = utils.get_management_server_ip()
    logger.info('Deleting blueprint {0}...'.format(blueprint_id))
    rest = utils.get_rest_client(server_ip)
    rest.blueprints.delete(blueprint_id)
    logger.info('Blueprint deleted')
def install_blueprint_plugins(blueprint_path):
    """Install the plugins a blueprint requires into the current virtualenv.

    :param blueprint_path: path to the blueprint whose plugin requirements
        should be collected (via ``create_requirements``) and pip-installed.
    :raises CloudifyCliError: if not running inside a virtualenv.
    """
    requirements = create_requirements(blueprint_path=blueprint_path)
    # FIX (consistency with the other variants of this function): skip the
    # virtualenv check and the pip invocation entirely when the blueprint
    # has no plugin requirements.
    if not requirements:
        get_logger().debug('There are no plugins to install..')
        return
    # validate we are inside a virtual env
    if not utils.is_virtual_env():
        raise exceptions.CloudifyCliError(
            'You must be running inside a '
            'virtualenv to install blueprint plugins')
    runner = LocalCommandRunner(get_logger())
    # dump the requirements to a file and let pip install it.
    # this utilizes pip's cleanup mechanism in case an installation fails.
    # NOTE: mkstemp (instead of an open NamedTemporaryFile with delete=True)
    # lets pip reopen the file by name on every platform — Windows forbids
    # a second open of a delete-on-close temporary file.
    fd, requirements_path = tempfile.mkstemp(suffix='.txt',
                                             prefix='requirements_')
    os.close(fd)  # don't leak the descriptor; we only need the path
    try:
        utils.dump_to_file(collection=requirements,
                           file_path=requirements_path)
        runner.run(command='pip install -r {0}'.format(requirements_path),
                   stdout_pipe=False)
    finally:
        # remove the temp file even if pip fails
        os.remove(requirements_path)
def ls(sort_by=None, descending=False):
    """List all blueprints with their (truncated) descriptions.

    :param sort_by: field to sort by.
    :param descending: sort direction.
    """
    logger = get_logger()
    rest_host = utils.get_rest_host()
    client = utils.get_rest_client(rest_host)
    logger.info('Getting blueprints list... [manager={0}]'
                .format(rest_host))

    def trim_description(blueprint):
        # cap long descriptions so the table stays readable; None
        # becomes an empty string
        description = blueprint['description']
        if description is None:
            blueprint['description'] = ''
        elif len(description) >= DESCRIPTION_LIMIT:
            blueprint['description'] = '{0}..'.format(
                description[:DESCRIPTION_LIMIT - 2])
        return blueprint

    listing = client.blueprints.list(sort=sort_by, is_descending=descending)
    blueprints = [trim_description(b) for b in listing]
    utils.print_table(
        'Available blueprints:',
        utils.table(['id', 'description', 'main_file_name',
                     'created_at', 'updated_at'],
                    data=blueprints))
def get(blueprint_id):
    """Print a blueprint's details, description, and its deployments.

    :param blueprint_id: id of the blueprint to retrieve.
    """
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    client = utils.get_rest_client(management_ip)
    logger.info('Getting blueprint: '
                '\'{0}\' [manager={1}]'.format(blueprint_id, management_ip))
    blueprint = client.blueprints.get(blueprint_id)
    deployments = client.deployments.list(_include=['id'],
                                          blueprint_id=blueprint_id)
    # synthesize a deployment-count column for the table
    blueprint['#deployments'] = len(deployments)
    columns = ['id', 'main_file_name', 'created_at', 'updated_at',
               '#deployments']
    pt = utils.table(columns, [blueprint])
    pt.max_width = 50
    utils.print_table('Blueprint:', pt)
    logger.info('Description:')
    description = blueprint['description']
    logger.info('{0}\n'.format(description
                               if description is not None else ''))
    logger.info('Existing deployments:')
    logger.info('{0}\n'.format(json.dumps([d['id'] for d in deployments])))
def create(blueprint_id, deployment_id, inputs):
    """Create a new deployment from a blueprint.

    :param blueprint_id: id of the source blueprint.
    :param deployment_id: id to assign the new deployment.
    :param inputs: raw deployment inputs (parsed via inputs_to_dict).
    :raises SuppressedCloudifyCliError: if required inputs are missing
        or an unknown input was supplied.
    """
    logger = get_logger()
    rest_host = utils.get_rest_host()
    inputs = utils.inputs_to_dict(inputs, 'inputs')
    logger.info('Creating new deployment from blueprint {0} at '
                'management server {1}'
                .format(blueprint_id, rest_host))
    client = utils.get_rest_client(rest_host)
    try:
        deployment = client.deployments.create(blueprint_id,
                                               deployment_id,
                                               inputs=inputs)
    except MissingRequiredDeploymentInputError as e:
        # show the user which inputs the blueprint expects
        logger.info('Unable to create deployment. Not all '
                    'required inputs have been specified...')
        _print_deployment_inputs(client, blueprint_id)
        raise SuppressedCloudifyCliError(str(e))
    except UnknownDeploymentInputError as e:
        logger.info(
            'Unable to create deployment, an unknown input was specified...')
        _print_deployment_inputs(client, blueprint_id)
        raise SuppressedCloudifyCliError(str(e))
    logger.info("Deployment created. The deployment's id is {0}".format(
        deployment.id))
def download(blueprint_id, output):
    """Download a blueprint from the management server.

    :param blueprint_id: id of the blueprint to download.
    :param output: target path for the downloaded archive.
    """
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    logger.info('Downloading blueprint {0}...'.format(blueprint_id))
    client = utils.get_rest_client(management_ip)
    target_file = client.blueprints.download(blueprint_id, output)
    # BUG FIX: the original used placeholder {1} with a single format
    # argument, which raised IndexError instead of logging the path.
    logger.info('Blueprint downloaded as {0}'.format(target_file))
def provider_init(provider, reset_config):
    """Initialize (or re-initialize) a provider-based working directory.

    :param provider: provider name to initialize with.
    :param reset_config: when True, reset an existing configuration
        instead of failing.
    :raises CloudifyCliError: if already initialized and reset_config
        is False.
    """
    logger = get_logger()
    provider_deprecation_notice()
    already_initialized = os.path.exists(os.path.join(
        utils.get_cwd(),
        constants.CLOUDIFY_WD_SETTINGS_DIRECTORY_NAME,
        constants.CLOUDIFY_WD_SETTINGS_FILE_NAME))
    if already_initialized:
        if not reset_config:
            raise exceptions.CloudifyCliError(
                'Current directory is already initialized. '
                'Use the "-r" flag to force '
                'reinitialization (might overwrite '
                'provider configuration files if exist).')
        # resetting provider configuration
        logger.debug('resetting configuration...')
        _provider_init(provider, reset_config)
        logger.info("Configuration reset complete")
        return
    logger.info("Initializing Cloudify")
    provider_module_name = _provider_init(provider, reset_config)
    settings = utils.CloudifyWorkingDirectorySettings()
    settings.set_provider(provider_module_name)
    settings.set_is_provider_config(True)
    utils.dump_cloudify_working_dir_settings(settings)
    utils.dump_configuration_file()
    logger.info("Initialization complete")
def download(snapshot_id, output):
    """Download a snapshot from the management server.

    :param snapshot_id: id of the snapshot to download.
    :param output: target path for the downloaded snapshot.
    """
    logger = get_logger()
    server_ip = utils.get_management_server_ip()
    logger.info('Downloading snapshot {0}...'.format(snapshot_id))
    rest = utils.get_rest_client(server_ip)
    saved_path = rest.snapshots.download(snapshot_id, output)
    logger.info('Snapshot downloaded as {0}'.format(saved_path))
def ssh(ssh_plain_mode, ssh_command):
    """Open an SSH session (or run a single command) on the manager.

    :param ssh_plain_mode: when True, do not pass the manager key with -i.
    :param ssh_command: optional command to run instead of an interactive
        shell.
    :raises CloudifyCliError: if no ssh executable is available.
    """
    logger = get_logger()
    ssh_path = spawn.find_executable('ssh')
    logger.debug('SSH executable path: {0}'.format(ssh_path or 'Not found'))
    if not ssh_path:
        # pick the platform-appropriate "ssh not found" message
        if platform.system() == 'Windows':
            raise CloudifyCliError(messages.SSH_WIN_NOT_FOUND)
        raise CloudifyCliError(messages.SSH_LINUX_NOT_FOUND)
    command = [
        ssh_path,
        '{0}@{1}'.format(get_management_user(), get_management_server_ip())
    ]
    if get_global_verbosity():
        command.append('-v')
    if not ssh_plain_mode:
        command.extend(['-i', os.path.expanduser(get_management_key())])
    if ssh_command:
        command.extend(['--', ssh_command])
    logger.debug('executing command: {0}'.format(' '.join(command)))
    logger.info('Trying to connect...')
    from subprocess import call
    call(command)
def delete(snapshot_id):
    """Delete a snapshot from the management server.

    :param snapshot_id: id of the snapshot to delete.
    """
    logger = get_logger()
    server_ip = utils.get_management_server_ip()
    logger.info('Deleting snapshot {0}...'.format(snapshot_id))
    rest = utils.get_rest_client(server_ip)
    rest.snapshots.delete(snapshot_id)
    logger.info('Snapshot deleted successfully')
def status():
    """Query and print the status of all management services.

    :return: True if the manager responded, False otherwise.
    """
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    logger.info('Getting management services status... [ip={0}]'
                .format(management_ip))
    client = utils.get_rest_client(management_ip)
    try:
        status_result = client.manager.get_status()
    except UserUnauthorizedError:
        logger.info("Can't query management server status: User is "
                    "unauthorized")
        return False
    except CloudifyClientError:
        logger.info('REST service at management server '
                    '{0} is not responding!'
                    .format(management_ip))
        return False
    services = []
    for service in status_result['services']:
        # a service without instance data is reported as 'unknown'
        instances = service.get('instances')
        state = instances[0]['state'] if instances else 'unknown'
        services.append({
            'service': service['display_name'].ljust(30),
            'status': state
        })
    utils.print_table('Services:',
                      utils.table(['service', 'status'], data=services))
    return True
def download(plugin_id, output):
    """Download a plugin archive from the management server.

    :param plugin_id: id of the plugin to download.
    :param output: target path for the downloaded plugin.
    """
    logger = get_logger()
    rest_host = utils.get_rest_host()
    logger.info(
        "Downloading plugin '{0}' from management server {1}...".format(
            plugin_id, rest_host))
    rest = utils.get_rest_client(rest_host)
    saved_path = rest.plugins.download(plugin_id, output)
    logger.info("Plugin downloaded as {0}".format(saved_path))
def _upload_dsl_resources(dsl_resources, temp_dir, fabric_env, retries,
                          wait_interval, timeout):
    """
    Uploads dsl resources to the manager.

    :param dsl_resources: all of the dsl_resources; each entry is a dict
        expected to carry 'source_path' and 'destination_path' keys.
    :param temp_dir: the dir to push the resources to.
    :param fabric_env: fabric env in order to upload the dsl_resources.
    :param retries: number of retries per resource download.
    :param wait_interval: interval between download retries.
    :param timeout: timeout for uploading a dsl resource.
    :raises CloudifyBootstrapError: if a resource entry lacks its source
        or destination path.
    :return:
    """
    logger = get_logger()
    remote_plugins_folder = '/opt/manager/resources/'

    # Retried on RecoverableError only; wait_fixed is in milliseconds.
    @retry(wait_fixed=wait_interval*1000,
           stop_func=partial(_stop_retries, retries, wait_interval),
           retry_on_exception=lambda e: isinstance(e, RecoverableError))
    def upload_dsl_resource(local_path, remote_path):
        remote_dir = os.path.dirname(remote_path)
        logger.info('Uploading resources from {0} to {1}'
                    .format(local_path, remote_dir))
        # mkdir -p so nested destination directories are created; the put
        # uses sudo since the target tree is not writable by the ssh user.
        fabric.run('sudo mkdir -p {0}'.format(remote_dir))
        fabric.put(local_path, remote_path, use_sudo=True)

    for dsl_resource in dsl_resources:
        source_plugin_yaml_path = dsl_resource.get('source_path')
        destination_plugin_yaml_path = dsl_resource.get('destination_path')

        if not source_plugin_yaml_path or not destination_plugin_yaml_path:
            # NOTE(review): a falsy-but-not-None path (e.g. '') triggers
            # this branch yet is not appended below, producing an empty
            # field list in the message — confirm whether '' is possible.
            missing_fields = []
            if source_plugin_yaml_path is None:
                missing_fields.append('source_path')
            if destination_plugin_yaml_path is None:
                missing_fields.append('destination_path')

            raise CloudifyBootstrapError(
                'The following fields are missing: {0}.'
                .format(','.join(missing_fields)))

        # Strip a leading slash so the join with remote_plugins_folder
        # (which already ends in '/') doesn't double the separator.
        if destination_plugin_yaml_path.startswith('/'):
            destination_plugin_yaml_path = destination_plugin_yaml_path[1:]

        # Fetch the resource locally first (with its own retry policy).
        local_plugin_yaml_path = \
            _get_resource_into_dir(temp_dir, source_plugin_yaml_path,
                                   retries, wait_interval, timeout)

        # Deep-copy so mutating abort_exception doesn't leak into the
        # caller's fabric_env; aborts surface as RecoverableError, which
        # feeds the retry predicate above.
        fab_env = copy.deepcopy(fabric_env)
        fab_env['abort_exception'] = RecoverableError
        with fabric.settings(**fab_env):
            remote_plugin_yaml_file_path = \
                "{0}{1}".format(remote_plugins_folder,
                                destination_plugin_yaml_path)
            upload_dsl_resource(local_path=local_plugin_yaml_path,
                                remote_path=remote_plugin_yaml_file_path)
def recommend(possible_solutions):
    """Log every suggested remedy under a 'Possible solutions:' header."""
    from cloudify_cli.logger import get_logger
    log = get_logger()
    log.info('Possible solutions:')
    for suggestion in possible_solutions:
        log.info(' - {0}'.format(suggestion))
def upload(snapshot_path, snapshot_id):
    """Upload a snapshot archive to the manager.

    :param snapshot_path: open file object (its ``.name`` is used) of the
        snapshot archive.
    :param snapshot_id: id to store the snapshot under.
    """
    log = get_logger()
    mgmt_ip = utils.get_management_server_ip()
    log.info("Uploading snapshot '{0}' to management server {1}".format(
        snapshot_path.name, mgmt_ip))
    rest_client = utils.get_rest_client(mgmt_ip)
    uploaded = rest_client.snapshots.upload(snapshot_path.name, snapshot_id)
    log.info("Uploaded snapshot with id: {0}".format(uploaded.id))
def delete(deployment_id, ignore_live_nodes):
    """Delete a deployment from the manager.

    :param deployment_id: id of the deployment to remove.
    :param ignore_live_nodes: delete even if live nodes still exist.
    """
    log = get_logger()
    mgmt_ip = utils.get_management_server_ip()
    log.info('Deleting deployment {0} from management server {1}'
             .format(deployment_id, mgmt_ip))
    rest_client = utils.get_rest_client(mgmt_ip)
    rest_client.deployments.delete(deployment_id, ignore_live_nodes)
    log.info("Deleted deployment successfully")
def delete(blueprint_id):
    """Delete a blueprint from the manager."""
    log = get_logger()
    mgmt_ip = utils.get_management_server_ip()
    log.info('Deleting blueprint {0} from management server {1}'
             .format(blueprint_id, mgmt_ip))
    rest_client = utils.get_rest_client(mgmt_ip)
    rest_client.blueprints.delete(blueprint_id)
    log.info('Deleted blueprint successfully')
def delete(snapshot_id):
    """Delete a snapshot from the manager."""
    log = get_logger()
    mgmt_ip = utils.get_management_server_ip()
    log.info("Deleting snapshot '{0}' from management server {1}".format(
        snapshot_id, mgmt_ip))
    rest_client = utils.get_rest_client(mgmt_ip)
    rest_client.snapshots.delete(snapshot_id)
    log.info('Deleted snapshot successfully')
def delete(blueprint_id):
    """Delete a blueprint from the manager."""
    log = get_logger()
    mgmt_ip = utils.get_management_server_ip()
    log.info('Deleting blueprint {0} from management server {1}'.format(
        blueprint_id, mgmt_ip))
    rest_client = utils.get_rest_client(mgmt_ip)
    rest_client.blueprints.delete(blueprint_id)
    log.info('Deleted blueprint successfully')
def delete(plugin_id):
    """Delete a plugin from the manager."""
    log = get_logger()
    mgmt_ip = utils.get_management_server_ip()
    rest_client = utils.get_rest_client(mgmt_ip)
    log.info(messages.PLUGIN_DELETE.format(plugin_id, mgmt_ip))
    rest_client.plugins.delete(plugin_id)
    log.info(messages.PLUGIN_DELETE_SUCCEEDED.format(plugin_id))
def ls():
    """Print a table of all snapshots stored on the manager."""
    log = get_logger()
    mgmt_ip = utils.get_management_server_ip()
    rest_client = utils.get_rest_client(mgmt_ip)
    log.info(
        'Retrieving snapshots list... [manager={0}]'.format(mgmt_ip))
    snapshots_table = utils.table(['id', 'created_at', 'status', 'error'],
                                  data=rest_client.snapshots.list())
    print_table('Snapshots:', snapshots_table)
def create(snapshot_id, include_metrics, exclude_credentials):
    """Start a snapshot-creation workflow on the manager.

    :param snapshot_id: id for the new snapshot.
    :param include_metrics: include metrics data in the snapshot.
    :param exclude_credentials: when truthy, credentials are left out
        (passed to the API inverted, as include_credentials).
    """
    log = get_logger()
    mgmt_ip = utils.get_management_server_ip()
    log.info("Creating snapshot '{0}' to management server {1}".format(
        snapshot_id, mgmt_ip))
    rest_client = utils.get_rest_client(mgmt_ip)
    execution = rest_client.snapshots.create(snapshot_id,
                                             include_metrics,
                                             not exclude_credentials)
    log.info('Started workflow\'s execution id: {0}'.format(execution.id))
def restore(snapshot_id, without_deployments_envs, force):
    """Start a snapshot-restore workflow on the manager.

    :param snapshot_id: id of the snapshot to restore.
    :param without_deployments_envs: when truthy, skip recreating
        deployment environments (passed to the API inverted).
    :param force: force restore even on a non-clean manager.
    """
    log = get_logger()
    mgmt_ip = utils.get_management_server_ip()
    log.info("Restoring snapshot '{0}' at management server {1}".format(
        snapshot_id, mgmt_ip))
    rest_client = utils.get_rest_client(mgmt_ip)
    execution = rest_client.snapshots.restore(snapshot_id,
                                              not without_deployments_envs,
                                              force)
    log.info('Started workflow\'s execution id: {0}'.format(execution.id))
def download(plugin_id, output):
    """Download a plugin archive from the manager.

    :param plugin_id: id of the plugin to download.
    :param output: destination path for the archive.
    """
    log = get_logger()
    mgmt_ip = utils.get_management_server_ip()
    log.info(messages.DOWNLOADING_PLUGIN.format(plugin_id))
    rest_client = utils.get_rest_client(mgmt_ip)
    saved_to = rest_client.plugins.download(plugin_id, output)
    log.info(messages.DOWNLOADING_PLUGIN_SUCCEEDED.format(plugin_id,
                                                          saved_to))
def download(blueprint_id, output):
    """Download a blueprint archive from the manager.

    :param blueprint_id: id of the blueprint to download.
    :param output: destination path for the archive.
    """
    log = get_logger()
    mgmt_ip = utils.get_management_server_ip()
    log.info(messages.DOWNLOADING_BLUEPRINT.format(blueprint_id))
    rest_client = utils.get_rest_client(mgmt_ip)
    saved_to = rest_client.blueprints.download(blueprint_id, output)
    log.info(
        messages.DOWNLOADING_BLUEPRINT_SUCCEEDED.format(
            blueprint_id, saved_to))
def _update_local_provider_context(management_ip):
    """Refresh the locally cached provider context from the manager.

    Best-effort: delegates to ``use`` and only logs a warning on failure,
    since a stale context should not abort the surrounding operation.

    :param management_ip: IP of the management server to query.
    """
    logger = get_logger()
    try:
        use(management_ip, utils.get_rest_port())
    except Exception as e:
        # Fix: was `except BaseException`, which also swallowed
        # KeyboardInterrupt and SystemExit; `Exception` keeps the
        # best-effort behavior for real errors while letting the
        # process be interrupted or exited cleanly.
        logger.warning('Failed retrieving provider context: {0}. This '
                       'may cause a leaking management server '
                       'in case it has gone through a '
                       'recovery process'.format(str(e)))
def download(snapshot_id, output):
    """Download a snapshot archive from the manager.

    :param snapshot_id: id of the snapshot to download.
    :param output: destination path for the archive.
    """
    log = get_logger()
    mgmt_ip = utils.get_management_server_ip()
    log.info("Downloading snapshot '{0}'... [manager={1}]".format(
        snapshot_id, mgmt_ip))
    rest_client = utils.get_rest_client(mgmt_ip)
    saved_to = rest_client.snapshots.download(snapshot_id, output)
    log.info(
        "Snapshot '{0}' has been downloaded successfully as '{1}'".format(
            snapshot_id, saved_to))
def get(plugin_id):
    """Print a one-row table describing a single plugin."""
    log = get_logger()
    mgmt_ip = utils.get_management_server_ip()
    rest_client = utils.get_rest_client(mgmt_ip)
    log.info(messages.PLUGINS_GET.format(plugin_id, mgmt_ip))
    # `fields` is the module-level column list shared with ls().
    plugin = rest_client.plugins.get(plugin_id, _include=fields)
    plugin_table = utils.table(fields, data=[plugin])
    print_table('Plugin:', plugin_table)
def ls():
    """Print a table of all plugins stored on the manager."""
    log = get_logger()
    mgmt_ip = utils.get_management_server_ip()
    rest_client = utils.get_rest_client(mgmt_ip)
    log.info(messages.PLUGINS_LIST.format(mgmt_ip))
    # `fields` is the module-level column list shared with get().
    all_plugins = rest_client.plugins.list(_include=fields)
    plugins_table = utils.table(fields, data=all_plugins)
    print_table('Plugins:', plugins_table)