def init(blueprint_path, inputs, install_plugins):
    """Initialize a local working environment for ``blueprint_path``.

    Removes any previous local storage, runs the CLI init if the
    environment is not yet initialized, and compiles the blueprint into
    local storage. An ``ImportError`` (typically missing plugin modules)
    is annotated with actionable suggestions and re-raised.
    """
    storage_dir = _storage_dir()
    if os.path.isdir(storage_dir):
        shutil.rmtree(storage_dir)
    if not utils.is_initialized():
        cfy_init(reset_config=False, skip_logging=True)
    try:
        common.initialize_blueprint(
            blueprint_path=blueprint_path,
            name=_NAME,
            inputs=inputs,
            storage=_storage(),
            install_plugins=install_plugins,
            resolver=utils.get_import_resolver())
    except ImportError as err:
        # An ImportError here usually indicates missing plugin modules.
        # TODO - consider adding an error code to all of our exceptions
        # TODO - so that we can easily identify them here.
        err.possible_solutions = [
            template.format(blueprint_path)
            for template in ("Run `cfy local init --install-plugins -p {0}`",
                             "Run `cfy local install-plugins -p {0}`")
        ]
        raise
    get_logger().info(
        "Initiated {0}\nIf you make changes to the blueprint, "
        "run `cfy local init -p {0}` again to apply them".format(
            blueprint_path))
def init(blueprint_path, inputs, install_plugins_):
    """Set up a fresh local environment for ``blueprint_path``.

    Any previous local storage is wiped before the blueprint is compiled
    into storage. On ``ImportError`` (typically missing plugin modules)
    the exception gains ``possible_solutions`` hints and is re-raised.
    """
    if os.path.isdir(_storage_dir()):
        shutil.rmtree(_storage_dir())
    try:
        common.initialize_blueprint(blueprint_path=blueprint_path,
                                    name=_NAME,
                                    inputs=inputs,
                                    storage=_storage(),
                                    install_plugins=install_plugins_)
    except ImportError as import_error:
        # Missing plugin modules surface as ImportError.
        # TODO - consider adding an error code to all of our exceptions
        # TODO - so that we can easily identify them here.
        import_error.possible_solutions = [
            "Run 'cfy local init --install-plugins -p {0}'".format(
                blueprint_path),
            "Run 'cfy local install-plugins -p {0}'".format(blueprint_path),
        ]
        raise
    message = ("Initiated {0}\nIf you make changes to the blueprint, "
               "run 'cfy local init -p {0}' again to apply them")
    get_logger().info(message.format(blueprint_path))
def init(blueprint_path, inputs, install_plugins_):
    """Initialize local blueprint storage for ``blueprint_path``.

    Deletes stale local storage, then compiles the blueprint. If plugin
    modules are missing (``ImportError``), the exception is annotated
    with remediation commands and re-raised unchanged.
    """
    storage_dir = _storage_dir()
    if os.path.isdir(storage_dir):
        shutil.rmtree(storage_dir)
    try:
        common.initialize_blueprint(blueprint_path=blueprint_path,
                                    name=_NAME,
                                    inputs=inputs,
                                    storage=_storage(),
                                    install_plugins=install_plugins_)
    except ImportError as exc:
        # Import error indicates some plugin modules are missing.
        # TODO - consider adding an error code to all of our exceptions
        # TODO - so that we can easily identify them here.
        fixes = []
        for command in ("Run 'cfy local init --install-plugins -p {0}'",
                        "Run 'cfy local install-plugins -p {0}'"):
            fixes.append(command.format(blueprint_path))
        exc.possible_solutions = fixes
        raise
    get_logger().info(
        "Initiated {0}\nIf you make changes to the blueprint, "
        "run 'cfy local init -p {0}' again to apply them".format(
            blueprint_path))
def bootstrap_validation(blueprint_path, name='manager', inputs=None,
                         task_retries=5, task_retry_interval=30,
                         task_thread_pool_size=1, install_plugins=False):
    """Run the blueprint's creation-validation operation locally.

    Initializes the blueprint without persistent storage and executes the
    ``execute_operation`` workflow for
    ``cloudify.interfaces.validation.creation``. An ``ImportError`` is
    annotated with install-plugins hints and re-raised.
    """
    try:
        env = common.initialize_blueprint(blueprint_path,
                                          name=name,
                                          inputs=inputs,
                                          storage=None,
                                          install_plugins=install_plugins)
    except ImportError as import_error:
        # Missing plugin modules — tell the user how to install them.
        import_error.possible_solutions = [
            "Run 'cfy local install-plugins -p {0}'".format(blueprint_path),
            "Run 'cfy bootstrap --install-plugins -p {0}'".format(
                blueprint_path),
        ]
        raise
    validation_params = {
        'operation': 'cloudify.interfaces.validation.creation'}
    env.execute(workflow='execute_operation',
                parameters=validation_params,
                task_retries=task_retries,
                task_retry_interval=task_retry_interval,
                task_thread_pool_size=task_thread_pool_size)
def bootstrap_validation(blueprint_path, name='manager', inputs=None,
                         task_retries=5, task_retry_interval=30,
                         task_thread_pool_size=1, install_plugins=False,
                         resolver=None):
    """Validate a manager blueprint by running its creation validations.

    The blueprint is initialized with no storage (throw-away env) using
    the given import ``resolver``, then the
    ``cloudify.interfaces.validation.creation`` operation is executed on
    all nodes. ``ImportError`` gains remediation hints and is re-raised.
    """
    try:
        env = common.initialize_blueprint(blueprint_path,
                                          name=name,
                                          inputs=inputs,
                                          storage=None,
                                          install_plugins=install_plugins,
                                          resolver=resolver)
    except ImportError as exc:
        # Plugin modules could not be imported; suggest install commands.
        exc.possible_solutions = [
            template.format(blueprint_path)
            for template in (
                "Run 'cfy local install-plugins -p {0}'",
                "Run 'cfy bootstrap --install-plugins -p {0}'")
        ]
        raise
    env.execute(
        workflow='execute_operation',
        parameters={'operation': 'cloudify.interfaces.validation.creation'},
        task_retries=task_retries,
        task_retry_interval=task_retry_interval,
        task_thread_pool_size=task_thread_pool_size)
def bootstrap(blueprint_path, name='manager', inputs=None, task_retries=5,
              task_retry_interval=30, task_thread_pool_size=1,
              install_plugins=False):
    """Bootstrap a manager by running the blueprint's ``install`` workflow.

    Initializes the blueprint into file-based storage under the CLI work
    dir, executes ``install``, then reads the manager's connection details
    out of the manager node instance's runtime properties.

    :param blueprint_path: path to the manager blueprint.
    :param name: name for the initialized local environment.
    :param inputs: blueprint inputs.
    :param task_retries: retries per workflow task.
    :param task_retry_interval: seconds between task retries.
    :param task_thread_pool_size: workflow task thread pool size.
    :return: dict with provider context and manager connection details.
    :raises ImportError: if plugin modules are missing; the exception is
        annotated with ``possible_solutions`` before being re-raised.
    """
    # File-based storage rooted at the CLI work dir (see _workdir()).
    storage = local.FileStorage(storage_dir=_workdir())
    try:
        env = common.initialize_blueprint(
            blueprint_path,
            name=name,
            inputs=inputs,
            storage=storage,
            install_plugins=install_plugins
        )
    except ImportError as e:
        # Missing plugin modules — attach actionable hints for the user.
        e.possible_solutions = [
            "Run 'cfy local install-plugins -p {0}'"
            .format(blueprint_path),
            "Run 'cfy bootstrap --install-plugins -p {0}'"
            .format(blueprint_path)
        ]
        raise
    env.execute(workflow='install',
                task_retries=task_retries,
                task_retry_interval=task_retry_interval,
                task_thread_pool_size=task_thread_pool_size)
    node_instances = env.storage.get_node_instances()
    nodes_by_id = {node.id: node for node in env.storage.get_nodes()}
    # First node instance whose node type derives from
    # cloudify.nodes.CloudifyManager.
    manager_node_instance = \
        next(node_instance for node_instance in node_instances
             if 'cloudify.nodes.CloudifyManager' in
             nodes_by_id[node_instance.node_id].type_hierarchy)
    provider_context = \
        manager_node_instance.runtime_properties[PROVIDER_RUNTIME_PROPERTY]
    manager_ip = \
        manager_node_instance.runtime_properties[MANAGER_IP_RUNTIME_PROPERTY]
    manager_user = \
        manager_node_instance.runtime_properties[MANAGER_USER_RUNTIME_PROPERTY]
    manager_key_path = manager_node_instance.runtime_properties[
        MANAGER_KEY_PATH_RUNTIME_PROPERTY]
    rest_port = \
        manager_node_instance.runtime_properties[REST_PORT]
    # A secured REST port implies the secured protocol.
    protocol = constants.SECURED_PROTOCOL \
        if rest_port == constants.SECURED_REST_PORT \
        else constants.DEFAULT_PROTOCOL
    return {
        'provider_name': 'provider',
        'provider_context': provider_context,
        'manager_ip': manager_ip,
        'manager_user': manager_user,
        'manager_key_path': manager_key_path,
        'rest_port': rest_port,
        'protocol': protocol
    }
def bootstrap(blueprint_path, name='manager', inputs=None, task_retries=5,
              task_retry_interval=30, task_thread_pool_size=1,
              install_plugins=False):
    """Bootstrap a manager via the blueprint's ``install`` workflow.

    Initializes the blueprint into file-based storage under the CLI work
    dir, executes ``install``, and returns the manager's connection
    details taken from the manager node instance's runtime properties.
    (This variant does not expose rest_port/protocol in its result.)

    :param blueprint_path: path to the manager blueprint.
    :param name: name for the initialized local environment.
    :param inputs: blueprint inputs.
    :param task_retries: retries per workflow task.
    :param task_retry_interval: seconds between task retries.
    :param task_thread_pool_size: workflow task thread pool size.
    :return: dict with provider context and manager connection details.
    :raises ImportError: if plugin modules are missing; annotated with
        ``possible_solutions`` before being re-raised.
    """
    # File-based storage rooted at the CLI work dir (see _workdir()).
    storage = local.FileStorage(storage_dir=_workdir())
    try:
        env = common.initialize_blueprint(blueprint_path,
                                          name=name,
                                          inputs=inputs,
                                          storage=storage,
                                          install_plugins=install_plugins)
    except ImportError as e:
        # Missing plugin modules — attach actionable hints for the user.
        e.possible_solutions = [
            "Run 'cfy local install-plugins -p {0}'".format(blueprint_path),
            "Run 'cfy bootstrap --install-plugins -p {0}'".format(
                blueprint_path)
        ]
        raise
    env.execute(workflow='install',
                task_retries=task_retries,
                task_retry_interval=task_retry_interval,
                task_thread_pool_size=task_thread_pool_size)
    node_instances = env.storage.get_node_instances()
    nodes_by_id = {node.id: node for node in env.storage.get_nodes()}
    # First node instance whose node type derives from
    # cloudify.nodes.CloudifyManager.
    manager_node_instance = \
        next(node_instance for node_instance in node_instances
             if 'cloudify.nodes.CloudifyManager' in
             nodes_by_id[node_instance.node_id].type_hierarchy)
    provider_context = \
        manager_node_instance.runtime_properties[PROVIDER_RUNTIME_PROPERTY]
    manager_ip = \
        manager_node_instance.runtime_properties[MANAGER_IP_RUNTIME_PROPERTY]
    manager_user = \
        manager_node_instance.runtime_properties[MANAGER_USER_RUNTIME_PROPERTY]
    manager_key_path = manager_node_instance.runtime_properties[
        MANAGER_KEY_PATH_RUNTIME_PROPERTY]
    return {
        'provider_name': 'provider',
        'provider_context': provider_context,
        'manager_ip': manager_ip,
        'manager_user': manager_user,
        'manager_key_path': manager_key_path
    }
def rollback(blueprint_path, inputs, install_plugins, task_retries,
             task_retry_interval):
    """Roll back a manager upgrade by executing the rollback blueprint.

    Waits for the remote manager's maintenance mode to be active, builds
    a throw-away local workflow environment from ``blueprint_path``,
    writes the workflow state file (with ``is_upgrade=False``), and runs
    the ``install`` workflow. Any failure during execution is wrapped in
    a ``CloudifyCliError``.

    :param blueprint_path: path to the rollback blueprint.
    :param inputs: raw inputs; passed through ``update_inputs`` first.
    :param install_plugins: whether to install the blueprint's plugins.
    :param task_retries: retries per workflow task.
    :param task_retry_interval: seconds between task retries.
    :raises exceptions.CloudifyCliError: if the rollback workflow fails.
    """
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    client = utils.get_rest_client(management_ip, skip_version_check=True)
    # Rollback must only run while the manager is in maintenance mode.
    verify_and_wait_for_maintenance_mode_activation(client)
    inputs = update_inputs(inputs)
    env_name = 'manager-rollback'
    # init local workflow execution environment
    env = common.initialize_blueprint(blueprint_path,
                                      storage=None,
                                      install_plugins=install_plugins,
                                      name=env_name,
                                      inputs=json.dumps(inputs))
    logger.info('Starting Manager rollback process...')
    # Record rollback state so the remote side knows which flow is running.
    put_workflow_state_file(is_upgrade=False,
                            key_filename=inputs['ssh_key_filename'],
                            user=inputs['ssh_user'],
                            port=inputs['ssh_port'])
    logger.info('Executing Manager rollback...')
    try:
        env.execute('install',
                    task_retries=task_retries,
                    task_retry_interval=task_retry_interval)
    except Exception as e:
        msg = 'Failed to rollback Manager upgrade. Error: {0}'.format(e)
        raise exceptions.CloudifyCliError(msg)
    logger.info('Rollback complete. Management server is up at {0}'
                .format(inputs['public_ip']))
def bootstrap_validation(blueprint_path, name='manager', inputs=None,
                         task_retries=5, task_retry_interval=30,
                         task_thread_pool_size=1, install_plugins=False,
                         resolver=None):
    """Validate a manager blueprint before bootstrap.

    Checks the deployment size, initializes the blueprint with no
    storage, enforces that credentials are set when security is enabled
    in the ``manager_configuration`` node, and runs the
    ``cloudify.interfaces.validation.creation`` operation on all nodes.
    ``ImportError`` is annotated with remediation hints and re-raised.
    """
    validate_manager_deployment_size(blueprint_path=blueprint_path)
    try:
        env = common.initialize_blueprint(blueprint_path,
                                          name=name,
                                          inputs=inputs,
                                          storage=None,
                                          install_plugins=install_plugins,
                                          resolver=resolver)
    except ImportError as import_error:
        # Plugin modules could not be imported; suggest install commands.
        suggestions = [
            "Run 'cfy local install-plugins -p {0}'".format(blueprint_path),
            "Run 'cfy bootstrap --install-plugins -p {0}'".format(
                blueprint_path),
        ]
        import_error.possible_solutions = suggestions
        raise
    manager_config_node = env.storage.get_node('manager_configuration')
    security = manager_config_node.properties['security']
    if security['enabled']:
        _validate_credentials_are_set()
    env.execute(workflow='execute_operation',
                parameters={
                    'operation': 'cloudify.interfaces.validation.creation'},
                task_retries=task_retries,
                task_retry_interval=task_retry_interval,
                task_thread_pool_size=task_thread_pool_size)
def bootstrap(blueprint_path, name='manager', inputs=None, task_retries=5,
              task_retry_interval=30, task_thread_pool_size=1,
              install_plugins=False):
    """Bootstrap a manager and return its connection details.

    Runs the blueprint's ``install`` workflow from file-based local
    storage. Connection details come either from the manager node
    instance's runtime properties (when a 'provider' runtime property is
    present) or from the environment outputs plus the
    ``manager_configuration`` node's properties, in which case agent key
    handling, provider-context handling and resource upload are performed
    over fabric.

    :return: dict with provider context and manager connection details.
    :raises ImportError: if plugin modules are missing; annotated with
        ``possible_solutions`` before being re-raised.
    """
    def get_protocol(rest_port):
        # Secured protocol iff the REST port equals the secured port
        # (string comparison tolerates int/str mixes).
        return constants.SECURED_PROTOCOL \
            if str(rest_port) == str(constants.SECURED_REST_PORT) \
            else constants.DEFAULT_PROTOCOL

    storage = local.FileStorage(storage_dir=_workdir())
    try:
        env = common.initialize_blueprint(
            blueprint_path,
            name=name,
            inputs=inputs,
            storage=storage,
            install_plugins=install_plugins,
            resolver=utils.get_import_resolver()
        )
    except ImportError as e:
        # Missing plugin modules — attach actionable hints for the user.
        e.possible_solutions = [
            "Run 'cfy local install-plugins -p {0}'"
            .format(blueprint_path),
            "Run 'cfy bootstrap --install-plugins -p {0}'"
            .format(blueprint_path)
        ]
        raise
    env.execute(workflow='install',
                task_retries=task_retries,
                task_retry_interval=task_retry_interval,
                task_thread_pool_size=task_thread_pool_size)
    nodes = env.storage.get_nodes()
    node_instances = env.storage.get_node_instances()
    nodes_by_id = {node.id: node for node in nodes}
    # Prefer the standard manager type; fall back to the alternate
    # MyCloudifyManager type if no instance of the former exists.
    try:
        manager_node_instance = \
            next(node_instance for node_instance in node_instances
                 if 'cloudify.nodes.CloudifyManager' in
                 nodes_by_id[node_instance.node_id].type_hierarchy)
    except Exception:
        manager_node_instance = \
            next(node_instance for node_instance in node_instances
                 if 'cloudify.nodes.MyCloudifyManager' in
                 nodes_by_id[node_instance.node_id].type_hierarchy)
    manager_node = nodes_by_id['manager_configuration']
    if manager_node_instance.runtime_properties.get('provider'):
        # Provider path: everything needed is already in runtime props.
        provider_context = \
            manager_node_instance.runtime_properties[
                PROVIDER_RUNTIME_PROPERTY]
        manager_ip = \
            manager_node_instance.runtime_properties[
                MANAGER_IP_RUNTIME_PROPERTY]
        manager_user = \
            manager_node_instance.runtime_properties[
                MANAGER_USER_RUNTIME_PROPERTY]
        manager_key_path = manager_node_instance.runtime_properties[
            MANAGER_KEY_PATH_RUNTIME_PROPERTY]
        rest_port = \
            manager_node_instance.runtime_properties[REST_PORT]
    else:
        # Non-provider path: derive connection info from outputs and the
        # manager_configuration node, then do post-install work via fabric.
        manager_ip = env.outputs()['manager_ip']
        manager_user = manager_node.properties['ssh_user']
        manager_key_path = manager_node.properties['ssh_key_filename']
        rest_port = manager_node_instance.runtime_properties[REST_PORT]
        fabric_env = {
            "host_string": manager_ip,
            "user": manager_user,
            "key_filename": manager_key_path
        }
        agent_remote_key_path = _handle_agent_key_file(fabric_env,
                                                       manager_node)
        provider_context = _handle_provider_context(
            agent_remote_key_path=agent_remote_key_path,
            fabric_env=fabric_env,
            manager_node=manager_node,
            manager_node_instance=manager_node_instance)
        _upload_resources(manager_node, fabric_env, manager_ip, rest_port,
                          get_protocol(rest_port))
    protocol = get_protocol(rest_port)
    return {
        'provider_name': 'provider',
        'provider_context': provider_context,
        'manager_ip': manager_ip,
        'manager_user': manager_user,
        'manager_key_path': manager_key_path,
        'rest_port': rest_port,
        'protocol': protocol
    }
def upgrade(validate_only, skip_validations, blueprint_path, inputs,
            install_plugins, task_retries, task_retry_interval,
            task_thread_pool_size):
    """Upgrade a manager using the given upgrade blueprint.

    Waits for the remote manager's maintenance mode, builds a throw-away
    local workflow environment, writes the workflow state file (with
    ``is_upgrade=True``), optionally runs creation validations, and —
    unless ``validate_only`` — executes the ``install`` workflow and then
    uploads any DSL resources declared on the ``manager_configuration``
    node. Plugin resources are not uploaded during upgrade (warning only).

    :param validate_only: run validations but skip the actual upgrade.
    :param skip_validations: skip the pre-upgrade validations.
    :param blueprint_path: path to the upgrade blueprint.
    :param inputs: raw inputs; passed through ``update_inputs`` first.
    :param install_plugins: whether to install the blueprint's plugins.
    :param task_retries: retries per workflow task.
    :param task_retry_interval: seconds between task retries.
    :param task_thread_pool_size: workflow task thread pool size.
    :raises exceptions.CloudifyCliError: if the upgrade workflow fails;
        the error carries ``possible_solutions`` (rerun or rollback).
    """
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    client = utils.get_rest_client(management_ip, skip_version_check=True)
    # Upgrade must only run while the manager is in maintenance mode.
    verify_and_wait_for_maintenance_mode_activation(client)
    inputs = update_inputs(inputs)
    env_name = 'manager-upgrade'
    # init local workflow execution environment
    env = common.initialize_blueprint(blueprint_path,
                                      storage=None,
                                      install_plugins=install_plugins,
                                      name=env_name,
                                      inputs=json.dumps(inputs))
    logger.info('Upgrading manager...')
    # Record upgrade state so the remote side knows which flow is running.
    put_workflow_state_file(is_upgrade=True,
                            key_filename=inputs['ssh_key_filename'],
                            user=inputs['ssh_user'])
    if not skip_validations:
        logger.info('Executing upgrade validations...')
        env.execute(workflow='execute_operation',
                    parameters={'operation':
                                'cloudify.interfaces.validation.creation'},
                    task_retries=task_retries,
                    task_retry_interval=task_retry_interval,
                    task_thread_pool_size=task_thread_pool_size)
        logger.info('Upgrade validation completed successfully')
    if not validate_only:
        try:
            logger.info('Executing manager upgrade...')
            env.execute('install',
                        task_retries=task_retries,
                        task_retry_interval=task_retry_interval,
                        task_thread_pool_size=task_thread_pool_size)
        except Exception as e:
            msg = 'Upgrade failed! ({0})'.format(e)
            error = exceptions.CloudifyCliError(msg)
            error.possible_solutions = [
                "Rerun upgrade: `cfy upgrade`",
                "Execute rollback: `cfy rollback`"
            ]
            raise error
        manager_node = next(node for node in env.storage.get_nodes()
                            if node.id == 'manager_configuration')
        upload_resources = \
            manager_node.properties['cloudify'].get('upload_resources', {})
        dsl_resources = upload_resources.get('dsl_resources', ())
        if dsl_resources:
            # Default fetch timeout is 30s unless overridden in properties.
            fetch_timeout = upload_resources.get('parameters', {}) \
                .get('fetch_timeout', 30)
            fabric_env = bs.build_fabric_env(management_ip,
                                             inputs['ssh_user'],
                                             inputs['ssh_key_filename'])
            temp_dir = tempfile.mkdtemp()
            try:
                logger.info('Uploading dsl resources...')
                bs.upload_dsl_resources(dsl_resources,
                                        temp_dir=temp_dir,
                                        fabric_env=fabric_env,
                                        retries=task_retries,
                                        wait_interval=task_retry_interval,
                                        timeout=fetch_timeout)
            finally:
                # Best-effort cleanup of the staging dir.
                shutil.rmtree(temp_dir, ignore_errors=True)
        plugin_resources = upload_resources.get('plugin_resources', ())
        if plugin_resources:
            logger.warn('Plugins upload is not supported for upgrade. '
                        'Plugins {0} will not be uploaded'
                        .format(plugin_resources))
        logger.info('Upgrade complete')
        logger.info('Manager is up at {0}'.format(
            utils.get_management_server_ip()))
def bootstrap(blueprint_path, name='manager', inputs=None, task_retries=5,
              task_retry_interval=30, task_thread_pool_size=1,
              install_plugins=False):
    """Bootstrap a manager and return its connection details.

    Runs the blueprint's ``install`` workflow from file-based local
    storage, locates the manager node instance, persists the REST
    server's public certificate locally, creates a REST client, handles
    the agent key and provider context, uploads resources, and runs the
    post-bootstrap sanity check.

    :return: dict with provider context and manager connection details
        (including rest_port and rest_protocol).
    :raises ImportError: if plugin modules are missing; annotated with
        ``possible_solutions`` before being re-raised.
    """
    storage = local.FileStorage(storage_dir=_workdir())
    try:
        env = common.initialize_blueprint(
            blueprint_path,
            name=name,
            inputs=inputs,
            storage=storage,
            install_plugins=install_plugins,
            resolver=utils.get_import_resolver()
        )
    except ImportError as e:
        # Missing plugin modules — attach actionable hints for the user.
        e.possible_solutions = [
            "Run 'cfy local install-plugins -p {0}'"
            .format(blueprint_path),
            "Run 'cfy bootstrap --install-plugins -p {0}'"
            .format(blueprint_path)
        ]
        raise
    env.execute(workflow='install',
                task_retries=task_retries,
                task_retry_interval=task_retry_interval,
                task_thread_pool_size=task_thread_pool_size)
    nodes = env.storage.get_nodes()
    node_instances = env.storage.get_node_instances()
    nodes_by_id = {node.id: node for node in nodes}
    # Prefer the standard manager type; fall back to the alternate
    # MyCloudifyManager type if no instance of the former exists.
    try:
        manager_node_instance = \
            next(node_instance for node_instance in node_instances
                 if 'cloudify.nodes.CloudifyManager' in
                 nodes_by_id[node_instance.node_id].type_hierarchy)
    except Exception:
        manager_node_instance = \
            next(node_instance for node_instance in node_instances
                 if 'cloudify.nodes.MyCloudifyManager' in
                 nodes_by_id[node_instance.node_id].type_hierarchy)
    manager_node = nodes_by_id['manager_configuration']
    rest_port = manager_node_instance.runtime_properties[
        constants.REST_PORT_RUNTIME_PROPERTY]
    rest_protocol = manager_node_instance.runtime_properties[
        constants.REST_PROTOCOL_RUNTIME_PROPERTY]
    if manager_node_instance.runtime_properties.get('provider'):
        # NOTE(review): this provider_context value appears to be
        # overwritten unconditionally by _handle_provider_context below —
        # confirm the assignment here is intentional.
        provider_context = \
            manager_node_instance.runtime_properties[
                PROVIDER_RUNTIME_PROPERTY]
        manager_ip = \
            manager_node_instance.runtime_properties[
                MANAGER_IP_RUNTIME_PROPERTY]
        manager_user = \
            manager_node_instance.runtime_properties[
                MANAGER_USER_RUNTIME_PROPERTY]
        manager_port = \
            manager_node_instance.runtime_properties[
                MANAGER_PORT_RUNTIME_PROPERTY]
        manager_key_path = manager_node_instance.runtime_properties[
            MANAGER_KEY_PATH_RUNTIME_PROPERTY]
    else:
        manager_ip = env.outputs()['manager_ip']
        manager_user = manager_node.properties['ssh_user']
        manager_port = manager_node.properties['ssh_port']
        manager_key_path = manager_node.properties['ssh_key_filename']
    fabric_env = build_fabric_env(
        manager_ip, manager_user, manager_key_path, manager_port)
    agent_remote_key_path = _handle_agent_key_file(fabric_env, manager_node)
    # dump public rest certificate to a local file for future
    # communication with the rest server
    rest_public_cert = env.outputs()['rest_server_public_certificate']
    if rest_public_cert:
        cert_path = utils.get_default_rest_cert_local_path()
        with open(cert_path, 'w') as cert_file:
            cert_file.write(rest_public_cert)
    rest_client = utils.get_rest_client(rest_host=manager_ip,
                                        rest_port=rest_port,
                                        rest_protocol=rest_protocol,
                                        username=utils.get_username(),
                                        password=utils.get_password(),
                                        skip_version_check=True)
    provider_context = _handle_provider_context(
        rest_client=rest_client,
        remote_agents_private_key_path=agent_remote_key_path,
        manager_node=manager_node,
        manager_node_instance=manager_node_instance)
    _upload_resources(manager_node, fabric_env, rest_client, task_retries,
                      task_retry_interval)
    _perform_sanity(env=env,
                    manager_ip=manager_ip,
                    fabric_env=fabric_env,
                    task_retries=task_retries,
                    task_retry_interval=task_retry_interval,
                    task_thread_pool_size=task_thread_pool_size)
    return {
        'provider_name': 'provider',
        'provider_context': provider_context,
        'manager_ip': manager_ip,
        'manager_user': manager_user,
        'manager_port': manager_port,
        'manager_key_path': manager_key_path,
        'rest_port': rest_port,
        'rest_protocol': rest_protocol
    }