def _uninstall_mpack(mpack_name, mpack_version):
  """
  Uninstall specific management pack
  :param mpack_name: Management pack name
  :param mpack_version: Management pack version
  """
  print_info_msg("Uninstalling management pack {0}-{1}".format(mpack_name, mpack_version))
  # Get ambari mpack properties
  stack_location, extension_location, service_definitions_location, mpacks_staging_location, dashboard_location = get_mpack_properties()
  found = False
  if os.path.exists(mpacks_staging_location) and os.path.isdir(mpacks_staging_location):
    staged_mpack_dirs = sorted(os.listdir(mpacks_staging_location))
    for dirname in staged_mpack_dirs:
      # The cache subdirectory holds downloaded archives, not a staged mpack
      if dirname == MPACKS_CACHE_DIRNAME:
        continue
      staged_mpack_dir = os.path.join(mpacks_staging_location, dirname)
      if os.path.isdir(staged_mpack_dir):
        staged_mpack_metadata = read_mpack_metadata(staged_mpack_dir)
        if not staged_mpack_metadata:
          # BUGFIX: the original formatted staged_mpack_name/staged_mpack_version
          # here, but those are only assigned below — hitting a malformed mpack
          # raised NameError. Report the staging directory name instead.
          print_error_msg("Skipping malformed management pack {0}. Metadata file missing!".format(dirname))
          continue
        staged_mpack_name = staged_mpack_metadata.name
        staged_mpack_version = staged_mpack_metadata.version
        if mpack_name == staged_mpack_name and compare_versions(staged_mpack_version, mpack_version, format=True) == 0:
          print_info_msg("Removing management pack staging location {0}".format(staged_mpack_dir))
          sudo.rmtree(staged_mpack_dir)
          # Drop stack/extension/service/dashboard symlinks pointing into the removed dir
          remove_symlinks(stack_location, extension_location, service_definitions_location, dashboard_location, staged_mpack_dir)
          found = True
          break
  if not found:
    print_error_msg("Management pack {0}-{1} is not installed!".format(mpack_name, mpack_version))
  else:
    print_info_msg("Management pack {0}-{1} successfully uninstalled!".format(mpack_name, mpack_version))
def expand_mpack(archive_path):
  """
  Expand management pack
  :param archive_path: Local path to management pack
  :return: Path where the management pack was expanded
  """
  temp_dir = _get_temp_dir()

  # A valid mpack archive contains exactly one root directory; reject otherwise
  root_dir_name = get_archive_root_dir(archive_path)
  if not root_dir_name:
    print_error_msg("Malformed management pack. Root directory missing!")
    raise FatalException(-1, 'Malformed management pack. Root directory missing!')

  # Expand management pack in temp directory
  expanded_path = os.path.join(temp_dir, root_dir_name)
  print_info_msg("Expand management pack at temp location {0}".format(expanded_path))

  # Clear leftovers from any previous expansion before extracting
  if os.path.exists(expanded_path):
    sudo.rmtree(expanded_path)
  extract_archive(archive_path, temp_dir)

  # Extraction must have produced the expected root directory
  if not os.path.exists(expanded_path):
    print_error_msg("Malformed management pack. Failed to expand management pack!")
    raise FatalException(-1, 'Malformed management pack. Failed to expand management pack!')
  return expanded_path
def action_delete(self):
  """Remove the resource's directory tree; fail if the path is a regular file."""
  target = self.resource.path
  if not sudo.path_exists(target):
    # Nothing to remove — deletion is idempotent
    return
  if not sudo.path_isdir(target):
    raise Fail("Applying %s failed, %s is not a directory" % (self.resource, target))
  Logger.info("Removing directory %s and all its content" % self.resource)
  sudo.rmtree(target)
def purge_stacks_and_mpacks(purge_list, replay_mode=False): """ Purge all stacks and management packs :param replay_mode: Flag to indicate if purging in replay mode """ # Get ambari mpacks config properties stack_location, extension_location, service_definitions_location, mpacks_staging_location, dashboard_location = get_mpack_properties() print_info_msg("Purging existing stack definitions and management packs") if not purge_list: print_info_msg("Nothing to purge") return # Don't delete default stack_advisor.py (stacks/stack_advisor.py) if STACK_DEFINITIONS_RESOURCE_NAME in purge_list and os.path.exists(stack_location): print_info_msg("Purging stack location: " + stack_location) for file in sorted(os.listdir(stack_location)): path = os.path.join(stack_location, file) if(os.path.isdir(path)): sudo.rmtree(path) if EXTENSION_DEFINITIONS_RESOURCE_NAME in purge_list and os.path.exists(extension_location): print_info_msg("Purging extension location: " + extension_location) sudo.rmtree(extension_location) if SERVICE_DEFINITIONS_RESOURCE_NAME in purge_list and os.path.exists(service_definitions_location): print_info_msg("Purging service definitions location: " + service_definitions_location) sudo.rmtree(service_definitions_location) # Don't purge mpacks staging directory in replay mode if MPACKS_RESOURCE_NAME in purge_list and not replay_mode and os.path.exists(mpacks_staging_location): print_info_msg("Purging mpacks staging location: " + mpacks_staging_location) sudo.rmtree(mpacks_staging_location) sudo.makedir(mpacks_staging_location, 0755)
def main(): properties = get_ambari_properties() if properties == -1: print >> sys.stderr, "Error getting ambari properties" return -1 resources_location = get_resources_location(properties) views_dir = os.path.join(resources_location, "views") for file in os.listdir(views_dir): path = os.path.join(views_dir, file) if os.path.isfile(path): if "ambari-admin" in path or "storm-view" in path: print "Keeping views jar : " + path else: print "Deleting views jar : " + path sudo.unlink(path) else: print "Deleting views directory : " + path sudo.rmtree(path) return 0
def purge_stacks_and_mpacks(): """ Purge all stacks and management packs """ # Get ambari mpacks config properties stack_location, service_definitions_location, mpacks_staging_location = get_mpack_properties( ) print_info_msg("Purging existing stack definitions and management packs") if os.path.exists(stack_location): print_info_msg("Purging stack location: " + stack_location) sudo.rmtree(stack_location) if os.path.exists(service_definitions_location): print_info_msg("Purging service definitions location: " + service_definitions_location) sudo.rmtree(service_definitions_location) if os.path.exists(mpacks_staging_location): print_info_msg("Purging mpacks staging location: " + mpacks_staging_location) sudo.rmtree(mpacks_staging_location) sudo.makedir(mpacks_staging_location, 0755)
def _prepare_tez_tarball():
  """
  Prepares the Tez tarball by adding the Hadoop native libraries found in the mapreduce tarball.
  It's very important to use the version of mapreduce which matches tez here.
  Additionally, this will also copy native LZO to the tez tarball if LZO is enabled and the
  GPL license has been accepted.
  :return: the full path of the newly created tez tarball to use
  """
  import tempfile

  Logger.info("Preparing the Tez tarball...")

  # get the mapreduce tarball which matches the version of tez
  # tez installs the mapreduce tar, so it should always be present
  _, mapreduce_source_file, _, _ = get_tarball_paths("mapreduce")
  _, tez_source_file, _, _ = get_tarball_paths("tez")

  temp_dir = Script.get_tmp_dir()

  # create the temp staging directories ensuring that non-root agents using tarfile can work with them
  mapreduce_temp_dir = tempfile.mkdtemp(prefix="mapreduce-tarball-", dir=temp_dir)
  tez_temp_dir = tempfile.mkdtemp(prefix="tez-tarball-", dir=temp_dir)
  sudo.chmod(mapreduce_temp_dir, 0777)
  sudo.chmod(tez_temp_dir, 0777)

  Logger.info("Extracting {0} to {1}".format(mapreduce_source_file, mapreduce_temp_dir))
  tar_archive.extract_archive(mapreduce_source_file, mapreduce_temp_dir)

  # NOTE(review): tez uses untar_archive while mapreduce uses extract_archive above —
  # presumably the two helpers differ in sudo/ownership handling; confirm in tar_archive.
  Logger.info("Extracting {0} to {1}".format(tez_source_file, tez_temp_dir))
  tar_archive.untar_archive(tez_source_file, tez_temp_dir)

  hadoop_lib_native_dir = os.path.join(mapreduce_temp_dir, "hadoop", "lib", "native")
  tez_lib_dir = os.path.join(tez_temp_dir, "lib")

  # both the source of the native libraries and the destination must exist
  if not os.path.exists(hadoop_lib_native_dir):
    raise Fail("Unable to seed the Tez tarball with native libraries since the source Hadoop native lib directory {0} does not exist".format(hadoop_lib_native_dir))

  if not os.path.exists(tez_lib_dir):
    raise Fail("Unable to seed the Tez tarball with native libraries since the target Tez lib directory {0} does not exist".format(tez_lib_dir))

  # copy native libraries from hadoop to tez
  Execute(("cp", "-a", hadoop_lib_native_dir, tez_lib_dir), sudo=True)

  # if enabled, LZO GPL libraries must be copied as well
  if lzo_utils.should_install_lzo():
    stack_root = Script.get_stack_root()
    service_version = component_version.get_component_repository_version(service_name="TEZ")

    # some installations might not have Tez, but MapReduce2 should be a fallback to get the LZO libraries from
    if service_version is None:
      Logger.warning("Tez does not appear to be installed, using the MapReduce version to get the LZO libraries")
      service_version = component_version.get_component_repository_version(service_name="MAPREDUCE2")

    hadoop_lib_native_lzo_dir = os.path.join(stack_root, service_version, "hadoop", "lib", "native")

    # fall back to the hadoop-client "current" symlink when the versioned dir is absent
    if not sudo.path_isdir(hadoop_lib_native_lzo_dir):
      Logger.warning("Unable to located native LZO libraries at {0}, falling back to hadoop home".format(hadoop_lib_native_lzo_dir))
      hadoop_lib_native_lzo_dir = os.path.join(stack_root, "current", "hadoop-client", "lib", "native")

    if not sudo.path_isdir(hadoop_lib_native_lzo_dir):
      raise Fail("Unable to seed the Tez tarball with native libraries since LZO is enabled but the native LZO libraries could not be found at {0}".format(hadoop_lib_native_lzo_dir))

    Execute(("cp", "-a", hadoop_lib_native_lzo_dir, tez_lib_dir), sudo=True)

  # ensure that the tez/lib directory is readable by non-root (which it typically is not)
  Directory(tez_lib_dir, mode=0755, cd_access='a', recursive_ownership=True)

  # create the staging directory so that non-root agents can write to it
  tez_native_tarball_staging_dir = os.path.join(temp_dir, "tez-native-tarball-staging")
  if not os.path.exists(tez_native_tarball_staging_dir):
    Directory(tez_native_tarball_staging_dir, mode=0777, cd_access='a', create_parents=True, recursive_ownership=True)

  tez_tarball_with_native_lib = os.path.join(tez_native_tarball_staging_dir, "tez-native.tar.gz")
  Logger.info("Creating a new Tez tarball at {0}".format(tez_tarball_with_native_lib))

  # tar up Tez, making sure to specify nothing for the arcname so that it does not include an absolute path
  with closing(tarfile.open(tez_tarball_with_native_lib, "w:gz")) as new_tez_tarball:
    new_tez_tarball.add(tez_temp_dir, arcname=os.path.sep)

  # ensure that the tarball can be read and uploaded
  sudo.chmod(tez_tarball_with_native_lib, 0744)

  # cleanup the temp extraction areas; the staged tarball is the only artifact kept
  sudo.rmtree(mapreduce_temp_dir)
  sudo.rmtree(tez_temp_dir)

  return tez_tarball_with_native_lib
def _prepare_mapreduce_tarball():
  """
  Prepares the mapreduce tarball by including the native LZO libraries if necessary. If LZO is
  not enabled or has not been opted-in, then this will do nothing and return the original
  tarball to upload to HDFS.
  :return: the full path of the newly created mapreduce tarball to use or the original path
           if no changes were made
  """
  # get the mapreduce tarball to crack open and add LZO libraries to
  _, mapreduce_source_file, _, _ = get_tarball_paths("mapreduce")

  # without LZO there is nothing to add — upload the stock tarball unchanged
  if not lzo_utils.should_install_lzo():
    return mapreduce_source_file

  Logger.info("Preparing the mapreduce tarball with native LZO libraries...")

  temp_dir = Script.get_tmp_dir()

  # create the temp staging directories ensuring that non-root agents using tarfile can work with them
  mapreduce_temp_dir = tempfile.mkdtemp(prefix="mapreduce-tarball-", dir=temp_dir)
  sudo.chmod(mapreduce_temp_dir, 0777)

  # calculate the source directory for LZO (lib/native next to the source tarball)
  hadoop_lib_native_source_dir = os.path.join(os.path.dirname(mapreduce_source_file), "lib", "native")
  if not sudo.path_exists(hadoop_lib_native_source_dir):
    raise Fail("Unable to seed the mapreduce tarball with native LZO libraries since the source Hadoop native lib directory {0} does not exist".format(hadoop_lib_native_source_dir))

  Logger.info("Extracting {0} to {1}".format(mapreduce_source_file, mapreduce_temp_dir))
  tar_archive.extract_archive(mapreduce_source_file, mapreduce_temp_dir)

  mapreduce_lib_dir = os.path.join(mapreduce_temp_dir, "hadoop", "lib")

  # copy native libraries from source hadoop to target
  Execute(("cp", "-af", hadoop_lib_native_source_dir, mapreduce_lib_dir), sudo=True)

  # ensure that the hadoop/lib/native directory is readable by non-root (which it typically is not)
  Directory(mapreduce_lib_dir, mode=0755, cd_access='a', recursive_ownership=True)

  # create the staging directory so that non-root agents can write to it
  mapreduce_native_tarball_staging_dir = os.path.join(temp_dir, "mapreduce-native-tarball-staging")
  if not os.path.exists(mapreduce_native_tarball_staging_dir):
    Directory(mapreduce_native_tarball_staging_dir, mode=0777, cd_access='a', create_parents=True, recursive_ownership=True)

  mapreduce_tarball_with_native_lib = os.path.join(mapreduce_native_tarball_staging_dir, "mapreduce-native.tar.gz")
  Logger.info("Creating a new mapreduce tarball at {0}".format(mapreduce_tarball_with_native_lib))

  # tar up mapreduce, making sure to specify nothing for the arcname so that it does not include an absolute path
  with closing(tarfile.open(mapreduce_tarball_with_native_lib, "w:gz")) as new_tarball:
    new_tarball.add(mapreduce_temp_dir, arcname=os.path.sep)

  # ensure that the tarball can be read and uploaded
  sudo.chmod(mapreduce_tarball_with_native_lib, 0744)

  # cleanup the temp extraction area; the staged tarball is the only artifact kept
  sudo.rmtree(mapreduce_temp_dir)

  return mapreduce_tarball_with_native_lib
def _install_mpack(options, replay_mode=False, is_upgrade=False):
  """
  Install management pack
  :param options: Command line options
  :param replay_mode: Flag to indicate if executing command in replay mode
  :param is_upgrade: When True, runs the pre-upgrade hook instead of the pre-install hook
  :return: (mpack_metadata, mpack_name, mpack_version, mpack_staging_dir, mpack_archive_path)
  :raises FatalException: on missing path, failed download, or malformed/duplicate mpack
  """
  mpack_path = options.mpack_path
  if not mpack_path:
    print_error_msg("Management pack not specified!")
    raise FatalException(-1, 'Management pack not specified!')

  print_info_msg("Installing management pack {0}".format(mpack_path))

  # Download management pack to a temp location
  tmp_archive_path = download_mpack(mpack_path)
  if not (tmp_archive_path and os.path.exists(tmp_archive_path)):
    print_error_msg("Management pack could not be downloaded!")
    raise FatalException(-1, 'Management pack could not be downloaded!')

  # Expand management pack in temp directory
  tmp_root_dir = expand_mpack(tmp_archive_path)

  # Read mpack metadata
  mpack_metadata = read_mpack_metadata(tmp_root_dir)
  if not mpack_metadata:
    raise FatalException(-1, 'Malformed management pack {0}. Metadata file missing!'.format(mpack_path))

  # Validate management pack prerequisites
  # Skip validation in replay mode
  if not replay_mode:
    validate_mpack_prerequisites(mpack_metadata)

  if is_upgrade:
    # Execute pre upgrade hook
    _execute_hook(mpack_metadata, BEFORE_UPGRADE_HOOK_NAME, tmp_root_dir)
  else:
    # Execute pre install hook
    _execute_hook(mpack_metadata, BEFORE_INSTALL_HOOK_NAME, tmp_root_dir)

  # Purge previously installed stacks and management packs
  if options.purge and options.purge_list:
    purge_stacks_and_mpacks(options.purge_list.split(","), replay_mode)

  # Get ambari mpack properties
  stack_location, extension_location, service_definitions_location, mpacks_staging_location = get_mpack_properties()
  mpacks_cache_location = os.path.join(mpacks_staging_location, MPACKS_CACHE_DIRNAME)

  # Create directories
  if not os.path.exists(stack_location):
    sudo.makedir(stack_location, 0755)
  if not os.path.exists(extension_location):
    sudo.makedir(extension_location, 0755)
  if not os.path.exists(service_definitions_location):
    sudo.makedir(service_definitions_location, 0755)
  if not os.path.exists(mpacks_staging_location):
    sudo.makedir(mpacks_staging_location, 0755)
  if not os.path.exists(mpacks_cache_location):
    sudo.makedir(mpacks_cache_location, 0755)

  # Stage management pack (Stage at /var/lib/ambari-server/resources/mpacks/mpack_name-mpack_version)
  mpack_name = mpack_metadata.name
  mpack_version = mpack_metadata.version
  mpack_dirname = mpack_name + "-" + mpack_version
  mpack_staging_dir = os.path.join(mpacks_staging_location, mpack_dirname)
  # the downloaded archive is kept in the cache so the install can be replayed
  mpack_archive_path = os.path.join(mpacks_cache_location, os.path.basename(tmp_archive_path))

  print_info_msg("Stage management pack {0}-{1} to staging location {2}".format(mpack_name, mpack_version, mpack_staging_dir))
  if os.path.exists(mpack_staging_dir):
    if options.force:
      print_info_msg("Force removing previously installed management pack from {0}".format(mpack_staging_dir))
      sudo.rmtree(mpack_staging_dir)
    else:
      error_msg = "Management pack {0}-{1} already installed!".format(mpack_name, mpack_version)
      print_error_msg(error_msg)
      raise FatalException(-1, error_msg)

  # move the expanded tree into staging and the archive into the cache
  shutil.move(tmp_root_dir, mpack_staging_dir)
  shutil.move(tmp_archive_path, mpack_archive_path)

  # Process setup steps for all artifacts (stack-definitions, extension-definitions,
  # service-definitions, stack-addon-service-definitions) in the management pack
  for artifact in mpack_metadata.artifacts:
    # Artifact name (Friendly name)
    artifact_name = artifact.name
    # Artifact type (stack-definitions, extension-definitions, service-definitions, etc)
    artifact_type = artifact.type
    # Artifact directory with contents of the artifact
    artifact_source_dir = os.path.join(mpack_staging_dir, artifact.source_dir)
    print_info_msg("Processing artifact {0} of type {1} in {2}".format(artifact_name, artifact_type, artifact_source_dir))
    if artifact.type == STACK_DEFINITIONS_ARTIFACT_NAME:
      process_stack_definitions_artifact(artifact, artifact_source_dir, options)
    elif artifact.type == EXTENSION_DEFINITIONS_ARTIFACT_NAME:
      process_extension_definitions_artifact(artifact, artifact_source_dir, options)
    elif artifact.type == SERVICE_DEFINITIONS_ARTIFACT_NAME:
      process_service_definitions_artifact(artifact, artifact_source_dir, options)
    elif artifact.type == STACK_ADDON_SERVICE_DEFINITIONS_ARTIFACT_NAME:
      process_stack_addon_service_definitions_artifact(artifact, artifact_source_dir, options)
    else:
      # unknown artifact types are logged and skipped, not fatal
      print_info_msg("Unknown artifact {0} of type {1}".format(artifact_name, artifact_type))

  print_info_msg("Management pack {0}-{1} successfully installed!".format(mpack_name, mpack_version))
  return mpack_metadata, mpack_name, mpack_version, mpack_staging_dir, mpack_archive_path
def install_mpack(options):
  """
  Install management pack
  :param options: Command line options
  :return: (mpack_name, mpack_version, mpack_staging_dir)
  :raises FatalException: on missing path, malformed mpack, or duplicate install
  """
  mpack_path = options.mpack_path
  if not mpack_path:
    print_error_msg("Management pack not specified!")
    raise FatalException(-1, 'Management pack not specified!')

  print_info_msg("Installing management pack {0}".format(mpack_path))

  # Download management pack to a temp location
  tmp_archive_path = download_mpack(mpack_path)

  # Expand management pack in temp directory
  tmp_root_dir = expand_mpack(tmp_archive_path)

  # Read mpack metadata
  mpack_metadata = read_mpack_metadata(tmp_root_dir)
  if not mpack_metadata:
    raise FatalException(-1, 'Malformed management pack {0}. Metadata file missing!'.format(mpack_path))

  # Validate management pack prerequisites
  validate_mpack_prerequisites(mpack_metadata)

  # Purge previously installed stacks and management packs
  if options.purge:
    purge_stacks_and_mpacks()

  # Get ambari mpack properties
  stack_location, service_definitions_location, mpacks_staging_location = get_mpack_properties()

  # Create directories
  if not os.path.exists(stack_location):
    sudo.makedir(stack_location, 0755)
  if not os.path.exists(service_definitions_location):
    sudo.makedir(service_definitions_location, 0755)
  if not os.path.exists(mpacks_staging_location):
    sudo.makedir(mpacks_staging_location, 0755)

  # Stage management pack (Stage at /var/lib/ambari-server/resources/mpacks/mpack_name-mpack_version)
  mpack_name = mpack_metadata.name
  mpack_version = mpack_metadata.version
  mpack_dirname = mpack_name + "-" + mpack_version
  mpack_staging_dir = os.path.join(mpacks_staging_location, mpack_dirname)
  print_info_msg("Stage management pack {0}-{1} to staging location {2}".format(mpack_name, mpack_version, mpack_staging_dir))
  if os.path.exists(mpack_staging_dir):
    if options.force:
      print_info_msg("Force removing previously installed management pack from {0}".format(mpack_staging_dir))
      sudo.rmtree(mpack_staging_dir)
    else:
      error_msg = "Management pack {0}-{1} already installed!".format(mpack_name, mpack_version)
      print_error_msg(error_msg)
      raise FatalException(-1, error_msg)

  shutil.move(tmp_root_dir, mpack_staging_dir)

  # Process setup steps for all artifacts (stack-definitions, service-definitions, stack-extension-definitions)
  # in the management pack
  for artifact in mpack_metadata.artifacts:
    # Artifact name (Friendly name)
    artifact_name = artifact.name
    # Artifact type (stack-definitions, service-definitions, stack-extension-definitions etc)
    artifact_type = artifact.type
    # Artifact directory with contents of the artifact
    artifact_source_dir = os.path.join(mpack_staging_dir, artifact.source_dir)
    print_info_msg("Processing artifact {0} of type {1} in {2}".format(artifact_name, artifact_type, artifact_source_dir))
    # dispatch by artifact type; singular and plural forms have distinct processors
    if artifact.type == "stack-definitions":
      process_stack_definitions_artifact(artifact, artifact_source_dir, options)
    elif artifact.type == "stack-definition":
      process_stack_definition_artifact(artifact, artifact_source_dir, options)
    elif artifact.type == "service-definitions":
      process_service_definitions_artifact(artifact, artifact_source_dir, options)
    elif artifact.type == "service-definition":
      process_service_definition_artifact(artifact, artifact_source_dir, options)
    elif artifact.type == "stack-extension-definitions":
      process_stack_extension_definitions_artifact(artifact, artifact_source_dir, options)
    elif artifact.type == "stack-extension-definition":
      process_stack_extension_definition_artifact(artifact, artifact_source_dir, options)
    else:
      # unknown artifact types are logged and skipped, not fatal
      print_info_msg("Unknown artifact {0} of type {1}".format(artifact_name, artifact_type))

  print_info_msg("Management pack {0}-{1} successfully installed!".format(mpack_name, mpack_version))
  return mpack_name, mpack_version, mpack_staging_dir
def _install_mpack(options, replay_mode=False, is_upgrade=False):
  """
  Install management pack
  :param options: Command line options
  :param replay_mode: Flag to indicate if executing command in replay mode
  :param is_upgrade: When True, runs the pre-upgrade hook instead of the pre-install hook
  :return: (mpack_metadata, mpack_name, mpack_version, mpack_staging_dir, mpack_archive_path)
  :raises FatalException: on missing path, failed download, or malformed/duplicate mpack
  """
  mpack_path = options.mpack_path
  if not mpack_path:
    print_error_msg("Management pack not specified!")
    raise FatalException(-1, 'Management pack not specified!')

  print_info_msg("Installing management pack {0}".format(mpack_path))

  # Download management pack to a temp location
  tmp_archive_path = download_mpack(mpack_path)
  if not (tmp_archive_path and os.path.exists(tmp_archive_path)):
    print_error_msg("Management pack could not be downloaded!")
    raise FatalException(-1, 'Management pack could not be downloaded!')

  # Expand management pack in temp directory
  tmp_root_dir = expand_mpack(tmp_archive_path)

  # Read mpack metadata
  mpack_metadata = read_mpack_metadata(tmp_root_dir)
  if not mpack_metadata:
    raise FatalException(-1, 'Malformed management pack {0}. Metadata file missing!'.format(mpack_path))

  # Validate management pack prerequisites
  # Skip validation in replay mode
  if not replay_mode:
    validate_mpack_prerequisites(mpack_metadata)

  if is_upgrade:
    # Execute pre upgrade hook
    _execute_hook(mpack_metadata, BEFORE_UPGRADE_HOOK_NAME, tmp_root_dir)
  else:
    # Execute pre install hook
    _execute_hook(mpack_metadata, BEFORE_INSTALL_HOOK_NAME, tmp_root_dir)

  # Purge previously installed stacks and management packs
  if options.purge and options.purge_list:
    purge_resources = options.purge_list.split(",")
    validate_purge(options, purge_resources, tmp_root_dir, mpack_metadata, replay_mode)
    purge_stacks_and_mpacks(purge_resources, replay_mode)

  # Directories created below are recorded in these lists so their permissions
  # and ownership can be adjusted for the configured ambari user afterwards.
  adjust_ownership_list = []
  change_ownership_list = []

  # Get ambari mpack properties
  stack_location, extension_location, service_definitions_location, mpacks_staging_location, dashboard_location = get_mpack_properties()
  mpacks_cache_location = os.path.join(mpacks_staging_location, MPACKS_CACHE_DIRNAME)

  # Create directories; the "{0}" placeholder is filled in with the ambari user later
  if not os.path.exists(stack_location):
    sudo.makedir(stack_location, 0755)
    adjust_ownership_list.append((stack_location, "0755", "{0}", True))
    change_ownership_list.append((stack_location,"{0}",True))
  if not os.path.exists(extension_location):
    sudo.makedir(extension_location, 0755)
    adjust_ownership_list.append((extension_location, "0755", "{0}", True))
    change_ownership_list.append((extension_location,"{0}",True))
  if not os.path.exists(service_definitions_location):
    sudo.makedir(service_definitions_location, 0755)
    adjust_ownership_list.append((service_definitions_location, "0755", "{0}", True))
    change_ownership_list.append((service_definitions_location,"{0}",True))
  if not os.path.exists(mpacks_staging_location):
    sudo.makedir(mpacks_staging_location, 0755)
    adjust_ownership_list.append((mpacks_staging_location, "0755", "{0}", True))
    change_ownership_list.append((mpacks_staging_location,"{0}",True))
  if not os.path.exists(mpacks_cache_location):
    sudo.makedir(mpacks_cache_location, 0755)
    adjust_ownership_list.append((mpacks_cache_location, "0755", "{0}", True))
    change_ownership_list.append((mpacks_cache_location,"{0}",True))
  if not os.path.exists(dashboard_location):
    sudo.makedir(dashboard_location, 0755)
    # dashboard location also gets its grafana and service-metrics subdirectories
    sudo.makedir(os.path.join(dashboard_location, GRAFANA_DASHBOARDS_DIRNAME), 0755)
    sudo.makedir(os.path.join(dashboard_location, SERVICE_METRICS_DIRNAME), 0755)
    adjust_ownership_list.append((dashboard_location, "0755", "{0}", True))
    change_ownership_list.append((dashboard_location,"{0}",True))

  # Stage management pack (Stage at /var/lib/ambari-server/resources/mpacks/mpack_name-mpack_version)
  mpack_name = mpack_metadata.name
  mpack_version = mpack_metadata.version
  mpack_dirname = mpack_name + "-" + mpack_version
  mpack_staging_dir = os.path.join(mpacks_staging_location, mpack_dirname)
  # the downloaded archive is kept in the cache so the install can be replayed
  mpack_archive_path = os.path.join(mpacks_cache_location, os.path.basename(tmp_archive_path))

  print_info_msg("Stage management pack {0}-{1} to staging location {2}".format(
    mpack_name, mpack_version, mpack_staging_dir))
  if os.path.exists(mpack_staging_dir):
    if options.force:
      print_info_msg("Force removing previously installed management pack from {0}".format(mpack_staging_dir))
      sudo.rmtree(mpack_staging_dir)
    else:
      error_msg = "Management pack {0}-{1} already installed!".format(mpack_name, mpack_version)
      print_error_msg(error_msg)
      raise FatalException(-1, error_msg)

  # move the expanded tree into staging and the archive into the cache
  shutil.move(tmp_root_dir, mpack_staging_dir)
  shutil.move(tmp_archive_path, mpack_archive_path)

  # Process setup steps for all artifacts (stack-definitions, extension-definitions,
  # service-definitions, stack-addon-service-definitions) in the management pack
  for artifact in mpack_metadata.artifacts:
    # Artifact name (Friendly name)
    artifact_name = artifact.name
    # Artifact type (stack-definitions, extension-definitions, service-definitions, etc)
    artifact_type = artifact.type
    # Artifact directory with contents of the artifact
    artifact_source_dir = os.path.join(mpack_staging_dir, artifact.source_dir)
    print_info_msg("Processing artifact {0} of type {1} in {2}".format(
      artifact_name, artifact_type, artifact_source_dir))
    if artifact.type == STACK_DEFINITIONS_ARTIFACT_NAME:
      process_stack_definitions_artifact(artifact, artifact_source_dir, options)
    elif artifact.type == EXTENSION_DEFINITIONS_ARTIFACT_NAME:
      process_extension_definitions_artifact(artifact, artifact_source_dir, options)
    elif artifact.type == SERVICE_DEFINITIONS_ARTIFACT_NAME:
      process_service_definitions_artifact(artifact, artifact_source_dir, options)
    elif artifact.type == STACK_ADDON_SERVICE_DEFINITIONS_ARTIFACT_NAME:
      process_stack_addon_service_definitions_artifact(artifact, artifact_source_dir, options)
    else:
      # unknown artifact types are logged and skipped, not fatal
      print_info_msg("Unknown artifact {0} of type {1}".format(artifact_name, artifact_type))

  ambari_user = read_ambari_user()
  if ambari_user:
    # This is required when a non-admin user is configured to setup ambari-server
    print_info_msg("Adjusting file permissions and ownerships")
    for pack in adjust_ownership_list:
      file = pack[0]
      mod = pack[1]
      user = pack[2].format(ambari_user)
      recursive = pack[3]
      logger.info("Setting file permissions: {0} {1} {2} {3}".format(file, mod, user, recursive))
      set_file_permissions(file, mod, user, recursive)
    for pack in change_ownership_list:
      path = pack[0]
      user = pack[1].format(ambari_user)
      recursive = pack[2]
      logger.info("Changing ownership: {0} {1} {2}".format(path, user, recursive))
      change_owner(path, user, recursive)

  print_info_msg("Management pack {0}-{1} successfully installed! Please restart ambari-server.".format(mpack_name, mpack_version))
  return mpack_metadata, mpack_name, mpack_version, mpack_staging_dir, mpack_archive_path