def restore_inter_deployment_dependencies(deployment_id,
                                          update_service_composition,
                                          tenant_name):
    """Re-create the inter-deployment dependencies of a single deployment
    from its parsed blueprint plan.
    """
    with get_storage_manager_instance() as sm:
        set_tenant_in_app(get_tenant_by_name(tenant_name))
        deployment = sm.get(models.Deployment, deployment_id)
        blueprint = deployment.blueprint
        app_dir = join(FILE_SERVER_BLUEPRINTS_FOLDER,
                       utils.current_tenant.name,
                       blueprint.id)
        app_blueprint = blueprint.main_file_name
        logger.info('{0}: BP in {1}/{2}'.format(
            deployment_id, app_dir, app_blueprint))
        parsed_deployment = rest_utils.get_parsed_deployment(
            blueprint, app_dir, app_blueprint)
        deployment_plan = rest_utils.get_deployment_plan(
            parsed_deployment, deployment.inputs)
        logger.info('{0}: Parsed plan'.format(deployment.id))
        rest_utils.update_deployment_dependencies_from_plan(
            deployment.id, deployment_plan, sm, lambda *_: True)
        logger.info('{0}: Updated dependencies from plan'.format(
            deployment.id))
        if update_service_composition:
            create_service_composition_dependencies(deployment_plan,
                                                    deployment,
                                                    sm)

def main():
    tenant_name = sys.argv[1]
    update_service_composition = (sys.argv[2] == 'True')
    with get_storage_manager_instance() as sm:
        set_tenant_in_app(get_tenant_by_name(tenant_name))
        blueprints = sm.list(models.Blueprint)
        idd_capable_blueprints = []
        for bp in blueprints:
            if is_capable_for_idd(bp.plan):
                idd_capable_blueprints.append(bp.id)
        logger.info(
            'IDD capable blueprints: {0}'.format(idd_capable_blueprints))
        if not idd_capable_blueprints:
            return
        deployments = sm.list(
            models.Deployment,
            filters={'blueprint_id': idd_capable_blueprints})
        deployments_queue = queue.Queue()
        failed_deployments_queue = queue.Queue()
        for dep in deployments:
            deployments_queue.put((dep.id, dep.tenant.name))
        logger.info('IDD capable deployments: {0}'.format(
            [dep.id for dep in deployments]))

    # Spawn up to NUM_THREADS workers to drain the deployments queue
    for i in range(min(NUM_THREADS, deployments_queue.qsize())):
        t = threading.Thread(target=create_inter_deployment_dependencies,
                             args=(deployments_queue,
                                   failed_deployments_queue,
                                   update_service_composition,
                                   tenant_name))
        t.start()
    deployments_queue.join()

    if not failed_deployments_queue.empty():
        deployments = list(failed_deployments_queue.queue)
        logger.critical('Failed to create the inter deployment '
                        'dependencies for the following '
                        'deployments: {0}. See exception '
                        'tracebacks logged above for more '
                        'details'.format(deployments))
        exit(1)

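# The worker that the threads above target, create_inter_deployment_dependencies,
# is not part of this section. The sketch below is a hypothetical reconstruction,
# assuming only what main() implies: each worker drains (deployment_id, tenant_name)
# tuples from deployments_queue, delegates to restore_inter_deployment_dependencies,
# records failures in failed_deployments_queue, and calls task_done() so that
# deployments_queue.join() can return.
def create_inter_deployment_dependencies(deployments_queue,
                                         failed_deployments_queue,
                                         update_service_composition,
                                         tenant_name):
    while True:
        try:
            dep_id, dep_tenant_name = deployments_queue.get_nowait()
        except queue.Empty:
            # Queue drained: this worker is done
            return
        try:
            restore_inter_deployment_dependencies(
                dep_id, update_service_composition, dep_tenant_name)
        except Exception:
            logger.exception(
                'Failed restoring inter deployment dependencies '
                'of {0}'.format(dep_id))
            failed_deployments_queue.put(dep_id)
        finally:
            deployments_queue.task_done()
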
def setup_flask_app(tenant_name):
    app = flask_utils.setup_flask_app()
    flask_utils.set_admin_current_user(app)
    tenant = flask_utils.get_tenant_by_name(tenant_name)
    flask_utils.set_tenant_in_app(tenant)

def setup_flask_app(tenant_name):
    app = flask_utils.setup_flask_app()
    config.instance.load_configuration()
    flask_utils.set_admin_current_user(app)
    tenant = flask_utils.get_tenant_by_name(tenant_name)
    flask_utils.set_tenant_in_app(tenant)

def main(tenant_names, all_tenants, plugin_names, blueprint_ids,
         mapping_file, correct):
    """Scan blueprints for plugin version mappings, or correct them using a
    previously saved mapping file.
    """
    def update_suggestions(new_suggestion: dict):
        for plugin_name, plugin_version in new_suggestion.items():
            if plugin_name not in install_suggestions:
                install_suggestions[plugin_name] = []
            if plugin_version not in install_suggestions[plugin_name]:
                install_suggestions[plugin_name].append(plugin_version)

    if all_tenants and tenant_names:
        print('--all-tenants and --tenant-name options are mutually exclusive')
        exit(1)
    if not tenant_names:
        tenant_names = (DEFAULT_TENANT, )

    setup_environment()
    set_tenant_in_app(get_tenant_by_name(DEFAULT_TENANT))
    sm = get_storage_manager()

    # Prepare the resolver
    cloudify_section = sm.get(models.ProviderContext, PROVIDER_CONTEXT_ID).\
        context.get(CLOUDIFY, {})
    resolver_section = cloudify_section.get(IMPORT_RESOLVER_KEY, {})
    resolver_section.setdefault(
        'implementation',
        'manager_rest.'
        'resolver_with_catalog_support:ResolverWithCatalogSupport')
    resolver = dsl_parser_utils.create_import_resolver(resolver_section)

    if all_tenants:
        tenants = sm.list(models.Tenant, get_all_results=True)
    else:
        tenants = [get_tenant_by_name(name) for name in tenant_names]

    blueprint_filter = {'tenant': None}
    if blueprint_ids:
        blueprint_filter['id'] = blueprint_ids
    mappings = load_mappings(mapping_file) if correct else {}
    stats, install_suggestions, blueprints_processed = {}, {}, 0

    for tenant in tenants:
        set_tenant_in_app(get_tenant_by_name(tenant.name))
        sm = get_storage_manager()
        blueprint_filter['tenant'] = tenant
        blueprints = sm.list(models.Blueprint,
                             filters=blueprint_filter,
                             get_all_results=True)
        for blueprint in blueprints:
            print('Processing blueprint of {0}: {1}'.format(
                tenant.name, blueprint.id))
            blueprint_identifier = '{0}::{1}'.format(tenant.name,
                                                     blueprint.id)
            if correct:
                try:
                    result = correct_blueprint(
                        blueprint, plugin_names,
                        mappings.get(tenant.name, {}).get(blueprint.id))
                except UpdateException as ex:
                    print(ex)
                else:
                    blueprints_processed += 1
                    if result not in stats:
                        stats[result] = [blueprint_identifier]
                    else:
                        stats[result].append(blueprint_identifier)
            else:
                try:
                    a_mapping, a_statistic, a_suggestion = \
                        scan_blueprint(resolver, blueprint, plugin_names)
                except UpdateException as ex:
                    print(ex)
                else:
                    blueprints_processed += 1
                    if a_mapping:
                        if tenant.name not in mappings:
                            mappings[tenant.name] = {}
                        mappings[tenant.name][blueprint.id] = a_mapping
                    if a_statistic:
                        stats[blueprint_identifier] = a_statistic
                    if a_suggestion:
                        update_suggestions(a_suggestion)

    # Wrap it up
    if correct:
        printout_correction_stats(stats)
    else:
        with open(mapping_file, 'w') as output_file:
            yaml.dump(mappings, output_file, default_flow_style=False)
        chmod(mapping_file, 0o644)
        print('\nSaved mapping file to {0}'.format(mapping_file))
        printout_scanning_stats(blueprints_processed, mappings, stats)
        printout_install_suggestions(install_suggestions)
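
# A hypothetical command-line entry point for the scan/correct flow above.
# Only --all-tenants and --tenant-name are implied by the error message in
# main(); the remaining flag names, the default mapping-file path and this
# argparse wiring are assumptions, sketched to show how main()'s parameters
# could be populated.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(
        description='Scan blueprints for plugin usage, or correct them '
                    'using a previously generated mapping file')
    parser.add_argument('--tenant-name', action='append',
                        dest='tenant_names',
                        help='Tenant to process; may be given multiple times')
    parser.add_argument('--all-tenants', action='store_true',
                        help='Process the blueprints of all tenants')
    parser.add_argument('--plugin-name', action='append',
                        dest='plugin_names',
                        help='Restrict processing to these plugins')
    parser.add_argument('--blueprint-id', action='append',
                        dest='blueprint_ids',
                        help='Restrict processing to these blueprint IDs')
    parser.add_argument('--mapping-file', default='mapping.yaml',
                        help='Path of the mapping file to write or read')
    parser.add_argument('--correct', action='store_true',
                        help='Apply corrections from the mapping file '
                             'instead of scanning')
    args = parser.parse_args()
    main(args.tenant_names, args.all_tenants, args.plugin_names,
         args.blueprint_ids, args.mapping_file, args.correct)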