class SampleDslProject(Project):
    """Sample DSL Project with environments"""

    # Accounts whitelisted for this project; the NTNX account carries the
    # subnet whitelist (subnet + owning cluster pairs).
    providers = [
        Provider.Ntnx(
            account=Ref.Account(NTNX_ACCOUNT_1_NAME),
            subnets=[
                Ref.Subnet(
                    name=NTNX_ACCOUNT_1_SUBNET_2,
                    cluster=NTNX_ACCOUNT_1_SUBNET_2_CLUSTER,
                ),
                Ref.Subnet(
                    name=NTNX_ACCOUNT_1_SUBNET_1,
                    cluster=NTNX_ACCOUNT_1_SUBNET_1_CLUSTER,
                ),
            ],
        ),
        Provider.Gcp(account=Ref.Account(GCP_ACCOUNT_NAME)),
        Provider.Vmware(account=Ref.Account(VMWARE_ACCOUNT_NAME)),
        Provider.K8s(account=Ref.Account(K8S_ACCOUNT_NAME)),
        Provider.Aws(account=Ref.Account(AWS_ACCOUNT_NAME)),
        Provider.Azure(account=Ref.Account(AZURE_ACCOUNT_NAME)),
    ]

    # Consistency fix: other project classes in this file pass the user name
    # as a keyword (Ref.User(name=...)); do the same here.
    users = [Ref.User(name=USER_NAME)]

    # Project-level resource quotas.
    quotas = {
        "vcpus": 1,
        "storage": 2,
        "memory": 1,
    }
class TestDslProjectWithEnv(Project):
    """Sample DSL Project with environments"""

    # One provider per account type; the NTNX account also whitelists a subnet.
    providers = [
        Provider.Ntnx(
            account=Ref.Account("NTNX_LOCAL_AZ"),
            subnets=[Ref.Subnet(name="vlan.0", cluster="calmdev1")],
        ),
        Provider.Aws(account=Ref.Account("AWS account")),
        Provider.Azure(account=Ref.Account("AZURE_account")),
        Provider.Gcp(account=Ref.Account("GCP Account")),
        Provider.Vmware(account=Ref.Account("Vmware Account")),
        Provider.K8s(account=Ref.Account("K8S_account_basic_auth")),
    ]

    # Users and directory groups granted access to the project.
    users = [Ref.User(name="*****@*****.**")]
    groups = [Ref.Group(name="cn=sspgroup1,ou=pc,dc=systest,dc=nutanix,dc=com")]

    # Environments attached to this project.
    envs = [ProjEnvironment]

    # Project-level resource quotas.
    quotas = {"vcpus": 1, "storage": 2, "memory": 1}
class DSL_PROJECT(Project):
    """Test project"""

    # Single NTNX provider account with one whitelisted subnet.
    providers = [
        Provider.Ntnx(
            account=Ref.Account(ACCOUNT_NAME),
            subnets=[Ref.Subnet(name=SUBNET_NAME, cluster=CLUSTER_NAME)],
        )
    ]

    # Project-level resource quotas.
    quotas = {
        "vcpus": VCPUS,
        "storage": STORAGE,
        "memory": MEMORY,
    }
class TestDemoProject(Project):
    """Test project"""

    # Provider account plus subnet whitelist.
    providers = [
        Provider.Ntnx(
            account=Ref.Account(ACCOUNT),
            subnets=[Ref.Subnet(name=SUBNET, cluster=CLUSTER)],
        )
    ]

    # Access-control members.
    users = [Ref.User(name=USER)]
    groups = [Ref.Group(name=GROUP)]

    # Project-level resource quotas.
    quotas = {
        "vcpus": VCPUS,
        "storage": STORAGE,
        "memory": MEMORY,
    }
class ProjEnvironment2(Environment):
    # Substrates and credentials available inside this environment.
    substrates = [AhvWindowsVmSubstrate]
    credentials = [Centos]

    # Environment-scoped provider accounts; the NTNX account whitelists
    # a single subnet.
    providers = [
        Provider.Ntnx(
            account=Ref.Account(NTNX_ACCOUNT_2_NAME),
            subnets=[
                Ref.Subnet(
                    name=NTNX_ACCOUNT_2_SUBNET_1,
                    cluster=NTNX_ACCOUNT_2_SUBNET_1_CLUSTER,
                )
            ],
        ),
        Provider.Gcp(account=Ref.Account(GCP_ACCOUNT_NAME)),
    ]
class AhvVmSmallProfile(VmProfile):
    """Small Ahv Vm Profile"""

    # Runtime-editable profile variable.
    nameserver = Var(DNS_SERVER, label="Local DNS resolver")

    # VM spec consumed by the underlying substrate.
    provider_spec = ahv_vm(name="SmallAhvVm", resources=SmallAhvVmResources)

    # Login check is enabled (disabled=False) using the Centos credential.
    readiness_probe = readiness_probe(disabled=False, credential=ref(Centos))

    environments = [Ref.Environment(name=ENV_NAME)]

    # Only actions under Packages, Substrates and Profiles are allowed.
    @action
    def __install__():
        Task.Exec.ssh(
            name="Task1",
            filename=os.path.join("scripts", "mysql_install_script.sh"),
        )

    @action
    def __pre_create__():
        Task.Exec.escript(name="Pre Create Task", script="print 'Hello!'")

    @action
    def test_profile_action():
        Task.Exec.ssh(name="Task9", script='echo "Hello"')
def delete_group(group_names):
    """Delete user-groups on PC and refresh the local user-group cache.

    Args:
        group_names (list[str]): names of the user-groups to delete.

    Raises:
        Exception: when the delete API call returns an error.

    Exits the process (-1) if a deletion task ends in a failure state.
    """
    client = get_api_client()

    for name in group_names:
        # Resolve name -> uuid through the reference helper.
        group_ref = Ref.Group(name)
        res, err = client.group.delete(group_ref["uuid"])
        if err:
            raise Exception("[{}] - {}".format(err["code"], err["error"]))

        LOG.info("Polling on user-group deletion task")
        res = res.json()
        task_state = watch_task(
            res["status"]["execution_context"]["task_uuid"], poll_interval=5
        )
        if task_state in ERGON_TASK.FAILURE_STATES:
            # BUGFIX: LOG.exception() is only meaningful inside an exception
            # handler; no exception is active here, so it would log a spurious
            # empty traceback. Use error() instead.
            LOG.error(
                "User-Group deletion task went to {} state".format(task_state)
            )
            sys.exit(-1)

    # Update user-groups in cache
    LOG.info("Updating user-groups cache ...")
    Cache.sync_table(cache_type=CACHE.ENTITY.USER_GROUP)
    LOG.info("[Done]")
class HelloProfile(Profile):
    # Deployments grouped under this profile.
    deployments = [HelloDeployment]

    # Snapshot/restore configs for app-level protection.
    restore_configs = [AppProtection.RestoreConfig("r1")]
    snapshot_configs = [AppProtection.SnapshotConfig("s1")]

    environments = [Ref.Environment(name="env1")]
class ProjEnvironment1(Environment):
    # Substrates and credentials available inside this environment.
    substrates = [AhvVmSubstrate]
    credentials = [Centos]

    # Environment-scoped provider accounts.
    providers = [
        Provider.Ntnx(
            account=Ref.Account(NTNX_ACCOUNT_1_NAME),
            subnets=[
                Ref.Subnet(
                    name=NTNX_ACCOUNT_1_SUBNET_1,
                    cluster=NTNX_ACCOUNT_1_SUBNET_1_CLUSTER,
                )
            ],
        ),
        Provider.Aws(account=Ref.Account(AWS_ACCOUNT_NAME)),
        Provider.Azure(account=Ref.Account(AZURE_ACCOUNT_NAME)),
    ]
class AhvVmSubstrate(Substrate):
    """AHV VM restored from a recovery point"""

    # BUGFIX(docs): the previous docstring claimed the config was "given by
    # reading a spec file", but this substrate actually builds a VM recovery
    # spec from a recovery point reference.
    provider_type = "AHV_VM"
    vm_recovery_spec = ahv_vm_recovery_spec(
        recovery_point=Ref.RecoveryPoint(name=VM_RECOVERY_POINT_NAME),
        vm_name="AhvRestoredVm",
        vm_override_resources=MyAhvVmResources,
    )
class SampleDslEnvironment(Environment):
    # Substrates and credentials available inside this environment.
    substrates = [AhvVmSubstrate]
    credentials = [Centos]

    # Environment-scoped provider accounts; the NTNX account whitelists
    # one subnet.
    providers = [
        Provider.Ntnx(
            account=Ref.Account(NTNX_ACCOUNT_1_NAME),
            subnets=[
                Ref.Subnet(
                    name=NTNX_ACCOUNT_1_SUBNET_1,
                    cluster=NTNX_ACCOUNT_1_SUBNET_1_CLUSTER,
                )
            ],
        ),
        Provider.Aws(account=Ref.Account(AWS_ACCOUNT_NAME)),
        Provider.Gcp(account=Ref.Account(GCP_ACCOUNT_NAME)),
        Provider.Vmware(account=Ref.Account(VMWARE_ACCOUNT_NAME)),
    ]
def delete_group(group_names):
    """deletes user-group on pc"""
    client = get_api_client()

    for group_name in group_names:
        # Resolve name -> uuid through the reference helper, then delete.
        uuid = Ref.Group(group_name)["uuid"]
        res, err = client.group.delete(uuid)
        if err:
            raise Exception("[{}] - {}".format(err["code"], err["error"]))
        LOG.info("Group '{}' deleted".format(group_name))

    # Cache is not refreshed here; remind the operator.
    LOG.warning("Please update cache.")
class SampleDslProject(Project):
    """
    Sample DSL Project with environments

    NOTE: AWS account is added to environment module and not to project module,
    By default project command will also attach the accounts attached
    to any environment to this project
    """

    providers = [
        Provider.Ntnx(
            account=Ref.Account(NTNX_ACCOUNT_NAME),
            subnets=[
                Ref.Subnet(
                    name=NTNX_SUBNET,
                    cluster=NTNX_SUBNET_CLUSTER,
                )
            ],
        ),
        Provider.Gcp(account=Ref.Account(GCP_ACCOUNT_NAME)),
        Provider.Vmware(account=Ref.Account(VMWARE_ACCOUNT_NAME)),
        Provider.K8s(account=Ref.Account(K8S_ACCOUNT_NAME)),
    ]

    # Consistency fix: pass the user name as a keyword (Ref.User(name=...))
    # like the other project classes in this file.
    users = [Ref.User(name=USER)]

    # Attached environments; the first one is marked as the default.
    envs = [ProjEnvironment1]
    default_environment = ref(ProjEnvironment1)

    # Project-level resource quotas.
    quotas = {
        "vcpus": 1,
        "storage": 2,
        "memory": 1,
    }
def create_user(name, directory_service):
    """Create a directory-service user on PC and wait for the creation task.

    Args:
        name (str): user principal name of the user to create.
        directory_service (str): name of the directory service the user
            belongs to.

    Exits the process (-1) on any API error, on a duplicate user, or when
    the creation task ends in a failure state. On success, refreshes the
    local users cache.
    """
    client = get_api_client()

    params = {"length": 1000}
    user_name_uuid_map = client.user.get_name_uuid_map(params)
    # BUGFIX: the duplicate check previously looked up the literal key
    # "name" instead of the supplied user name, so it never fired.
    if user_name_uuid_map.get(name):
        LOG.error("User with name {} already exists".format(name))
        sys.exit(-1)

    user_payload = {
        "spec": {
            "resources": {
                "directory_service_user": {
                    "user_principal_name": name,
                    "directory_service_reference": Ref.DirectoryService(
                        directory_service
                    ),
                }
            }
        },
        "metadata": {"kind": "user", "spec_version": 0},
    }

    res, err = client.user.create(user_payload)
    if err:
        LOG.error(err)
        sys.exit(-1)

    res = res.json()
    stdout_dict = {
        "name": name,
        "uuid": res["metadata"]["uuid"],
        "execution_context": res["status"]["execution_context"],
    }
    click.echo(json.dumps(stdout_dict, indent=4, separators=(",", ": ")))

    LOG.info("Polling on user creation task")
    task_state = watch_task(
        res["status"]["execution_context"]["task_uuid"], poll_interval=5
    )
    if task_state in ERGON_TASK.FAILURE_STATES:
        # BUGFIX: not inside an except block, so LOG.error (exception()
        # would log a spurious empty traceback).
        LOG.error("User creation task went to {} state".format(task_state))
        sys.exit(-1)

    # Update users in cache
    LOG.info("Updating users cache ...")
    Cache.sync_table(cache_type=CACHE.ENTITY.USER)
    LOG.info("[Done]")
class AhvVmProfile2(Profile):
    """Sample application profile with variables"""

    # Profile variables; foo1/foo2 are editable at runtime.
    nameserver = CalmVariable.Simple("10.40.64.15", label="Local DNS resolver")
    foo1 = CalmVariable.Simple("bar1", runtime=True)
    foo2 = CalmVariable.Simple("bar2", runtime=True)

    deployments = [AhvVmDeployment2]
    environments = [Ref.Environment(name=ENV_NAME)]

    @action
    def test_profile_action():
        """Sample description for a profile action"""
        CalmTask.Exec.ssh(
            name="Task5", script='echo "Hello"', target=ref(AhvVmService)
        )
class DefaultProfile(Profile):
    """Sample application profile with variables"""

    # Profile variables; foo1/foo2 are editable at runtime.
    nameserver = CalmVariable.Simple(DNS_SERVER, label="Local DNS resolver")
    foo1 = CalmVariable.Simple("bar1", runtime=True)
    foo2 = CalmVariable.Simple("bar2", runtime=True)

    deployments = [MySQLDeployment, PHPDeployment]
    environments = [Ref.Environment(name=ENV_NAME)]

    @action
    def test_profile_action():
        """Sample description for a profile action"""
        # First run a shell task on the MySQL service, then invoke the
        # PHP service's own action.
        CalmTask.Exec.ssh(
            name="Task5", script='echo "Hello"', target=ref(MySQLService)
        )
        PHPService.test_action(name="Task6")
class TestDslProject(Project):
    """Sample DSL Project"""

    # One provider per account type; the NTNX account whitelists a subnet.
    providers = [
        Provider.Ntnx(
            account=Ref.Account(NTNX_ACCOUNT_NAME),
            subnets=[Ref.Subnet(name=NTNX_SUBNET, cluster=NTNX_SUBNET_CLUSTER)],
        ),
        Provider.Aws(account=Ref.Account(AWS_ACCOUNT_NAME)),
        Provider.Azure(account=Ref.Account(AZURE_ACCOUNT_NAME)),
        Provider.Gcp(account=Ref.Account(GCP_ACCOUNT_NAME)),
        Provider.Vmware(account=Ref.Account(VMWARE_ACCOUNT_NAME)),
        Provider.K8s(account=Ref.Account(K8S_ACCOUNT_NAME)),
    ]

    # Users granted access to the project.
    users = [Ref.User(name=USER_NAME)]

    # Project-level resource quotas.
    quotas = {
        "vcpus": 1,
        "storage": 2,
        "memory": 1,
    }
class HelloProfile(Profile):
    # Deployments grouped under this profile.
    deployments = [HelloDeployment]

    # Restore config targets the deployment; snapshot config carries its
    # own protection policy/rule.
    restore_configs = [
        AppProtection.RestoreConfig("r1", target=ref(HelloDeployment))
    ]
    snapshot_configs = [
        AppProtection.SnapshotConfig(
            "s1",
            policy=AppProtection.ProtectionPolicy(
                "p221", rule_name="rule_0bb6745f8a104e5e3791bbfd7413f5d5"
            ),
        )
    ]

    environments = [Ref.Environment(name="env1")]

    @action
    def custom_action():
        # Runs the first restore config defined on this profile.
        Task.ConfigExec(
            config=ref(HelloProfile.restore_configs[0]),
            name="Execute restore config task",
        )
def create_user(name, directory_service):
    """Create a directory-service user on PC and echo the task details.

    Args:
        name (str): user principal name of the user to create.
        directory_service (str): name of the directory service the user
            belongs to.

    Exits the process (-1) on any API error or if the user already exists.
    Unlike the polling variant, this does not wait for the creation task.
    """
    client = get_api_client()

    params = {"length": 1000}
    user_name_uuid_map = client.user.get_name_uuid_map(params)
    # BUGFIX: the duplicate check previously looked up the literal key
    # "name" instead of the supplied user name, so it never fired.
    if user_name_uuid_map.get(name):
        LOG.error("User with name {} already exists".format(name))
        sys.exit(-1)

    user_payload = {
        "spec": {
            "resources": {
                "directory_service_user": {
                    "user_principal_name": name,
                    "directory_service_reference": Ref.DirectoryService(
                        directory_service
                    ),
                }
            }
        },
        "metadata": {"kind": "user", "spec_version": 0},
    }

    res, err = client.user.create(user_payload)
    if err:
        LOG.error(err)
        sys.exit(-1)

    res = res.json()
    stdout_dict = {
        "name": name,
        "uuid": res["metadata"]["uuid"],
        "execution_context": res["status"]["execution_context"],
    }
    click.echo(json.dumps(stdout_dict, indent=4, separators=(",", ": ")))
class BpMetadata(Metadata):
    # Attach the blueprint to the configured project.
    project = Ref.Project(PROJECT_NAME)
class AhvVmSubstrate2(Substrate):
    """AHV VM config given by reading a spec file"""

    # VM spec class plus the provider account it is deployed under.
    provider_spec = MyAhvVm2
    account = Ref.Account(ACCOUNT_NAME)
class SingleVmBpMetadata(Metadata):
    # Attach the blueprint to the configured project.
    project = Ref.Project(PROJECT_NAME)
class SingleVmBlueprint(SimpleBlueprint):
    """Single VM blueprint"""

    # Credentials, deployments and environments wired into the blueprint.
    credentials = [Centos]
    deployments = [VmDeployment]
    environments = [Ref.Environment(name=ENV_NAME)]
class SingleVmBpMetadata(Metadata):
    # Attach the blueprint to the configured project and tag its template type.
    project = Ref.Project(PROJECT_NAME)
    categories = {"TemplateType": "Vm"}
def create_acp(role, project, acp_users, acp_groups, name):
    """Create an access control policy (ACP) binding a role to users/groups
    inside a project.

    Args:
        role (str): role name (e.g. "Project Admin", "Developer", "Consumer",
            "Operator") — looked up in the local role cache.
        project (str): name of the project the ACP is scoped to.
        acp_users (list[str]): user names to include; must already be
            registered in the project.
        acp_groups (list[str]): user-group names to include; must already be
            registered in the project.
        name (str): ACP name; when falsy, a "nuCalmAcp-<uuid4>" name is
            generated.

    Returns:
        (None, err) when an ACP list call fails; otherwise no explicit
        return (the ACP is created via a projects_internal update and the
        task is watched).

    Exits the process (-1) on validation failures, duplicate ACPs, or
    API errors.
    """
    if not (acp_users or acp_groups):
        LOG.error("Atleast single user/group should be given")
        sys.exit(-1)

    client = get_api_client()
    acp_name = name or "nuCalmAcp-{}".format(str(uuid.uuid4()))

    # Check whether there is an existing acp with this name
    params = {"filter": "name=={}".format(acp_name)}
    res, err = client.acp.list(params=params)
    if err:
        return None, err

    response = res.json()
    entities = response.get("entities", None)
    if entities:
        LOG.error("ACP {} already exists.".format(acp_name))
        sys.exit(-1)

    # Resolve the project name to its uuid.
    params = {"length": 1000}
    project_name_uuid_map = client.project.get_name_uuid_map(params)
    project_uuid = project_name_uuid_map.get(project, "")
    if not project_uuid:
        LOG.error("Project '{}' not found".format(project))
        sys.exit(-1)

    LOG.info("Fetching project '{}' details".format(project))
    ProjectInternalObj = get_resource_api("projects_internal", client.connection)
    res, err = ProjectInternalObj.read(project_uuid)
    if err:
        LOG.error(err)
        sys.exit(-1)

    # The status section must be stripped before sending the payload back
    # on update.
    project_payload = res.json()
    project_payload.pop("status", None)
    project_resources = project_payload["spec"]["project_detail"].get("resources", "")

    # Check if users are present in project
    project_users = []
    for user in project_resources.get("user_reference_list", []):
        project_users.append(user["name"])

    if not set(acp_users).issubset(set(project_users)):
        LOG.error(
            "Users {} are not registered in project".format(
                set(acp_users).difference(set(project_users))
            )
        )
        sys.exit(-1)

    # Check if groups are present in project
    project_groups = []
    for group in project_resources.get("external_user_group_reference_list", []):
        project_groups.append(group["name"])

    if not set(acp_groups).issubset(set(project_groups)):
        LOG.error(
            "Groups {} are not registered in project".format(
                set(acp_groups).difference(set(project_groups))
            )
        )
        sys.exit(-1)

    role_cache_data = Cache.get_entity_data(entity_type="role", name=role)
    role_uuid = role_cache_data["uuid"]

    # Check if there is an existing acp with given (project-role) tuple
    params = {
        "length": 1000,
        "filter": "role_uuid=={};project_reference=={}".format(role_uuid, project_uuid),
    }
    res, err = client.acp.list(params)
    if err:
        return None, err

    response = res.json()
    entities = response.get("entities", None)
    if entities:
        LOG.error(
            "ACP {} already exists for given role in project".format(
                entities[0]["status"]["name"]
            )
        )
        sys.exit(-1)

    # Constructing ACP payload --------

    # Getting the cluster uuids for acp: gather every subnet whitelisted on
    # the project (both internal and external), then map each to its
    # owning cluster via the local subnet cache.
    whitelisted_subnets = []
    for subnet in project_resources.get("subnet_reference_list", []):
        whitelisted_subnets.append(subnet["uuid"])

    for subnet in project_resources.get("external_network_list", []):
        whitelisted_subnets.append(subnet["uuid"])

    cluster_uuids = []
    for subnet_uuid in whitelisted_subnets:
        subnet_cache_data = Cache.get_entity_data_using_uuid(
            entity_type="ahv_subnet", uuid=subnet_uuid
        )
        cluster_uuids.append(subnet_cache_data["cluster_uuid"])

    # Default context for acp
    # NOTE(review): this mutates the shared ACP.DEFAULT_CONTEXT object in
    # place rather than a copy — subsequent callers in the same process see
    # this project's uuid; confirm whether that is intended.
    default_context = ACP.DEFAULT_CONTEXT

    # Setting project uuid in default context
    default_context["scope_filter_expression_list"][0]["right_hand_side"][
        "uuid_list"
    ] = [project_uuid]

    # Role specific filters
    entity_filter_expression_list = []
    if role == "Project Admin":
        entity_filter_expression_list = (
            ACP.ENTITY_FILTER_EXPRESSION_LIST.PROJECT_ADMIN
        )
        # TODO remove index bases searching
        entity_filter_expression_list[4]["right_hand_side"]["uuid_list"] = [
            project_uuid
        ]

    elif role == "Developer":
        entity_filter_expression_list = ACP.ENTITY_FILTER_EXPRESSION_LIST.DEVELOPER

    elif role == "Consumer":
        entity_filter_expression_list = ACP.ENTITY_FILTER_EXPRESSION_LIST.CONSUMER

    elif role == "Operator" and cluster_uuids:
        # Operator reuses the CONSUMER filter list; the cluster filter is
        # appended below.
        entity_filter_expression_list = ACP.ENTITY_FILTER_EXPRESSION_LIST.CONSUMER

    if cluster_uuids:
        entity_filter_expression_list.append(
            {
                "operator": "IN",
                "left_hand_side": {"entity_type": "cluster"},
                "right_hand_side": {"uuid_list": cluster_uuids},
            }
        )

    # TODO check these users are not present in project's other acps
    user_references = []
    user_name_uuid_map = client.user.get_name_uuid_map({"length": 1000})
    for u in acp_users:
        user_references.append(
            {"kind": "user", "name": u, "uuid": user_name_uuid_map[u]}
        )

    usergroup_name_uuid_map = client.group.get_name_uuid_map({"length": 1000})
    group_references = []
    for g in acp_groups:
        group_references.append(
            {"kind": "user_group", "name": g, "uuid": usergroup_name_uuid_map[g]}
        )

    context_list = [default_context]
    if entity_filter_expression_list:
        context_list.append(
            {"entity_filter_expression_list": entity_filter_expression_list}
        )

    acp_payload = {
        "acp": {
            "name": acp_name,
            "resources": {
                "role_reference": Ref.Role(role),
                "user_reference_list": user_references,
                "user_group_reference_list": group_references,
                "filter_list": {"context_list": context_list},
            },
        },
        "metadata": {"kind": "access_control_policy"},
        "operation": "ADD",
    }

    # Appending acp payload to project: existing ACPs are re-sent with
    # operation UPDATE, the new one carries operation ADD.
    acp_list = project_payload["spec"].get("access_control_policy_list", [])
    for _acp in acp_list:
        _acp["operation"] = "UPDATE"

    acp_list.append(acp_payload)
    project_payload["spec"]["access_control_policy_list"] = acp_list

    LOG.info("Creating acp {}".format(acp_name))
    res, err = ProjectInternalObj.update(project_uuid, project_payload)
    if err:
        LOG.error(err)
        sys.exit(-1)

    res = res.json()
    stdout_dict = {
        "name": acp_name,
        "execution_context": res["status"]["execution_context"],
    }
    click.echo(json.dumps(stdout_dict, indent=4, separators=(",", ": ")))

    LOG.info("Polling on acp creation task")
    watch_task(res["status"]["execution_context"]["task_uuid"])