def run(**kwargs):
    # set the security message
    gp = GlobalPreferences.get()
    gp.security_message = "{{ message }}"
    gp.save()
    
    # set the security banner's background color on the default portal
    default_portal = PortalConfig.get_current_portal()
    default_portal.security_bg_color = "{{ message_background_color_code }}"
    default_portal.save()
    
    return "", "", ""
def run(*args, **kwargs):
    only_change_default_portal = "{{ only_change_default_portal }}" == "True"

    if only_change_default_portal:
        set_progress("Changing colors only for the default portal")
        # get_current_portal() is used here not to get the current user's portal (since this is
        # run in the context of an asynchronous job, there is no current portal), but instead to
        # get the default portal.
        portals = [PortalConfig.get_current_portal()]
    else:
        set_progress("Changing colors only for all portals")
        portals = PortalConfig.objects.all()

    for portal in portals:
        set_random_colors(portal)

    return "SUCCESS", "Portal colors changed", ""
def run(job=None, logger=None, **kwargs):
    """
    Create a cluster, poll until the IP address becomes available, and import
    the cluster into CloudBolt.
    """
    environment = Environment.objects.get(id=ENV_ID)
    gcp_zone = CustomFieldValue.objects.get(id=GCP_ZONE_ID).value

    # Save cluster data on the resource so teardown works later
    create_required_parameters()
    resource = kwargs['resource']
    resource.create_gke_k8s_cluster_project = environment.id
    resource.gcp_zone = gcp_zone
    resource.create_gke_k8s_cluster_name = CLUSTER_NAME
    resource.name = CLUSTER_NAME
    resource.save()

    job.set_progress('Connecting to GKE...')
    builder = GKEClusterBuilder(environment, gcp_zone, CLUSTER_NAME)

    job.set_progress(
        'Sending request for new cluster {}...'.format(CLUSTER_NAME))
    builder.create_cluster(NODE_COUNT)

    job.set_progress(
        'Waiting up to {} seconds for provisioning to complete.'.format(
            TIMEOUT))
    start = time.time()
    job.set_progress('Waiting for cluster IP address...')
    endpoint = builder.wait_for_endpoint(timeout=TIMEOUT)
    if not endpoint:
        return ("FAILURE",
                "No IP address returned after {} seconds".format(TIMEOUT), "")

    remaining_time = TIMEOUT - (time.time() - start)
    job.set_progress('Waiting for nodes to report hostnames...')
    nodes = builder.wait_for_nodes(NODE_COUNT, timeout=remaining_time)
    if len(nodes) < NODE_COUNT:
        return ("FAILURE",
                "Nodes are not ready after {} seconds".format(TIMEOUT), "")

    job.set_progress('Importing cluster...')
    cluster = builder.get_cluster()
    tech = ContainerOrchestratorTechnology.objects.get(name='Kubernetes')
    kubernetes = Kubernetes.objects.create(
        name=CLUSTER_NAME,
        ip=cluster['endpoint'],
        port=443,
        protocol='https',
        serviceaccount=cluster['masterAuth']['username'],
        servicepasswd=cluster['masterAuth']['password'],
        container_technology=tech,
    )
    resource.create_gke_k8s_cluster_id = kubernetes.id
    resource.save()
    url = 'https://{}{}'.format(
        PortalConfig.get_current_portal().domain,
        reverse('container_orchestrator_detail', args=[kubernetes.id]))
    job.set_progress("Cluster URL: {}".format(url))

    job.set_progress('Importing nodes...')
    for node in nodes:
        # Generate libcloud UUID from GCE ID
        id_unicode = '{}:{}'.format(node['id'], 'gce')
        uuid = hashlib.sha1(id_unicode.encode('utf-8')).hexdigest()
        # Create a barebones server record. Other details such as CPU and memory
        # size will be populated the next time the GCE resource handler is synced.
        Server.objects.create(
            hostname=node['name'],
            resource_handler_svr_id=uuid,
            environment=environment,
            resource_handler=environment.resource_handler,
            group=resource.group,
            owner=resource.owner,
        )

    job.set_progress('Waiting for cluster to report as running...')
    remaining_time = TIMEOUT - (time.time() - start)
    status = builder.wait_for_running_status(timeout=remaining_time)
    if status != 'RUNNING':
        return ("FAILURE",
                "Status is {} after {} seconds (expected RUNNING)".format(
                    status, TIMEOUT), "")

    return ("SUCCESS",
            "Cluster is ready and can be accessed at {}".format(url), "")
def run(job=None, logger=None, **kwargs):
    """
    Create a cluster, poll until the IP address becomes available, and import
    the cluster into CloudBolt.
    """

    # Save cluster data on the resource so teardown works later
    create_required_parameters()
    resource = kwargs['resource']
    resource.create_aks_k8s_cluster_env = environment.id
    resource.create_aks_k8s_cluster_name = CLUSTER_NAME
    resource.name = CLUSTER_NAME
    resource.save()

    get_credentials()
    get_service_profile()
    get_resource_client()
    get_container_client()

    job.set_progress("Creating Resource Group {}".format(resource_group))
    create_resource_group()

    job.set_progress("Creating Cluster {}".format(CLUSTER_NAME))
    create_cluster(NODE_COUNT)

    start = time.time()
    remaining_time = TIMEOUT - (time.time() - start)

    job.set_progress('Waiting up to {} seconds for provisioning to complete.'
                     .format(remaining_time))
    status = wait_for_running_status(timeout=remaining_time)
    job.set_progress("Configuring kubectl to connect to kubernetes cluster")

    # configure kubectl to connect to kubernetes cluster
    subprocess.run(['az', 'aks', 'get-credentials', '-g', resource_group, '-n', CLUSTER_NAME])
    start = time.time()

    config.load_kube_config()

    job.set_progress("Creating pod template container")
    api_instance = client.ExtensionsV1beta1Api()

    deployment = create_deployment_object()

    job.set_progress("Creating Deployment")
    create_deployment(api_instance, deployment)

    job.set_progress("Creating Service {}".format(service))
    create_service()

    job.set_progress("Waiting for cluster IP address...")
    endpoint = wait_for_endpoint(timeout=TIMEOUT)
    if not endpoint:
        return ("FAILURE", "No IP address returned after {} seconds".format(TIMEOUT),
                "")
    remaining_time = TIMEOUT - (time.time() - start)
    job.set_progress('Waiting for nodes to report hostnames...')

    nodes = wait_for_nodes(NODE_COUNT, timeout=remaining_time)
    if len(nodes) < NODE_COUNT:
        return ("FAILURE",
                "Nodes are not ready after {} seconds".format(TIMEOUT),
                "")

    job.set_progress('Importing cluster...')

    get_cluster()
    tech = ContainerOrchestratorTechnology.objects.get(name='Kubernetes')
    kubernetes = Kubernetes.objects.create(
        name=CLUSTER_NAME,
        ip=get_cluster_endpoint(),
        port=443,
        protocol='https',
        serviceaccount=handler.serviceaccount,
        servicepasswd=handler.secret,
        container_technology=tech,
    )

    resource.create_aks_k8s_cluster_id = kubernetes.id
    resource.save()
    url = 'https://{}{}'.format(
        PortalConfig.get_current_portal().domain,
        reverse('container_orchestrator_detail', args=[kubernetes.id])
    )
    job.set_progress("Cluster URL: {}".format(url))

    job.set_progress('Importing nodes...')

    job.set_progress('Waiting for cluster to report as running...')
    remaining_time = TIMEOUT - (time.time() - start)

    return ("SUCCESS","", "")
def run(job=None, logger=None, **kwargs):
    """
    Create a cluster, poll until the IP address becomes available, and import
    the cluster into CloudBolt.
    """
    cluster_env = AzureARMHandler.objects.first()
    # Save cluster data on the resource so teardown works later
    create_custom_fields()
    resource = kwargs['resource']

    resource.name = CLUSTER_NAME
    resource.aks_cluster_env = cluster_env.id
    resource.resource_group_name = resource_group
    resource.aks_cluster_name = CLUSTER_NAME
    resource.save()

    get_credentials()
    get_service_profile()
    get_resource_client()
    get_container_client()

    # Clusters can be created in existing resource groups.
    job.set_progress(
        "Checking if Resource Group Exists {}".format(resource_group))
    rg_client = get_resource_client()
    response = rg_client.resource_groups.check_existence(resource_group)
    if response:
        job.set_progress(
            "Resource Group: {} already exists. Cluster {} will be created in {}"
            .format(resource_group, CLUSTER_NAME, resource_group))

    job.set_progress("Creating Resource Group {}".format(resource_group))
    create_resource_group()

    # Check for an existing cluster in the resource group; creation fails if one exists.
    job.set_progress("Creating Cluster {}".format(CLUSTER_NAME))
    try:
        create_cluster(NODE_COUNT)
    except CloudError as e:
        if e.status_code == 409:
            return ("FAILURE",
                    "Cluster: {} conflicts with existing cluster ".format(
                        CLUSTER_NAME), "")

        raise

    start = time.time()
    remaining_time = TIMEOUT - (time.time() - start)

    job.set_progress(
        'Waiting up to {} seconds for provisioning to complete.'.format(
            remaining_time))
    status = wait_for_running_status()
    job.set_progress("Configuring kubectl to connect to kubernetes cluster")

    # configure kubectl to connect to kubernetes cluster
    subprocess.run([
        'az', 'aks', 'get-credentials', '-g', resource_group, '-n',
        CLUSTER_NAME
    ])
    start = time.time()

    config.load_kube_config()

    job.set_progress("Creating pod template container")
    api_instance = client.ExtensionsV1beta1Api()

    deployment = create_deployment_object()

    job.set_progress("Creating Deployment")
    create_deployment(api_instance, deployment)

    job.set_progress("Creating Service {}".format(service))
    create_service()

    job.set_progress("Waiting for cluster IP address...")
    endpoint = wait_for_endpoint()
    if not endpoint:
        return ("FAILURE", "No IP address returned", "")
    remaining_time = TIMEOUT - (time.time() - start)
    job.set_progress('Waiting for nodes to report hostnames...')

    nodes = wait_for_nodes(NODE_COUNT)
    if len(nodes) < NODE_COUNT:
        return ("FAILURE", "Nodes are not ready after {} seconds", "")

    job.set_progress('Importing cluster...')

    get_cluster()
    tech = ContainerOrchestratorTechnology.objects.get(name='Kubernetes')
    kubernetes = Kubernetes.objects.create(
        name=CLUSTER_NAME,
        ip=endpoint,
        port=443,
        protocol='https',
        serviceaccount=handler.serviceaccount,
        servicepasswd=handler.secret,
        container_technology=tech,
    )

    resource.aks_cluster_id = kubernetes.id
    resource.save()
    url = 'https://{}{}'.format(
        PortalConfig.get_current_portal().domain,
        reverse('container_orchestrator_detail', args=[kubernetes.id]))
    job.set_progress("Cluster URL: {}".format(url))

    job.set_progress('Importing nodes...')

    job.set_progress('Waiting for cluster to report as running...')
    remaining_time = TIMEOUT - (time.time() - start)

    return ("SUCCESS",
            "Cluster is ready and can be accessed at {}".format(url), "")