def run(job, *args, **kwargs):
    bp = ServiceBlueprint.objects.get(id=BLUEPRINT)
    set_progress(
        "Running Continuous Infrastructure Test for blueprint {}".format(bp))

    client = get_api_client()

    # Order the BP
    set_progress("### ORDERING BLUEPRINT ###", tasks_done=0, total_tasks=3)

    orders_ids = test_order_blueprint(client)
    for order_id in orders_ids:
        order = Order.objects.get(id=order_id)
        resource = order.orderitem_set.first().cast().get_resource()

        vpc_id = resource.vpc_id

        # Delete the resource from the database only
        resource.delete()
        set_progress("### DISCOVERING RESOURCES FOR BLUEPRINT ###",
                     tasks_done=1)
        bp.sync_resources()

        # should be able to get the resource since the sync should have created it
        resources = bp.resource_set.filter(lifecycle='ACTIVE', blueprint=bp)

        set_progress("### DELETING RESOURCE FOR BLUEPRINT ###", tasks_done=2)

        for resource in resources:
            if resource.vpc_id == vpc_id:
                test_delete_resource(client, resource)

    set_progress("ALL Tests completed!", tasks_done=3)
Example #2
def run(job, logger=None, **kwargs):
    rh_id = '{{ aws_rh }}'
    region = '{{ s3_region }}'
    new_bucket_name = '{{ s3_bucket_name_input }}'
    rh = AWSHandler.objects.get(id=rh_id)
    CustomField.objects.get_or_create(
        name='aws_rh_id',
        label='AWS RH ID',
        type='STR',
        description='Used by the AWS S3 Bucket blueprint')

    resource = kwargs.pop('resources').first()
    resource.name = 'S3 Bucket - ' + new_bucket_name
    # Store bucket name and region on this resource as attributes
    resource.s3_bucket_name = new_bucket_name
    resource.s3_bucket_region = region
    # Store the resource handler's ID on this resource so the teardown action
    # knows which credentials to use.
    resource.aws_rh_id = rh.id
    resource.save()

    set_progress('Connecting to Amazon S3')
    conn = boto3.resource(
        's3',
        region_name=region,
        aws_access_key_id=rh.serviceaccount,
        aws_secret_access_key=rh.servicepasswd,
    )

    set_progress('Create S3 bucket "{}"'.format(new_bucket_name))
    conn.create_bucket(
        Bucket=new_bucket_name,
        CreateBucketConfiguration={'LocationConstraint': region})

    return "", "", ""
Example #3
def discover_resources(**kwargs):

    discovered_azure_sql = []
    for handler in AzureARMHandler.objects.all():
        set_progress(
            'Connecting to Azure SQL DB for handler: {}'.format(handler))
        credentials = ServicePrincipalCredentials(client_id=handler.client_id,
                                                  secret=handler.secret,
                                                  tenant=handler.tenant_id)
        azure_client = SqlManagementClient(credentials, handler.serviceaccount)
        azure_resources_client = resources.ResourceManagementClient(
            credentials, handler.serviceaccount)

        for resource_group in azure_resources_client.resource_groups.list():
            try:
                for server in azure_client.servers.list_by_resource_group(
                        resource_group.name)._get_next().json()['value']:
                    discovered_azure_sql.append({
                        'name': server['name'],
                        'azure_server_name': server['name'],
                        'resource_group_name': resource_group.name,
                        'azure_rh_id': handler.id,
                    })
            except CloudError:
                continue

    return discovered_azure_sql
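
# The loop above pages through servers via the private _get_next() call; a
# sketch using the SDK's public iterator instead (assuming azure-mgmt-sql
# server models expose a .name attribute) could look like this.
def list_server_names(azure_client, resource_group_name):
    """Yield SQL server names in a resource group via the public paged iterator."""
    for server in azure_client.servers.list_by_resource_group(resource_group_name):
        yield server.name
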
def run(job, group, logger=None):
    roles_to_sync = [
        'user_admins', 'resource_admins', 'approvers', 'requestors', 'viewers'
    ]

    if group.parent is None:
        set_progress("No parent group found to sync users from.", job)
        return "", "", ""

    set_progress("Syncing users from parent group {}".format(group.parent),
                 job)

    # Pre-7.2 version
    for role in roles_to_sync:
        group_role_members = getattr(group, role)
        parent_role_members = getattr(group.parent, role)
        for user in parent_role_members.all():
            group_role_members.add(user)

    # 7.2+ version
    # for role in roles_to_sync:
    #     parent_role_memberships = group.parent.grouprolemembership_set.filter(role=role)
    #     for membership in parent_role_memberships:
    #         membership.profile.add_role_for_group(role, group)

    return "", "", ""
Example #5
def run(job, *args, **kwargs):

    # 1. Pull params from previous blueprint
    rh_id = '{{ blueprint_context.fancy_aws_s3_bucket.create_secure_s3_bucket.aws_rh }}'
    region = '{{ blueprint_context.fancy_aws_s3_bucket.create_secure_s3_bucket.s3_region }}'
    s3_bucket_name = '{{blueprint_context.fancy_aws_s3_bucket.create_secure_s3_bucket.s3_bucket_name_input}}'

    # 2. Connect to bucket
    set_progress("Connecting to Amazon S3... rh[" + rh_id + "] region[" +
                 region + "]")
    rh = AWSHandler.objects.get(id=rh_id)
    s3_client = boto3.client('s3',
                             region_name=region,
                             aws_access_key_id=rh.serviceaccount,
                             aws_secret_access_key=rh.servicepasswd)

    # 3. Set website config
    website_configuration = {
        'ErrorDocument': {
            'Key': 'error.html'
        },
        'IndexDocument': {
            'Suffix': 'index.html'
        },
    }
    s3_client.put_bucket_website(Bucket=s3_bucket_name,
                                 WebsiteConfiguration=website_configuration)

    # 4. Build CloudFront Entries?!

    if True:
        return "SUCCESS", "CloudFront Config Successfully laid in", ""
    else:
        return "FAILURE", "Failure applying CloudFront config", ""
Example #6
def run(resource, logger=None, **kwargs):
    env_id = '{{ env_id }}'
    env = Environment.objects.get(id=env_id)
    region = env.aws_region
    rh = env.resource_handler.cast()
    wrapper = rh.get_api_wrapper()
    cluster_name = '{{ cluster_name }}'

    create_custom_fields()

    resource.name = cluster_name
    resource.cluster_name = cluster_name
    resource.aws_region = region
    # Store the resource handler's ID on this resource so the teardown action
    # knows which credentials to use.
    resource.aws_rh_id = rh.id
    resource.env_id = env_id
    resource.save()

    set_progress('Connecting to Amazon EC2 Cluster Service')
    ecs = wrapper.get_boto3_client('ecs', rh.serviceaccount, rh.servicepasswd,
                                   region)

    set_progress(
        'Create EC2 Cluster Service cluster "{}"'.format(cluster_name))

    try:
        ecs.create_cluster(clusterName=cluster_name)
    except Exception as err:
        return "FAILURE", "", err

    return "SUCCESS", "Created service cluster successfully", ""
Example #7
def _get_clients(handler):
    """
    Get the clients using newer methods from the CloudBolt main repo if this CB is running
    a version greater than 9.2.1. These internal methods implicitly take care of much of the other
    features in CloudBolt such as proxy and ssl verification.
    Otherwise, manually instantiate clients without support for those other CloudBolt settings.
    :param handler:
    :return:
    """
    import settings
    from common.methods import is_version_newer

    set_progress("Connecting To Azure...")

    cb_version = settings.VERSION_INFO["VERSION"]
    if is_version_newer(cb_version, "9.2.1"):
        from resourcehandlers.azure_arm.azure_wrapper import configure_arm_client

        wrapper = handler.get_api_wrapper()
        web_client = configure_arm_client(wrapper, WebSiteManagementClient)
        resource_client = wrapper.resource_client
    else:
        # TODO: Remove once versions <= 9.2.1 are no longer supported.
        credentials = ServicePrincipalCredentials(
            client_id=handler.client_id,
            secret=handler.secret,
            tenant=handler.tenant_id,
        )
        web_client = WebSiteManagementClient(credentials, handler.serviceaccount)
        resource_client = ResourceManagementClient(credentials, handler.serviceaccount)

    set_progress("Connection to Azure established")

    return web_client, resource_client
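
# Possible usage of the helper above (illustrative; assumes the azure-mgmt-web
# client exposes a web_apps.list() operation returning sites with a .name).
def list_web_app_names(handler):
    """Return the names of Web Apps visible to this handler's credentials."""
    web_client, _resource_client = _get_clients(handler)
    return [site.name for site in web_client.web_apps.list()]
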
Example #8
def run(job, **kwargs):
    # User enters this value in the service action dialog.
    param_value = '{{ param_value }}'

    logger = kwargs['logger']  # use this to send text to the job log file
    job_params = job.job_parameters.cast()
    service = job_params.services.first()

    set_progress(
        'Setting parameter "{}" = "{}" on all servers in service {}'.format(
            PARAM_NAME, param_value, service))

    # Get all servers
    # servers = service.server_set.all()
    # --- or ---
    # Get only non-historical servers, in case any were deleted (they remain in
    # the CB database and associated to this service)
    servers = service.server_set.exclude(status='HISTORICAL')

    # Set the param on each server
    for server in servers:
        server.set_value_for_custom_field(PARAM_NAME, param_value)
        set_progress('  Server "{}"'.format(server))

    return '', '', ''
Example #9
def run(job, logger, resources=None):
    """
    `resources` is a queryset (of length 1) of resources being acted on.
    That resource should have a 'aws_stack_name' attribute or nothing is deleted.
    """
    resource = resources.first()
    if not resource:
        raise CloudBoltException(
            "No resource provided, this needs to be run as a pre-delete "
            "resource action")

    rh = AWSHandler.objects.first()
    # See http://boto3.readthedocs.io/en/latest/guide/configuration.html#method-parameters
    session = boto3.Session(aws_access_key_id=rh.serviceaccount,
                            aws_secret_access_key=rh.servicepasswd,
                            region_name='us-west-2')
    client = session.client('cloudformation')

    stack_name = resource.attributes.filter(
        field__name="aws_stack_name").first()
    if not stack_name:
        set_progress("No aws_stack_name attribute set on resource; skipping.")
        return "FAILURE", "", ""

    stack_name = stack_name.value
    set_progress("Deleting Stack {}".format(stack_name))
    response = client.delete_stack(StackName=stack_name)
    logger.debug("Response: {}".format(response))
    return "", "", ""
def run(job, *args, **kwargs):

    handlers = AWSHandler.objects.all()
    for handler in handlers:
        set_progress("Fetching IAM Policies for {}".format(handler.name))
        wrapper = handler.get_api_wrapper()
        iam_client = wrapper.get_boto3_client('iam', handler.serviceaccount,
                                              handler.servicepasswd, None)

        response = iam_client.list_policies()

        exportable_policies = []
        for policy in response['Policies']:
            exportable_policies.append({
                "arn": policy['Arn'],
                "path": policy['Path'],
                "name": policy['PolicyName'],
            })

        os.makedirs(IAM_POLICY_CACHE_LOCATION_PATH, exist_ok=True)
        path = os.path.join(IAM_POLICY_CACHE_LOCATION_PATH,
                            'handler-{}-policies.json'.format(handler.id))
        write_list_to_file(exportable_policies, path)
        set_progress("Collected {} IAM Policies for {}.".format(
            len(exportable_policies), handler.name))
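
# write_list_to_file is referenced but not shown here; one plausible minimal
# implementation (an assumption, since the real helper is not included) is a
# plain JSON dump.
import json


def write_list_to_file(items, path):
    """Serialize a list of dicts to a JSON file at the given path."""
    with open(path, 'w') as f:
        json.dump(items, f, indent=2)
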
Example #11
def discover_resources(**kwargs):
    discovered_security_groups = []
    for handler in AWSHandler.objects.all():
        set_progress(
            'Connecting to Amazon EC2 for handler: {}'.format(handler))
        for region in handler.current_regions():
            ec2_client = boto3.client(
                'ec2',
                region_name=region,
                aws_access_key_id=handler.serviceaccount,
                aws_secret_access_key=handler.servicepasswd)
            try:
                for response in ec2_client.describe_security_groups()['SecurityGroups']:
                    discovered_security_groups.append({
                        "name": response['GroupName'] + " - " + response['GroupId'],
                        "aws_region": region,
                        "aws_rh_id": handler.id,
                        "security_group_name": response['GroupName'] + " - " + response['GroupId'],
                        "security_group_description": response['Description'],
                        "aws_security_group_id": response['GroupId'],
                    })
            except ClientError as e:
                set_progress('AWS ClientError: {}'.format(e))
                continue

    return discovered_security_groups
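
# describe_security_groups returns one page of results per call; for accounts
# with many groups, a paginator sketch like this (illustrative) collects them
# all.
def iter_security_groups(ec2_client):
    """Yield every security group in the region, following pagination."""
    paginator = ec2_client.get_paginator('describe_security_groups')
    for page in paginator.paginate():
        for group in page['SecurityGroups']:
            yield group
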
Example #12
def discover_resources(**kwargs):
    discovered_clusters = []

    for handler in AWSHandler.objects.all():
        for region in handler.current_regions():
            try:
                wrapper = handler.get_api_wrapper()
            except Exception:
                continue
            client = wrapper.get_boto3_client('docdb', handler.serviceaccount,
                                              handler.servicepasswd, region)
            try:
                for cluster in client.describe_db_clusters()['DBClusters']:
                    if cluster['Engine'] == 'docdb':
                        discovered_clusters.append({
                            'name': cluster['DBClusterIdentifier'],
                            'docdb_name': cluster['DBClusterIdentifier'],
                            'aws_rh_id': handler.id,
                            'aws_region': region,
                            'status': cluster['Status'],
                            'engine': cluster['Engine'],
                        })
            except ClientError as e:
                set_progress('AWS ClientError: {}'.format(e))
                continue

    return discovered_clusters
Example #13
def run(**kwargs):
    set_progress("Deleting office365 user...")
    resource = kwargs.pop('resources').first()
    CI = ConnectionInfo.objects.get(name='Office365')
    authority = 'https://login.microsoftonline.com/'
    resource_ = f'{CI.protocol}://{CI.ip}'
    url = f'{CI.protocol}://{CI.ip}:{CI.port}/v1.0/users/{resource.user_id}'
    headers = ast.literal_eval(CI.headers)

    tenant_id = headers.get('tenant_id')
    client_id = headers.get('client_id')
    client_secret = headers.get('client_secret')

    context = adal.AuthenticationContext(authority + tenant_id)
    token = context.acquire_token_with_client_credentials(
        resource_, client_id, client_secret)
    headers = {
        'Authorization': 'Bearer {0}'.format(token['accessToken']),
        'Content-Type': 'application/json'
    }

    response = requests.delete(url, headers=headers)
    if response.ok:
        return "SUCCESS", "", ""

    return "FAILURE", f"{response.reason}", ""
def run(job, *args, **kwargs):
    bp = ServiceBlueprint.objects.get(id=BLUEPRINT)
    set_progress(
        "Running Continuous Infrastructure Test for blueprint {}".format(bp)
    )

    client = get_api_client()

    # Order the BP
    set_progress("### ORDERING BLUEPRINT ###", tasks_done=0, total_tasks=3)
    test_order_blueprint(client)

    resource = bp.resource_set.filter(name__iendswith=NEW_RESOURCE_NAME, lifecycle='ACTIVE').first()
    # Delete the resource from the database only
    resource.delete()
    set_progress("### DISCOVERING RESOURCES FOR BLUEPRINT ###", tasks_done=1)
    bp.sync_resources()

    # should be able to get the resource since the sync should have created it
    resource = bp.resource_set.get(name__icontains=NEW_RESOURCE_NAME, lifecycle='ACTIVE')

    set_progress("### DELETING RESOURCE FOR BLUEPRINT ###", tasks_done=2)
    test_delete_resource(client, resource)

    set_progress("ALL Tests completed!", tasks_done=3)
Example #15
def discover_resources(**kwargs):
    discovered_clusters = []

    for handler in AWSHandler.objects.all():
        for region in handler.current_regions():
            rds = boto3.client(
                'rds',
                region_name=region,
                aws_access_key_id=handler.serviceaccount,
                aws_secret_access_key=handler.servicepasswd
            )
            try:
                for cluster in rds.describe_db_clusters()['DBClusters']:
                    discovered_clusters.append({
                        'name': cluster['DBClusterIdentifier'],
                        'db_cluster_identifier': cluster['DBClusterIdentifier'],
                        'aws_rh_id': handler.id,
                        'aws_region': region,
                        'status': cluster['Status'],
                        'engine': cluster['Engine']
                    })
            except ClientError as e:
                set_progress('AWS ClientError: {}'.format(e))
                continue

    return discovered_clusters
Example #16
def run(job, resource, **kwargs):
    region = resource.attributes.get(field__name='aws_region').value
    rh_id = resource.attributes.get(field__name='aws_rh_id').value
    db_identifier = resource.attributes.get(field__name='db_identifier').value
    handler = AWSHandler.objects.get(id=rh_id)

    set_progress('Connecting to Amazon RDS')
    rds = boto3.client('rds',
                       region_name=region,
                       aws_access_key_id=handler.serviceaccount,
                       aws_secret_access_key=handler.servicepasswd)

    set_progress('Getting all snapshots for "{}"'.format(db_identifier))

    blueprint = ServiceBlueprint.objects.filter(
        name__iexact='AWS MariaDB').first()
    group = Group.objects.first()
    resource_type = ResourceType.objects.filter(name__iexact="Snapshot")[0]

    response = rds.describe_db_snapshots(DBInstanceIdentifier=db_identifier)

    for snapshot in response['DBSnapshots']:
        res, created = Resource.objects.get_or_create(
            name=snapshot['DBSnapshotIdentifier'],
            defaults={
                'blueprint': blueprint,
                'group': group,
                'parent_resource': resource,
                'resource_type': resource_type,
                'lifecycle': snapshot['Status']
            })

    return "SUCCESS", "", ""
Example #17
def run(job, logger=None):
    # Get server & power status
    server = job.server_set.first()
    server_original_power_status = server.power_status

    # Power off VM (optional)
    #if server_original_power_status != "POWEROFF":
    #    set_progress("Powering off server.")
    #    task = server.power_off()
    # -->add timeout here to wait for shutdown

    # Connect to AWS
    e = server.environment
    set_progress("Connecting to EC2 region {}.".format(e.aws_region), logger,
                 job)
    rh = server.resource_handler
    aws = rh.cast()
    aws.connect_ec2(e.aws_region)
    ec2 = aws.resource_technology.work_class.ec2

    # Get instance-id & region
    instance_id = server.resource_handler_svr_id

    # Create AMI from instance
    #http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/Creating_EBSbacked_WinAMI.html
    ec2.create_image(instance_id,
                     name='{{ AMIname }}',
                     description='Created via CloudBolt')
    return "", "", ""
Example #18
def discover_resources(**kwargs):
    discovered_cache = []
    cache = []

    envs = Environment.objects.filter(
        resource_handler__resource_technology__name="Amazon Web Services")

    for env in envs:
        if not env.aws_region:
            continue
        client = connect_to_elasticache(env)
        try:
            response = client.describe_cache_clusters()
            if len(response.get('CacheClusters')) > 0:
                res = response.get('CacheClusters')[0]
                data = {
                    'name': res.get('CacheClusterId'),
                    'cluster_name': res.get('CacheClusterId'),
                    'engine': res.get('Engine'),
                    'aws_rh_id': env.resource_handler_id,
                    'env_id': env.id,
                }
                _ = {
                    'cluster_name': res.get('CacheClusterId'),
                    'engine': res.get('Engine'),
                    'aws_rh_id': env.resource_handler_id,
                }
                if _ not in cache:
                    if data not in discovered_cache:
                        discovered_cache.append(data)
                        cache.append(_)
        except Exception as error:
            set_progress(error)
            continue
    return discovered_cache
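
# Note that the loop above only records the first cluster in each response.
# A sketch that walks every ElastiCache cluster in an environment (same
# fields, illustrative only) would look like this.
def iter_cache_clusters(env):
    """Yield one discovery record per ElastiCache cluster in this environment."""
    client = connect_to_elasticache(env)
    for res in client.describe_cache_clusters().get('CacheClusters', []):
        yield {
            'name': res.get('CacheClusterId'),
            'cluster_name': res.get('CacheClusterId'),
            'engine': res.get('Engine'),
            'aws_rh_id': env.resource_handler_id,
            'env_id': env.id,
        }
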
Example #19
def run(job, *args, **kwargs):
    set_progress(
        "Running Continuous Infrastructure Test for blueprint {}".format(
            BLUEPRINT))

    client = get_api_client()

    set_progress("### ORDERING BLUEPRINT ###", tasks_done=0, total_tasks=3)
    test_order_blueprint(client)

    resource = BLUEPRINT.resource_set.filter(name__icontains=NEW_RESOURCE_NAME,
                                             lifecycle="ACTIVE").first()

    # Delete the resource from the database only
    resource.delete()

    set_progress("### DISCOVERING RESOURCES FOR BLUEPRINT ###", tasks_done=1)
    BLUEPRINT.sync_resources()

    # should be able to get the resource since the sync should have created it
    resource = BLUEPRINT.resource_set.filter(name__icontains=NEW_RESOURCE_NAME,
                                             lifecycle="ACTIVE").first()
    if not resource:
        return "FAILURE", "The Network Security Group doesn't exist", ""

    set_progress("### DELETING RESOURCE FOR BLUEPRINT ###", tasks_done=2)
    test_result = test_delete_resource(client, resource)
    if test_result != "SUCCESS":
        return "FAILURE", "Unable to delxete Azure Network Security Group", ""

    set_progress("ALL Tests completed!", tasks_done=3)
Example #20
def discover_resources(**kwargs):
    discovered_cloudformations = []

    for handler in AWSHandler.objects.all():
        wrapper = handler.get_api_wrapper()
        set_progress(
            'Connecting to Amazon CloudFormation for handler: {}'.format(handler))
        for region in handler.current_regions():
            cloudformation = wrapper.get_boto3_client('cloudformation',
                                                      handler.serviceaccount,
                                                      handler.servicepasswd,
                                                      region)
            try:
                for stack in cloudformation.list_stacks()['StackSummaries']:
                    discovered_cloudformations.append({
                        'aws_stack_name': stack['StackName'],
                        'stack_status': stack['StackStatus'],
                        'aws_rh_id': handler.id,
                        'aws_region': region,
                    })
            except ClientError as e:
                set_progress('AWS ClientError: {}'.format(e))
                continue

    return discovered_cloudformations
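
# list_stacks also returns stacks that have already been deleted. If only live
# stacks are wanted, boto3 accepts a StackStatusFilter; the status list below
# is a reasonable subset, not an exhaustive one.
ACTIVE_STACK_STATUSES = ['CREATE_COMPLETE', 'UPDATE_COMPLETE', 'ROLLBACK_COMPLETE']


def list_active_stacks(cloudformation):
    """Return only stack summaries in the listed (non-deleted) states."""
    response = cloudformation.list_stacks(StackStatusFilter=ACTIVE_STACK_STATUSES)
    return response['StackSummaries']
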
Example #21
def discover_resources(**kwargs):
    discovered_google_bigtable = []

    #create a set of all projects
    projects = {rh.project for rh in GCEHandler.objects.all()}

    for handler in GCEHandler.objects.all():
        set_progress(
            'Connecting to Google BigTable for handler: {}'.format(handler))

        #only get bigtables of projects in the set
        project = handler.project
        if project not in projects:
            continue

        client = create_client(handler)
        set_progress("Connection to GCE established")
        for bigtables in client.list_instances():
            if len(bigtables) > 0:
                for bigtable in bigtables:
                    discovered_google_bigtable.append({
                        'name': bigtable.display_name,
                        'instance_name': bigtable.display_name,
                        'google_rh_id': handler.id,
                    })

        #remove project from the set after getting its bigtables
        projects.discard(project)
    return discovered_google_bigtable
Example #22
def run(job, *args, **kwargs):
    env = Environment.objects.get(id='{{ aws_environment }}')
    subnet = '{{ subnet }}'
    securitygroup = '{{ securitygroup }}'
    role = '{{ role }}'
    notebook_instance_name = '{{ notebook_instance_name_input }}'
    rh = AWSHandler.objects.get(id=env.resource_handler.id)

    set_progress("Connection to Amazon Sagemaker...")

    client = boto3.client('sagemaker',
                          region_name=env.aws_region,
                          aws_access_key_id=rh.serviceaccount,
                          aws_secret_access_key=rh.servicepasswd)

    set_progress("Creating Sagemaker notebook instance - {}".format(
        notebook_instance_name))

    response = client.create_notebook_instance(
        NotebookInstanceName=notebook_instance_name,
        InstanceType='ml.t2.large',
        SubnetId=subnet,
        SecurityGroupIds=[
            securitygroup,
        ],
        RoleArn=role,
    )

    if True:
        return "SUCCESS", "Successfully created new SageMaker notebook instance", ""
    else:
        return "FAILURE", "Failure creating SageMaker notebook instance", ""
Example #23
def discover_resources(**kwargs):

    discovered_az_block_storages = []
    discovered_az_block_storage_names = []

    for handler in AzureARMHandler.objects.all():
        set_progress(
            'Connecting to Azure Block Storage for handler: {}'.format(handler))
        credentials = ServicePrincipalCredentials(client_id=handler.client_id,
                                                  secret=handler.secret,
                                                  tenant=handler.tenant_id)
        azure_blob_client = storage.StorageManagementClient(
            credentials, handler.serviceaccount)

        azure_resources_client = resources.ResourceManagementClient(
            credentials, handler.serviceaccount)

        set_progress("Connection to Azure established")
        for st in azure_blob_client.storage_accounts.list():
            if st.kind == 'BlobStorage':
                if st.name not in discovered_az_block_storage_names:
                    discovered_az_block_storage_names.append(st.name)
                    discovered_az_block_storages.append({
                        'name': st.name,
                        'azure_storage_blob_name': st.name,
                        'resource_group_name': st.id.split('/')[4],
                        'azure_location': st.primary_location,
                        'azure_rh_id': handler.id,
                    })
    return discovered_az_block_storages
Example #24
def discover_resources(**kwargs):
    discovered_routes = []
    for handler in AWSHandler.objects.all():
        try:
            wrapper = handler.get_api_wrapper()
        except Exception:
            continue
        set_progress('Connecting to Amazon for handler: {}'.format(handler))
        client = wrapper.get_boto3_client('route53', handler.serviceaccount,
                                          handler.servicepasswd, None)
        try:
            for hosted_zone in client.list_hosted_zones()['HostedZones']:
                zone_id = hosted_zone['Id'].split('/')[-1]
                records = client.list_resource_record_sets(
                    HostedZoneId=zone_id)['ResourceRecordSets']
                for record in records:
                    discovered_routes.append({
                        'name': record.get('Name'),
                        'dns_record_type': record.get('Type'),
                        'dns_record_value': json.dumps(record.get('ResourceRecords')),
                        'zone_id': zone_id,
                        'aws_rh_id': handler.id,
                    })
        except ClientError as e:
            set_progress('AWS ClientError: {}'.format(e))
            continue

    return discovered_routes
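
# Both hosted zones and record sets are paginated APIs, so the single
# list_resource_record_sets call above can truncate on large zones. A
# paginator sketch (illustrative):
def iter_record_sets(client, zone_id):
    """Yield every record set in a hosted zone, following pagination."""
    paginator = client.get_paginator('list_resource_record_sets')
    for page in paginator.paginate(HostedZoneId=zone_id):
        for record in page['ResourceRecordSets']:
            yield record
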
Example #25
def run(job, **kwargs):
    resource = kwargs.pop('resources').first()

    redis_cache_name = resource.attributes.get(
        field__name='azure_redis_cache_name').value
    resource_group = resource.attributes.get(
        field__name='resource_group_name').value
    rh_id = resource.attributes.get(field__name='azure_rh_id').value
    rh = AzureARMHandler.objects.get(id=rh_id)

    set_progress("Connecting To Azure...")
    credentials = ServicePrincipalCredentials(
        client_id=rh.client_id,
        secret=rh.secret,
        tenant=rh.tenant_id,
    )
    redis_client = RedisManagementClient(credentials, rh.serviceaccount)
    set_progress("Connection to Azure established")

    set_progress("Deleting redis cache %s from Azure..." % redis_cache_name)

    try:
        redis_client.redis.delete(resource_group, redis_cache_name).wait()
        set_progress("Deleted cache %s..." % redis_cache_name)
    except CloudError as e:
        set_progress('Azure CloudError: {}'.format(e))
        return "FAILURE", "Redis cache could not be deleted", ""

    return "SUCCESS", "Cache has been successfully deleted", ""
Example #26
def run(job, logger=None, **kwargs):
    create_custom_fields_as_required()
    env_id = '{{ env_id }}'
    env = Environment.objects.get(id=env_id)
    region = env.aws_region
    rh = env.resource_handler.cast()
    vault_name = '{{ vault_name }}'

    resource = kwargs.pop('resources').first()
    resource.name = vault_name
    resource.glacier_vault_name = vault_name
    resource.aws_region = region
    resource.aws_rh_id = rh.id
    resource.save()

    set_progress('Connecting to Amazon AWS')

    glacier = boto3.resource(
        'glacier',
        region_name=region,
        aws_access_key_id=rh.serviceaccount,
        aws_secret_access_key=rh.servicepasswd,
    )

    set_progress('Creating Glacier vault "{}"'.format(vault_name))
    glacier.create_vault(vaultName=vault_name)

    return "", "", ""
Example #27
def discover_resources(**kwargs):
    discovered_azure_sql = []
    for handler in AzureARMHandler.objects.all():
        set_progress(
            "Connecting to Azure sql DB for handler: {}".format(handler))

        sql_client = _get_client(handler)

        for server in sql_client.servers.list():
            try:
                for db in sql_client.databases.list_by_server(
                        server.as_dict()["id"].split("/")[-5], server.name):
                    if db.name == "master":
                        continue
                    discovered_azure_sql.append({
                        "name": db.name,
                        "azure_database": db.id,
                        "azure_server_name": server.name,
                        "azure_database_name": db.name,
                        "resource_group_name": server.as_dict()["id"].split("/")[-5],
                        "azure_rh_id": handler.id,
                        "azure_location": db.location,
                    })
            except CloudError as e:
                set_progress("Azure Clouderror: {}".format(e))
                continue

    return discovered_azure_sql
Example #28
def _get_client(handler):
    """
    Get the clients using newer methods from the CloudBolt main repo if this CB is running
    a version greater than 9.2.2. These internal methods implicitly take care of much of the other
    features in CloudBolt such as proxy and ssl verification.
    Otherwise, manually instantiate clients without support for those other CloudBolt settings.
    """
    import settings
    from common.methods import is_version_newer

    cb_version = settings.VERSION_INFO["VERSION"]
    if is_version_newer(cb_version, "9.2.2"):
        wrapper = handler.get_api_wrapper()
        storage_client = wrapper.storage_client
    else:
        # TODO: Remove once versions <= 9.2.2 are no longer supported.
        credentials = ServicePrincipalCredentials(client_id=handler.client_id,
                                                  secret=handler.secret,
                                                  tenant=handler.tenant_id)
        storage_client = storage.StorageManagementClient(
            credentials, handler.serviceaccount)

    set_progress("Connection to Azure established")

    return storage_client
Example #29
def delete_virtual_server(b, virtualname):
    path = '/Common/{}'.format(virtualname)
    if path in b.LocalLB.VirtualServer.get_list():
        b.LocalLB.VirtualServer.delete_virtual_server([virtualname])
        set_progress("Successfully deleted Virtual Server '{}'".format(virtualname))
    else:
        set_progress("Virtual Server '{}' not found".format(virtualname))
Example #30
def delete_pool(b, pool_name):
    path = '/Common/{}'.format(pool_name)
    if path in b.LocalLB.Pool.get_list():
        b.LocalLB.Pool.delete_pool([pool_name])
        set_progress("Successfully deleted Pool '{}'".format(pool_name))
    else:
        set_progress("Pool '{}' not found.".format(pool_name))
Example #31
def discover_resources(**kwargs):
    discovered_virtual_nets = []
    for handler in AzureARMHandler.objects.all():
        set_progress("Connecting to Azure networks \
        for handler: {}".format(handler))
        credentials = ServicePrincipalCredentials(client_id=handler.client_id,
                                                  secret=handler.secret,
                                                  tenant=handler.tenant_id)
        network_client = NetworkManagementClient(credentials,
                                                 handler.serviceaccount)

        azure_resources_client = resources.ResourceManagementClient(
            credentials, handler.serviceaccount)

        for resource_group in azure_resources_client.resource_groups.list():
            try:
                for security_group in network_client.network_security_groups.list(
                        resource_group_name=resource_group.name):
                    discovered_virtual_nets.append({
                        "name": "Azure NSG - " + security_group.as_dict()["name"],
                        "azure_network_security_group": security_group.as_dict()["name"],
                        "azure_location": security_group.as_dict()["location"],
                        "azure_rh_id": handler.id,
                        "resource_group_name": resource_group.name,
                    })
            except CloudError as e:
                set_progress("Azure Clouderror: {}".format(e))
                continue

    return discovered_virtual_nets
Example #32
def discover_resources(**kwargs):
    discovered_security_groups = []
    for handler in AWSHandler.objects.all():
        set_progress(
            'Connecting to Amazon backup for handler: {}'.format(handler))
        for region in handler.current_regions():
            try:
                client = boto3.client(
                    'backup',
                    region_name=region,
                    aws_access_key_id=handler.serviceaccount,
                    aws_secret_access_key=handler.servicepasswd)
                for response in client.list_backup_plans()['BackupPlansList']:
                    discovered_security_groups.append({
                        "aws_region": region,
                        "aws_rh_id": handler.id,
                        "name": response['BackupPlanName'],
                        "backup_plan_name": response['BackupPlanName'],
                        "backup_plan_id": response['BackupPlanId'],
                    })
            except Exception as e:
                set_progress('AWS ClientError: {}'.format(e))
                continue

    return discovered_security_groups
Example #33
def delete_nodes(b, pool_members):
    for member in pool_members:
        path = '/Common/{}'.format(member)
        if path in b.LocalLB.NodeAddressV2.get_list():
            b.LocalLB.NodeAddressV2.delete_node_address([member])
            set_progress("Successfully deleted Node '{}'".format(member))
        else:
            set_progress("Node '{}' not found".format(member))
Example #34
def create_virtual_server(b, virtualname, address, port, member_pool):
    try:
        b.LocalLB.VirtualServer.create(
            definitions = [{'name': [virtualname],'address': [address], 'port': [port], 'protocol': 'PROTOCOL_TCP'}],
            wildmasks = ['255.255.255.255'], resources = [{'type': 'RESOURCE_TYPE_POOL', 'default_pool_name': [member_pool]}],
            profiles = [[{'profile_context': 'PROFILE_CONTEXT_TYPE_ALL', 'profile_name': 'tcp'}]]
        )
        set_progress("Successfully created virtual server '{}'".format(virtualname))
    except Exception as e:
        set_progress("Error in creating virtual server: {}".format(e))
Example #35
def create_virtual_server(b, virtualname, address, port, member_pool):
    try:
        b.LocalLB.VirtualServer.create(
            definitions=[{"name": [virtualname], "address": [address], "port": [port], "protocol": "PROTOCOL_TCP"}],
            wildmasks=["255.255.255.255"],
            resources=[{"type": "RESOURCE_TYPE_POOL", "default_pool_name": [member_pool]}],
            profiles=[[{"profile_context": "PROFILE_CONTEXT_TYPE_ALL", "profile_name": "tcp"}]],
        )
        set_progress("Successfully created virtual server '{}'".format(virtualname))
    except Exception as e:
        set_progress("Error in creating virtual server: {}".format(e))
def run(job, **kwargs):  # the job is the sync-vms job that causes this
    logger.job = job

    job_params = job.job_parameters.cast()
    aws_rhs = job_params.resource_handlers.filter(real_type__app_label='aws')
    aws_servers = Server.objects.filter(resource_handler__in=aws_rhs)

    set_progress('Updating AWS server owners based on the "{}" tag value'.format(TAG_NAME))
    num_changed = sum(1 for server in aws_servers if update_server_owner(server))
    set_progress('Updated {} AWS server owners'.format(num_changed))

    return "", "", ""
def run(job, logger, *args, **kwargs):
    params = job.job_parameters.cast().arguments
    jobs = params['sync_jobs']
    sync_jobs = Job.objects.filter(id__in=jobs)
    set_progress("Rule will delete {} jobs".format(sync_jobs.count()))
    for job in sync_jobs:
        logfile = os.path.join(settings.VARDIR, "log", "cloudbolt", "jobs", "{}{}".format(str(job.id), ".log"))
        if os.path.exists(logfile):
            set_progress("Removing log file {}".format(logfile))
            os.remove(logfile)
    sync_jobs.delete()
    return ("SUCCESS", "", "")
def add_user_to_groups(user_profile, groups, role):
    """
    Adds a user to a list of groups, in the specified role.

    :param user_profile: a CB UserProfile object
    :param groups: a list of Group objects
    :param role: a string that should be "viewers", "requestors", "approvers",
    "resource_admins", or "user_admins"
    :return: None
    """
    set_progress("Adding {} to {} role on these groups: {}".format(user_profile, role, groups))
    role_relationship = getattr(user_profile, role)
    role_relationship.add(*groups)
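
# A hedged usage sketch for the helper above; the profile and group lookups
# are illustrative (assumed import path: accounts.models), not from the
# original plugin.
def grant_approver_on_all_groups(username):
    """Make one user an approver on every CloudBolt group."""
    from accounts.models import Group, UserProfile  # assumed import path

    profile = UserProfile.objects.get(user__username=username)
    add_user_to_groups(profile, list(Group.objects.all()), "approvers")
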
Example #39
def run(job, logger=None):
    # makes connection to F5 Big IP
    credential = ConnectionInfo.objects.filter(name='F5BIGIP')
    if not credential:
        return "", "", ""

    try:
        f5_conn = credential[0]
        b = bigsuds.BIGIP(
            hostname=f5_conn.ip,
            username=f5_conn.username,
            password=f5_conn.password,
            debug=True
        )
    except Exception as e:
        set_progress("Error in connecting to F5 BIG-IP")
def run(job, logger=None, server=None, **kwargs):

    si = None
    for server in job.server_set.all():
        if not si:
            si = get_vmware_service_instance(server.resource_handler.cast())
        vm = get_vm_by_uuid(si, server.resource_handler_svr_id)

        assert isinstance(vm, pyVmomi.vim.VirtualMachine)

        server.refresh_info()

        server_original_power_status = server.power_status
        set_progress("Performing VM power down...")
        task = vm.PowerOffVM_Task()
        wait_for_tasks(si, [task])

    return "", "", ""
def run(job, group, logger=None):
    roles_to_sync = ['user_admins',
                     'resource_admins',
                     'approvers',
                     'requestors',
                     'viewers']

    if group.parent is None:
        set_progress("No parent group found to sync users from.", job)
        return "", "", ""

    set_progress("Syncing users from parent group {}".format(group.parent), job)

    for role in roles_to_sync:
        group_role_members = getattr(group, role)
        parent_role_members = getattr(group.parent, role)
        for user in parent_role_members.all():
            group_role_members.add(user)

    return "", "", ""
def run(job, logger, service=None):
    if not service:
        raise CloudBoltException("No service provided, this needs to be run as a pre-delete "
                                 "service action")

    rh = AWSHandler.objects.first()
    # See http://boto3.readthedocs.io/en/latest/guide/configuration.html#method-parameters
    session = boto3.Session(
        aws_access_key_id=rh.serviceaccount,
        aws_secret_access_key=rh.servicepasswd,
        region_name='us-west-2'
    )
    client = session.client('cloudformation')

    stack_name = service.attributes.filter(field__name="aws_stack_name").first()
    if not stack_name:
        return "", "", ""
    stack_name = stack_name.value
    set_progress("Deleting Stack {}".format(stack_name))
    response = client.delete_stack(StackName=stack_name)
    logger.debug("Response: {}".format(response))
    return "", "", ""
Example #43
def create_pool(b, pool, lbmethod, pl_mems):
    pool = "/Common/%s" % pool
    pmlist = []
    for member in pl_mems:
        for address, port in member.items():
            pmlist.append({"address": str(address), "port": int(port)})
    try:
        pllist = b.LocalLB.Pool.get_list()
        if pool in pllist:
            b.LocalLB.Pool.add_member_v2([pool], [pmlist])
        else:
            b.LocalLB.Pool.create_v2([pool], [lbmethod], [pmlist])

        set_progress("Successfully created pool '{}'".format(pool))
        return b.LocalLB.Pool.get_member_v2([pool])
    except Exception as e:
        set_progress("Error in creating pool: {}".format(e))
def run(job, **kwargs):
    # User enters this value in the service action dialog.
    param_value = '{{ param_value }}'

    logger = kwargs['logger']  # use this to send text to the job log file
    job_params = job.job_parameters.cast()
    service = job_params.services.first()

    set_progress('Setting parameter "{}" = "{}" on all servers in service {}'
                 .format(PARAM_NAME, param_value, service))

    # Get all servers
    # servers = service.server_set.all()
    # --- or ---
    # Get only non-historical servers, in case any were deleted (they remain in
    # the CB database and associated to this service)
    servers = service.server_set.exclude(status='HISTORICAL')

    # Set the param on each server
    for server in servers:
        server.set_value_for_custom_field(PARAM_NAME, param_value)
        set_progress('  Server "{}"'.format(server))

    return '', '', ''
Example #45
def run(job, logger=None, **kwargs):
	
    #Connect To The Server Via SSH And Run Chef Decommission Recipe
    cmd = '[ -f /etc/chef/client.pem ] && chef-client -o server-destroy || echo Chef Run Did Not Occur'
    params = job.job_parameters.cast()
    server = params.servers.first()
    server.key_name = '../svrbuild_id_rsa'
    server.save()
    try:
        output = server.execute_script(script_contents=cmd, runas_username='******', run_with_sudo=True, timeout=600)
        logger.debug(output)
        set_progress("Chef Decommission Complete")
    except Exception as ex:
        template = "An exception of type {0} occured. Arguments:\n{1!r}"
        message = template.format(type(ex).__name__, ex.args)
        if type(ex).__name__ in ['CommandExecutionException', 'ValueError']:
            if type(ex).__name__ == 'CommandExecutionException':
                logger.debug(ex.output)
            set_progress("Failed to Run Chef client destroy")
        else:
            set_progress(message)
            return "FAILURE", "Server failed during Decommission", ""
    server.save()
    return "SUCCESS", "Server Has Been Successfully Decommissioned!", ""
def set_user_permissions(user_profile, groups, data):
    """
    Set users permissions on the 3 groups based on security group membership in LDAP/AD

    Also make them a CB admin, super admin, and env admin if they are a member of the
    corresponding security group in AD.
    """
    group_objects = groups.values()
    is_admin = False  # keeps track of whether they have been found to be in the CB admin sec group

    # First remove all permissions from the groups we are managing
    user_profile.requestors.remove(*group_objects)
    user_profile.approvers.remove(*group_objects)
    user_profile.user_admins.remove(*group_objects)
    user_profile.resource_admins.remove(*group_objects)

    user = user_profile.user

    # Add additional permissions based on LDAP/AD group membership
    if 'memberOf' not in data[0][1]:
        set_progress("No memberOf information found for user {}, skipping adding user to any "
                     "roles".format(user_profile))
        return

    ldap_groups = data[0][1]['memberOf']
    set_progress("{} was found to be a member of these security groups: {}".format(
        user_profile, ldap_groups))

    # for each security group the user is in
    for lgroup in ldap_groups:
        # if the viewer security group name is anywhere in the name of the security group
        if "{{viewers_security_group_name}}" in lgroup:
            # Add user as an viewer in all 3 groups in CB
            add_user_to_groups(user_profile, group_objects, "viewers")
        if "{{requesters_security_group_name}}" in lgroup:
            # Requesters are different - check to see if sec group name ends in any one of the
            # group level names ('silver', 'bronze', or 'gold'). If so, add to the corresponding
            # group in CB as a requester.
            for group_level in GROUP_LEVELS:
                if group_level in lgroup:
                    group = groups[group_level]
                    set_progress("Adding {} to {} as a requester".format(user_profile, group))
                    user_profile.requestors.add(group)
        if "{{approvers_security_group_name}}" in lgroup:
            # Add user as an approver in all 3 groups in CB
            add_user_to_groups(user_profile, group_objects, "approvers")
        if "{{group_admins_security_group_name}}" in lgroup:
            # Add user as a user_admin (group admin) in all 3 groups in CB
            add_user_to_groups(user_profile, group_objects, "user_admins")
        if "{{resource_admins_security_group_name}}" in lgroup:
            # Add user as a resource_admin in all 3 groups in CB
            add_user_to_groups(user_profile, group_objects, "resource_admins")

        # process CB admin perms
        if "{{cb_admins_security_group_name}}" in lgroup:
            set_progress("Making the user an admin")
            # Make the user a super admin, CB admin, and env admin
            user_profile.super_admin = True
            user_profile.environment_admin = True
            user_profile.save()
            # set the django user permission that maps to cb_admin
            user.is_superuser = True
            user.save()
            is_admin = True

    if not is_admin:
        # after checking all groups, the user was not part of any that make them a CB admin
        set_progress("Removing admin permissions from the user")
        # Make the user not a super admin, CB admin, or env admin
        user_profile.super_admin = False
        user_profile.environment_admin = False
        user_profile.save()
        # unset the django user permission that maps to cb_admin
        user.is_superuser = False
        user.save()
def run(job, **kwargs):

    # Place this plugin in the Orchestration Actions -> Other -> Pre-Job hook
    # This script will allow you to hook into any buildup or teardown for any Azure services found.
    # This hook is called when the 'Delete' button on a service is pressed.
    # This script currently only supports Azure Websites.

    if job.type == "orchestration_hook":
        action = job.job_parameters.cast().hook
        if action.name == "Delete Service":

            # Evaluate Service To See If There Is A Custom Defined Teardown Process
            for service in job.service_set.all():
                set_progress("Evaluating XaaS tear-down needs for '{}'".format(service))
                if service.attributes.filter(field__name="Azure_Publish_Profile_XML").exists():

                    #########################Teardown Azure Websites#########################
                    set_progress("Azure Website Found! Tearing Down Website")

                    # Define Service Variables
                    subscription_id = "3ba523b7-5b38-430c-9ae7-b89b6051f756"
                    certificate_path = "/var/opt/cloudbolt/resourcehandlers/azure/azureclient.pem"

                    # Init Website Management Service
                    set_progress("Connecting To Azure Management Service...")
                    wms = WebsiteManagementService(subscription_id, certificate_path)
                    set_progress("Successfully Connected To Azure Management Service!")

                    # Get Service Field - Azure Webspace
                    set_progress("Retrieving Service Field - Azure Webspace")
                    Azure_Webspace = service.attributes.filter(field__label="Azure Webspace")[0].value
                    set_progress("Found Azure Webspace: {}".format(Azure_Webspace))

                    # Get Service Field - Define Azure Website Name
                    set_progress("Retrieving Service Field - Azure Website")
                    Azure_Name = service.attributes.filter(field__label="Azure Website Name")[0].value
                    set_progress("Found Azure Website: {}".format(Azure_Name))

                    # Delete Website
                    try:
                        set_progress("Deleting Azure Website...")
                        deletedwebsite = wms.delete_site(
                            Azure_Webspace, Azure_Name, delete_empty_server_farm=False, delete_metrics=True
                        )
                        set_progress("Successfully Deleted Azure Website - {0}".format(Azure_Name))
                    except:
                        set_progress("Site Deletion Failed! Please Try Again!")
                    #########################Teardown Azure Websites#########################

                else:
                    set_progress("This Service Is Not An Azure Service, Azure Service Teardown Process Skipped.")

    return "", "", ""
Example #48
def run(job, logger=None):
    # makes connection to F5 Big IP
    try:
        b = bigsuds.BIGIP(hostname="10.60.103.79", username="******", password="******", debug=True)
    except Exception as e:
        set_progress("Error in connecting to F5 BIG-IP")

    # Naming of Pool
    pool_name = "CloudBolt_Rocks"
    # pool_name = 'CloudBolt' + str(int(time.time()))
    lb_meth = "LB_METHOD_ROUND_ROBIN"
    # Taking in pool members aka Instance Ip's from job
    servers = job.parent_job.server_set.all()
    server_ips = []
    # Takes in port
    # server_port = int({{ port }})
    server_port = 80
    for server in servers:
        server_ips.append({str(server.ip): server_port})
        set_progress("Adding instance ip {} to pool".format(server.ip))

    poolinfo = create_pool(b, pool_name, lb_meth, server_ips)
    for x in poolinfo:
        set_progress("Pool: {}".format(pool_name))
        for y in x:
            set_progress("\t%s:%d" % (y["address"], y["port"]))

    # created pool and pool members, create VS and add default pool
    virtualname = "CloudBoltF5"
    address = "10.60.60.88"
    create_virtual_server(b, virtualname, address, server_port, pool_name)

    return "", "", ""
Example #49
def run(job, logger=None, **kwargs):
	
    # PREREQUISITE: Must Create Custom Fields Under Infrastructure -> CustomField For Each Customfield Object
    # Any references to a template variable will automatically be created as custom fields for you.
    # Must Already Have An Azure Handler Setup

    # Example: FinalName = CustomField.objects.get(label='Azure Website Name')
    # You Would Create A Custom Field In The DB With The Label "Azure Website Name"

    #THIS PLUGIN ASSUMES YOU ARE DEPLOYING THIS SERVICE FOR A HOSTED WEBSITE WITH AZURE
    #This will be expanded over time to include all service tiers and configurations.

    #Define Service Variables
    subscription_id = '1234567890'
    certificate_path = '/var/opt/cloudbolt/resourcehandlers/azure/azureclient.pem'

    #Init Website Management Service
    set_progress("Connecting To Azure Management Service...")
    wms = WebsiteManagementService(subscription_id, certificate_path)
    set_progress("Successfully Connected To Azure Management Service!")

    #Define Azure Webspace
    Azure_Webspace = 'westuswebspace'

    #Define Azure Website Name
    Azure_Name = '{{ Azure_Website_Name }}'
    set_progress("Setting Azure Website Name To: " + Azure_Name)

    #Define Azure Website Host Names
    Azure_Hostnames = ['{}.azurewebsites.net'.format(Azure_Name)]
    set_progress("Setting Azure Hostname To " + '{{ Azure_Website_Name }}.azurewebsites.net')
    Azure_URL = Azure_Hostnames[0]
    set_progress("Setting Azure Website URL To " + Azure_URL)

    #Define Azure Website Plan
    Azure_Plan = 'VirtualDedicatedPlan'

    #Define Azure Compute Mode
    Azure_Compute_Mode = 'Shared'

    #Define Azure Server Farm
    Azure_Server_Farm='None'

    #Define Azure Site Mode
    Azure_Site_Mode='Basic'

    #Define Azure Location
    Azure_Location = 'West US'

    #Create The Service
    try:
        set_progress("Creating New Azure Website...")
        siteinfo = wms.create_site(Azure_Webspace, Azure_Name, Azure_Location, Azure_Hostnames, Azure_Plan, Azure_Compute_Mode, Azure_Server_Farm, Azure_Site_Mode)
        set_progress("Successfully Created New Azure Website!")
    except:
        set_progress("Failed to Provision Website! Please Try Again!")

    #Get The Service
    services = job.parent_job.service_set.all()
    service = services[0]

    #Retrieve Publish Profile XML
    try:
        set_progress("Retrieving Website Publishing Profile")
        PublishProfileXML = wms.get_publish_profile_xml(Azure_Webspace, Azure_Name)
        Azure_Publish_Profile = PublishProfileXML.encode('ascii', 'ignore')
        set_progress("Successfully Retrieved Website Publishing Profile!")
    except:
        set_progress("Failed To Retrieve Website Publishing Profile!")
        FailedMessage = 'Failed To Retrieve Website Publishing Profile!'
        FailedName = CustomField.objects.get(label='Azure Publish Profile XML')
        FailedNameCV = CustomFieldValue(field=FailedName, value=FailedMessage)
        FailedNameCV.save()
        service.attributes.add(FailedNameCV)
        Azure_Publish_Profile = FailedMessage

    #Bind The Service Values And Save Them To A CFV (Custom Field Value)
    set_progress("Updating Azure Website URL Information To: " + Azure_URL)
    FinalURL = CustomField.objects.get(label='Azure Website URL')
    FinalURLCV = CustomFieldValue(field=FinalURL, value=Azure_URL)
    FinalURLCV.save()
    service.attributes.add(FinalURLCV)

    set_progress("Updating Azure Webspace Information To: " + Azure_Webspace)
    FinalWebspace = CustomField.objects.get(label='Azure Webspace')
    FinalWebspaceCV = CustomFieldValue(field=FinalWebspace, value=Azure_Webspace)
    FinalWebspaceCV.save()
    service.attributes.add(FinalWebspaceCV)

    set_progress("Updating Azure Location Information To: " + Azure_Location)
    FinalLocation = CustomField.objects.get(label='Azure Website Location')
    FinalLocationCV = CustomFieldValue(field=FinalLocation, value=Azure_Location)
    FinalLocationCV.save()
    service.attributes.add(FinalLocationCV)

    set_progress("Updating Hostnames Information To: " + Azure_Hostnames[0])
    FinalHostnames = CustomField.objects.get(label='Azure Website Hostname')
    FinalHostnamesCV = CustomFieldValue(field=FinalHostnames, value=Azure_Hostnames[0])
    FinalHostnamesCV.save()
    service.attributes.add(FinalHostnamesCV)

    set_progress("Updating Azure Publishing Profile Information")
    PublishProfile = CustomField.objects.get(label='Azure Publish Profile XML')
    PublishProfileCV = CustomFieldValue(field=PublishProfile, value=Azure_Publish_Profile)
    PublishProfileCV.save()
    service.attributes.add(PublishProfileCV)

    #Check The State Of The Job And Output The Result To The Console 
    success = True
    if success:
        set_progress("Successfully Deployed Azure Website Service")
        return "", "", ""
    else:
        set_progress("Failed To Deploy Azure Website Service")
        return "FAILURE", "Failed To Deploy Azure Website Service", ""
Example #50
def run(job, logger=None):
    # makes connection to F5 Big IP
    try:
        b = bigsuds.BIGIP(hostname="10.60.103.79", username="******", password="******", debug=True)
    except Exception as e:
        set_progress("Error in connecting to F5 BIG-IP")
def run(job, logger=None, server=None, **kwargs):

    si = None
    for server in job.server_set.all():
        if not si:
            si = get_vmware_service_instance(server.resource_handler.cast())
        vm = get_vm_by_uuid(si, server.resource_handler_svr_id)
    
        assert isinstance(vm, pyVmomi.vim.VirtualMachine)
        
        if vm.config.version == "vmx-08":
            set_progress("Hardware version already updated. Nothing to do.")
            continue
        
        server.refresh_info()

        server_original_power_status = server.power_status
        if server_original_power_status != "POWERON":
            set_progress("Server is off. Turning it on to upgrade VMware Tools.")
            # Make sure VM is powered on
            task = vm.PowerOnVM_Task()
            wait_for_tasks(si, [task])

        set_progress("Upgrading VMware Tools")
        # Upgrade VMware tools
        try:
            task = vm.UpgradeTools_Task()
            wait_for_tasks(si, [task])
        except:
            set_progress("Cannot upgrade VM tools. Will still try to upgrade hardware version. ")
            pass
            
        
        # Power off VM for hw upgrade
        set_progress("Powering off server to upgrade HW version")
        task = vm.PowerOffVM_Task()
        wait_for_tasks(si, [task])

        # Snapshot VM
        set_progress("Creating snapshot")
        #server.resource_handler.cast().create_snapshot(server, "version4hw-{}".format(time.time()), "Pre Hardware Upgrade Snapshot")
        
        task = vm.CreateSnapshot_Task("version4hw-{}".format(time.time()), "Pre Hardware Upgrade Snapshot", False, True)
        wait_for_tasks(si, [task])
        
        failure_msg = ""

        # Upgrade VM
        try:
            set_progress("Updating HW version")
            task = vm.UpgradeVM_Task(version="vmx-08")
            wait_for_tasks(si, [task])
        except:
            failure_msg = "Failed to upgrade hardware version"
            set_progress("{}. Will now return VM to original power state.".format(failure_msg))
            pass
        
        if server_original_power_status == "POWERON":
            set_progress("Server was originally on, so power it on again")
            task = vm.PowerOnVM_Task()
            wait_for_tasks(si, [task])
        
        if failure_msg:
            return "FAILURE","",failure_msg
        
        return "","",""

    return "", "", ""
def run(job, **kwargs):
    """
    Gets the domain the Windows server has joined and confirms it is what we
    expect it to be
    """
    if job.status == "FAILURE":
        set_progress("Domain verification hook skipped because job failed")
        return "", "", ""
    server = job.server_set.all()[0]
    if not server.os_build.is_windows():
        set_progress("Skipping domain verification for non-windows machine")
        # only run verification on windows VMs
        return "", "", ""

    expected_domain_obj = server.get_value_for_custom_field("domain_to_join")
    if not expected_domain_obj:
        set_progress("Skipping domain verification test since no " "domain was specified")
        return "", "", ""

    expected_domain = expected_domain_obj.ldap_domain
    actual_domain = server.get_current_domain()

    if actual_domain == expected_domain:
        set_progress("Domain '{}' successfully set on server".format(actual_domain))
        return "", "", ""
    elif actual_domain is None:
        msg = (
            "Could not determine current domain. Be sure to provide either "
            "Windows Server Password or VMware Template Password parameter."
        )
        set_progress(msg)
        return "FAILURE", msg, ""
    else:
        msg = "Failed to join server to domain '{}', actual domain is '{}'".format(expected_domain, actual_domain)
        set_progress(msg)
        return "FAILURE", msg, ""