Example #1
    def get_session_id(self, connection_info=None, save_to_self=False):

        if not connection_info:
            connection_info = self.connection_info
        if not connection_info:
            return None

        login_username = connection_info.username
        login_password = connection_info.password

        headers = {
            'content-type': 'application/json',
            'Tintri-Api-Client': self.__get_client_header()
        }
        data = {
            "username": login_username,
            "password": login_password,
            "typeId": "com.tintri.api.rest.vcommon.dto.rbac.RestApiCredentials"
        }
        login_url = '{}/session/login'.format(
            self.get_base_url(connection_info))

        try:
            httpresp = requests.post(login_url,
                                     json.dumps(data),
                                     headers=headers,
                                     verify=False)
        except requests.exceptions.ConnectionError:
            err = "Unable to connect to Tintri server"
            logger.exception(err)
            raise CloudBoltException(err)

        if httpresp.status_code != 200:
            err = 'Failed to authenticate to {} as user {}. HTTP status code: {}'.format(
                login_url, login_username, httpresp.status_code)
            try:
                json_error = json.loads(httpresp.text)
                logger.error(
                    f"{err}. code: {json_error['code']}, message: {json_error['message']} "
                    f"details: {json_error['causeDetails']}")
            except Exception:
                logger.exception(
                    f"Error attempting to convert httpresp to JSON: {httpresp.text}"
                )

            raise CloudBoltException(err)

        session_id = httpresp.cookies['JSESSIONID']

        if save_to_self:
            self.session_id = session_id

        return session_id
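
A minimal usage sketch (the `TintriClient` wrapper name is hypothetical; the cookie name and `get_base_url` come from the code above): log in once, then replay the JSESSIONID cookie on later requests.

import requests

client = TintriClient(connection_info)  # hypothetical wrapper exposing the method above
session_id = client.get_session_id(save_to_self=True)
# Subsequent API calls authenticate by sending back the JSESSIONID cookie
resp = requests.get(
    "{}/vm".format(client.get_base_url(client.connection_info)),
    cookies={"JSESSIONID": session_id},
    verify=False,
)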
Example #2
def run(job, logger, resources=None):
    """
    `resources` is a queryset (of length 1) of resources being acted on.
    That resource should have an 'aws_stack_name' attribute or nothing is deleted.
    """
    resource = resources.first()
    if not resource:
        raise CloudBoltException(
            "No resource provided, this needs to be run as a pre-delete "
            "resource action")

    rh = AWSHandler.objects.first()
    # See http://boto3.readthedocs.io/en/latest/guide/configuration.html#method-parameters
    session = boto3.Session(aws_access_key_id=rh.serviceaccount,
                            aws_secret_access_key=rh.servicepasswd,
                            region_name='us-west-2')
    client = session.client('cloudformation')

    stack_name = resource.attributes.filter(
        field__name="aws_stack_name").first()
    if not stack_name:
        set_progress("No aws_stack_name attribute set on resource; skipping.")
        return "FAILURE", "", ""

    stack_name = stack_name.value
    set_progress("Deleting Stack {}".format(stack_name))
    response = client.delete_stack(StackName=stack_name)
    logger.debug("Response: {}".format(response))
    return "", "", ""
Example #3
def test_order_blueprint(client):
    # In the case that an order fails, the test writer should be able to run the output
    # of the BP_PAYLOAD in Admin > API Browser > Orders.
    set_progress("Ordering for the API using the following BP_PAYLOAD:")
    set_progress(BP_PAYLOAD)
    order = json.loads(client.post("/api/v2/orders/", body=BP_PAYLOAD))
    order_href = order["_links"]["self"]["href"]
    order_id = get_id_from_href(order_href)
    set_progress("Current Running order: {}".format(order_id))
    result = wait_for_order_completion(client, order_id, 180, 10)
    order_object = Order.objects.filter(id=order_id).first()
    job_list = order_object.list_of_jobs()
    job_object = job_list[0]
    resource = job_object.get_resource()

    if result != 0 and (not resource or resource.lifecycle == "PROVFAILED"):
        test_delete_resource(client, resource)
        raise CloudBoltException(
            "Blueprint Deployment order {} did not succeed.".format(order_id))

    set_progress(
        "Blueprint deployment order {} completed successfully.".format(
            order_id))

    return resource
Example #4
def run(job, **kwargs):
    resource = kwargs.get("resource")
    create_custom_fields_as_needed()

    env_id = "{{ env_id }}"
    env = Environment.objects.get(id=env_id)
    rh = env.resource_handler.cast()
    location = env.node_location
    set_progress("Location: %s" % location)

    resource_group = "{{ resource_group }}"
    account_name = "{{ account_name }}"

    client = _get_client(rh)

    # Missing data
    server_params = {
        "location": location,
        "version": "12.0",
        "administrator_login": "******",
        "administrator_login_password": "******",
        "locations": [{
            "location_name": location
        }],
    }

    set_progress("Creating database %s..." % account_name)

    try:
        command = client.database_accounts.create_or_update(
            resource_group,
            account_name,
            server_params,
        )
    except CloudError as e:
        msg = """The Azure Cosmos DB API was not able to connect.
                                     Please verify that you listed a valid Account Name.
                                     The account name provided was {}.
                                     Please see the Azure docs for more information
                                     https://docs.microsoft.com/en-us/azure/templates/microsoft.documentdb/2015-04-01/databaseaccounts.
                                """.format(account_name)
        raise CloudBoltException(msg) from e

    while not command.done():
        set_progress("Waiting for database to be created...")
        command.wait(20)

    resource.name = "Azure CosmosDB - " + account_name
    resource.azure_account_name = account_name
    resource.resource_group_name = resource_group
    resource.azure_location = location
    resource.azure_rh_id = rh.id
    resource.save()

    # Verify that we can connect to the new database
    set_progress("Verifying the connection to the new database...")
    db = client.database_accounts.get(resource_group,
                                      account_name)  # noqa: F841
    set_progress("Database %s has been created." % account_name)
Example #5
def test_order_blueprint(client):
    order = json.loads(client.post('/api/v2/orders/', body=BP_PAYLOAD))
    order_href = order['_links']['self']['href']
    order_id = get_order_id_from_href(order_href)
    result = wait_for_order_completion(client, order_id, 180, 10)
    if result != 0:
        raise CloudBoltException("Blueprint Deployment order {} did not succeed.".format(order_id))
    set_progress("Blueprint deployment order {} completed successfully.".format(order_id))
Example #6
    def get_vm_by_filter(self, filter_string):
        url = f"vm?{filter_string}"
        resp = self.api_get(url)
        total = resp.get("filteredTotal", 0)
        if total == 0:
            raise NotFoundException(
                f"Could not find a VM matching {filter_string}")
        elif total > 1:
            raise CloudBoltException(
                f"Too many objects returned for '{filter_string}': {total}")
        return resp.get("items")[0]
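
A usage sketch, assuming `tintri` is an instance of the client class this method belongs to; the filter expression shown is illustrative, not taken from the original:

try:
    vm = tintri.get_vm_by_filter("name=web01")  # hypothetical filter string
except NotFoundException:
    vm = None  # no VM matched the filter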
Example #7
def test_order_blueprint(client):
    try:
        # In the case that an order fails, the test writer should be able to run the output
        # of the BP_PAYLOAD in Admin > API Browser > Orders.
        set_progress("Ordering for the API using the following BP_PAYLOAD:")
        set_progress(BP_PAYLOAD)
        order = json.loads(client.post("/api/v2/orders/", body=BP_PAYLOAD))
    except requests.HTTPError as err:
        raise CloudBoltException(
            "The order failed. Please review that the Resource Group is available in the Azure lab to deploy this test."
        ) from err
    order_href = order["_links"]["self"]["href"]
    order_id = get_order_id_from_href(order_href)
    result = wait_for_order_completion(client, order_id, 600, 10)
    if result != 0:
        raise CloudBoltException(
            "Blueprint Deployment order {} did not succeed.".format(order_id))
    set_progress(
        "Blueprint deployment order {} completed successfully.".format(
            order_id))
Example #8
def test_order_blueprint(client):
    order = json.loads(client.post("/api/v2/orders/", body=BP_PAYLOAD))
    order_href = order["_links"]["self"]["href"]
    order_id = get_order_id_from_href(order_href)
    result = wait_for_order_completion(client, order_id, 180, 10)
    if result != 0:
        raise CloudBoltException(
            "Blueprint Deployment order {} did not succeed. Please verify that the Azure Lab has not met it's limit for Web Applications"
            .format(order_id))
    set_progress(
        "Blueprint deployment order {} completed successfully.".format(
            order_id))
Example #9
def get_recommended_server_instance_types(handler: AWSHandler):
    """
    Connect to the boto3 CostExplorer client and get recommended
    instance sizes for all ec2 instances on this account with info
    about potential cost savings.
    """

    wrapper = handler.get_api_wrapper()

    recommendations_by_instance = dict()
    total_savings = 0

    client = wrapper.get_boto3_client(
        'ce',
        handler.serviceaccount,
        handler.servicepasswd,
        'us-east-1'  # Connecting to any region should return recommendations for all regions.
    )

    try:
        response = client.get_rightsizing_recommendation(Service='AmazonEC2')
    except (AttributeError, UnknownServiceError):
        # This happens if the installed version of boto3 pre-dates either
        # this CostExplorer method or the CostExplorer service altogether.
        # If it does, tell the user which CloudBolt version they need to
        # be on for this to work.
        raise CloudBoltException(
            'This version of CloudBolt does not support '
            'this UI-extension. Please upgrade to version 9.0.1 or '
            'greater to get recommendations. '
        )

    summary_data = response.get('Summary')
    total_recommendations = int(summary_data.get('TotalRecommendationCount'))

    if total_recommendations > 0:
        recommendations_list = response.get('RightsizingRecommendations')

        # Save all the recommendations for this region.
        for raw_dict in recommendations_list:
            recommendation_dict = get_recommendation_dict(raw_dict)
            instance_id = recommendation_dict.get('current_instance').get('id')
            recommendations_by_instance[instance_id] = recommendation_dict
            total_savings += float(recommendation_dict.get('recommendations').get('savings'))

    currency = summary_data.get('SavingsCurrencyCode')
    summary = dict(
        total_recommendations=total_recommendations,
        total_savings=total_savings,
        currency=currency
    )

    return recommendations_by_instance, summary
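
A usage sketch showing how the two return values fit together (the keys come from the `summary` and `recommendation_dict` structures built above):

handler = AWSHandler.objects.first()
recommendations, summary = get_recommended_server_instance_types(handler)
print("{total_recommendations} recommendation(s), potential savings: "
      "{total_savings:.2f} {currency}".format(**summary))
for instance_id, rec in recommendations.items():
    # Each value mirrors the recommendation_dict assembled in the loop above
    print(instance_id, rec['recommendations']['savings'])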
Example #10
def test_delete_resource(client, resource):
    body = "{}"
    response = json.loads(
        client.post('/api/v2/resources/{}/{}/actions/1/'.format(
            resource.resource_type.name, resource.id),
                    body=body))
    job_href = response['run-action-job']['self']['href']
    job_id = get_id_from_href(job_href)
    result = wait_for_job_completion(client, job_id, 180, 10)
    if result != 0:
        raise CloudBoltException(
            "Resource deletion job {} did not succeed.".format(job_id))
    set_progress(
        "Resource deletion job {} completed successfully.".format(job_id))
Example #11
def generate_options_for_gcp_project(group=None, **kwargs):
    """
    List all GCP Projects that are orderable by the current group.
    """
    if not GCPHandler.objects.exists():
        raise CloudBoltException(
            'Ordering this Blueprint requires having a '
            'configured Google Cloud Platform resource handler.')
    envs = Environment.objects.filter(
        resource_handler__resource_technology__name='Google Cloud Platform') \
        .select_related('resource_handler')
    if group:
        group_env_ids = [env.id for env in group.get_available_environments()]
        envs = envs.filter(id__in=group_env_ids)
    return [(env.id, u'{env}'.format(env=env)) for env in envs]
Example #12
def test_order_blueprint(client):
    order = json.loads(client.post('/api/v2/orders/', body=BP_PAYLOAD))
    order_href = order['_links']['self']['href']
    order_id = get_order_id_from_href(order_href)
    result = wait_for_order_completion(client, order_id, 1800, 10)
    if result != 0:
        raise CloudBoltException(
            "Blueprint Deployment order {} did not succeed.".format(order_id))
    set_progress(
        "Blueprint deployment order {} completed successfully.".format(
            order_id))
    # Since this resource has no unique name supplied by the user, return the resource created by FSx
    order = Order.objects.get(id=order_id)
    order_item = order.orderitem_set.first()
    return order_item.get_resource()
Example #13
def test_order_blueprint(client):
    # In the case that an order fails, the test writer should be able to run the output
    # of the BP_PAYLOAD in Admin > API Browser > Orders.
    set_progress("Ordering for the API using the following BP_PAYLOAD:")
    set_progress(BP_PAYLOAD)
    order = json.loads(client.post("/api/v2/orders/", body=BP_PAYLOAD))
    order_href = order["_links"]["self"]["href"]
    order_id = get_order_id_from_href(order_href)
    result = wait_for_order_completion(client, order_id, 600, 10)
    if result != 0:
        raise CloudBoltException(
            "Blueprint Deployment order {} did not succeed.".format(order_id))
    set_progress(
        "Blueprint deployment order {} completed successfully.".format(
            order_id))
Example #14
def wait_for_running_status(timeout=None):
    # Check and wait for the build status of the cluster and worker node builds
    status = ''
    start = time.time()
    while status != "Succeeded":
        set_progress("waiting for SUCCEED status of cluster build")
        if status == 'Failed':
            raise CloudBoltException("Deployment failed")
        if timeout is not None and (time.time() - start > timeout):
            break
        cluster = subprocess.check_output([
            'az', 'aks', 'show', '-g', resource_group, '-n', CLUSTER_NAME,
            '-o', 'json'
        ])
        res = json.loads(cluster)
        status = res['provisioningState']
    return status
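
Because the loop only exits on "Succeeded", a raised failure, or timeout, callers should pass a timeout and check the returned status. A sketch (the timeout value is illustrative):

# Cap the poll loop so a stuck deployment cannot block the job forever
status = wait_for_running_status(timeout=1800)  # 30 minutes
if status != "Succeeded":
    raise CloudBoltException(
        "Cluster build did not succeed within the timeout; "
        "last provisioningState was '{}'".format(status))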
Example #15
def test_order_blueprint(client):
    order = json.loads(client.post('/api/v2/orders/', body=BP_PAYLOAD))
    order_href = order['_links']['self']['href']
    order_id = get_id_from_href(order_href)
    set_progress("Current Running order: {}".format(order_id))
    result = wait_for_order_completion(client, order_id, 180, 10)
    order_object = Order.objects.filter(id=order_id).first()
    job_list = order_object.list_of_jobs()
    job_object = job_list[0]
    resource = job_object.get_resource()

    if result != 0 and (not resource or resource.lifecycle == 'PROVFAILED'):
        raise CloudBoltException(
            "Blueprint Deployment order {} did not succeed.".format(order_id))
    set_progress(
        "Blueprint deployment order {} completed successfully.".format(
            order_id))

    return resource
Example #16
def run(job, logger, service=None):
    if not service:
        raise CloudBoltException(
            "No service provided, this needs to be run as a pre-delete "
            "service action")

    rh = AWSHandler.objects.first()
    # See http://boto3.readthedocs.io/en/latest/guide/configuration.html#method-parameters
    session = boto3.Session(aws_access_key_id=rh.serviceaccount,
                            aws_secret_access_key=rh.servicepasswd,
                            region_name='us-west-2')
    client = session.client('cloudformation')

    stack_name = service.attributes.filter(
        field__name="aws_stack_name").first()
    if not stack_name:
        return "", "", ""
    stack_name = stack_name.value
    set_progress("Deleting Stack {}".format(stack_name))
    response = client.delete_stack(StackName=stack_name)
    logger.debug("Response: {}".format(response))
    return "", "", ""
Example #17
    def approve(self, approver=None, parent_job=None):
        """
        Sets this order to the "Active" status and kicks off the jobs needed
        to complete this order.

        One job of the appropriate type ('provision' or 'decom') is kicked
        off per OrderItem for this order.  The exception is when the
        "quantity" field on the OrderItem is set; in that case that many
        identical jobs will be kicked off.

        Returns list of jobs and error messages from any cleanup of order
        items.
        """
        if self.status != 'PENDING':
            msg = _("Only orders that are in 'PENDING' state can be approved. "
                    "Current state of order is '{status}'.").format(
                        status=self.status)
            raise CloudBoltException(msg)

        approve_this_order = False
        if self.is_multilevel_approval():
            logger.info('models.approve is multilevel!')
            self.approve_my_grms(approver)
            logger.info(f'models.approve after approve_my_grms ({approver})!')
            if self.is_multilevel_approval():
                logger.info('models.approve ml approval complete!')
                approve_this_order = True
        else:
            logger.info('models.approve is NOT multilevel!')
            # single-level approval
            approve_this_order = True

        if not approve_this_order:
            # This should only happen when multilevel approvals are incomplete
            msg = _(
                "Cannot fully approve this order.  Multilevel approvals not complete. "
                "Current state of order is '{status}'.").format(
                    status=self.status)
            return [], msg

        try:
            # Raise an error to bubble up specific reason as part of the exception
            self.group.quota_set.can_use(raise_error=True, **self.net_usage())
        except QuotaSetError as quota_set_error:
            raise QuotaError(
                _("Cannot approve order #{order_id} because doing so would exceed the "
                  "quota for group '{group}'.  {error}").format(
                      order_id=self.id,
                      group=self.group,
                      error=quota_set_error))

        # Before we create job records, order the order items to make
        # sure decom jobs are queued before prov jobs.  The job engine
        # may still parallelize them; that's something we can revisit
        # later.  In the meantime, customers can set the concurrency
        # level to 1 to prevent this.  We're taking advantage of the
        # fact that "decom" comes before "prov" in the alphabet here.
        order_items = [
            oi.cast()
            for oi in self.top_level_items.order_by("real_type", "add_date")
        ]

        order_items, msg = self.__filter_illegal_order_items(order_items)
        if not order_items:
            msg = _(
                "{message}  There are no valid order items left.  This order is "
                "being marked as complete.").format(message=msg)
            self.complete("SUCCESS")
            return [], msg

        self.status = "ACTIVE"
        self.approved_by = approver
        self.approve_date = get_current_time()
        self.save()

        history_msg = _("The '{order}' order has been approved.").format(
            order=escape(self))
        self.add_event('APPROVED', history_msg, profile=self.owner)

        # run pre order execution hook
        try:
            cbhooks.run_hooks("pre_order_execution", order=self)
        except cbhooks.exceptions.HookFailureException as e:
            self.status = "FAILURE"
            self.save()
            msg = _("Failed to run hook for order approval. Status: {status},"
                    " Output: {output}, Errors: {errors}").format(
                        status=e.status, output=e.output, errors=e.errors)

            history_msg = _("The '{order}' order has failed.").format(
                order=escape(self))
            self.add_event('FAILED', history_msg, profile=self.owner)
            raise CloudBoltException(msg)

        from jobs.models import Job
        # Saving job objects will cause them to be kicked off by the
        # job engine within a minute
        jobs = []

        for order_item in order_items:
            jobtype = getattr(order_item, 'job_type', None)
            if not jobtype:
                # the job type will default to the first word of the class type
                # ex. "provision", "decom"

                jobtype = str(order_item.real_type).split(" ", 1)[0]
            quantity = 1
            # quantity is a special field on order_items.  If an
            # order_item has the quantity field, kick off that many
            # jobs
            if hasattr(order_item, 'quantity') and \
                    order_item.quantity is not None and \
                    order_item.quantity != '':
                quantity = int(order_item.quantity)
            for i in range(quantity):
                job = Job(job_parameters=order_item,
                          type=jobtype,
                          owner=self.owner,
                          parent_job=parent_job)
                job.save()

                # Associate the job with any server(s)
                # This may seem unnecessary because it's done when most jobs
                # run, but it's needed at the very least for scheduled server
                # modification jobs (for changing resources) so they show up on
                # the server as scheduled before they actually run
                servers = []
                if hasattr(order_item, "server"):
                    servers = [order_item.server]
                elif hasattr(order_item, "servers"):
                    servers = order_item.servers.all()
                for server in servers:
                    server.jobs.add(job)

                jobs.append(job)

        # If it didn't make any jobs, just call it done
        if not jobs:
            self.complete("SUCCESS")

        return jobs, msg
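
A usage sketch of the calling side (variable names are illustrative): approve a pending order and report what was kicked off, handling the exceptions the method documents above.

try:
    jobs, msg = order.approve(approver=profile)
except QuotaError as err:
    logger.error("Order exceeds the group quota: %s", err)
except CloudBoltException as err:
    logger.error("Order could not be approved: %s", err)
else:
    logger.info("Kicked off %d job(s). %s", len(jobs), msg)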
Example #18
    def start_approval_process(self, request=None):
        """
        This method determines what order process should be taken, and
        takes it.  By default, the process is to email the approvers, but
        this can be overridden by customers to instead call out to a hook,
        and that can be overridden by auto-approval (set on the group or
        env, or by the owner being an approver or a super admin).

        This method returns a message summarizing what action was taken.

        `request` is needed to determine the current portal URL; if not
        passed, the default portal URL is used.
        """
        # done here to avoid circular import
        from cbhooks.models import HookPoint

        hook_point = HookPoint.objects.filter(name="order_approval").first()
        orch_actions = cbhooks._get_orchestration_actions_to_run(hook_point)
        if orch_actions:
            # The orchestration action needs to run first so that a hook
            # can model the approval process correctly and nothing
            # auto-approves before the hook is run.
            logger.debug(
                "Order Approval orchestration actions exist, so bypassing built-in approver emails."
            )
            try:
                cbhooks.run_hooks("order_approval", order=self)
            except cbhooks.exceptions.HookFailureException as e:
                msg = _(
                    "Failed to run hook for order approval. Status: {status},"
                    " Output: {output}, Errors: {errors}").format(
                        status=e.status, output=e.output, errors=e.errors)
                raise CloudBoltException(msg)
            return ""

        # Now that the hooks have run, check whether the order should be auto-approved
        profile = request.get_user_profile()
        if self.is_multilevel_approval():
            self.approve_my_grms(profile)

        if self.should_auto_approve():
            logger.debug(
                "Order can be automatically approved, attempting approval by {}"
                .format(self.owner))
            jobs, msg = self.approve(self.owner)
            if jobs:
                msg = render_to_string(
                    'orders/approved_msg.html', {
                        'order': self,
                        'autoapproved': True,
                        'num_jobs': len(jobs),
                        'extramsg': msg,
                    })
            return msg
        else:
            # No auto approval and no approval hooks, so go with
            # the default process of emailing a set of approvers, unless the
            # owner is an approver.
            msg = _("Order #{order_id} has been submitted for approval.  "
                    ).format(order_id=self.id)
            msg += orders.mail.email_approvers(self, request)
            logger.debug(msg)
            return msg
Example #19
def run(job=None, logger=None, **kwargs):
    """
    """
    db_name = DB_NAME
    if not db_name.isalnum():
        raise CloudBoltException(
            f"Only alphanumeric characters (A-Z and 0-9, case insensitive) are allowed; the name provided was '{db_name}'."
        )

    instance_name = db_name

    environment = Environment.objects.get(id=ENVIRONMENT)
    rh = environment.resource_handler.cast()
    assert isinstance(rh, GCPHandler)
    project = environment.gcp_project
    region = GCP_REGION
    set_progress("REGION: %s" % GCP_REGION)

    try:
        CustomField.objects.get_or_create(
            name="gcp_sql_rh_id",
            label="Google RH ID",
            type="STR",
            description="Used by the Google SQL blueprint",
            show_as_attribute=True,
        )
    except IntegrityError:
        # IntegrityError: (1062, "Duplicate entry 'google_rh_id' for key 'name'")
        pass

    try:
        CustomField.objects.get_or_create(
            name="gcp_sql_instance_name",
            label="Google instance identifier",
            type="STR",
            description="Used by the Google Cloud SQL blueprint",
            show_as_attribute=True,
        )
    except IntegrityError:
        # IntegrityError: (1062, "Duplicate entry 'db_identifier' for key 'name'")
        pass

    try:
        CustomField.objects.get_or_create(
            name="gcp_sql_project",
            label="Google project",
            type="STR",
            description="Used by the Google Cloud SQL blueprint",
            show_as_attribute=True,
        )
    except IntegrityError:
        # IntegrityError: (1062, "Duplicate entry 'db_identifier' for key 'name'")
        pass

    resource = kwargs.get("resource")
    resource.name = "Google SQL - " + instance_name
    resource.gcp_sql_instance_name = instance_name
    # Store the resource handler's ID on this resource so the teardown action
    # knows which credentials to use.
    resource.gcp_sql_rh_id = rh.id
    resource.gcp_sql_project = project
    resource.save()

    try:
        account_info = json.loads(
            rh.gcp_projects.get(id=project).service_account_info)
    except Exception:
        account_info = json.loads(
            rh.gcp_projects.get(id=project).service_account_key)

    credentials = ServiceAccountCredentials.from_json_keyfile_dict(
        account_info)

    job.set_progress(
        "Connecting to Google Cloud through service account email {}".format(
            account_info["client_email"]))
    set_progress("RH: %s" % rh)

    service_name = "sqladmin"
    version = "v1beta4"
    client = build(service_name, version, credentials=credentials)

    set_progress("Connection established")

    try:
        inst_data = client.instances().list(project=project).execute()

        if "items" in inst_data:
            instance_names = [inst["name"] for inst in inst_data["items"]]
            if instance_name in instance_names:
                return (
                    "ERROR",
                    'Server instance "%s" already exists' % instance_name,
                    "",
                )
    except HttpError as e:
        client_username = account_info["client_email"].split("@")[0]
        return (
            "ERROR",
            "Server instance {instance_name} could not be created ({reason}), make sure that this ResourceHandler's service account ({service_account_name}) is given the Cloud SQL Admin Permission"
            .format(
                instance_name=instance_name,
                reason=str(e),
                service_account_name=client_username,
            ),
            e,
        )

    set_progress("\nCreating instance...")

    body = {
        "kind": "sql#instance",
        "name": instance_name,
        "project": project,
        "region": region,
        "databaseVersion": DB_VERSION,
        "settings": {
            "tier": "db-n1-standard-1"
        },
    }
    result = client.instances().insert(project=project, body=body).execute()

    # Wait for the server instance to be created:
    while True:
        inst_data = client.instances().list(project=project).execute()
        status = None
        for inst in inst_data["items"]:
            if inst["name"] == instance_name:
                status = inst["state"]
                break
        set_progress("Status of the server instance is: %s" % status)
        if status == "RUNNABLE":
            break
        time.sleep(2)

    set_progress("\nNow attempting to create a new database...")

    body = {
        "kind": "sql#database",
        "name": db_name,
        "project": project,
        "instance": instance_name,
    }

    result = (client.databases().insert(project=project,
                                        instance=instance_name,
                                        body=body).execute())
    assert result["status"] == "DONE"

    set_progress("Database %s is now available on instance: %s" %
                 (db_name, instance_name))
Example #20
def run(job, *args, **kwargs):
    bp = ServiceBlueprint.objects.get(id=BLUEPRINT)
    starting_resource_set = bp.resource_set.all()
    set_progress(
        "Running Continuous Infrastructure Test for blueprint {}".format(bp))

    client = get_api_client()
    set_progress("### ORDERING BLUEPRINT TO TEST DELETING###",
                 tasks_done=0,
                 total_tasks=3)
    generate_test_file(TEST_FILE_FILEPATH, TEST_FILE_NAME)
    created_resource = test_order_blueprint(client)
    test_delete_resource(client, created_resource)
    resource_set_after_deleting = bp.resource_set.all()
    if len(starting_resource_set) != len(resource_set_after_deleting):
        intersection = starting_resource_set & resource_set_after_deleting
        set_progress(
            "Delete failed: the resource set ended bigger than it started even "
            "after deleting the added resource; here are the extra values: {}"
            .format(intersection))

    # Order the BP
    set_progress("### ORDERING BLUEPRINT TO TEST DISCOVERY###",
                 tasks_done=0,
                 total_tasks=3)
    generate_test_file(TEST_FILE_FILEPATH, TEST_FILE_NAME)
    created_resource = test_order_blueprint(client)
    created_resource_name = created_resource.name
    created_resource_azure_storage_file_name = created_resource.azure_storage_file_name
    created_resource_azure_storage_file_share_name = created_resource.azure_storage_file_share_name
    created_resource_resource_group_name = created_resource.resource_group_name
    created_resource_azure_storage_account_name = created_resource.azure_storage_account_name
    created_resource_azure_account_key = created_resource.azure_account_key
    created_resource_azure_account_key_fallback = created_resource.azure_account_key_fallback

    set_progress(f"RESOURCE {created_resource}")
    rce = bp.resource_set.last()
    set_progress(f"LAST RESOURCE {rce}")
    # Delete the resource from the database only
    created_resource.delete()

    set_progress("active resources before sync: {}".format(
        bp.resource_set.filter(lifecycle='Active')))
    set_progress("provisioning resources before sync: {}".format(
        bp.resource_set.filter(lifecycle='Provisioning')))
    set_progress("all resources before sync: {}".format(bp.resource_set.all()))
    set_progress("### DISCOVERING RESOURCES FOR BLUEPRINT ###", tasks_done=1)
    bp.sync_resources()

    # should be able to get the resource since the sync should have created it
    set_progress("active resources after sync: {}".format(
        bp.resource_set.filter(lifecycle='Active')))
    set_progress("provisioning resources after sync: {}".format(
        bp.resource_set.filter(lifecycle='Provisioning')))
    set_progress("all resources after sync: {}".format(bp.resource_set.all()))
    discovered_resource = bp.resource_set.filter(name=NEW_RESOURCE_NAME,
                                                 lifecycle='Active').first()

    if not discovered_resource:
        discovered_resource = bp.resource_set.filter(
            name=NEW_RESOURCE_NAME).first()

    set_progress('filtered set: {}'.format(
        bp.resource_set.filter(name=NEW_RESOURCE_NAME)))
    set_progress('discovered_resource: {}'.format(discovered_resource))
    set_progress('created_resource_name: {}'.format(created_resource_name))
    sync_failed = False
    failure_message = ''

    try:
        if discovered_resource.name != created_resource_name:
            raise CloudBoltException(
                'Sync failed: Discovered resource\'s name not the same as created resource, {} =/= {}'
                .format(discovered_resource.name, created_resource_name))

        if discovered_resource.azure_storage_file_name != created_resource_azure_storage_file_name:
            raise CloudBoltException(
                'Sync failed: Discovered resource\'s azure_storage_file_name not the same as created resource, {} =/= {}'
                .format(discovered_resource.azure_storage_file_name,
                        created_resource_azure_storage_file_name))

        if discovered_resource.azure_storage_file_share_name != created_resource_azure_storage_file_share_name:
            raise CloudBoltException(
                'Sync failed: Discovered resource\'s azure_storage_file_share_name not the same as created resource, {} =/= {}'
                .format(discovered_resource.azure_storage_file_share_name,
                        created_resource_azure_storage_file_share_name))

        if discovered_resource.azure_storage_account_name != created_resource_azure_storage_account_name:
            raise CloudBoltException(
                'Sync failed: Discovered resource\'s azure_storage_account_name not the same as created resource, {} =/= {}'
                .format(discovered_resource.azure_storage_account_name,
                        created_resource_azure_storage_account_name))

        if discovered_resource.azure_account_key != created_resource_azure_account_key:
            raise CloudBoltException(
                'Sync failed: Discovered resource\'s azure_account_key not the same as created resource, {} =/= {}'
                .format(discovered_resource.azure_account_key,
                        created_resource_azure_account_key))

        if discovered_resource.azure_account_key_fallback != created_resource_azure_account_key_fallback:
            raise CloudBoltException(
                'Sync failed: Discovered resource\'s azure_account_key_fallback not the same as created resource, {} =/= {}'
                .format(discovered_resource.azure_account_key_fallback,
                        created_resource_azure_account_key_fallback))

    except Exception as e:
        set_progress("### FAILED TO SYNC RESOURCE ###")
        set_progress(e)
        sync_failed = True

    try:
        set_progress("### DELETING DISCOVERED RESOURCE FOR BLUEPRINT ###",
                     tasks_done=2)
        test_delete_resource(client, discovered_resource)
    except Exception as e:
        set_progress(
            "Delete failed: deletion of the discovered bp threw an exception: {}"
            .format(e))

    ending_resource_set = bp.resource_set.all()
    if len(starting_resource_set) != len(ending_resource_set):
        intersection = starting_resource_set & ending_resource_set
        set_progress(
            "Delete failed: the resource set ended bigger than it started even "
            "after deleting the added resource; here are the extra values: {}"
            .format(intersection))

    set_progress("ALL Tests completed!", tasks_done=3)

    if sync_failed:
        raise CloudBoltException("Failed to Sync Resource")
Example #21
def run(job, **kwargs):
    resource = kwargs.get('resource')
    create_custom_fields_as_needed()

    storage_account = '{{ storage_account }}'
    file_path = "{{ file }}"
    azure_storage_file_share_name = '{{ azure_storage_file_share_name }}'
    overwrite_files = {{overwrite_files}}
    file_name = Path(file_path).name

    if file_path.startswith(settings.MEDIA_URL):
        set_progress("Converting relative URL to filesystem path")
        file_path = file_path.replace(settings.MEDIA_URL, settings.MEDIA_ROOT)

    if not file_path.startswith(settings.MEDIA_ROOT):
        file_path = os.path.join(settings.MEDIA_ROOT, file_path)

    try:
        set_progress("Connecting To Azure...")
        account_key = Resource.objects.filter(
            name__icontains=storage_account)[0].azure_account_key
        fallback_account_key = Resource.objects.filter(
            name__icontains=storage_account)[0].azure_account_key_fallback
        file_service = FileService(account_name=storage_account,
                                   account_key=account_key)

        set_progress(
            'Creating file share {file_share_name} if it doesn\'t already exist...'
            .format(file_share_name=azure_storage_file_share_name))
        file_service.create_share(share_name=azure_storage_file_share_name,
                                  quota=1)

        set_progress('Connecting to file share')
        file_name_on_azure = file_name
        count = 0
        while (not overwrite_files) and file_service.exists(
                share_name=azure_storage_file_share_name,
                file_name=file_name_on_azure,
                directory_name=''):
            count += 1
            file_name_on_azure = '{file_name}({duplicate_number})'.format(
                file_name=file_name, duplicate_number=count)
            set_progress(
                'File with name already exists on given file share, testing new name: {new_name}'
                .format(new_name=file_name_on_azure))

        local_resource_name = azure_storage_file_share_name + '-' + file_name_on_azure
        if overwrite_files and file_service.exists(
                share_name=azure_storage_file_share_name,
                file_name=file_name_on_azure,
                directory_name=''):
            set_progress(
                'File with name already exists on given file share, overwriting'
            )
            old_resource_to_overwrite = Resource.objects.filter(
                name=local_resource_name, lifecycle='ACTIVE').first()

            if old_resource_to_overwrite:
                old_resource_to_overwrite.delete()

        set_progress(
            'Creating the file with name {file_name} on the Storage Account {storage_account} using the share named {share_name}'
            .format(file_name=file_name_on_azure,
                    storage_account=storage_account,
                    share_name=azure_storage_file_share_name))
        file_service.create_file_from_path(
            share_name=azure_storage_file_share_name,
            file_name=file_name_on_azure,
            directory_name='',
            local_file_path=file_path)
        os.remove(file_path)

        set_progress(
            'Creating local storage resource named {resource_name}'.format(
                resource_name=local_resource_name))
        resource.name = local_resource_name
        resource.azure_storage_account_name = storage_account
        resource.azure_account_key = account_key
        resource.azure_account_key_fallback = fallback_account_key
        resource.azure_storage_file_share_name = azure_storage_file_share_name
        resource.azure_storage_file_name = file_name_on_azure
        resource.save()

        return "Success", "The File has succesfully been uploaded", ""
    except Exception as e:
        if os.path.exists(file_path):
            os.remove(file_path)

        if resource:
            resource.delete()

        raise CloudBoltException(
            "File could not be uploaded because of the following error: {error}"
            .format(error=e))