Code example #1
File: utils.py Project: HumayunNasir23/test
def verify_cloud_credentials(cloud):
    """
    Verify IBM Cloud credentials and save them to the database
    """
    try:
        hmac = True
        resource_group_names = []
        ibm_manager = IBMManager(cloud)
        cloud.credentials = IBMCredentials(
            ibm_manager.iam_ops.authenticate_cloud_account())
        if cloud.service_credentials:
            ibm_manager.cos_ops.fetch_ops.get_buckets()
            resource_groups = ibm_manager.resource_ops.raw_fetch_ops.get_resource_groups()
            resource_group_names = [
                resource_group["name"] for resource_group in resource_groups
            ]
            # HMAC validation requires service credentials with both keys set
            if cloud.service_credentials.access_key_id and cloud.service_credentials.secret_access_key:
                hmac = validate_hmac(
                    decrypt_api_key(cloud.service_credentials.access_key_id),
                    decrypt_api_key(cloud.service_credentials.secret_access_key))
        doosradb.session.commit()
    except (IBMAuthError, IBMConnectError, IBMExecuteError,
            IBMInvalidRequestError) as ex:
        current_app.logger.info(ex)
        cloud.status = INVALID
        doosradb.session.commit()
    else:
        if len(resource_group_names) != len(set(resource_group_names)):
            cloud.status = IBMCloud.STATUS_ERROR_DUPLICATE_RESOURCE_GROUPS
        else:
            cloud.status = VALID
        if not hmac:
            cloud.status = INVALID
        doosradb.session.commit()
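The validate_hmac helper is not shown in these examples. A minimal sketch of what it could look like, assuming the HMAC keypair is validated by signing a plain S3 call against IBM COS (the endpoint URL and the ibm_botocore imports are assumptions, not the project's actual code):

import ibm_boto3
from ibm_botocore.client import Config
from ibm_botocore.exceptions import ClientError

def validate_hmac(access_key_id, secret_access_key,
                  endpoint_url="https://s3.us-south.cloud-object-storage.appdomain.cloud"):
    """Return True if the HMAC keypair can sign a simple COS request."""
    client = ibm_boto3.client(
        "s3",
        aws_access_key_id=access_key_id,          # HMAC credentials use the
        aws_secret_access_key=secret_access_key,  # standard AWS-style kwargs
        config=Config(signature_version="s3v4"),
        endpoint_url=endpoint_url)
    try:
        client.list_buckets()
        return True
    except ClientError:
        return False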
Code example #2
def get_image_size(cloud_id, region, bucket_name, image_name, image_format):
    """
    Get the size of the image to convert. This task gets the size from the object's HEAD data using the S3 API
    :param cloud_id: <string> cloud ID for which the image is being converted (for credentials)
    :param region: <string> region in which the COS bucket resides
    :param bucket_name: <string> bucket name in which the image resides
    :param image_name: <string> name of the image
    :param image_format: <string> format (extension) of the image object in the bucket
    :return: <int> image size in MBs
    """
    cloud = IBMCloud.query.filter_by(id=cloud_id).first()
    if not cloud:
        return

    client = ibm_boto3.client(
        service_name='s3',
        ibm_api_key_id=decrypt_api_key(cloud.api_key),
        ibm_service_instance_id=cloud.service_credentials.resource_instance_id,
        ibm_auth_endpoint="https://iam.cloud.ibm.com/identity/token",
        config=Config(signature_version="oauth"),
        endpoint_url="https://s3.{region}.cloud-object-storage.appdomain.cloud"
        .format(region=region))

    response = client.head_object(Bucket=bucket_name,
                                  Key="{image_name}.{image_format}".format(
                                      image_name=image_name,
                                      image_format=image_format))
    if not response.get("ResponseMetadata") or not response["ResponseMetadata"].get("HTTPHeaders") \
            or not response["ResponseMetadata"]["HTTPHeaders"].get("content-length"):
        return

    # content-length is reported in bytes; convert to decimal MBs
    return int(response["ResponseMetadata"]["HTTPHeaders"]["content-length"]) // 1000000
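A hypothetical call, with placeholder values for the cloud, bucket, and image (the function returns None when the cloud or the content-length header is missing):

size_mb = get_image_size(cloud_id="1a2b3c",
                         region="us-south",
                         bucket_name="migration-images",
                         image_name="my-server-image",
                         image_format="qcow2")
if size_mb:
    print(f"Image is roughly {size_mb} MB")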
Code example #3
    def generate_token(self):
        """
        Authenticate the IBM Cloud account and return an IAM token
        :return: IAM token response as JSON
        """
        softlayer_cloud = doosradb.session.query(SoftlayerCloud).filter_by(
            username=self.user_name, api_key=self.api_key).first()
        if not softlayer_cloud:
            return Response("SOFTLAYER_CLOUD_NOT_FOUND", status=404)

        ibm_cloud = doosradb.session.query(IBMCloud).filter_by(
            id=softlayer_cloud.ibm_cloud_account_id).first()
        if not ibm_cloud:
            return Response("IBM_CLOUD_NOT_FOUND", status=404)

        response = requests.post(AUTH_URL,
                                 params={
                                     "grant_type": "urn:ibm:params:oauth:grant-type:apikey",
                                     "apikey": decrypt_api_key(ibm_cloud.api_key),
                                     "client_id": "bx",
                                     "client_secret": "bx"
                                 },
                                 headers={
                                     "Content-Type": "application/x-www-form-urlencoded",
                                     "Accept": "application/json"
                                 })
        return response.json()
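For reference, a successful IAM API-key grant returns JSON that includes access_token, token_type, and expires_in fields; code example #13 below joins token_type and access_token into an Authorization header. A small sketch of consuming the token that way (the endpoint constant is assumed to match the IAM auth endpoint used in example #2):

import requests

AUTH_URL = "https://iam.cloud.ibm.com/identity/token"

def get_auth_header(api_key):
    """Exchange a plaintext API key for an IAM token and build an Authorization header."""
    response = requests.post(AUTH_URL,
                             params={
                                 "grant_type": "urn:ibm:params:oauth:grant-type:apikey",
                                 "apikey": api_key,
                                 "client_id": "bx",
                                 "client_secret": "bx"
                             },
                             headers={"Accept": "application/json"})
    token = response.json()
    return {"Authorization": f"{token['token_type']} {token['access_token']}"}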
Code example #4
def task_add_content_migration_meta_data(self, user_id, data):
    user = User.query.get(user_id)
    softlayer_clouds = user.project.softlayer_clouds.filter_by(
        status=VALID).all()
    vsi_id = None  # stays None if no SoftLayer cloud exposes the given public IP
    for cloud in softlayer_clouds:
        client = create_client_from_env(cloud.username,
                                        decrypt_api_key(cloud.api_key))
        vs_manager = VSManager(client)
        try:
            vsi_id = vs_manager.list_instances(public_ip=data["ip"])[0]["id"]
            break
        except (KeyError, IndexError):
            vsi_id = None
    if not vsi_id:
        LOGGER.info(
            f"VSI Not Found for public IP: {data['ip']} against user: {user_id}"
        )
        return
    cm_object = CMModels.query.filter_by(softlayer_vsi_id=vsi_id).first()
    if cm_object:
        cm_object.cm_meta_data = data
    else:
        cm_object = CMModels(softlayer_vsi_id=vsi_id, cm_meta_data=data)
        doosradb.session.add(cm_object)
    doosradb.session.commit()

    LOGGER.info(f"VSI: {vsi_id} meta data saved for user: {user_id}")
Code example #5
def update_softlayer_cloud_account(user_id, user, softlayer_cloud_account_id):
    """
    Update Softlayer Cloud Account
    :param user_id:
    :param user:
    :param softlayer_cloud_account_id:
    :return:
    """
    from doosra.tasks.other.softlayer_tasks import task_validate_softlayer_cloud

    data = request.get_json(force=True)
    softlayer_cloud_account = doosradb.session.query(SoftlayerCloud).filter_by(
        project_id=user.project.id, id=softlayer_cloud_account_id).first()
    if not softlayer_cloud_account:
        current_app.logger.info(
            "No Softlayer Cloud account found with ID {id}".format(
                id=softlayer_cloud_account_id))
        return Response(status=204)

    if not softlayer_cloud_account.ibm_cloud_account_id and data.get("ibm_cloud_account_id"):
        softlayer_cloud_account.ibm_cloud_account_id = data["ibm_cloud_account_id"]

    if data.get("name") and data["name"] != softlayer_cloud_account.name:
        existing_softlayer_account = doosradb.session.query(
            SoftlayerCloud).filter_by(name=data["name"],
                                      project_id=user.project.id).first()
        if existing_softlayer_account:
            return Response("ERROR_SAME_NAME", status=409)

        softlayer_cloud_account.name = data["name"]

    if data.get("username"
                ) and data["username"] != softlayer_cloud_account.username:
        existing_softlayer_account = doosradb.session.query(
            SoftlayerCloud).filter_by(username=data["username"],
                                      project_id=user.project.id).first()
        if existing_softlayer_account:
            return Response("ERROR_SAME_USERNAME", status=409)

        softlayer_cloud_account.username = data["username"]

    if data.get("ibm_cloud_account_id") and data["ibm_cloud_account_id"] != \
            softlayer_cloud_account.ibm_cloud_account_id:
        softlayer_cloud_account.ibm_cloud_account_id = data[
            "ibm_cloud_account_id"]

    if data.get("api_key") and data["api_key"] != decrypt_api_key(
            softlayer_cloud_account.api_key):
        softlayer_cloud_account.api_key = data["api_key"]

    softlayer_cloud_account.status = "AUTHENTICATING"
    doosradb.session.commit()
    task_validate_softlayer_cloud.apply_async(
        queue='sync_queue', args=[softlayer_cloud_account.id])
    return Response(json.dumps(softlayer_cloud_account.to_json()),
                    status=200,
                    mimetype="application/json")
Code example #6
    def __init__(self, username: str, api_key: str):
        assert (username and
                api_key), "'SoftLayerManager' must have username and api_key"

        self.username = username
        self.api_key = decrypt_api_key(api_key)
        self.client = SoftLayer.create_client_from_env(self.username,
                                                       self.api_key)
        self.fetch_ops = FetchOperations(self.client, self.username)
        self.create_ops = CreateOperations(self.client, self.username)
Code example #7
    def __init__(self, cloud, region, service_id):
        self.cloud = cloud
        self.region = region or DEFAULT_REGION

        self.cos = ibm_boto3.client(
            "s3",
            ibm_api_key_id=decrypt_api_key(self.cloud.api_key),
            ibm_service_instance_id=service_id,
            ibm_auth_endpoint=COS_AUTH_ENDPOINT,
            config=Config(signature_version="oauth"),
            endpoint_url=COS_ENDPOINT.format(region=self.region))
        self.fetch_ops = FetchOperations(self.cloud, self.region, self.cos)
Code example #8
def task_group_clouds_by_api_key():
    """
    Assign same group id to all clouds with same api-key
    """

    # TODO: introduce pagination when the number of cloud entries grows large
    clouds = (doosradb.session.query(IBMCloud).filter(
        IBMCloud.status.in_([VALID])).all())

    grouped_clouds_lib = {}
    clouds_lib = {}
    for cloud in clouds:
        key = decrypt_api_key(cloud.api_key)  # decrypt once per cloud
        if cloud.group_id:
            grouped_clouds_lib.setdefault(key, []).append(cloud)
        else:
            clouds_lib.setdefault(key, []).append(cloud)

    for key, ungrouped_clouds in clouds_lib.items():
        existing_group = grouped_clouds_lib.get(key)
        group_id = existing_group[0].group_id if existing_group else uuid.uuid4().hex
        for cloud in ungrouped_clouds:
            cloud.group_id = group_id

    doosradb.session.commit()
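The grouping rule is easiest to see with plain dictionaries standing in for the ORM objects; a toy sketch (keys and group ids are made up):

import uuid

clouds = [
    {"api_key": "k1", "group_id": "g-existing"},  # already grouped
    {"api_key": "k1", "group_id": None},          # should join g-existing
    {"api_key": "k2", "group_id": None},          # should get a fresh id
]

grouped, ungrouped = {}, {}
for cloud in clouds:
    target = grouped if cloud["group_id"] else ungrouped
    target.setdefault(cloud["api_key"], []).append(cloud)

for key, members in ungrouped.items():
    existing = grouped.get(key)
    group_id = existing[0]["group_id"] if existing else uuid.uuid4().hex
    for cloud in members:
        cloud["group_id"] = group_id

# all "k1" clouds now share "g-existing"; the "k2" cloud has a new group id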
Code example #9
def task_restore_workload_backup(self, task_id, region, cloud_id,
                                 cluster_migration_task_id, cluster_id,
                                 backup_name):
    cluster_migration_task = doosradb.session.query(
        KubernetesClusterMigrationTask).filter_by(
            id=cluster_migration_task_id).first()

    if not cluster_migration_task:
        LOGGER.info("Migration Task Not Found")
        doosradb.session.commit()
        return Response(status=404)

    cluster = doosradb.session.query(KubernetesCluster).filter_by(
        id=cluster_id).first()
    if not cluster:
        LOGGER.info("No IKS Cluster found with ID: {}".format(cluster_id))
        return Response(status=404)

    self.resource = cluster
    self.resource_type = "iks_clusters"
    self.report_path = ".cluster_migration.steps.restore"
    self.report_utils.update_reporting(task_id=task_id,
                                       resource_name=self.resource.name,
                                       resource_type=self.resource_type,
                                       stage=PROVISIONING,
                                       status=IN_PROGRESS,
                                       path=self.report_path)

    try:
        LOGGER.info(f"Migration task found for ID {cluster_migration_task.id}")
        deploy_velero = deploy_velero_agent(cluster_migration_task.id)

        if deploy_velero:
            LOGGER.info("Velero successfully deployed on cluster '{}'".format(
                cluster.name))

            source_config = K8s(
                configuration_json=cluster_migration_task.source_cluster)
            target_config = K8s(
                configuration_json=cluster_migration_task.target_cluster)

            restore_name = "restore" + str(
                datetime.utcnow().strftime("-%m-%d-%Y%H-%M-%S"))
            backup = deploy_velero_and_get_backup_restore(
                kube_config=target_config,
                task_type="BACKUP",
                backup_name=backup_name)

            if backup:
                LOGGER.info("Restore In Progress")
                self.report_path = ".cluster_migration.steps.restore"
                self.report_utils.update_reporting(
                    task_id=task_id,
                    resource_name=self.resource.name,
                    resource_type=self.resource_type,
                    stage=PROVISIONING,
                    status=IN_PROGRESS,
                    path=self.report_path)

                LOGGER.info("Discovering source cluster PVC's")
                pvcs = source_config.client.CoreV1Api(
                ).list_persistent_volume_claim_for_all_namespaces(watch=False)

                if pvcs.items:
                    pvcs_list = list()
                    object_storage = False
                    cos_secret = cluster_migration_task.cos
                    access_key = decrypt_api_key(cos_secret["access_key_id"])
                    secret_key = decrypt_api_key(
                        cos_secret["secret_access_key"])

                    access_key_bytes = access_key.encode('ascii')
                    secret_key_bytes = secret_key.encode('ascii')

                    access_key_base64_bytes = base64.b64encode(
                        access_key_bytes)
                    secret_key_base64_bytes = base64.b64encode(
                        secret_key_bytes)

                    access_key = access_key_base64_bytes.decode('ascii')
                    secret_key = secret_key_base64_bytes.decode('ascii')

                    for pvc in pvcs.items:
                        if pvc.metadata.annotations['volume.beta.kubernetes.io/storage-provisioner'] ==\
                                "ibm.io/ibmc-file":
                            LOGGER.info(
                                "Discovering PVCs with File Storage Classes")

                            body = json.loads(COS_PVC)
                            body['metadata']['name'] = pvc.metadata.name
                            body['metadata'][
                                'namespace'] = pvc.metadata.namespace
                            body['metadata']['annotations']['ibm.io/bucket'] = \
                                "bucket" + str(datetime.utcnow().strftime("-%m-%d-%Y-%H-%M-%S"))
                            body['spec']['accessModes'] = pvc.spec.access_modes
                            body['spec']['resources']['requests'][
                                'storage'] = pvc.spec.resources.requests[
                                    'storage']
                            pvcs_list.append(body)

                        elif pvc.metadata.annotations['volume.beta.kubernetes.io/storage-provisioner'] == \
                                "ibm.io/ibmc-block":
                            LOGGER.info(
                                "Discovering PVCs with Block Storage Classes")
                            body = json.loads(BLOCK_STORAGE_PVC)
                            body['metadata']['name'] = pvc.metadata.name
                            body['metadata']['namespace'] = pvc.metadata.namespace
                            body['spec']['accessModes'] = pvc.spec.access_modes
                            body['spec']['resources']['requests']['storage'] = \
                                pvc.spec.resources.requests['storage']
                            # classic block storage classes map to the VPC 10iops
                            # tier; anything else falls back to the custom class
                            if pvc.spec.storage_class_name in CLASSIC_BLOCK_STORAGE_CLASSES:
                                body['spec']['storageClassName'] = "ibmc-vpc-block-10iops-tier"
                            else:
                                body['spec']['storageClassName'] = "ibmc-vpc-block-custom"

                            pvcs_list.append(body)

                    LOGGER.info("Creating Namespaces On Provisioned Cluster")
                    for namespace in pvcs_list:
                        try:
                            target_config.client.CoreV1Api().create_namespace(
                                client.
                                V1Namespace(metadata=client.V1ObjectMeta(
                                    name=namespace['metadata']['namespace'])))
                        except ApiException as error:
                            if error.status == 409:
                                continue

                        if namespace['spec'][
                                'storageClassName'] == "ibmc-s3fs-standard-regional":

                            object_storage = True
                            secret = client.V1Secret(
                                api_version='v1',
                                metadata=client.V1ObjectMeta(
                                    name='cos-write-access',
                                    namespace=namespace['metadata']
                                    ['namespace']),
                                kind='Secret',
                                type='ibm/ibmc-s3fs',
                                data={
                                    'access-key': access_key,
                                    'secret-key': secret_key
                                })

                            try:
                                target_config.client.CoreV1Api(
                                ).create_namespaced_secret(
                                    body=secret,
                                    namespace=namespace['metadata']
                                    ['namespace'])
                            except ApiException as error:
                                if error.status == 409:
                                    pass

                    if object_storage:
                        LOGGER.info("Deploying COS-Driver & COS-Plugin")

                        try:
                            target_config.client.CoreV1Api().create_namespace(
                                client.V1Namespace(
                                    metadata=client.V1ObjectMeta(
                                        name="ibm-object-s3fs")))
                        except ApiException as error:
                            if error.status == 409:
                                pass

                        for service_account in COS_SERVICE_ACCOUNTS:
                            body = json.loads(service_account)
                            try:
                                LOGGER.info(
                                    "Creating Cloud Object Storage Service Account"
                                )
                                target_config.client.CoreV1Api(
                                ).create_namespaced_service_account(
                                    body=body, namespace="ibm-object-s3fs")
                            except ApiException as error:
                                if error.status == 409:
                                    pass

                        for cluster_role in COS_CLUSTER_ROLES:
                            body = json.loads(cluster_role)
                            try:
                                LOGGER.info(
                                    "Creating Cluster Role for Cloud Object Storage Plugin "
                                )
                                target_config.client.RbacAuthorizationV1Api(
                                ).create_cluster_role(body=body)
                            except ApiException as error:
                                if error.status == 409:
                                    pass

                        for cluster_role_binding in COS_CLUSTER_ROLE_BINDING:
                            body = json.loads(cluster_role_binding)
                            try:
                                LOGGER.info(
                                    "Creating COS Custom Role Binding in Cluster"
                                )
                                target_config.client.RbacAuthorizationV1Api(
                                ).create_cluster_role_binding(body=body)
                            except ApiException as error:
                                if error.status == 409:
                                    pass

                        body = json.loads(COS_DRIVER_DAEMONSET)
                        try:
                            LOGGER.info(
                                "Installing COS Driver Daemonset on Cluster")
                            target_config.client.AppsV1Api(
                            ).create_namespaced_daemon_set(
                                body=body, namespace="ibm-object-s3fs")
                        except ApiException as error:
                            if error.status == 409:
                                pass

                        body = json.loads(COS_PLUGIN_DEPLOYMENT)
                        try:
                            LOGGER.info("Creating COS Deployment on Cluster")
                            target_config.client.AppsV1Api(
                            ).create_namespaced_deployment(
                                body=body, namespace="ibm-object-s3fs")
                        except ApiException as error:
                            if error.status == 409:
                                pass

                        for cos_storage_class in COS_STORAGE_CLASSES:
                            body = json.loads(cos_storage_class)
                            if "REGION" in body['parameters'][
                                    'ibm.io/object-store-endpoint']:
                                body['parameters'][
                                    'ibm.io/object-store-endpoint'] = \
                                    f"https://s3.direct.{region}.cloud-object-storage.appdomain.cloud"
                            try:
                                target_config.client.StorageV1Api(
                                ).create_storage_class(body=body)
                            except ApiException as error:
                                if error.status == 409:
                                    pass

                    LOGGER.info(
                        "Creating Persistent Volume Claims in destination Cluster"
                    )
                    for pvc in pvcs_list:
                        try:
                            target_config.client.CoreV1Api(
                            ).create_namespaced_persistent_volume_claim(
                                body=pvc,
                                namespace=pvc['metadata']['namespace'])
                        except ApiException as error:
                            if error.status == 409:
                                pass

                    LOGGER.info("Waiting for PVC's to get binded with PV's")
                    time.sleep(60)

            else:
                raise WorkflowTerminated("RESTORE, Backup Not Found")

            body = json.loads(CREATE_RESTORE_TEMPLATE)
            body['metadata']['name'] = restore_name
            body["spec"]["backupName"] = backup_name
            target_config.client.CustomObjectsApi(
            ).create_namespaced_custom_object(body=body,
                                              group="velero.io",
                                              namespace="velero",
                                              version="v1",
                                              plural="restores")

            restore = deploy_velero_and_get_backup_restore(
                kube_config=target_config,
                task_type="RESTORE",
                restore_name=restore_name)

            if restore:
                LOGGER.info(
                    f"Updating migration status for migration id : {cluster_migration_task.id}"
                )

                self.report_path = ".cluster_migration.steps.restore"
                self.report_utils.update_reporting(
                    task_id=task_id,
                    resource_name=self.resource.name,
                    resource_type=self.resource_type,
                    stage=PROVISIONING,
                    status=SUCCESS,
                    path=self.report_path)

                self.report_path = ".cluster_migration"
                self.report_utils.update_reporting(
                    task_id=task_id,
                    resource_name=self.resource.name,
                    resource_type=self.resource_type,
                    stage=PROVISIONING,
                    status=SUCCESS,
                    path=self.report_path)
                self.report_utils.update_reporting(
                    task_id=task_id,
                    resource_name=self.resource.name,
                    resource_type=self.resource_type,
                    stage=PROVISIONING,
                    status=SUCCESS)

                cluster_migration_task.message = f"successfully migrated {cluster_migration_task.id}"
                cluster_migration_task.completed_at = datetime.utcnow()
                doosradb.session.commit()
                LOGGER.info(
                    f"migration completed Successfully {cluster_migration_task.id}"
                )

            else:
                LOGGER.info("Restore creation failed")
                raise WorkflowTerminated("RESTORE, Restoration Failed")

            task_cleanup_cluster.delay(cluster_migration_task.target_cluster)

        else:
            LOGGER.info("Velero failed to deploy on migrated cluster")
            raise WorkflowTerminated(
                "RESTORE, Restoration Failed, Please Check your Cluster internet Connectivity"
            )

    except ApiException as error:
        LOGGER.info(error)
        cluster_migration_task.message = f"Failed due {error.reason} : status {error.status}"
        doosradb.session.commit()
        return
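The CREATE_RESTORE_TEMPLATE constant is not shown. Given that the code parses it with json.loads, fills in metadata.name and spec.backupName, and creates it through the velero.io/v1 "restores" custom resource API, it presumably resembles the following minimal Velero Restore manifest (a sketch, not the project's actual template):

CREATE_RESTORE_TEMPLATE = """
{
    "apiVersion": "velero.io/v1",
    "kind": "Restore",
    "metadata": {
        "name": "",
        "namespace": "velero"
    },
    "spec": {
        "backupName": ""
    }
}
"""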
Code example #10
File: cloud_models.py Project: HumayunNasir23/test
 def to_json_body(self):
     return {
         "grant_type": "urn:ibm:params:oauth:grant-type:apikey",
         "apikey": decrypt_api_key(self.api_key),
     }
Code example #11
File: cloud_models.py Project: HumayunNasir23/test
 def verify_api_key(self, api_key):
     return api_key == decrypt_api_key(self.api_key)
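encrypt_api_key and decrypt_api_key appear throughout these examples, but their implementation is not shown. A minimal sketch of the round trip they imply, assuming a Fernet-style symmetric cipher (the project's actual cipher and key management are unknown):

from cryptography.fernet import Fernet

SECRET_KEY = Fernet.generate_key()  # in practice this would come from configuration

def encrypt_api_key(api_key):
    """Encrypt a plaintext API key for storage."""
    return Fernet(SECRET_KEY).encrypt(api_key.encode()).decode()

def decrypt_api_key(encrypted_key):
    """Recover the plaintext API key from its stored form."""
    return Fernet(SECRET_KEY).decrypt(encrypted_key.encode()).decode()

# the round trip verify_api_key relies on:
stored = encrypt_api_key("my-api-key")
assert decrypt_api_key(stored) == "my-api-key"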
Code example #12
def construct_user_data_script(instance, ibm_cloud, region, instance_id):
    """
    Create user_data script from COS files and a disk for linux helper migration.
    Fifth Volume is added directly to instance as IBM is supporting more than four volumes with all profiles.
    :return:
    """
    if not instance.get("volume_attachments"):
        return

    api_key = decrypt_api_key(ibm_cloud.api_key)

    # sorting in ascending order by volume capacity
    volumes = instance["volume_attachments"]
    sorted_volumes = sorted(volumes, key=lambda i: i['capacity'])
    instance["volume_attachments"] = sorted_volumes

    ## TODO: This is what was considered for Linux (A1)
    attach_volumes = ' '.join([
        str(volume["volume_index"])
        for volume in instance["volume_attachments"]
        if volume.get("volume_index") and volume.get("is_migration_enabled")
    ])
    try:
        attach_volumes_capacity = ' '.join([
            str(volume["capacity"])
            for volume in instance["volume_attachments"] if
            volume.get("volume_index") and volume.get("is_migration_enabled")
        ])
    except KeyError:
        return

    ## TODO: This was considered for Windows (A2)
    window_vhds_index = [
        volume["volume_index"] for volume in instance["volume_attachments"]
        if volume.get("volume_index")
    ]
    ## TODO: Need to consider any of (A1, A2)

    volume_mig_task = SecondaryVolumeMigrationTask(instance_id=instance_id)
    doosradb.session.add(volume_mig_task)
    doosradb.session.commit()

    if "WINDOWS" in instance.get("original_operating_system_name", "").upper() or \
            "WINDOWS" in instance.get("original_image", "").upper() or \
            "WINDOWS" in instance["image"].get("public_image", "").upper() or \
            "WINDOWS" in instance["image"].get("vpc_image_name", "").upper():
        web_hook_uri = os.environ.get(
            "VPCPLUS_LINK"
        ) + "v1/ibm/instances/secondary-volume-migration/windows/" + volume_mig_task.id
        user_data_script = WINDOWS_MIG_REQ.format(
            API_KEY=api_key,
            REGION=region,
            BUCKET=instance["image"]["bucket_name"],
            VHDS_INDEX=", ".join(repr(item) for item in window_vhds_index),
            INSTANCE_ID=instance_id,
            WEB_HOOK_URI=web_hook_uri,
            VERSION=VERSION,
            GENERATION=GENERATION)
    else:
        new_volume_json = attach_additional_volume(
            instance["volume_attachments"], instance_id, ibm_cloud.id, region)

        operating_system = return_class(
            instance["image"].get("public_image")
            or instance["image"].get("vpc_image_name")
            or instance.get("original_operating_system_name"))

        packages = operating_system.qemu_package
        for pkg in operating_system.PACKAGES:
            packages = packages + " " + pkg

        data_mig_req_string = DATA_MIG_REQUIREMENTS.format(
            SVM_WORKING_DISK=str(new_volume_json[VOLUME][CAPACITY]) + "G",
            ATTACHED_VOLUME_COUNT=attach_volumes,
            ATTACHED_VOLUMES_CAPACITY=attach_volumes_capacity,
            INSTANCE_NAME=instance["name"],
            VOLUME_NAME=new_volume_json["name"],
            PACKAGES=packages,
            REGION=region,
            VERSION=VERSION,
            BUCKET=instance["image"]["bucket_name"],
            WEB_HOOK_URI=os.environ.get("VPCPLUS_LINK") +
            "v1/ibm/instances/secondary_volume_migration/" +
            volume_mig_task.id,
            API_KEY=api_key,
        )
        user_data_script = "{data_mig_req_string}\n{packages}".format(
            data_mig_req_string=data_mig_req_string,
            packages=operating_system.bash_installation_string)
        insert_volume_in_db(instance_id,
                            volumes_json=[new_volume_json],
                            region=region,
                            cloud_id=ibm_cloud.id)

    ibm_instance = IBMInstance.query.get(instance_id)
    if ibm_instance.user_data:
        user_data_script = f"{decrypt_api_key(ibm_instance.user_data)}\n{user_data_script}"
    ibm_instance.user_data = encrypt_api_key(user_data_script)
    doosradb.session.commit()
    LOGGER.info(
        f"Volume Migration Requirements Added for instance {instance_id} Secondary Migration Data"
    )
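The attach-string construction above is easier to follow in isolation; a toy run with made-up volume attachments:

volume_attachments = [
    {"volume_index": 2, "capacity": 100, "is_migration_enabled": True},
    {"volume_index": 1, "capacity": 25, "is_migration_enabled": True},
    {"volume_index": 3, "capacity": 50, "is_migration_enabled": False},
]

# sort ascending by capacity, then keep only migration-enabled volumes
sorted_volumes = sorted(volume_attachments, key=lambda v: v["capacity"])
attach_volumes = " ".join(
    str(v["volume_index"]) for v in sorted_volumes
    if v.get("volume_index") and v.get("is_migration_enabled"))
print(attach_volumes)  # "1 2"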
Code example #13
    def __execute_request(self, request, request_type, updated_api_version,
                          force_auth):
        assert request_type in [
            "AUTH", "VPC_RESOURCE", "RESOURCE_GROUP", "K8S_RESOURCE",
            "K8S_KUBE_CONFIG"
        ]
        assert isinstance(request, requests.Request)

        auth_resp = None

        cloud = doosradb.session.query(IBMCloud).get(self.cloud_id)
        if not cloud:
            raise IBMInvalidRequestError("Cloud not found")

        auth_required = cloud.auth_required or force_auth
        api_key = decrypt_api_key(cloud.api_key)
        access_token = cloud.credentials.access_token
        doosradb.session.commit()

        if request_type in [
                "VPC_RESOURCE", "RESOURCE_GROUP", "K8S_RESOURCE",
                "K8S_KUBE_CONFIG"
        ]:
            if auth_required:
                LOGGER.info("Authenticating Cloud {}".format(self.cloud_id))
                auth_resp = self.authenticate_cloud_account(api_key)
                access_token = " ".join([
                    auth_resp.get("token_type"),
                    auth_resp.get("access_token")
                ])

            if request_type == "VPC_RESOURCE":
                if updated_api_version:
                    VPC_RESOURCE_REQUIRED_PARAMS[
                        "version"] = VSI_FIXED_DATE_BASED_VERSION
                if request.params:
                    request.params.update(VPC_RESOURCE_REQUIRED_PARAMS)
                else:
                    request.params = VPC_RESOURCE_REQUIRED_PARAMS

            if request.headers:
                request.headers.update({"Authorization": access_token})
            else:
                request.headers = {"Authorization": access_token}

        if auth_resp:
            cloud.update_from_auth_response(auth_resp)
            doosradb.session.commit()

        try:
            with get_requests_session() as req_session:
                request = req_session.prepare_request(request)
                response = req_session.send(request, timeout=30)
        except (ConnectionError, ReadTimeout, RequestException):
            raise IBMConnectError(self.cloud_id)

        if response.status_code == 401:
            raise IBMAuthError(self.cloud_id)
        elif response.status_code not in [200, 201, 204]:
            # covers 400, 403, 404, 408, 409 and any other unexpected status
            raise IBMExecuteError(response)

        try:
            response_json = response.json()
        except Exception:
            response_json = {}
        return response_json
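The prepare/send split at the end of __execute_request is the standard requests pattern for building a Request object up front and dispatching it through a session; a self-contained sketch (the URL and token are placeholders):

import requests

def send_prepared(request, timeout=30):
    """Send a requests.Request through a session, mirroring the pattern above."""
    with requests.Session() as session:
        prepared = session.prepare_request(request)
        return session.send(prepared, timeout=timeout)

req = requests.Request("GET", "https://example.com/v1/vpcs",
                       headers={"Authorization": "Bearer <token>"})
resp = send_prepared(req)
print(resp.status_code)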
Code example #14
File: api.py Project: HumayunNasir23/test
def update_ibm_cloud_account(user_id, user, cloud_id):
    """
    Update an IBM Cloud Account
    :param user_id: ID of the user initiating the request
    :param user: object of the user initiating the request
    :param cloud_id: cloud_id for Cloud object
    :return: Response object from flask package
    """
    from doosra.tasks import task_process_new_ibm_cloud_account

    data = request.get_json(force=True)
    force = request.args.get("force")

    cloud_account = doosradb.session.query(IBMCloud).filter_by(
        id=cloud_id, project_id=user.project.id).first()
    if not cloud_account:
        current_app.logger.info(
            "No IBM cloud account found with ID {}".format(cloud_id))
        return Response(status=404)

    if force:
        cloud_account.status = AUTHENTICATING
        doosradb.session.commit()

    if not cloud_account.service_credentials and data.get("resource_instance_id"):
        cloud_account.service_credentials = IBMServiceCredentials(
            data["resource_instance_id"])
        doosradb.session.commit()

    if cloud_account.service_credentials:
        if not cloud_account.service_credentials.access_key_id and \
                not cloud_account.service_credentials.secret_access_key and \
                data.get("access_key_id") and data.get("secret_access_key"):
            cloud_account.service_credentials.access_key_id = encrypt_api_key(
                data["access_key_id"])
            cloud_account.service_credentials.secret_access_key = encrypt_api_key(
                data["secret_access_key"])
            doosradb.session.commit()

        elif data.get("resource_instance_id") and data["resource_instance_id"] != \
                cloud_account.service_credentials.resource_instance_id:
            cloud_account.service_credentials.resource_instance_id = data[
                "resource_instance_id"]
            doosradb.session.commit()

        elif data.get("access_key_id") and data["access_key_id"] != \
                decrypt_api_key(cloud_account.service_credentials.access_key_id) and \
                data.get("secret_access_key") and data["secret_access_key"] != \
                decrypt_api_key(cloud_account.service_credentials.secret_access_key):
            cloud_account.service_credentials.access_key_id = encrypt_api_key(
                data["access_key_id"])
            cloud_account.service_credentials.secret_access_key = encrypt_api_key(
                data["secret_access_key"])
            cloud_account.status = AUTHENTICATING

    if data.get("name") and data["name"] != cloud_account.name:
        existing_cloud = doosradb.session.query(IBMCloud).filter_by(
            name=data["name"], project_id=user.project.id).first()
        if existing_cloud:
            return Response("ERROR_SAME_NAME", status=409)

        cloud_account.name = data["name"]
        doosradb.session.commit()

    if data.get("api_key") and data["api_key"] != decrypt_api_key(
            cloud_account.api_key):
        existing_clouds = doosradb.session.query(IBMCloud).filter_by(
            project_id=user.project.id).all()
        for cloud in existing_clouds:
            if cloud.verify_api_key(data['api_key']):
                return Response("ERROR_SAME_API_KEY, cloud_id={}".format(
                    cloud.id),
                                status=409)

        cloud_account.api_key = encrypt_api_key(data["api_key"])
        cloud_account.status = AUTHENTICATING

    if data.get('resource_instance_id') and cloud_account.service_credentials:
        cloud_account.status = AUTHENTICATING
        cloud_account.service_credentials.resource_instance_id = data['resource_instance_id']

    if data.get("access_key_id") and data.get("secret_access_key") and \
            cloud_account.service_credentials:
        cloud_account.status = AUTHENTICATING
        cloud_account.service_credentials.access_key_id = encrypt_api_key(
            data["access_key_id"])
        cloud_account.service_credentials.secret_access_key = encrypt_api_key(
            data["secret_access_key"])

    doosradb.session.commit()
    if cloud_account.status == AUTHENTICATING:
        task_process_new_ibm_cloud_account.apply_async(queue='sync_queue',
                                                       args=[cloud_account.id])

    return jsonify(cloud_account.to_json())