Example #1
0
class IxiaBreakingpointVeVbladeDriver(ResourceDriverInterface):
    """Shell driver for an Ixia BreakingPoint VE vBlade resource.

    Before autoload it reshapes the deployed VM's hardware (vNICs, vCPUs,
    memory) to match the requested resource attributes, and after deployment
    it maps the sandbox connectors onto the discovered port sub-resources.
    """

    def __init__(self):
        """
        ctor must be without arguments, it is created with reflection at run time
        """
        pass

    def initialize(self, context):
        """
        Initialize the driver session, this function is called everytime a new instance of the driver is created
        This is a good place to load and cache the driver configuration, initiate sessions etc.
        :param InitCommandContext context: the context the command runs on
        """
        self.name = context.resource.name
        self.model = context.resource.model

    def pre_autoload_configuration_command(self, context):
        """
        Adjust the deployed VM's hardware (NICs/CPUs/memory) to the requested attributes.

        Validates that all REQUIRED_ATTRIBUTES are present, computes the delta
        between the OVF defaults and the requested values, and issues a single
        'modify_vm_hardware' connected command when anything needs to change.
        :param cloudshell.shell.core.driver_context.ResourceCommandContext context: the context the command runs on
        :raises Exception: if a required attribute is missing on the resource
        """
        self.api = CloudShellAPISession(host=context.connectivity.server_address,
                                        token_id=context.connectivity.admin_auth_token,
                                        domain='Global')
        self.user_msg = get_sandbox_msg(self.api, context)
        self.user_msg('Checking port configuration on {0}'.format(self.name))

        for attribute_name in REQUIRED_ATTRIBUTES:
            if attribute_name not in context.resource.attributes:
                raise Exception(EX_MISSING_ATTRIBUTE.format(self.model, attribute_name))

        number_of_ports = int(context.resource.attributes[ATTR_NUMBER_OF_PORTS])
        number_of_cpus = int(context.resource.attributes[ATTR_NUMBER_OF_CPUS])
        memory_in_gbs = int(context.resource.attributes[ATTR_MEMORY_IN_GBS])

        vm_changes = {}
        vm_changes = self._get_nic_changes(number_of_ports, vm_changes)
        vm_changes = self._get_CPU_changes(number_of_cpus, vm_changes)
        vm_changes = self._get_memory_changes(memory_in_gbs, vm_changes)
        if vm_changes:
            # Serialize only when there is actually something to change.
            vm_changes_params = json.dumps(vm_changes)
            self.api.ExecuteResourceConnectedCommand(context.reservation.reservation_id, context.resource.name,
                                                     'modify_vm_hardware', 'remote_app_management', [vm_changes_params])

    def _get_nic_changes(self, number_of_ports, vm_changes):
        """Record how many vNICs must be added beyond the OVF defaults.

        Warns (without failing) when the request exceeds the cloud provider
        maximum or is below the OVF default; only additions are requested.
        :return: vm_changes, possibly with a 'nics' entry added
        """
        # +1 accounts for the management NIC on top of the traffic ports.
        if number_of_ports + 1 > MAXIMUM_ALLOWED_VNICS_ON_CLOUD_PROVIDER:
            self.user_msg(
                WARN_MAXIMUM_PORTS.format(number_of_ports, self.name, MAXIMUM_ALLOWED_VNICS_ON_CLOUD_PROVIDER))
        if number_of_ports < NUMBER_OF_OVF_VNICS_BY_DEFAULT:
            self.user_msg(WARN_MINIMUM_PORTS.format(number_of_ports, self.name, NUMBER_OF_OVF_VNICS_BY_DEFAULT))
        if number_of_ports > NUMBER_OF_OVF_VNICS_BY_DEFAULT:
            number_of_ports_to_add = number_of_ports - NUMBER_OF_OVF_VNICS_BY_DEFAULT
            self.user_msg(
                '{0} ports needed on {1}\nadding {2} ports '.format(number_of_ports, self.name, number_of_ports_to_add))
            vm_changes['nics'] = number_of_ports_to_add
        return vm_changes

    def _get_CPU_changes(self, number_of_cpus, vm_changes):
        """Record the requested vCPU count when it exceeds the OVF default.

        Warns (without failing) when the request exceeds the cloud provider
        maximum or is below the default.
        :return: vm_changes, possibly with a 'cpu' entry added
        """
        if number_of_cpus > MAXIMUM_ALLOWED_CPUS_ON_CLOUD_PROVIDER:
            self.user_msg(
                WARN_MAXIMUM_CPUS.format(number_of_cpus, self.name, MAXIMUM_ALLOWED_CPUS_ON_CLOUD_PROVIDER))
        if number_of_cpus < NUMBER_OF_VCPUS_BY_DEFAULT:
            self.user_msg(WARN_MINIMUM_CPUS.format(number_of_cpus, self.name, NUMBER_OF_VCPUS_BY_DEFAULT))
        if number_of_cpus > NUMBER_OF_VCPUS_BY_DEFAULT:
            self.user_msg('{0} cpus needed on {1} '.format(number_of_cpus, self.name))
            vm_changes['cpu'] = int(number_of_cpus)
        return vm_changes

    def _get_memory_changes(self, memory_in_gbs, vm_changes):
        """Record the requested memory size (GB) when it exceeds the OVF default.

        Warns (without failing) when the request exceeds the cloud provider
        maximum or is below the default.
        :return: vm_changes, possibly with a 'memory' entry added
        """
        # BUGFIX: was comparing against MAXIMUM_ALLOWED_CPUS_ON_CLOUD_PROVIDER
        # (copy-paste from _get_CPU_changes) while the warning message used the
        # memory constant.
        if memory_in_gbs > MAXIMUM_ALLOWED_MEMORY_ON_CLOUD_PROVIDER:
            self.user_msg(
                WARN_MAXIMUM_MEMORY.format(memory_in_gbs, self.name, MAXIMUM_ALLOWED_MEMORY_ON_CLOUD_PROVIDER))
        if memory_in_gbs < MEMORY_BY_DEFAULT:
            self.user_msg(WARN_MINIMUM_MEMORY.format(memory_in_gbs, self.name, MEMORY_BY_DEFAULT))
        if memory_in_gbs > MEMORY_BY_DEFAULT:
            self.user_msg('{0} GBs of memory needed on {1} '.format(memory_in_gbs, self.name))
            vm_changes['memory'] = int(memory_in_gbs)
        return vm_changes

    def get_inventory(self, context):
        """
        Discovers the resource structure and attributes.
        :param AutoLoadCommandContext context: the context the command runs on
        :return Attribute and sub-resource information for the Shell resource you can return an AutoLoadDetails object
        :rtype: AutoLoadDetails
        """
        # Port 0 is the management interface; the remaining ports are traffic
        # ports and get a Logical Name attribute so connectors can target them.
        number_of_ports = int(context.resource.attributes[ATTR_NUMBER_OF_PORTS]) + 1
        resources = []
        attributes = []
        for i in range(number_of_ports):
            address = str(i)
            attributes.append(AutoLoadAttribute(address, 'Requested vNIC Name', 'Network adapter ' + str(i + 1)))
            if i == 0:
                port_name = IXIA_MANAGEMENT_PORT
            else:
                port_name = 'Port ' + address
                attributes.append(AutoLoadAttribute(address, ATTR_LOGICAL_NAME, address))

            resources.append(AutoLoadResource(model=MODEL_PORT, name=port_name, relative_address=address))
        return AutoLoadDetails(resources, attributes)

    def connect_child_resources(self, context):
        """
        Remap the sandbox connectors that touch this resource onto its port
        sub-resources, honoring explicitly requested vNIC ids first and then
        distributing the remaining connectors over unallocated ports.
        :type context: cloudshell.shell.core.driver_context.ResourceCommandContext
        :rtype: str
        :raises Exception: on an unknown vNIC id, a doubly-connected interface,
            or more connectors than available ports
        """
        api = CloudShellAPISession(host=context.connectivity.server_address,
                                   token_id=context.connectivity.admin_auth_token,
                                   domain='Global')
        resource_name = context.resource.fullname
        reservation_id = context.reservation.reservation_id
        connectors = context.connectors

        if not context.connectors:
            return 'Success'

        resource = api.GetResourceDetails(resource_name)

        to_disconnect = []
        to_connect = []
        temp_connectors = []
        ports = self._get_ports(resource)

        for connector in connectors:
            me, other = self._set_remap_connector_details(connector, resource_name, temp_connectors)
            to_disconnect.extend([me, other])

        connectors = temp_connectors

        # these are connectors from app to vlan where user marked to which interface the connector should be connected
        connectors_with_predefined_target = [connector for connector in connectors if connector.vnic_id != '']

        # these are connectors from app to vlan where user left the target interface unspecified
        connectors_without_target = [connector for connector in connectors if connector.vnic_id == '']

        for connector in connectors_with_predefined_target:
            if connector.vnic_id not in ports.keys():
                raise Exception('Tried to connect an interface that is not on reservation - ' + connector.vnic_id)

            if hasattr(ports[connector.vnic_id], 'allocated'):
                # BUGFIX: was concatenating the port *object* into the message,
                # which raised TypeError instead of the intended Exception.
                raise Exception(
                    'Tried to connect several connections to same interface: ' + ports[connector.vnic_id].Name)

            to_connect.append(SetConnectorRequest(SourceResourceFullName=ports[connector.vnic_id].Name,
                                                  TargetResourceFullName=connector.other,
                                                  Direction=connector.direction,
                                                  Alias=connector.alias))
            # Mark the port as taken so a later connector cannot reuse it.
            ports[connector.vnic_id].allocated = True

        unallocated_ports = [port for key, port in ports.items() if not hasattr(port, 'allocated')]

        if len(unallocated_ports) < len(connectors_without_target):
            raise Exception('There were more connections to TeraVM than available interfaces after deployment.')

        for port in unallocated_ports:
            if connectors_without_target:
                connector = connectors_without_target.pop()
                to_connect.append(SetConnectorRequest(SourceResourceFullName=port.Name,
                                                      TargetResourceFullName=connector.other,
                                                      Direction=connector.direction,
                                                      Alias=connector.alias))

        if connectors_without_target:
            raise Exception('There were more connections to TeraVM than available interfaces after deployment.')

        api.RemoveConnectorsFromReservation(reservation_id, to_disconnect)
        api.SetConnectorsInReservation(reservation_id, to_connect)

        return 'Success'

    @staticmethod
    def _set_remap_connector_details(connector, resource_name, connectors):
        """Expand one original connector into per-vNIC copies.

        Determines which side of the connector is this resource, reads the
        requested source/target vNIC list from the connector attributes, and
        appends one deep-copied connector per requested vNIC to *connectors*.
        :return: (me, other) endpoint names of the original connector
        :raises Exception: when neither endpoint belongs to this resource
        """
        attribs = connector.attributes
        if resource_name in connector.source.split('/'):
            remap_requests = attribs.get(ATTR_REQUESTED_SOURCE_VNIC, '').split(',')
            me = connector.source
            other = connector.target
        elif resource_name in connector.target.split('/'):
            remap_requests = attribs.get(ATTR_REQUESTED_TARGET_VNIC, '').split(',')
            me = connector.target
            other = connector.source
        else:
            raise Exception("Oops, a connector doesn't have required details:\n Connector source: {0}\n"
                            "Connector target: {1}\nPlease contact your admin".format(connector.source,
                                                                                      connector.target))

        for vnic_id in remap_requests:
            new_con = copy.deepcopy(connector)
            IxiaBreakingpointVeVbladeDriver._update_connector(new_con, me, other, vnic_id)
            connectors.append(new_con)

        return me, other

    @staticmethod
    def _update_connector(connector, me, other, vnic_id):
        """Stamp a copied connector with its endpoints and requested vNIC id."""
        connector.vnic_id = vnic_id
        connector.me = me
        connector.other = other

    @staticmethod
    def _get_ports(resource):
        """Return {relative index (str): port sub-resource} for all port children."""
        ports = {str(idx): port for idx, port in enumerate(resource.ChildResources)
                 if port.ResourceModelName == MODEL_PORT}
        return ports

    def cleanup(self):
        """
        Destroy the driver session, this function is called everytime a driver instance is destroyed
        This is a good place to close any open sessions, finish writing to log files
        """
        pass
Example #2
0
    def ApplyConnectivityChanges(self, context, request):
        """
        Configures VLANs on multiple ports or port-channels by translating each
        requested action onto the VM's cloud provider resource.
        :param ResourceCommandContext context: The context object for the command with resource and reservation info
        :param str request: A JSON object with the list of requested connectivity changes
        :return: a json object with the list of connectivity changes which were carried out by the switch
        :rtype: str
        """
        logger = get_qs_logger(log_group=context.reservation.reservation_id,
                               log_file_prefix='vMX')

        logger.info('ApplyConnectivityChanges called with json %s' % request)

        api = CloudShellAPISession(
            host=context.connectivity.server_address,
            token_id=context.connectivity.admin_auth_token,
            domain=context.reservation.domain)

        vmuid2portno_req_tuples = {}
        vmuid2cpname = {}
        vmuid2resourcename = {}
        # BUGFIX: track the cloud provider model per VM. The original code read
        # the loop variable 'cpdetails' after the loop, so every VM was judged
        # by the *last* action's cloud provider.
        vmuid2cpmodel = {}
        o = json.loads(request)
        for action in o['driverRequest']['actions']:
            targetrd = api.GetResourceDetails(
                action['actionTarget']['fullName'])

            vmname = [
                a.Value for a in targetrd.ResourceAttributes
                if a.Name == 'VM Name'
            ][0]
            nicno = [
                a.Value for a in targetrd.ResourceAttributes
                if a.Name == 'VM Port vNIC Name'
            ][0]

            # Redirect the action at the VM resource itself.
            action['actionTarget']['fullName'] = vmname
            action['actionTarget']['fullAddress'] = vmname

            vmrd = api.GetResourceDetails(vmname)
            cpname = vmrd.VmDetails.CloudProviderFullName
            cpdetails = api.GetResourceDetails(cpname)
            vmuid = vmrd.VmDetails.UID
            vmuid2cpname[vmuid] = cpname
            vmuid2resourcename[vmuid] = vmrd.Name
            vmuid2cpmodel[vmuid] = cpdetails.ResourceModelName

            if 'customActionAttributes' not in action:
                action['customActionAttributes'] = []

            action['customActionAttributes'].append({
                'attributeName': 'VM_UUID',
                'attributeValue': vmuid,
            })
            # Vnic Name is supported on vSphere only (OpenStack relies on requests being sorted by NIC number)
            action['customActionAttributes'].append({
                'attributeName': 'Vnic Name',
                'attributeValue': nicno,
            })

            req = json.dumps({'driverRequest': {'actions': [action]}})
            if vmuid not in vmuid2portno_req_tuples:
                vmuid2portno_req_tuples[vmuid] = []
            try:
                nn = int(nicno or '0')
            except (TypeError, ValueError):
                # Non-numeric vNIC name: sort it by its raw value.
                nn = nicno
            vmuid2portno_req_tuples[vmuid].append((nn, req))

        results = []
        for vmuid in vmuid2portno_req_tuples:
            # OpenStack can only attach NICs to a powered-off instance.
            if 'openstack' in vmuid2cpmodel[vmuid].lower():
                api.ExecuteResourceConnectedCommand(
                    context.reservation.reservation_id,
                    vmuid2resourcename[vmuid], 'PowerOff', 'power')

            # send requests one by one in order by requested NIC number -- only way to control NIC order in OpenStack
            for portno, req in sorted(vmuid2portno_req_tuples[vmuid]):
                cpname = vmuid2cpname[vmuid]
                logger.info(
                    'Executing single translated request on cloud provider %s: vmuid=%s portno=%s req=%s'
                    % (cpname, vmuid, str(portno), req))
                nr = api.ExecuteCommand(
                    context.reservation.reservation_id, cpname, 'Resource',
                    'ApplyConnectivityChanges',
                    [InputNameValue('request', req)]).Output
                logger.info('Result: %s' % nr)
                onr = json.loads(nr)
                onra = onr['driverResponse']['actionResults'][0]
                results.append(onra)

            if 'openstack' in vmuid2cpmodel[vmuid].lower():
                api.ExecuteResourceConnectedCommand(
                    context.reservation.reservation_id,
                    vmuid2resourcename[vmuid], 'PowerOn', 'power')

        return json.dumps({'driverResponse': {'actionResults': results}})
Example #3
0
import time
import json
from cloudshell.api.cloudshell_api import CloudShellAPISession, CloudShellAPIError
import xml.etree.ElementTree as ET
import datetime

# CloudShell connection settings (credentials redacted).
user = "******"
password = "******"
# server = "localhost"
server = "54.200.66.175"
domain = "Global"

RESOURCE_NAME = "CentOS_no_config_2G_Test i-095fe8bd6200b8854"
SANDBOX_ID = "4814527a-139b-46a9-a9c5-b91d0ae33466"

# Open an authenticated API session against the CloudShell server.
api = CloudShellAPISession(host=server,
                           username=user,
                           password=password,
                           domain=domain)

# Run the connected 'save_app' command on the resource in the sandbox,
# then parse its JSON output.
command_result = api.ExecuteResourceConnectedCommand(reservationId=SANDBOX_ID,
                                                     resourceFullPath=RESOURCE_NAME,
                                                     commandName='save_app',
                                                     commandTag='remote_connectivity',
                                                     parameterValues=[],
                                                     connectedPortsFullPath=[],
                                                     printOutput=True)
result = json.loads(command_result.Output)
print(result)