def enableComponents(self, session: Session):
        """
        Raises:
            ConfigurationError
        """

        self._logger.info('Enabling \'installer\' component')

        base_kit = KitApi().getKit(session, 'base')

        enabledComponents = ['installer']

        # get list of components from 'base' kit
        components = [c for c in base_kit.getComponentList()
                      if c.getName() in enabledComponents]

        installerNode = NodeApi().getInstallerNode(session)

        for component in components:
            SoftwareProfileApi().enableComponent(
                session,
                installerNode.getSoftwareProfile().getName(),
                base_kit.getName(),
                base_kit.getVersion(),
                base_kit.getIteration(),
                component.getName(), compVersion=component.getVersion(),
            )
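For context, enableComponents() above expects an open SQLAlchemy Session; a minimal sketch of a hypothetical caller, mirroring the dbm.session() pattern used in the test examples below (the dbm and installer_setup names are assumptions, not part of the original snippet):

    with dbm.session() as session:
        installer_setup.enableComponents(session)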
Example #2
    def postAddHost(self, session: Session, hardwareProfileName: str,
                    softwareProfileName: Optional[str],
                    addHostSession: str) -> None:
        """
        Perform post add host operations
        """

        self._logger.debug(
            'postAddHost(): hardwareProfileName=[%s]'
            ' softwareProfileName=[%s] addHostSession=[%s]' % (
                hardwareProfileName, softwareProfileName, addHostSession))

        # this query is redundant; in the calling method, we already have
        # a list of Node (db) objects
        from tortuga.node.nodeApi import NodeApi
        nodes = NodeApi().getNodesByAddHostSession(session, addHostSession)

        mgr = KitActionsManager()
        mgr.session = session

        mgr.post_add_host(
            hardwareProfileName,
            softwareProfileName,
            nodes
        )

        # Always go over the web service for this call.
        SyncWsApi().scheduleClusterUpdate(updateReason='Node(s) added')
Example #3
    def test_getNodeList(self, get_software_profile_metadata_mock,
                         get_os_boot_host_manager_mock):         \
            # pylint: disable=unused-argument
        """
        Get all nodes
        """

        fake_metadata = {
            'mike': {
                'tag': 'value',
            }
        }

        get_software_profile_metadata_mock.return_value = fake_metadata

        from tortuga.node.nodeApi import NodeApi

        with self.dbm.session() as session:
            result = NodeApi().getNodeList(session)

        assert isinstance(result, TortugaObjectList)

        node = result[0]

        metadata = node.getSoftwareProfile().getMetadata()

        assert metadata == fake_metadata

        get_software_profile_metadata_mock.assert_called()
Example #4
    def __getNodeApi(self):
        """Get and cache the Node API"""

        if self.__nodeApi is None:
            from tortuga.node.nodeApi import NodeApi
            self.__nodeApi = NodeApi()
        return self.__nodeApi
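The deferred import plus cached attribute above is a common way to avoid a circular import at module load time. A rough equivalent using functools.cached_property (Python 3.8+; the class name is hypothetical and shown only for comparison):

    from functools import cached_property

    class NodeApiConsumer:
        @cached_property
        def node_api(self):
            # Deferred import avoids a circular dependency at module load time
            from tortuga.node.nodeApi import NodeApi
            return NodeApi()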
Example #5
    def test_getNodeById_nonexistent(self, get_software_profile_metadata_mock,
                                     get_os_boot_host_manager_mock):         \
            # pylint: disable=unused-argument

        from tortuga.node.nodeApi import NodeApi

        with pytest.raises(NodeNotFound):
            with self.dbm.session() as session:
                NodeApi().getNodeById(session, 99999)
Example #6
    def test_getNodeById(self, get_software_profile_metadata_mock,
                         get_os_boot_host_manager_mock): \
            # pylint: disable=unused-argument
        from tortuga.node.nodeApi import NodeApi

        node_id = 1

        with self.dbm.session() as session:
            node = NodeApi().getNodeById(session, node_id)

            assert node.getId() == node_id

            # this is a bit of a cheat since it compares host names only
            # but that should be sufficient here...
            assert node.getName().split('.', 1)[0] == \
                socket.getfqdn().split('.', 1)[0]

        get_software_profile_metadata_mock.assert_called_with(
            session, node.getSoftwareProfile().getName())
Example #7
    def __init__(self, kit):
        """
        Initialise parent class.
        """
        super().__init__(kit)

        self._provider = DhcpdDhcpProvider(self)
        self._manager = self._get_os_dhcpd_manager('dhcpd')
        self._config = ConfigManager()
        self._installer_node = NodeApi().getInstallerNode()
Example #8
    def runCommand(self):
        self.parseArgs(
            _("""
Display list of nodes able to use the specified software profile,
ordered by cost.
"""))
        softwareProfileName = self.getArgs().softwareProfile

        nodeApi = NodeApi()
        softwareUsesHardwareDbApi = SoftwareUsesHardwareDbApi()
        hardwareProfileDbApi = HardwareProfileDbApi()

        load_kits()

        with DbManager().session() as session:
            hwPList = hardwareProfileDbApi.getHardwareProfileList(session)

            hardwareProfileIdList = softwareUsesHardwareDbApi.\
                getAllowedHardwareProfilesBySoftwareProfileName(
                    session, softwareProfileName)

            nodeList = nodeApi.getNodeList(session)
            usableNodes = []
            for node in nodeList:
                if (node.getHardwareProfile().getId() in hardwareProfileIdList) \
                        and node.getIsIdle():
                    usableNodes.append(node)

            costNameList = []
            for node in usableNodes:
                nodeHwP = node.getHardwareProfile().getId()
                for hwP in hwPList:
                    if hwP.getId() == nodeHwP:
                        costNameList.append(
                            [int(hwP.getCost()),
                             node.getName()])
                        break

            costNameList.sort()

            for node in costNameList:
                print('%s' % (node[1]))
Example #9
def process_delete_host_request(session: Session,
                                transaction_id: str,
                                nodespec: str,
                                force: bool = False):
    try:
        req = NodeRequestsDbHandler().get_by_addHostSession(
            session, transaction_id)
        if req is None:
            # Session was deleted prior to being processed. Nothing to do...

            logger.warning(
                'Delete host request [%s] not found; nothing to do...',
                transaction_id)

            return

        #
        # Save this data so that we have it for firing the event below
        #
        evt_req_id = req.id
        evt_req_request = {'name': nodespec}

        ahm.update_session(transaction_id, running=True)

        logger.debug('process_delete_host_request(): transaction_id=[{0}],'
                     ' nodespec=[{1}]'.format(transaction_id, nodespec))

        try:
            NodeApi().deleteNode(session, nodespec, force=force)

            ahm.delete_session(transaction_id)

            session.delete(req)

            DeleteNodeRequestComplete.fire(request_id=evt_req_id,
                                           request=evt_req_request)
        except (OperationFailed, NodeNotFound):
            ahm.delete_session(transaction_id)

            session.delete(req)

            raise
        except TortugaException as exc:
            logger.exception('Exception while deleting nodes')

            req.message = str(exc)

            req.state = 'error'

            req.last_update = datetime.datetime.utcnow()
        finally:
            ahm.update_session(transaction_id, running=False)
    finally:
        session.commit()
Example #10
    def test_invalid_transfer(self, node_state_change_object,
                              get_kit_installer_function, MockClass1,
                              get_os_object_factory_function, dbm):
        """
        Attempt to transfer node to current software profile
        """

        with dbm.session() as session:
            with pytest.raises(NodeTransferNotValid):
                NodeApi().transferNodes(session,
                                        'compute',
                                        nodespec='compute-02.private')
Example #11
    def configure(self):
        #
        # Write config file
        #
        fp = open(CONFIG_FILE, 'w')

        try:
            print("# File generated by genconfig", file=fp)

            installer = self.kit_installer.config_manager.getInstaller()

            all_node_list = NodeApi().getNodeList()
            node_list = [
                node for node in all_node_list if node.getName() != installer
                and node.getState() != 'Deleted' and not node.getIsIdle()
            ]
            for node in node_list:
                print('{}'.format(node), file=fp)

        finally:
            fp.close()

        #
        # Write /etc/netgroup
        #
        fp = open('/etc/netgroup', 'w')
        dbm = DbManager()
        session = dbm.openSession()

        try:
            software_profiles = \
                SoftwareProfilesDbHandler().getSoftwareProfileList(session)
            for software_profile in software_profiles:
                if not software_profile.nodes:
                    continue

                software_profile_node_list = [
                    node.name for node in software_profile.nodes
                    if node.state != 'Deleted'
                ]
                if not software_profile_node_list:
                    continue

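                # netgroup entries are (host,user,domain) triples; only the
                # host field is populated here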
                fp.write('{} {}\n\n'.format(
                    software_profile.name, ' '.join([
                        '({},,)'.format(node)
                        for node in software_profile_node_list
                    ])))

        finally:
            fp.close()
            dbm.closeSession()
Example #12
    def test_invalid_software_profile_transferNode(
            self, get_software_profile_metadata_mock,
            get_os_boot_host_manager_mock):         \
        # pylint: disable=unused-argument

        from tortuga.node.nodeApi import NodeApi

        with pytest.raises(SoftwareProfileNotFound):
            with self.dbm.session() as session:
                NodeApi().transferNodes(session,
                                        'Compute2',
                                        nodespec='compute-01.private')
Example #13
    def enableComponents(self):
        """
        Raises:
            ConfigurationError
        """

        self._logger.info('Enabling \'installer\' component')

        k = self.__getBaseKit()

        enabledComponents = ['installer']

        components = [
            c for c in k.getComponentList() if c.getName() in enabledComponents
        ]

        installerNode = NodeApi().getInstallerNode()

        for component in components:
            SoftwareProfileApi().enableComponent(
                installerNode.getSoftwareProfile().getName(), k.getName(),
                k.getVersion(), k.getIteration(), component.getName(),
                component.getVersion())
Example #14
    def _get_installer_ip(self, network_id):
        """
        Return IP address of provisioning interface on installer

        :raises NicNotFound:

        """

        installer_node = NodeApi().getInstallerNode(self.session)

        prov_nics = self._get_provisioning_nics(installer_node)
        for prov_nic in prov_nics:
            if prov_nic.getNetwork().getId() == network_id:
                return ipaddress.IPv4Address(prov_nic.getIp())
        raise NicNotFound(
            'Network has no corresponding provisioning NIC on installer')
Example #15
    def _findUnmanagedNics(self, session: Session):
        # Get list of all NICs on the installer
        systemNics = set(self._getSystemNics())

        # Get provisioning NICs on installer
        nics = NodeApi().getInstallerNode(session).getNics()

        # Filter out the NIC names
        primaryInstallerNics = set(
            [nic.getNetworkDevice().getName() for nic in nics])

        # Determine difference between all system NICs and managed NICs
        unmanagedNics = systemNics.difference(primaryInstallerNics)

        print('The following NICs are not currently managed: %s' %
              (' '.join(unmanagedNics)))
Example #16
    def _local_delete(self, resource_adapter_name: str,
                      resource_adapter_profile: str, cloudserver_id: str):
        from tortuga.db.models.instanceMapping import InstanceMapping
        from tortuga.db.models.instanceMetadata import InstanceMetadata
        from tortuga.db.resourceAdapterConfigDbHandler import ResourceAdapterConfigDbHandler
        from tortuga.node.nodeApi import NodeApi

        node_api = NodeApi()

        #
        # Assume the node instance ID name is the last item in the
        # delimited cloud server ID
        #
        instance = cloudserver_id.split(":")[-1]

        #
        # Lookup the resource adapter configuration profile...
        #
        rac_api = ResourceAdapterConfigDbHandler()
        rac = rac_api.get(self._sess, resource_adapter_name,
                          resource_adapter_profile)

        #
        # Check the instance mapping to see if there is a matching
        # instance id...
        #
        im_list = self._sess.query(InstanceMapping).filter(
            InstanceMapping.instance == instance,
            InstanceMapping.resource_adapter_configuration_id == rac.id)
        #
        # Found something? Delete the associated node(s)...
        #
        for im in im_list:
            node = im.node
            node_api.deleteNode(self._sess, node.name)

        #
        # Check the instance metadata to see if there is a
        # matching vm_name...
        #
        im_list = self._sess.query(InstanceMetadata).filter(
            InstanceMetadata.key == "vm_name",
            InstanceMetadata.value == instance)
        #
        # Found something? Delete it and then return...
        #
        for im in im_list:
            if im.instance.resource_adapter_configuration_id != rac.id:
                continue
            node = im.instance.node
            node_api.deleteNode(self._sess, node.name)
Example #17
    def action_configure(self, _, *args, **kwargs):
        """
        Configure.

        :param _: Unused
        :param *args: Unused
        :param **kwargs: Unused
        :returns: None
        """

        try:
            result = GlobalParameterDbApi().getParameter(
                self.session,
                'DHCPLeaseTime'
            )

            dhcp_lease_time = int(result.getValue())
        except ParameterNotFound:
            dhcp_lease_time = 2400

        try:
            result = GlobalParameterDbApi().getParameter(
                self.session,
                'DNSZone')

            dns_zone = result.getValue()
        except ParameterNotFound:
            dns_zone = ''

        installer_node = NodeApi().getInstallerNode(self.session)

        self._manager.configure(
            dhcp_lease_time,
            dns_zone,
            self._get_provisioning_nics_ip(installer_node),
            self._dhcp_subnets(),
            installerNode=installer_node,
            bUpdateSysconfig=kwargs.get('bUpdateSysconfig', True),
            kit_settings=self._get_kit_settings_dictionary
        )
Example #18
def process_delete_host_request(request):
    session = DbManager().openSession()

    try:
        req = NodeRequestsDbHandler().get_by_addHostSession(
            session, request['transaction_id'])
        if req is None:
            # Session was deleted prior to being processed. Nothing to do...
            return

        ahm.update_session(request['transaction_id'], running=True)

        logger.debug('process_delete_host_request(): transaction_id=[{0}],'
                     ' nodespec=[{1}]'.format(request['transaction_id'],
                                              request['nodespec']))

        try:
            NodeApi().deleteNode(request['nodespec'])

            ahm.delete_session(request['transaction_id'])

            session.delete(req)
        except NodeNotFound:
            ahm.delete_session(request['transaction_id'])

            session.delete(req)
        except TortugaException as exc:
            logger.exception('Exception while deleting nodes')

            req.message = str(exc)

            req.state = 'error'

            req.last_update = datetime.datetime.utcnow()
        finally:
            ahm.update_session(request['transaction_id'], running=False)
    finally:
        session.commit()

        DbManager().closeSession()
Example #19
    def delete_node(self, sir_id):
        with spot_cache:
            cfg = refresh_spot_instance_request_cache()

            if not cfg.has_section(sir_id) or \
                    not cfg.has_option(sir_id, 'node'):
                self._logger.warning('Spot instance [{0}] does not have an'
                                     ' associated node'.format(sir_id))

                return

            spot_instance_node_mapping = cfg.get(sir_id, 'node')

            if spot_instance_node_mapping:
                self._logger.info('Removing node [{0}] for spot instance'
                                  ' request [{1}]'.format(
                                      spot_instance_node_mapping, sir_id))

            try:
                NodeApi().deleteNode(spot_instance_node_mapping)
            except NodeNotFound:
                pass
Example #20
    def test_transferNodes_single_node(
            self,
            get_resourceadapter_class_mock,
            default_resource_adapter_mock,
            # boot_host_manager_mock,
            get_software_profile_metadata_mock,
            get_os_boot_host_manager_mock):  # pylint: disable=unused-argument
        from tortuga.node.nodeApi import NodeApi

        with patch('tortuga.node.nodeManager.KitActionsManager.refresh') as \
                kit_actions_manager_refresh_mock:
            nodeApi = NodeApi()

            with self.dbm.session() as session:
                nodeApi.transferNodes(session,
                                      'compute2',
                                      nodespec='compute-01.private')

                kit_actions_manager_refresh_mock.assert_called()

                nodeApi.transferNodes(session,
                                      'compute',
                                      nodespec='compute-01.private')
Example #21
    def __init__(self):
        self.cm = ConfigManager()
        self.node_api = NodeApi()
        self.admin_api = AdminApi()
        self.network_api = NetworkApi()
        self.parameter_api = ParameterApi()
Example #22
    def node_api(self):
        if not self._node_api:
            self._node_api = NodeApi()
        return self._node_api
Example #23
class CancelSpotInstanceRequestsCLI(TortugaCli):
    def __init__(self):
        super(CancelSpotInstanceRequestsCLI, self).__init__(validArgCount=1)

        self.nodeApi = NodeApi()

    def parseArgs(self, usage=None):
        self.addOption('--all', action='store_true', default=False,
                       help='Cancel all spot instance requests managed by'
                            ' Tortuga')

        self.addOption('--terminate', action='store_true', default=False,
                       help='Terminate any running (fulfilled) instance(s).')

        super(CancelSpotInstanceRequestsCLI, self).parseArgs(usage=usage)

        if not self.getOptions().all and not self.getArgs():
            self.getParser().error(
                '<spot instance request id> or --all argument must be'
                ' specified')

    def runCommand(self):
        self.parseArgs(
            usage='%prog [--terminate] <spot instance request id|--all>')

        sir_instance_cache_filename = \
            os.path.join(ConfigManager().getRoot(), 'var',
                         'spot-instances.conf')

        cfg = configparser.ConfigParser()
        cfg.read(sir_instance_cache_filename)

        if self.getOptions().all:
            result = self.__get_spot_instance_request_ids(cfg)
        else:
            sir_id = self.getArgs()[0]

            result = [self.__get_spot_instance_request_id(cfg, sir_id)]

        if not result:
            # Nothing to do...
            sys.exit(0)

        self.__cancel_spot_instances(result)

    def __get_spot_instance_request_id(self, cfg, sir_id):
        # Ensure spot instance request id
        if not cfg.has_section(sir_id):
            sys.stderr.write(
                'Spot instance request [{0}] is not managed by'
                ' Tortuga\n'.format(sir_id))

            sys.exit(0)

        resource_adapter_configuration = \
            self.__get_resource_adapter_configuration(cfg, sir_id)

        return sir_id, resource_adapter_configuration

    def __get_resource_adapter_configuration(self, cfg, sir_id): \
            # pylint: disable=no-self-use
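        # Fall back to the 'default' resource adapter configuration when the
        # option is absent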
        return cfg.get(sir_id, 'resource_adapter_configuration') \
            if cfg.has_option(
                sir_id, 'resource_adapter_configuration') else \
            'default'

    def __get_spot_instance_request_ids(self, cfg):
        result = []

        for sir_id in cfg.sections():
            resource_adapter_configuration = \
                self.__get_resource_adapter_configuration(cfg, sir_id)

            result.append((sir_id, resource_adapter_configuration))

        return result

    def __get_spot_instance_request_map(self, result): \
            # pylint: disable=no-self-use
        sir_map = {}
        adapter_cfg_map = {}

        adapter = Aws()

        # Create map of spot instance requests keyed on EC2 region
        for sir_id, resource_adapter_configuration in result:
            if resource_adapter_configuration not in adapter_cfg_map:
                adapter_cfg = adapter.getResourceAdapterConfig(
                    resource_adapter_configuration)

                adapter_cfg_map[resource_adapter_configuration] = \
                    adapter_cfg
            else:
                adapter_cfg = adapter_cfg_map[
                    resource_adapter_configuration]

            if adapter_cfg['region'].name not in sir_map:
                sir_map[adapter_cfg['region'].name] = []

            sir_map[adapter_cfg['region'].name].append(sir_id)

        return sir_map

    def __cancel_spot_instances(self, result):
        sir_map = self.__get_spot_instance_request_map(result)

        aws_instance_cache = configparser.ConfigParser()
        aws_instance_cache.read('/opt/tortuga/var/aws-instance.conf')

        # Iterate on map cancelling requests in each region
        for region_name, sir_ids in sir_map.items():
            session = boto3.session.Session(region_name=region_name)

            ec2_conn = session.client('ec2')

            if len(sir_ids) == 1:
                print('Cancelling spot instance request [{0}]'
                      ' in region [{1}]'.format(sir_ids[0], region_name))
            else:
                print('Cancelling {0} spot instance requests in'
                      ' region [{1}]'.format(len(sir_ids), region_name))

            response = ec2_conn.describe_spot_instance_requests(
                SpotInstanceRequestIds=sir_ids)

            # Create list of tuples (sir_id, bool) which indicate if the
            # spot instance request should be terminated
            cancelled_spot_instance_requests = []

            for sir in response['SpotInstanceRequests']:
                # All spot instance requests that are 'open' should be
                # terminated to avoid leaving orphaned Tortuga node records
                cancelled_spot_instance_requests.append(
                    (sir['SpotInstanceRequestId'],
                     self.getOptions().terminate or sir['State'] == 'open'))

            result = ec2_conn.cancel_spot_instance_requests(
                SpotInstanceRequestIds=sir_ids)

            # Delete corresponding node entries
            for sir_id, terminate in cancelled_spot_instance_requests:
                if terminate:
                    node_name = self.__get_associated_node(
                        aws_instance_cache, sir_id)
                    if node_name:
                        print('  - Deleting node [{0}]'.format(node_name))

                        self.nodeApi.deleteNode(node_name)

    def __get_associated_node(self, aws_instance_cache, sir_id): \
            # pylint: disable=no-self-use
        node_name = None

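        # for/else: the else branch runs only if the loop completes without
        # a break, i.e. no cached node references this spot instance request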
        for node_name in aws_instance_cache.sections():
            if aws_instance_cache.has_option(
                    node_name, 'spot_instance_request') and \
                aws_instance_cache.get(
                    node_name, 'spot_instance_request') == sir_id:
                break
        else:
            return None

        return node_name
Example #24
    def __init__(self):
        super(CancelSpotInstanceRequestsCLI, self).__init__(validArgCount=1)

        self.nodeApi = NodeApi()
Example #25
    def __fulfilled_request_handler(self, ec2_conn, sir_id, instance_id,
                                    spot_instance_request, hwp):
        # Ensure node entries created
        resvs = ec2_conn.get_all_instances(instance_ids=[instance_id])

        instance = resvs[0].instances[0]

        if instance.state not in ('pending', 'running'):
            self._logger.info('Ignoring instance [{0}] in state [{1}]'.format(
                instance.id, instance.state))

            return

        # Determine node from spot instance request id
        adapter_cfg = configparser.ConfigParser()
        adapter_cfg.read('/opt/tortuga/var/aws-instance.conf')

        create_node = False

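        # for/else: create a new node record only if no cached node already
        # references this spot instance request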
        for node_name in adapter_cfg.sections():
            if adapter_cfg.has_option(
                    node_name, 'spot_instance_request') and \
                    adapter_cfg.get(node_name,
                                    'spot_instance_request') == sir_id:
                break
        else:
            create_node = True

            node_name = instance.private_dns_name \
                if hwp.getNameFormat() == '*' else None

        if create_node:
            self._logger.info('Creating node for spot instance'
                              ' [{0}]'.format(instance.id))

            # Error: unable to find pre-allocated node record for spot
            # instance request
            addNodesRequest = {
                'softwareProfile':
                spot_instance_request['softwareprofile'],
                'hardwareProfile':
                spot_instance_request['hardwareprofile'],
                'isIdle':
                False,
                'count':
                1,
                'nodeDetails': [{
                    'metadata': {
                        'ec2_instance_id': instance.id,
                        'ec2_ipaddress': instance.private_ip_address,
                    }
                }],
            }

            if 'resource_adapter_configuration' in spot_instance_request:
                addNodesRequest['resource_adapter_configuration'] = \
                    spot_instance_request['resource_adapter_configuration']

            if node_name:
                addNodesRequest['nodeDetails'][0]['name'] = node_name

            try:
                addHostSession = AddHostWsApi().addNodes(addNodesRequest)

                with gevent.Timeout(300):
                    while True:
                        response = AddHostWsApi()\
                            .getStatus(session=addHostSession, getNodes=True)

                        if not response['running']:
                            self._logger.debug(
                                'response: {0}'.format(response))
                            node_name = response['nodes'][0]['name']
                            break

                        gevent.sleep(5)
            except gevent.timeout.Timeout:
                self._logger.error('Timeout waiting for add nodes operation'
                                   ' to complete')
            except NodeAlreadyExists:
                self._logger.error('Error adding node [{0}]:'
                                   ' already exists'.format(
                                       instance.private_dns_name))
        else:
            self._logger.info('Updating existing node [{0}]'.format(node_name))

            # Mark node as 'Provisioned' now that there's a backing instance
            NodeApi().updateNode(node_name,
                                 updateNodeRequest={
                                     'state':
                                     state.NODE_STATE_PROVISIONED,
                                     'nics': [{
                                         'ip':
                                         instance.private_ip_address,
                                     }],
                                     'metadata': {
                                         'ec2_instance_id': instance.id,
                                     }
                                 })

        update_spot_instance_request_cache(sir_id,
                                           metadata=dict(node=node_name,
                                                         status='fulfilled'))
Example #26
    def __init__(self):
        super(CancelSpotInstanceRequestsCLI, self).__init__(validArgCount=1)

        self.nodeApi = NodeApi()

        self._logger = logging.getLogger('tortuga.console')
Example #27
    def test_basic(self, node_state_change_object, get_kit_installer_function,
                   MockClass1, get_os_object_factory_function, dbm):
        """
        Transfer a single node
        """

        name = 'compute-01'

        with dbm.session() as session:
            # xfer node 'compute-01' to 'compute2' software profile
            result = NodeApi().transferNodes(session,
                                             'compute2',
                                             nodespec=name)

            # get node after xfer
            node = NodeApi().getNode(session, name)

            # validate new software profile
            assert node.getSoftwareProfile().getName() == 'compute2'

            # validate state (which is fudged above)
            assert node.getState() != 'Installed'

            # update status from 'Expired' to 'Installed' to allow xfer
            NodeApi().updateNodeStatus(session, name, state='Installed')

            # ensure event fired to indicate node state change
            node_state_change_object.assert_called()

            # xfer node back to 'compute' software profile
            NodeApi().transferNodes(session, 'compute', nodespec=name)

            NodeApi().updateNodeStatus(session, name, state='Installed')

            node = NodeApi().getNode(session, name)

            assert node.getSoftwareProfile().getName() == 'compute'