Example #1
def pip_install_requirements(requirements_path):
    """
    Installs packages specified in a requirements.txt file, using the tortuga
    package repo in addition to the standard python repos. This function
    returns nothing, and does nothing if the requirements.txt file is not
    found.

    :param requirements_path: the path to the requirements.txt file

    """
    cm = ConfigManager()

    if not os.path.exists(requirements_path):
        logger.debug('Requirements not found: {}'.format(requirements_path))
        return

    if is_requirements_empty(requirements_path):
        logger.debug('Requirements empty: {}'.format(requirements_path))
        return

    pip_cmd = [
        '{}/pip'.format(cm.getBinDir()),
        'install',
    ]

    installer = cm.getInstaller()
    int_webroot = cm.getIntWebRootUrl(installer)
    installer_repo = '{}/python-tortuga/simple/'.format(int_webroot)

    if cm.is_offline_installation():
        # add tortuga distribution repo
        pip_cmd.append('--index-url')
        pip_cmd.append(installer_repo)

        # add offline dependencies repo
        pip_cmd.append('--extra-index-url')
        pip_cmd.append('{}/offline-deps/python/simple/'.format(int_webroot))
    else:
        pip_cmd.append('--extra-index-url')
        pip_cmd.append(installer_repo)

    pip_cmd.extend(['--trusted-host', installer, '-r', requirements_path])

    logger.debug(' '.join(pip_cmd))
    # Capture stderr so the failure can be reported; without
    # stderr=subprocess.PIPE, proc.stderr would be None
    proc = subprocess.Popen(pip_cmd, stderr=subprocess.PIPE)
    _, stderr = proc.communicate()
    if proc.returncode:
        raise Exception(stderr.decode())
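The example above relies on an is_requirements_empty() helper that is not shown on this page. A minimal sketch of what such a helper might look like, assuming a file counts as empty when it holds only blank lines and comments:

import os

def is_requirements_empty(requirements_path):
    # Sketch only: treat missing files, blank lines and '#' comments
    # as "no requirements"
    if not os.path.exists(requirements_path):
        return True
    with open(requirements_path) as fp:
        return not any(
            line.strip() and not line.strip().startswith('#')
            for line in fp
        )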
Example #2
def pip_install_requirements(requirements_path):
    """
    Installs packages specified in a requirements.txt file, using the kit
    package repo in addition to the standard python repos. This function
    returns nothing, and does nothing if the requirements.txt file is not
    found.

    :param requirements_path: the path to the requirements.txt file

    """
    #
    # In the kit directory:
    #
    #     /opt/tortuga/kits/kit-x.y.z/tortuga_kits/kit_x_y_z
    #
    # if there is a python_packages directory, with a simple subdirectory
    # in it, it is assumed that the simple subdirectory is a PEP 503
    # compliant Python package repository. If found, this directory is
    # added to the list of directories searched for Python packages via
    # pip when installing the requirements.txt file.
    #
    # These directories can easily be created using the py2pi utility.
    #
    cm = ConfigManager()

    if not os.path.exists(requirements_path):
        logger.debug('Requirements not found: {}'.format(requirements_path))
        return

    if is_requirements_empty(requirements_path):
        logger.debug('Requirements empty: {}'.format(requirements_path))
        return

    installer = cm.getInstaller()
    int_webroot = cm.getIntWebRootUrl(installer)
    installer_repo = '{}/python-tortuga/simple/'.format(int_webroot)

    pip_cmd = [
        '{}/pip'.format(cm.getBinDir()), 'install',
        '--extra-index-url', installer_repo,
        '--trusted-host', installer,
        '-r', requirements_path
    ]

    logger.debug(' '.join(pip_cmd))
    proc = subprocess.Popen(pip_cmd)
    proc.wait()
    if proc.returncode:
        raise Exception('pip install failed with exit status'
                        ' {}'.format(proc.returncode))
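The comment block above describes probing the kit directory for a python_packages/simple subdirectory and treating it as a PEP 503 index, but that probe is not part of this snippet. A hedged sketch of how such a check could feed pip an extra local index (append_kit_repo and kit_path are illustrative names, not from the original):

import os

def append_kit_repo(pip_cmd, kit_path):
    # Sketch: if the kit ships a PEP 503 'simple' index, let pip
    # search it through a local file:// index URL
    simple_dir = os.path.join(kit_path, 'python_packages', 'simple')
    if os.path.isdir(simple_dir):
        pip_cmd.extend(['--extra-index-url', 'file://' + simple_dir])
    return pip_cmd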
Example #3
class SyncManager(TortugaObjectManager):
    """Class for cluster sync management"""

    __instanceLock = threading.RLock()

    # update delay increase (seconds)
    CLUSTER_UPDATE_DELAY_INCREASE = 30

    # after this limit is reached, warning will be logged
    CLUSTER_UPDATE_WARNING_LIMIT = 10

    def __init__(self):
        super(SyncManager, self).__init__()

        self._isUpdateScheduled = False
        self._isUpdateRunning = False
        self._cm = ConfigManager()
        self._logger = logging.getLogger(SYNC_NAMESPACE)

    def __runClusterUpdate(self, opts=None):
        """ Run cluster update. """
        # Avoid a mutable default argument
        opts = opts if opts is not None else {}

        self._logger.debug('Update timer running, opts={}'.format(opts))

        updateCmd = os.path.join(self._cm.getBinDir(), 'run_cluster_update.sh')

        delay = 0
        updateCnt = 0
        while self.__resetIsUpdateScheduled():
            self._isUpdateRunning = True

            self._logger.debug('New cluster update delay: %s seconds' %
                               (delay))

            time.sleep(delay)
            delay += SyncManager.CLUSTER_UPDATE_DELAY_INCREASE

            # Log a warning if this timer has run too many times.
            updateCnt += 1
            self._logger.debug('Cluster update timer count: %s' % (updateCnt))

            if updateCnt > SyncManager.CLUSTER_UPDATE_WARNING_LIMIT:
                self._logger.warning(
                    'Cluster updated more than %s times using the same'
                    ' timer (possible configuration problem)' %
                    (SyncManager.CLUSTER_UPDATE_WARNING_LIMIT))

            self._logger.debug('Starting cluster update using: %s' %
                               (updateCmd))

            # Since we might sleep for a while, reset the update flag
            # just before running the update to avoid unnecessary
            # syncs.

            self.__resetIsUpdateScheduled()

            if 'node' in opts:
                node_update = opts['node']
                env = {
                    **os.environ, 'FACTER_node_tags_update':
                    json.dumps(node_update)
                }
                self._logger.debug('FACTER_node_tags_update={}'.format(
                    env['FACTER_node_tags_update']))
                p = TortugaSubprocess(updateCmd, env=env)
            elif 'software_profile' in opts:
                sp_update = opts['software_profile']
                env = {
                    **os.environ, 'FACTER_softwareprofile_tags_update':
                    json.dumps(sp_update)
                }
                self._logger.debug(
                    'FACTER_softwareprofile_tags_update={}'.format(
                        env['FACTER_softwareprofile_tags_update']))
                p = TortugaSubprocess(updateCmd, env=env)
            else:
                p = TortugaSubprocess(updateCmd)

            try:
                p.run()
                self._logger.debug('Cluster update successful')
                self._logger.debug('stdout: {}'.format(
                    p.getStdOut().decode().rstrip()))
                self._logger.debug('stderr: {}'.format(
                    p.getStdErr().decode().rstrip()))
            except CommandFailed:
                if p.getExitStatus() == tortugaStatus.\
                        TORTUGA_ANOTHER_INSTANCE_OWNS_LOCK_ERROR:
                    self._logger.debug(
                        'Another cluster update is already running, will'
                        ' try to reschedule it')

                    self._isUpdateRunning = False

                    self.scheduleClusterUpdate(
                        updateReason='another update already running',
                        delay=60,
                        opts=opts)

                    break
                else:
                    self._logger.error(
                        'Update command "%s" failed (exit status: %s):'
                        ' %s' % (updateCmd, p.getExitStatus(), p.getStdErr()))

            self._logger.debug('Done with cluster update')

        self._isUpdateRunning = False

        self._logger.debug('Update timer exiting')

    def __resetIsUpdateScheduled(self):
        """ Reset cluster update flag, return old flag value. """
        SyncManager.__instanceLock.acquire()
        try:
            flag = self._isUpdateScheduled
            self._isUpdateScheduled = False
            return flag
        finally:
            SyncManager.__instanceLock.release()

    def scheduleClusterUpdate(self, updateReason=None, delay=5, opts=None):
        """ Schedule cluster update. """
        opts = opts if opts is not None else {}
        SyncManager.__instanceLock.acquire()
        try:
            if self._isUpdateScheduled:
                # Already scheduled.
                return

            # Start update timer if needed.
            self._isUpdateScheduled = True
            if not self._isUpdateRunning:
                self._logger.debug('Scheduling cluster update in %s seconds,'
                                   ' reason: %s, opts: %s' %
                                   (delay, updateReason, opts))

                t = threading.Timer(delay,
                                    self.__runClusterUpdate,
                                    kwargs=dict(opts=opts))

                t.start()
            else:
                self._logger.debug(
                    'Will not schedule new update timer while the old'
                    ' timer is running')
        finally:
            SyncManager.__instanceLock.release()

    def getUpdateStatus(self):  # pylint: disable=no-self-use
        """ Check cluster update flag. """
        return RunManager().checkLock('cfmsync')
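SyncManager coalesces bursts of requests: scheduleClusterUpdate() only flips a flag while a timer is live, and __runClusterUpdate() keeps looping for as long as that flag is re-set. A stripped-down sketch of the same flag-plus-timer pattern (the class and names are illustrative, not from the Tortuga code):

import threading
import time

class Coalescer:
    # Minimal sketch of the flag-plus-timer coalescing used by
    # SyncManager above
    def __init__(self):
        self._lock = threading.RLock()
        self._scheduled = False
        self._running = False

    def schedule(self, delay=0.1):
        with self._lock:
            if self._scheduled:
                return              # already queued: coalesce
            self._scheduled = True
            if not self._running:
                threading.Timer(delay, self._run).start()

    def _reset_scheduled(self):
        with self._lock:
            flag, self._scheduled = self._scheduled, False
            return flag

    def _run(self):
        while self._reset_scheduled():  # loop while requests keep arriving
            self._running = True
            time.sleep(0.1)             # stand-in for the real update
        self._running = False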
Example #4
class KitInstallerBase(ConfigurableMixin, metaclass=KitInstallerMeta):
    """
    Base class for kit installers.

    """
    config_type = 'kit'

    #
    # The kit installation directory
    #
    install_path = None

    #
    # Metadata, loaded via the load_meta class method.
    #
    name = None
    version = None
    iteration = None
    spec = (None, None, None)
    meta = {}

    #
    # Loader state
    #
    ws_controllers_loaded = False
    db_tables_loaded = False

    #
    # Attributes, provided by instances of this class
    #
    puppet_modules = []
    task_modules = []

    def __init__(self):
        self.config_manager = ConfigManager()

        #
        # Setup paths
        #
        self.kit_path = os.path.dirname(inspect.getfile(self.__class__))
        self.puppet_modules_path = os.path.join(self.kit_path,
                                                'puppet_modules')
        self.files_path = os.path.join(self.kit_path, 'files')

        #
        # Initialize configuration
        #
        super().__init__()

        #
        # Load components and resource adapters
        #
        self._component_installers = {}
        self._component_installers_loaded = False

        #
        # Web service controller classes
        #
        self._ws_controller_classes = []

        self.session = None

    def get_config_base(self):
        return self.config_manager.getKitConfigBase()

    @classmethod
    def load_meta(cls, meta_dict):
        """
        Loads the meta data for the kit into the class.

        :param meta_dict: A dict containing the metadata, as specified by
                          the KitMetadataSchema class.

        """
        errors = KitMetadataSchema().validate(meta_dict)
        if errors:
            raise Exception('Kit metadata validation error: {}'.format(errors))

        requires_core = meta_dict.get('requires_core', VERSION)
        if not version_is_compatible(requires_core):
            raise Exception('The {} kit requires tortuga core >= {}'.format(
                meta_dict['name'], requires_core))

        meta_dict = copy.deepcopy(meta_dict)
        cls.name = meta_dict.pop('name')
        cls.version = meta_dict.pop('version')
        cls.iteration = meta_dict.pop('iteration')
        cls.spec = (cls.name, cls.version, cls.iteration)
        cls.meta = meta_dict

    def _load_component_installers(self):
        """
        Load component installers for this kit.

        """
        if self._component_installers_loaded:
            return

        kit_pkg_name = inspect.getmodule(self).__package__

        comp_pkg_name = '{}.components'.format(kit_pkg_name)

        logger.debug('Searching for component installers in package: %s',
                     comp_pkg_name)

        #
        # Look for the components sub-package
        #
        try:
            comp_pkg = importlib.import_module(comp_pkg_name)
        except ModuleNotFoundError:
            logger.warning('No component installers found for kit: %s',
                           kit_pkg_name)
            return

        #
        # Walk the components sub-package, looking for component installers
        #
        for loader, name, ispkg in pkgutil.walk_packages(comp_pkg.__path__):
            if not ispkg:
                continue

            full_pkg_path = '{}.{}'.format(comp_pkg_name, name)
            try:
                #
                # Look for the component module in the package
                #
                comp_inst_mod = importlib.import_module(
                    '{}.component'.format(full_pkg_path))

                #
                # Look for the ComponentInstaller class in the module
                #
                if not hasattr(comp_inst_mod, 'ComponentInstaller'):
                    logger.warning('ComponentInstaller class not found: %s',
                                   full_pkg_path)
                    continue

                #
                # Initialize the ComponentInstaller class and register
                # it with the KitInstaller
                #
                comp_inst_class = comp_inst_mod.ComponentInstaller
                comp_inst = comp_inst_class(self)
                comp_inst.session = self.session
                self._component_installers[comp_inst_class.name] = \
                    comp_inst

                logger.debug('Component installer registered: %s',
                             comp_inst.spec)

            except ModuleNotFoundError:
                logger.debug('Package not a component: %s', full_pkg_path)

        self._component_installers_loaded = True

    def is_installable(self):
        """
        Determines whether or not this kit is installable under the given
        conditions/circumstances. Override this in your implementations as
        necessary.

        :return: True if it is installable, False otherwise.

        """
        return True

    def run_action(self, action_name, *args, **kwargs):
        """
        Runs the specified action.

        :param action_name: the name of the action to run

        """
        try:
            action = getattr(self, 'action_{}'.format(action_name))
        except AttributeError:
            # getattr() raises AttributeError, not KeyError, for a
            # missing action_* method
            raise Exception('Unknown action: {}'.format(action_name))

        return action(*args, **kwargs)

    def get_kit(self):
        """
        Gets the Kit instance for this kit.

        :return: a Kit instance

        """
        kit = Kit(name=self.name,
                  version=self.version,
                  iteration=self.iteration)
        kit.setDescription(self.meta.get('description', None))
        for component_installer in self.get_all_component_installers():
            kit.addComponent(component_installer.get_component())
        return kit

    def get_eula(self):
        """
        Gets the EULA for this kit, if it exists.

        :return: a Eula instance if there is a EULA file, otherwise None.

        """
        eula = None
        eula_path = os.path.join(self.install_path, EULA_FILE)
        if os.path.isfile(eula_path):
            with open(eula_path) as eula_fp:
                text = eula_fp.read()
            eula = Eula(text=text)
        else:
            logger.debug('EULA not found: %s', eula_path)

        return eula

    def get_component_installer(self, component_name: str):
        self._load_component_installers()
        return self._component_installers.get(component_name)

    def get_all_component_installers(self):
        self._load_component_installers()
        return list(self._component_installers.values())

    def register_database_tables(self):
        """
        Register database table mappers for this kit.

        """
        #
        # If another kit of the same name already exists, and has loaded
        # the database tables, then we don't need to do it a second time
        #
        for ki in get_all_kit_installers():
            if ki.spec[0] == self.spec[0] and ki.db_tables_loaded:
                self.__class__.db_tables_loaded = True
                return

        kit_pkg_name = inspect.getmodule(self).__package__
        db_table_pkg_name = '{}.db.models'.format(kit_pkg_name)
        logger.debug('Searching for database table mappers in package: %s',
                     db_table_pkg_name)

        try:
            importlib.import_module(db_table_pkg_name)
            self.__class__.db_tables_loaded = True

        except ModuleNotFoundError:
            logger.debug('No database table mappers found for kit: %s',
                         self.spec)

    def register_web_service_controllers(self):
        """
        Register web service controllers for this kit.

        """
        #
        # If another kit of the same name already exists, and has loaded
        # the ws controllers, then we don't need to do it a second time
        #
        for ki in get_all_kit_installers():
            if ki.spec[0] == self.spec[0] and ki.ws_controllers_loaded:
                self.__class__.ws_controllers_loaded = True
                return

        kit_pkg_name = inspect.getmodule(self).__package__
        ws_pkg_name = '{}.web_service.controllers'.format(kit_pkg_name)
        logger.debug('Searching for web service controllers in package: %s',
                     ws_pkg_name)

        try:
            importlib.import_module(ws_pkg_name)
            self.__class__.ws_controllers_loaded = True

        except ModuleNotFoundError:
            logger.debug('No web service controllers found for kit: %s',
                         self.spec)

    def register_event_listeners(self):
        """
        Register event listeners for this kit.

        """

        kit_pkg_name = inspect.getmodule(self).__package__

        listener_pkg_name = '{}.events.listeners'.format(kit_pkg_name)

        logger.debug('Searching for event listeners in package: %s',
                     listener_pkg_name)

        try:
            importlib.import_module(listener_pkg_name)
        except ModuleNotFoundError:
            logger.debug('No event listeners found for kit: %s', self.spec)

    def action_install_puppet_modules(self, *args, **kwargs):
        #
        # Prevent circular import
        #
        from .actions import UninstallPuppetModulesAction, \
            InstallPuppetModulesAction
        #
        # Do an uninstall first, just in case there is an old version of
        # the module still hanging around. This should fail silently if the
        # module is not installed.
        #
        UninstallPuppetModulesAction(self)(*args, **kwargs)
        #
        # Do the actual install
        #
        return InstallPuppetModulesAction(self)(*args, **kwargs)

    def action_pre_install(self):
        pass

    def action_pre_uninstall(self):
        pass

    def action_post_install(self):
        #
        # Check for python packages to install
        #
        pkg_dir = os.path.join(self.install_path, 'python_packages')
        if os.path.exists(pkg_dir):
            self._update_python_repo(pkg_dir)

        #
        # Install required python packages from requirements.txt
        #
        requirements_path = os.path.join(self.kit_path, 'requirements.txt')
        pip_install_requirements(requirements_path)

    def _update_python_repo(self, pkg_dir: str):
        """
        Updates the Tortuga Python repo with packages from the kit.

        :param pkg_dir: the source directory from which the packages will
                        be copied

        """
        #
        # Copy the files from the pkg_dir to the Tortuga repo
        #
        whl_path = os.path.join(pkg_dir, '*.whl')
        repo_path = os.path.join(self.config_manager.getTortugaIntWebRoot(),
                                 'python-tortuga')

        cmd = 'rsync -a {} {}'.format(whl_path, repo_path)

        logger.debug(cmd)

        executeCommand(cmd)

        #
        # Re-build the package index
        #
        dir2pi = os.path.join(self.config_manager.getBinDir(), 'dir2pi')

        cmd = '{} {}'.format(dir2pi, repo_path)

        logger.debug(cmd)

        executeCommand(cmd)

    def action_post_uninstall(self):
        pass

    def action_uninstall_puppet_modules(self, *args, **kwargs):
        #
        # Prevent circular import
        #
        from .actions import UninstallPuppetModulesAction
        return UninstallPuppetModulesAction(self)(*args, **kwargs)

    def action_get_metadata(self,
                            hardware_profile_name: Optional[str] = None,
                            software_profile_name: Optional[str] = None,
                            node_name: Optional[str] = None) -> dict:
        pass
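run_action() resolves action_<name> methods with getattr(), so kits add hooks simply by defining new action_* methods; note that getattr() raises AttributeError for a missing method, which is why the corrected example catches that rather than KeyError. A compact sketch of the same dispatch convention (hypothetical class, not from the kit code):

class Actions:
    # Sketch of the action_<name> dispatch convention used by
    # run_action() above
    def run_action(self, action_name, *args, **kwargs):
        action = getattr(self, 'action_{}'.format(action_name), None)
        if action is None:
            raise Exception('Unknown action: {}'.format(action_name))
        return action(*args, **kwargs)

    def action_post_install(self):
        return 'post_install ran'

assert Actions().run_action('post_install') == 'post_install ran'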
Example #5
class KitActions(ActionsBase):
    '''
    A kit is a group of components that constitute a complete application.
    '''
    def __init__(self, moduledir=None):
        '''
        Arguments:

            moduledir   Path to the module's directory. Defaults to CWD.
                        E.g: "/opt/tortuga/kits/kit-ganglia-1.2.3"

        Attributes:

            name        Kit name.

            version     Kit version.

            moduledir   Fully-qualified path to the root of the kit as
                        installed on the filesystem. For example,
                        "/opt/tortuga/kits/kit-ganglia-1.2.3"

            components  a list of ComponentActions() objects

            _logger     A logger instance for creating log messages
            _config     configManager instance
            _root       $TORTUGA_ROOT; e.g: "/opt/tortuga"
        '''

        super(KitActions, self).__init__()

        self.name = self.__class__.__name__.lower()
        self.version = None

        if moduledir:
            self.moduledir = moduledir
        else:
            self.moduledir = os.getcwd()

        self.components = []

        # Most kits need these things; including here for convenience.

        self._config = ConfigManager()
        self._root = self._config.getRoot()

    @property
    def config(self):
        return self._config

    def getLogger(self):
        return self._logger

    def getConfigManager(self):
        return self._config

    def getRoot(self):
        return self._root

    # Overridden from ActionsBase
    def getConfigFile(self):
        return "%s/%s-kit.conf" % (self.getConfigBase(), self.name.lower())

    def getConfigBase(self):
        return "%s/%s" % (self.getConfigManager().getKitConfigBase(),
                          self.name.lower())

    def pre_install(self):
        ''' Pre-installation kit hook. '''
        pass

    def post_install(self):
        ''' Post-installation kit hook. '''
        pass

    def pre_uninstall(self):
        ''' Pre-uninstallation kit hook. '''
        pass

    def post_uninstall(self):
        ''' Post-uninstallation kit hook. '''
        pass

    def add_component(self, component):
        '''Add the given component to the kit's list of components'''

        # Point the component to its parent
        component.kit = self
        self.components.append(component)

    def lookup_cname(self, cname):
        '''
        Return the ComponentActions object from the KitActions whose
        name is "cname"

        Raises:
            ComponentNotFound
        '''
        for c in self.components:
            if c.__component_name__ == cname:
                return c(self)

        raise ComponentNotFound("Can't find component [%s] in kit [%s]" %
                                (cname, self.__class__.__name__))

    def is_puppet_module_installed(self, name):
        cmd = '/opt/puppetlabs/bin/puppet module list --render-as=json'

        p = subprocess.Popen(cmd,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)

        # Read all output and check the exit status *before* parsing;
        # a failed command would otherwise raise JSONDecodeError here
        stdout, _ = p.communicate()

        if p.returncode != 0:
            return None

        puppet_module_list = json.loads(stdout)

        for modules in \
                puppet_module_list['modules_by_path'].values():
            for module in modules:
                if module.startswith('Module %s(' % (name)):
                    return True

        return False

    def installPuppetModule(self, modulePath):
        """
        Install "standard" Puppet module using "puppet module install --force"

        Raises:
            ConfigurationError
        """

        if not os.path.exists(modulePath):
            errmsg = ('Error: unable to install puppet module [%s].'
                      ' Module does not exist' % (modulePath))

            self.getLogger().error(errmsg)

            raise ConfigurationError(errmsg)

        cmd = ('/opt/puppetlabs/bin/puppet module install --color false'
               ' --force %s' % (modulePath))

        tortugaSubprocess.executeCommand(cmd)

    def uninstallPuppetModule(self, moduleName):
        cmd = ('/opt/puppetlabs/bin/puppet module uninstall'
               ' --color false --ignore-changes %s' % (moduleName))
        tortugaSubprocess.executeCommandAndIgnoreFailure(cmd)

    def install_wheel_matching_filespec(self, whl_pathspec):
        # Find a whl matching the filespec
        whl_files = glob.glob(whl_pathspec)

        if not whl_files:
            raise FileNotFound('No files found matching spec %s' %
                               (whl_pathspec))

        # Use the first whl file found
        cmd = '%s/pip install %s' % (self._config.getBinDir(), whl_files[0])

        tortugaSubprocess.executeCommandAndIgnoreFailure(cmd)

    def uninstall_wheel(self, wheel_name):
        cmd = 'pip uninstall %s' % (wheel_name)

        tortugaSubprocess.executeCommandAndIgnoreFailure(cmd)
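is_puppet_module_installed() assumes that puppet module list --render-as=json yields a modules_by_path mapping whose entries render as "Module <name>(<path>)". A sketch of the same test against a canned payload (the payload shape is inferred from the parsing code above, not verified against Puppet):

import json

# Canned payload; shape inferred from the parsing code above
payload = json.loads("""
{
  "modules_by_path": {
    "/etc/puppetlabs/code/modules": [
      "Module ganglia(/etc/puppetlabs/code/modules/ganglia)"
    ]
  }
}
""")

def module_installed(payload, name):
    # Mirrors the startswith('Module <name>(') test above
    return any(
        module.startswith('Module %s(' % name)
        for modules in payload['modules_by_path'].values()
        for module in modules
    )

assert module_installed(payload, 'ganglia')
assert not module_installed(payload, 'nagios')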
Example #6
class ResourceAdapter(object):     \
        # pylint: disable=too-many-public-methods
    '''
    This is the base class for all resource adapters to derive from.
    The default actions simply print a debug message to show that the
    subclass did not implement the action.
    '''
    def __init__(self, addHostSession=None):
        if '__adaptername__' not in self.__class__.__dict__:
            raise NotImplementedError(
                'Subclasses of ResourceAdapter must have __adaptername__'
                ' defined')

        self._logger = logging.getLogger('tortuga.resourceAdapter.%s' %
                                         (self.__adaptername__))
        self._logger.addHandler(logging.NullHandler())

        self.__installer_public_hostname = None
        self.__installer_public_ipaddress = None
        self.__private_dns_zone = None

        # Initialize caches
        self.__addHostApi = None
        self.__nodeApi = None
        self.__osObject = None
        self.__sanApi = None

        # Initialize abort flag (to "not" aborting)
        self.__isAborted = False

        self._cm = ConfigManager()

        self._addHostSession = addHostSession

    @property
    def addHostSession(self):
        return self._addHostSession

    @property
    def cacheCfgFilePath(self):
        return os.path.join(self._cm.getRoot(), 'var',
                            '%s-instance.conf' % (self.__adaptername__))

    @property
    def cfgFileName(self):
        return os.path.join(
            self._cm.getKitConfigBase(),
            'adapter-defaults-%s.conf' % (self.__adaptername__))

    def hookAction(self, action, nodes, args=None):
        # Only the 'default' resource adapter overrides the hookAction()
        # method.
        pass

    def start(self,
              addNodesRequest,
              dbSession,
              dbHardwareProfile,
              dbSoftwareProfile=None):         \
            # pylint: disable=unused-argument

        self.__trace(addNodesRequest, dbSession, dbHardwareProfile,
                     dbSoftwareProfile)

    def validate_start_arguments(self, addNodesRequest, dbHardwareProfile,
                                 dbSoftwareProfile):
        self.__trace(addNodesRequest, dbHardwareProfile, dbSoftwareProfile)

    def stop(self, hardwareProfileName, deviceName):
        self.__trace(hardwareProfileName, deviceName)

    def updateNode(self, session, node, updateNodeRequest):         \
            # pylint: disable=unused-argument

        self.__trace(session, node, updateNodeRequest)

    def suspendActiveNode(self, nodeId):
        '''Suspend the given active node'''
        self.__trace(nodeId)

    def idleActiveNode(self, nodeIds):
        '''Change the given active node to an idle node'''
        self.__trace(nodeIds)

    def activateIdleNode(self, node, softwareProfileName,
                         softwareProfileChanged):
        '''Change the given idle node to an active node'''
        self.__trace(node, softwareProfileName, softwareProfileChanged)

    def deleteNode(self, nodeIds):
        '''Remove the given node (active or idle) from the system'''
        self.__trace(nodeIds)

    def _async_delete_nodes(self, nodes):
        """
        Asynchronously delete nodes; calls the
        "ResourceAdapter._delete_node()" method for each node to be
        deleted

        :param nodes: list of Nodes objects
        :return: None
        """
        greenlets = []

        for node in nodes:
            greenlets.append(gevent.spawn(self._delete_node, node))

        # TODO: implement timeout
        gevent.joinall(greenlets)

    def _delete_node(self, node):
        """
        Abstract method called to delete node from
        "ResourceAdapter._async_delete_nodes()"

        :param node: Nodes object
        """

    def transferNode(self, nodeIdSoftwareProfileTuples,
                     newSoftwareProfileName):
        '''Transfer the given idle node'''
        self.__trace(nodeIdSoftwareProfileTuples, newSoftwareProfileName)

    def startupNode(self, nodeIds, remainingNodeList=None, tmpBootMethod='n'):         \
            # pylint: disable=unused-argument
        '''Start the given node'''
        # By default raise unsupported operation
        raise UnsupportedOperation('Node does not support starting')

    def shutdownNode(self, nodes, bSoftReset=False):         \
            # pylint: disable=unused-argument
        '''Shutdown the given node'''
        # By default raise unsupported operation
        raise UnsupportedOperation('Node does not support shutdown')

    def rebootNode(self, nodes, bSoftReset=False):         \
            # pylint: disable=unused-argument
        '''Reboot the given node'''
        # By default raise unsupported operation
        raise UnsupportedOperation('Node does not support rebooting')

    def checkpointNode(self, nodeId):         \
            # pylint: disable=unused-argument
        '''Checkpoint the given node'''
        # By default raise unsupported operation
        raise UnsupportedOperation('Node does not support checkpointing')

    def revertNodeToCheckpoint(self, nodeId):         \
            # pylint: disable=unused-argument
        '''Revert the given node to the checkpoint'''
        # By default raise unsupported operation
        raise UnsupportedOperation('Node does not support checkpointing')

    def migrateNode(self, nodeId, remainingNodeList, liveMigrate):         \
            # pylint: disable=unused-argument
        '''Migrate the given node'''
        # By default raise unsupported operation
        raise UnsupportedOperation('Node does not support migrating')

    def addVolumeToNode(self, node, volume, isDirect):         \
            # pylint: disable=unused-argument
        '''Add a disk to a node'''
        # By default raise unsupported operation
        raise UnsupportedOperation(
            'Node does not support dynamic disk addition')

    def removeVolumeFromNode(self, node, volume):         \
            # pylint: disable=unused-argument
        '''Remove a disk from a node'''
        # By default raise unsupported operation
        raise UnsupportedOperation(
            'Node does not support dynamic disk deletion')

    def abort(self):
        '''abort node addition'''
        self._logger.debug('Setting abort flag')
        self.__isAborted = True

    def isAborted(self):
        '''Returns status of abort flag'''
        return self.__isAborted

    def __trace(self, *pargs, **kargs):
        stack = traceback.extract_stack()
        funcname = stack[-2][2]

        self._logger.debug('-- (pass) %s::%s %s %s' %
                           (self.__adaptername__, funcname, pargs, kargs))

    def getLogger(self):
        return self._logger

    def getResourceAdapterConfig(self, sectionName=None):
        """
        Raises:
            ResourceNotFound
        """

        self.getLogger().debug(
            'getResourceAdapterConfig(sectionName=[{0}])'.format(
                sectionName if sectionName else '(none)'))

        try:
            # Load default values
            defaultResourceAdapterConfigDict = self._loadConfigDict()

            if sectionName is None or sectionName == 'default':
                return defaultResourceAdapterConfigDict
        except ResourceNotFound:
            defaultResourceAdapterConfigDict = {}

        overrideConfigDict = self._loadConfigDict(sectionName)

        # Override defaults with hardware profile specific settings
        return {**defaultResourceAdapterConfigDict, **overrideConfigDict}

    def _loadConfigDict(self, sectionName=None):
        """
        Raises:
            ResourceNotFound
        """

        if sectionName is None:
            sectionName = 'default'

        session = DbManager().openSession()

        try:
            self.getLogger().debug('_loadConfigDict()')

            result = ResourceAdapterCredentialsDbHandler().get(
                session, self.__adaptername__, sectionName)

            configDict = {}

            for entry in result['configuration']:
                configDict[entry['key']] = entry['value']
        finally:
            DbManager().closeSession()

        return configDict

    def getResourceAdapterConfigProfileByNodeName(self, name):
        """Get resource adapter configuration for existing node"""

        self.getLogger().debug(
            'getResourceAdapterConfigProfileByNodeName():'
            ' name=[{0}]'.format(name))

        instance_cache = self.instanceCacheRefresh()

        return instance_cache.get(name, 'resource_adapter_configuration') \
            if instance_cache.has_section(name) and instance_cache.has_option(
                name, 'resource_adapter_configuration') else None

    def __getAddHostApi(self):
        '''Get and cache the Add Host API'''

        if self.__addHostApi is None:
            from tortuga.addhost.addHostServerLocal \
                import AddHostServerLocal

            self.__addHostApi = AddHostServerLocal()

        return self.__addHostApi

    def __getNodeApi(self):
        '''Get and cache the Node API'''

        if self.__nodeApi is None:
            from tortuga.node.nodeApi import NodeApi
            self.__nodeApi = NodeApi()
        return self.__nodeApi

    def __getOsObject(self):
        '''Get and cache the OS Object Factory'''

        if self.__osObject is None:
            from tortuga.os_utility import osUtility
            self.__osObject = osUtility.getOsObjectFactory()
        return self.__osObject

    def __getSanApi(self):
        '''Internal: Get and cache the SAN API'''

        if self.__sanApi is None:
            from tortuga.san import san
            self.__sanApi = san.San()
        return self.__sanApi

    # Properties for this object
    addHostApi = property(__getAddHostApi, None, None, None)
    nodeApi = property(__getNodeApi, None, None, None)
    osObject = property(__getOsObject, None, None, None)
    sanApi = property(__getSanApi, None, None, None)

    def statusMessage(self, msg):
        if self._addHostSession:
            AddHostManager().updateStatus(self._addHostSession, msg)
        else:
            # Just print out the message...this is a stop gap for resource
            # adapters running outside of the addHostManager framework
            sys.stdout.write(msg + '\n')
            sys.stdout.flush()

    def getOptions(self, dbSoftwareProfile, dbHardwareProfile):         \
            # pylint: disable=unused-argument

        return {}

    def instanceCacheWrite(self, cfg):
        # Write the instance cache back to disk

        self.getLogger().debug('instanceCacheWrite()')

        with open(self.cacheCfgFilePath, 'w') as fp:
            cfg.write(fp)

    def instanceCacheRefresh(self):
        self.getLogger().debug('instanceCacheRefresh()')

        cfg = configparser.ConfigParser()

        cfg.read(self.cacheCfgFilePath)

        return cfg

    def instanceCacheSet(self, name, metadata=None):
        self.getLogger().debug('instanceCacheSet(node=[%s], metadata=[%s])' %
                               (name, metadata))

        cfg = self.instanceCacheRefresh()

        if not cfg.has_section(name):
            cfg.add_section(name)

        # Write metadata to node section
        if metadata:
            for key, value in metadata.items():
                cfg.set(name, key, value)

        self.instanceCacheWrite(cfg)

    def instanceCacheSetBulk(self, instance_ids, nodes=None):
        self.getLogger().debug(
            'instanceCacheSetBulk(instance_ids=[%s], nodes=[%s])' %
            (' '.join(instance_ids), ' '.join(
                [node.name for node in nodes or []])))

        cfg = self.instanceCacheRefresh()

        if not nodes:
            if not cfg.has_section('unassigned'):
                cfg.add_section('unassigned')

                instances = set()
            else:
                val = cfg.get('unassigned', 'instances')

                instances = set(val.split(' '))

            instances |= set(instance_ids)

            cfg.set('unassigned', 'instances', ' '.join(instances))

        self.instanceCacheWrite(cfg)

    def instanceCacheGet(self, nodeName):
        self.getLogger().debug('instanceCacheGet(nodeName=[%s])' % (nodeName))

        cfg = self.instanceCacheRefresh()

        if not cfg.has_section(nodeName):
            raise ResourceNotFound(
                'No instance cache entry for [{0}]'.format(nodeName))

        # Read entire section into a dict
        result = {}

        for key, value in cfg.items(nodeName):
            result[key] = value

        return result

    def instanceCacheDelete(self, name):
        # Clear instance from configuration

        config = self.instanceCacheRefresh()

        if not config.has_section(name):
            self.getLogger().debug(
                'Cache clear: node [{0}] not found, no action'
                ' taken'.format(name))

            return

        self.getLogger().debug('Cache clear: node [{0}]'.format(name))

        config.remove_section(name)

        self.instanceCacheWrite(config)

    def instanceCacheUpdate(self, name, added=None, deleted=None):
        """
        'added' is a list of key-value tuples to be added
        'deleted' is a list of keys to be removed from the instance cache
        """

        self.getLogger().debug(
            'instanceCacheUpdate(): name=[{0}]'.format(name))

        config = self.instanceCacheRefresh()

        if not config.has_section(name):
            config.add_section(name)

        for key, value in added or []:
            config.set(name, key, value)

        for key in deleted or []:
            config.remove_option(name, key)

        self.instanceCacheWrite(config)

    def __findNicForProvisioningNetwork(self, nics, prov_network):
        """
        TODO: move this elsewhere

        Raises:
            NicNotFound
        """

        nics = [nic for nic in nics if nic.network == prov_network]

        if not nics:
            raise NicNotFound(
                'Unable to find NIC on provisioning network [%s]' %
                (prov_network.address + '/' + prov_network.netmask))

        return nics[0]

    def writeLocalBootConfiguration(self, node, hardwareprofile,
                                    softwareprofile):
        """
        Raises:
            NicNotFound
        """

        if not hardwareprofile.nics:
            # Hardware profile has no provisioning NICs defined. This
            # shouldn't happen...

            self.getLogger().debug(
                'No provisioning nics defined in hardware profile %s' %
                (hardwareprofile.name))

            return

        # Determine the provisioning nic for the hardware profile
        hwProfileProvisioningNic = hardwareprofile.nics[0]

        nic = None

        if hwProfileProvisioningNic.network:
            # Find the nic attached to the newly added node that is on
            # the same network as the provisioning nic.
            nic = self.__findNicForProvisioningNetwork(
                node.nics, hwProfileProvisioningNic.network)

        if not nic or not nic.mac:
            self.getLogger().warning(
                'MAC address not defined for nic (ip=[%s]) on node [%s]' %
                (nic.ip if nic else None, node.name))

            return

        # Set up DHCP/PXE for newly added node
        bhm = getOsObjectFactory().getOsBootHostManager()

        # Write out the PXE file
        bhm.writePXEFile(node,
                         hardwareprofile=hardwareprofile,
                         softwareprofile=softwareprofile,
                         localboot=False)

        # Add a DHCP lease
        bhm.addDhcpLease(node, nic)

    def removeLocalBootConfiguration(self, node):
        bhm = self.osObject.getOsBootHostManager()

        bhm.rmPXEFile(node)
        bhm.removeDhcpLease(node)

    def _pre_add_host(self, name, hwprofilename, swprofilename, ip):         \
            # pylint: disable=unused-argument

        # Perform "pre-add-host" operation
        command = ('sudo %s/pre-add-host'
                   ' --hardware-profile %s'
                   ' --software-profile %s'
                   ' --host-name %s' %
                   (self._cm.getBinDir(), hwprofilename, swprofilename, name))

        if ip:
            command += ' --ip %s' % (ip)

        self.getLogger().debug('calling command= [%s]' % (command))

        p = subprocess.Popen(command,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             close_fds=True)

        # communicate() waits for the process to finish; no separate
        # wait() is needed
        p.communicate()

    @property
    def installer_public_hostname(self):
        if self.__installer_public_hostname is None:

            cmd = '/opt/puppetlabs/bin/facter fqdn'

            with open(os.devnull, 'w') as devnull:
                p = subprocess.Popen(cmd,
                                     shell=True,
                                     stdout=subprocess.PIPE,
                                     stderr=devnull)

                stdout, _ = p.communicate()

                retval = p.wait()

            if retval == 0:
                self.__installer_public_hostname = stdout.decode().rstrip()

                self.getLogger().debug('using installerName [%s] from Facter' %
                                       (self.__installer_public_hostname))
            else:
                self.__installer_public_hostname = self._cm.getHost()

                self.getLogger().debug('using installerName [%s] from system' %
                                       (self.__installer_public_hostname))

        return self.__installer_public_hostname

    @property
    def installer_public_ipaddress(self):
        # Get installer IP
        if self.__installer_public_ipaddress is None:
            self.getLogger().debug('Looking up installer IP using DNS')

            aiInfo = socket.getaddrinfo(self.installer_public_hostname, None,
                                        socket.AF_INET, socket.SOCK_STREAM)

            self.__installer_public_ipaddress = aiInfo[0][4][0]

        return self.__installer_public_ipaddress

    @property
    def private_dns_zone(self):
        if self.__private_dns_zone is None:
            self.__private_dns_zone = \
                ParameterApi().getParameter('DNSZone').getValue()

        return self.__private_dns_zone

    def get_node_vcpus(self, name):         \
            # pylint: disable=unused-argument

        return 1

    def get_instance_size_mapping(self, value):
        """
        Helper method for matching the first field (instance size) in
        the resource adapter specific CSV file

        :return: instance type/size to vcpus mapping
        :rtype: int
        """

        fn = os.path.join(
            self._cm.getKitConfigBase(),
            '{0}-instance-sizes.csv'.format(self.__adaptername__))

        if not os.path.exists(fn):
            return 1

        try:
            with open(fn) as fp:
                reader = csv.reader(fp)
                for row in reader:
                    if row[0] == value:
                        return int(row[1])

            return 1
        except Exception as exc:
            self.getLogger().error(
                'Error processing instance type mapping'
                ' [{0}] (exc=[{1}]). Using default value'.format(fn, exc))

            return 1
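get_instance_size_mapping() expects a two-column CSV named <adaptername>-instance-sizes.csv, mapping an instance size to a vcpu count and defaulting to 1. A hedged sketch of the same lookup with inline data (the instance names are made up):

import csv
import io

# Hypothetical <adaptername>-instance-sizes.csv content: one
# "<instance size>,<vcpus>" row per instance type
csv_data = io.StringIO('m5.large,2\nm5.xlarge,4\n')

def vcpus_for(rows, value, default=1):
    # Mirrors the row[0] match and int(row[1]) lookup above
    for row in rows:
        if row and row[0] == value:
            return int(row[1])
    return default

assert vcpus_for(csv.reader(csv_data), 'm5.xlarge') == 4
assert vcpus_for(csv.reader(io.StringIO('')), 'unknown') == 1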