Code example #1
class SyncManager(TortugaObjectManager, Singleton):
    """Singleton class for cluster sync management"""

    # Singleton.
    __instanceLock = threading.RLock()

    # update delay increase (seconds)
    CLUSTER_UPDATE_DELAY_INCREASE = 30

    # after this limit is reached, a warning will be logged
    CLUSTER_UPDATE_WARNING_LIMIT = 10

    def __init__(self):
        super(SyncManager, self).__init__()

        self._isUpdateScheduled = False
        self._isUpdateRunning = False
        self._sudoCmd = \
            osUtility.getOsObjectFactory().getOsSysManager().getSudoCommand()
        self._cm = ConfigManager()

    def __runClusterUpdate(self):
        """ Run cluster update. """
        self.getLogger().debug('Update timer running')

        updateCmd = '%s %s' % (self._sudoCmd,
                               os.path.join(self._cm.getRoot(),
                                            'bin/run_cluster_update.sh'))

        delay = 0
        updateCnt = 0
        while self.__resetIsUpdateScheduled():
            self._isUpdateRunning = True

            self.getLogger().debug('New cluster update delay: %s seconds' %
                                   (delay))

            time.sleep(delay)
            delay += SyncManager.CLUSTER_UPDATE_DELAY_INCREASE

            # Log a warning if the timer has run too many times.
            updateCnt += 1
            self.getLogger().debug('Cluster update timer count: %s' %
                                   (updateCnt))

            if updateCnt > SyncManager.CLUSTER_UPDATE_WARNING_LIMIT:
                self.getLogger().warning(
                    'Cluster updated more than %s times using the same'
                    ' timer (possible configuration problem)' %
                    (SyncManager.CLUSTER_UPDATE_WARNING_LIMIT))

            self.getLogger().debug('Starting cluster update using: %s' %
                                   (updateCmd))

            # Since we might sleep for a while, we need to reset the
            # update flag just before we run the update, to avoid
            # unnecessary syncs.

            self.__resetIsUpdateScheduled()

            p = TortugaSubprocess(updateCmd)

            try:
                p.run()

                self.getLogger().debug('Cluster update successful')
            except CommandFailed:
                if p.getExitStatus() == tortugaStatus.\
                        TORTUGA_ANOTHER_INSTANCE_OWNS_LOCK_ERROR:
                    self.getLogger().debug(
                        'Another cluster update is already running, will'
                        ' try to reschedule it')

                    self._isUpdateRunning = False

                    self.scheduleClusterUpdate(
                        updateReason='another update already running',
                        delay=60)

                    break
                else:
                    self.getLogger().error(
                        'Update command "%s" failed (exit status: %s):'
                        ' %s' % (updateCmd, p.getExitStatus(), p.getStdErr()))

            self.getLogger().debug('Done with cluster update')

        self._isUpdateRunning = False

        self.getLogger().debug('Update timer exiting')

    def __resetIsUpdateScheduled(self):
        """ Reset cluster update flag, return old flag value. """
        SyncManager.__instanceLock.acquire()
        try:
            flag = self._isUpdateScheduled
            self._isUpdateScheduled = False
            return flag
        finally:
            SyncManager.__instanceLock.release()

    def scheduleClusterUpdate(self, updateReason=None, delay=5):
        """ Schedule cluster update. """
        SyncManager.__instanceLock.acquire()
        try:
            if self._isUpdateScheduled:
                # Already scheduled.
                return

            # Start update timer if needed.
            self._isUpdateScheduled = True
            if not self._isUpdateRunning:
                self.getLogger().debug(
                    'Scheduling cluster update in %s seconds,'
                    ' reason: %s' % (delay, updateReason))

                t = threading.Timer(delay, self.__runClusterUpdate)

                t.start()
            else:
                self.getLogger().debug(
                    'Will not schedule new update timer while the old'
                    ' timer is running')
        finally:
            SyncManager.__instanceLock.release()

    def getUpdateStatus(self):  # pylint: disable=no-self-use
        """ Check cluster update flag. """
        return RunManager().checkLock('cfmsync')
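
The manager coalesces bursts of requests: while _isUpdateScheduled is set, further scheduleClusterUpdate() calls return immediately, and the running timer simply loops again. A minimal usage sketch (hedged: it assumes the Singleton base class makes repeated construction return the shared instance):

sync_manager = SyncManager()

# Both calls collapse into a single pending update; the second returns
# immediately because _isUpdateScheduled is already True.
sync_manager.scheduleClusterUpdate(updateReason='node added')
sync_manager.scheduleClusterUpdate(updateReason='node added')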
Code example #2
class KitInstallerBase(ConfigurableMixin, metaclass=KitInstallerMeta):
    """
    Base class for kit installers.

    """
    config_type = 'kit'

    #
    # The kit installation directory
    #
    install_path = None

    #
    # Metadata, loaded via the load_meta class method.
    #
    name = None
    version = None
    iteration = None
    spec = (None, None, None)
    meta = {}

    #
    # Attributes, provided by instances of this class
    #
    puppet_modules = []

    def __init__(self):
        self.config_manager = ConfigManager()

        #
        # Setup paths
        #
        self.kit_path = os.path.dirname(inspect.getfile(self.__class__))
        self.puppet_modules_path = os.path.join(self.kit_path,
                                                'puppet_modules')
        self.files_path = os.path.join(self.kit_path, 'files')

        #
        # Initialize configuration
        #
        super().__init__()

        #
        # Load components and resource adapters
        #
        self._component_installers = {}
        self._component_installers_loaded = False

        #
        # Web service controller classes
        #
        self._ws_controller_classes = []
        self._ws_controller_classes_loaded = False

    def get_config_base(self):
        return self.config_manager.getKitConfigBase()

    @classmethod
    def load_meta(cls, meta_dict):
        """
        Loads the meta data for the kit into the class.

        :param meta_dict: A dict containing the metadata, as specified by
                          the KitMetadataSchema class.

        """
        errors = KitMetadataSchema().validate(meta_dict)
        if errors:
            raise Exception('Kit metadata validation error: {}'.format(errors))
        meta_dict = copy.deepcopy(meta_dict)
        cls.name = meta_dict.pop('name')
        cls.version = meta_dict.pop('version')
        cls.iteration = meta_dict.pop('iteration')
        cls.spec = (cls.name, cls.version, cls.iteration)
        cls.meta = meta_dict

    def _load_component_installers(self):
        """
        Load component installers for this kit.

        """
        if self._component_installers_loaded:
            return

        kit_pkg_name = inspect.getmodule(self).__package__
        comp_pkg_name = '{}.components'.format(kit_pkg_name)
        logger.debug(
            'Searching for component installers in package: {}'.format(
                comp_pkg_name))

        #
        # Look for the components sub-package
        #
        try:
            comp_pkg = importlib.import_module(comp_pkg_name)
        except ModuleNotFoundError:
            logger.warning('No component installers found for kit: {}'.format(
                kit_pkg_name))
            return

        #
        # Walk the components sub-package, looking for component installers
        #
        for loader, name, ispkg in pkgutil.walk_packages(comp_pkg.__path__):
            if not ispkg:
                continue

            full_pkg_path = '{}.{}'.format(comp_pkg_name, name)
            try:
                #
                # Look for the component module in the package
                #
                comp_inst_mod = importlib.import_module(
                    '{}.component'.format(full_pkg_path))

                #
                # Look for the ComponentInstaller class in the module
                #
                if not hasattr(comp_inst_mod, 'ComponentInstaller'):
                    logger.warning(
                        'ComponentInstaller class not found: {}'.format(
                            full_pkg_path))
                    continue

                #
                # Initialize the ComponentInstaller class and register
                # it with the KitInstaller
                #
                comp_inst_class = comp_inst_mod.ComponentInstaller
                comp_inst = comp_inst_class(self)
                self._component_installers[comp_inst_class.name] = \
                    comp_inst
                logger.debug('Component installer registered: {}'.format(
                    comp_inst.spec))

            except ModuleNotFoundError:
                logger.debug(
                    'Package not a component: {}'.format(full_pkg_path))

        self._component_installers_loaded = True

    def is_installable(self):
        """
        Determines whether or not this kit is installable under the given
        conditions/circumstances. Override this in your implementations as
        necessary.

        :return: True if it is installable, False otherwise.

        """
        return True

    def run_action(self, action_name, *args, **kwargs):
        """
        Runs the specified action.

        :param action_name: the name of the action to run

        """
        logger.debug('Calling kit action: {} with arguments {}, {}'.format(
            action_name, args, kwargs))
        # getattr raises AttributeError (not KeyError) for a missing
        # attribute, so look the action up with a default instead
        action = getattr(self, 'action_{}'.format(action_name), None)
        if action is None:
            raise Exception('Unknown action: {}'.format(action_name))
        return action(*args, **kwargs)

    def get_kit(self):
        """
        Gets the Kit instance for this kit.

        :return: a Kit instance

        """
        kit = Kit(name=self.name,
                  version=self.version,
                  iteration=self.iteration)
        kit.setDescription(self.meta.get('description', None))
        for component_installer in self.get_all_component_installers():
            kit.addComponent(component_installer.get_component())
        return kit

    def get_eula(self):
        """
        Gets the EULA for this kit, if it exists.

        :return: a Eula instance if there is a EULA file, otherwise None.

        """
        eula = None
        eula_path = os.path.join(self.install_path, EULA_FILE)
        if os.path.exists(eula_path) and os.path.isfile(eula_path):
            with open(eula_path) as eula_fp:
                text = eula_fp.read()
            eula = Eula(text=text)
        else:
            logger.debug('EULA not found: {}'.format(eula_path))
        return eula

    def get_component_installer(self, component_name):
        self._load_component_installers()
        return self._component_installers[component_name]

    def get_all_component_installers(self):
        self._load_component_installers()
        return list(self._component_installers.values())

    def register_database_table_mappers(self):
        """
        Register database table mappers for this kit.

        """
        kit_pkg_name = inspect.getmodule(self).__package__
        db_table_pkg_name = '{}.db.tables'.format(kit_pkg_name)
        logger.debug(
            'Searching for database table mappers in package: {}'.format(
                db_table_pkg_name))
        try:
            importlib.import_module(db_table_pkg_name)
        except ModuleNotFoundError:
            logger.debug('No database table mappers found for kit: {}'.format(
                self.spec))

    def register_web_service_controllers(self):
        """
        Register web service controllers for this kit.

        """
        kit_pkg_name = inspect.getmodule(self).__package__
        ws_pkg_name = '{}.web_service.controllers'.format(kit_pkg_name)
        logger.debug(
            'Searching for web service controllers in package: {}'.format(
                ws_pkg_name))
        try:
            importlib.import_module(ws_pkg_name)
        except ModuleNotFoundError:
            logger.debug('No web service controllers found for kit: {}'.format(
                self.spec))

    def register_web_service_worker_actions(self):
        """
        Register web service worker actions for this kit.

        """
        kit_pkg_name = inspect.getmodule(self).__package__
        ws_pkg_name = '{}.web_service.worker'.format(kit_pkg_name)
        logger.debug(
            'Searching for web service worker actions in package: {}'.format(
                ws_pkg_name))
        try:
            importlib.import_module(ws_pkg_name)
        except ModuleNotFoundError:
            logger.debug(
                'No web service worker actions found for kit: {}'.format(
                    self.spec))

    def action_install_puppet_modules(self, *args, **kwargs):
        #
        # Prevent circular import
        #
        from .actions import InstallPuppetModulesAction
        return InstallPuppetModulesAction(self)(*args, **kwargs)

    def action_pre_install(self):
        pass

    def action_pre_uninstall(self):
        pass

    def action_post_install(self):
        #
        # Install required python packages from requirements.txt
        #
        requirements_path = os.path.join(self.kit_path, 'requirements.txt')
        pip_install_requirements(self, requirements_path)

    def action_post_uninstall(self):
        pass

    def action_uninstall_puppet_modules(self, *args, **kwargs):
        #
        # Prevent circular import
        #
        from .actions import UninstallPuppetModulesAction
        return UninstallPuppetModulesAction(self)(*args, **kwargs)
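
run_action() dispatches by name to action_* methods, so a concrete kit only has to subclass KitInstallerBase and override the hooks it needs. A minimal sketch (hedged: the kit name, version, and puppet module name are illustrative, and the metadata dict is assumed to satisfy KitMetadataSchema):

class SampleKitInstaller(KitInstallerBase):
    puppet_modules = ['tortuga_kit_sample']

    def action_post_install(self):
        # base behaviour pip-installs requirements.txt first
        super().action_post_install()
        # kit-specific post-install steps would follow here


SampleKitInstaller.load_meta({
    'name': 'sample',
    'version': '1.0',
    'iteration': '0',
})

installer = SampleKitInstaller()
installer.run_action('post_install')  # dispatches to action_post_install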
Code example #3
def main(verbose, debug, no_autodetect, ignore_iam, unattended, region,
         profile):
    ec2_metadata = get_ec2_metadata()

    print('Configuring Tortuga AWS resource adapter')

    if unattended:
        ignore_iam = False
        verbose = True

    if not region:
        region = ec2_metadata['placement']['availability-zone'][:-1] \
            if ec2_metadata and not no_autodetect else None

        if verbose and region:
            print_statement('Region [{0}] obtained from EC2 instance metadata',
                            region)

    if region:
        print_statement('Detected AWS region: [{0}]', region)
    else:
        if not no_autodetect:
            error_message('Error: unable to determine current AWS region')

            if unattended:
                sys.exit(1)

        response = input(colorama.Style.BRIGHT +
                         'AWS region [{}]: '.format(DEFAULT_AWS_REGION) +
                         colorama.Style.RESET_ALL)

        if not response:
            region = DEFAULT_AWS_REGION
        else:
            region = response
            try:
                # validate region
                session = boto3.session.Session()
                if region not in session.get_available_regions('ec2'):
                    error_message('Error: invalid AWS region [{0}]', region)

                    sys.exit(1)
            except botocore.exceptions.EndpointConnectionError:
                error_message(
                    'Error connecting to EC2 endpoint (invalid region?)')

                sys.exit(1)

    creds = False
    iam_profile_name = None

    if not ignore_iam:
        # if IAM profile is not in use, query access/secret keys
        client = boto3.client('sts')

        response = None

        print(colorama.Fore.GREEN + colorama.Style.BRIGHT +
              'Checking for IAM profile...' + colorama.Style.RESET_ALL,
              end=' ')

        try:
            response = client.get_caller_identity()

            iam_arn = response['Arn']

            iam_user_policy = iam_arn.split(':')[5]

            assumed_role = False
            if iam_user_policy.startswith('assumed-role/'):
                _, iam_profile_name, _ = iam_user_policy.split('/', 2)
                assumed_role = True
            else:
                _, iam_profile_name = iam_user_policy.split('/', 1)

            print(iam_profile_name)

            if not assumed_role:
                print(colorama.Style.BRIGHT + colorama.Fore.YELLOW + '*' +
                      colorama.Style.RESET_ALL + format_string_with_arg(
                          ' Ensure IAM profile [{0}] is used to launch'
                          ' Grid Engine/Tortuga instance', iam_profile_name))

            creds = True
        except botocore.exceptions.NoCredentialsError:
            print(colorama.Fore.YELLOW + colorama.Style.BRIGHT + 'not found' +
                  colorama.Style.RESET_ALL)
        except Exception as exc:  # noqa pylint: disable=broad-except
            print(colorama.Style.DIM +
                  '[debug] Error querying IAM profile name:'
                  ' {0}'.format(exc) + colorama.Style.RESET_ALL)
    else:
        if debug:
            print(colorama.Style.DIM +
                  '[debug] Ignoring IAM profile (--ignore-iam argument'
                  ' specified)' + colorama.Style.RESET_ALL)

    access_key = None
    secret_key = None

    if not creds:
        if unattended:
            # cannot query for credentials in unattended mode
            print('Unable to query user for credentials in unattended mode')
            print('Exiting.')

            sys.exit(1)

        if os.getenv('AWS_ACCESS_KEY') and os.getenv('AWS_SECRET_ACCESS_KEY'):
            print(colorama.Fore.GREEN + colorama.Style.BRIGHT +
                  'Using AWS_ACCESS_KEY/AWS_SECRET_ACCESS_KEY from'
                  ' environment' + colorama.Style.RESET_ALL)

            # use envvars for credentials
            access_key = os.getenv('AWS_ACCESS_KEY')

            secret_key = os.getenv('AWS_SECRET_ACCESS_KEY')

        if not access_key:
            print(
                format_string_with_arg(
                    'IAM profile not detected. Using'
                    ' AWS access and secret access keys.',
                    forecolour=colorama.Fore.YELLOW))

            access_key = input(colorama.Style.BRIGHT + 'AWS access key: ' +
                               colorama.Style.RESET_ALL)
            if not access_key:
                print('Aborted by user.')
                sys.exit(1)

            secret_key = getpass.getpass(colorama.Style.BRIGHT +
                                         'AWS secret key: ' +
                                         colorama.Style.RESET_ALL)
            if not secret_key:
                print('Aborted by user.')
                sys.exit(1)

        # validate AWS credentials
        print('Validating AWS access credentials...', end=' ')

        ec2 = boto3.client('ec2',
                           region_name=region,
                           aws_access_key_id=access_key,
                           aws_secret_access_key=secret_key)

        try:
            if debug:
                print()
                print(colorama.Style.DIM +
                      '[debug] Calling \'describe_images()\' to '
                      'validate credentials' + colorama.Style.RESET_ALL)

            ec2.describe_images(Owners=['self'])

            print(colorama.Fore.GREEN + colorama.Style.BRIGHT + 'ok.' +
                  colorama.Style.RESET_ALL)
        except botocore.exceptions.ClientError as exc:
            print(colorama.Fore.RED + colorama.Style.BRIGHT + 'failed.' +
                  colorama.Style.RESET_ALL)
            errmsg = 'Error validating provided access credentials'
            if exc.response['Error']['Code'] != 'AuthFailure':
                error_message(errmsg + ': {0}', exc)
            else:
                error_message(errmsg)

            sys.exit(1)
    else:
        # using available IAM profile
        ec2 = boto3.client('ec2', region_name=region)

    # Write/update "~/.aws/credentials"
    update_aws_credentials(region, access_key, secret_key)

    # keypair

    keypair = None

    try:
        # Extract keypair name from ssh public key metadata
        for _, key_values in ec2_metadata['public-keys'].items():
            keypair = key_values['openssh-key'].split(' ')[-1]

            break
    except Exception:  # noqa pylint: disable=broad-except
        pass

    while not keypair:
        response = input('Keypair [? for list]: ')
        if not response:
            break

        if response.startswith('?'):
            result = ec2.describe_key_pairs()

            for tmp_keypair in result['KeyPairs']:
                print('    ' + colorama.Fore.YELLOW + colorama.Style.BRIGHT +
                      tmp_keypair['KeyName'] + colorama.Style.RESET_ALL +
                      colorama.Style.DIM + ' (' +
                      tmp_keypair['KeyFingerprint'] + ')' +
                      colorama.Style.RESET_ALL)

            continue

        # validate keypair
        try:
            ec2.describe_key_pairs(KeyNames=[response])

            keypair = response

            break
        except botocore.exceptions.ClientError as exc:
            if exc.response['Error']['Code'] == 'InvalidKeyPair.NotFound':
                print('Keypair invalid or inaccessible.')
                continue

            print('{0}'.format(exc))

            continue

    if not keypair:
        print('Aborted by user.')
        sys.exit(1)

    # get values from EC2 metadata
    subnet_id = None
    group_id = None
    vpc_id = None

    if ec2_metadata:
        for _, values in \
                ec2_metadata['network']['interfaces']['macs'].items():
            subnet_id = values['subnet-id']
            group_id = values['security-group-ids']
            vpc_id = values['vpc-id']

            print(colorama.Style.BRIGHT + colorama.Fore.GREEN +
                  'Detected subnet [' + colorama.Style.RESET_ALL + subnet_id +
                  colorama.Style.BRIGHT + colorama.Fore.GREEN + '] (VPC [' +
                  colorama.Style.RESET_ALL + vpc_id + colorama.Style.BRIGHT +
                  colorama.Fore.GREEN + '])')

            print_statement('Detected security group [{}]', group_id)

            break

    # subnet_id
    if not subnet_id:
        subnets = ec2.describe_subnets()

        while not subnet_id:
            response = input('Subnet ID [? for list]: ')
            if not response:
                continue

            if response.startswith('?'):
                for subnet in subnets['Subnets']:
                    name = get_resource_name_from_tag(subnet)

                    buf = colorama.Fore.YELLOW + colorama.Style.BRIGHT + \
                        subnet['SubnetId'] + colorama.Style.RESET_ALL + \
                        colorama.Style.DIM

                    if name:
                        buf += ' ' + name

                    buf += ' ({0}) (VPC ID: {1})'.format(subnet['CidrBlock'],
                                                         subnet['VpcId']) + \
                        colorama.Style.RESET_ALL

                    print('    ' + buf)

                continue

            for subnet in subnets['Subnets']:
                if subnet['SubnetId'] == response:
                    subnet_id = response
                    vpc_id = subnet['VpcId']
                    break
            else:
                error_message('Error: invalid subnet ID')

                continue

            break

    # security group(s)
    while not group_id:
        response = input('Security group ID (? for list): ')
        if not response:
            break

        if response.startswith('?'):
            result = ec2.describe_security_groups(Filters=[{
                'Name': 'vpc-id',
                'Values': [vpc_id]
            }])

            for secgroup in result['SecurityGroups']:
                print('    ' + colorama.Fore.YELLOW + colorama.Style.BRIGHT +
                      secgroup['GroupId'] + ' ' + colorama.Fore.WHITE +
                      colorama.Style.DIM + secgroup['GroupName'] +
                      colorama.Style.RESET_ALL)

            continue

        try:
            result = ec2.describe_security_groups(GroupIds=[response],
                                                  Filters=[{
                                                      'Name': 'vpc-id',
                                                      'Values': [vpc_id]
                                                  }])

            if result['SecurityGroups']:
                group_id = result['SecurityGroups'][0]['GroupId']
                break

        except botocore.exceptions.ClientError:
            pass

        print('Invalid security group ID')

    if not group_id:
        print('Aborted by user.')
        sys.exit(1)

    ami_id = ec2_metadata['ami-id'] if ec2_metadata else None
    while not ami_id:
        response = input('UGE/Tortuga AMI ID: ')
        if not response:
            print('Aborted by user.')

            sys.exit(1)

        try:
            result = ec2.describe_images(ImageIds=[response])
            if result['Images']:
                ami_id = response

                break
        except botocore.exceptions.ClientError as exc:
            if exc.response['Error']['Code'] == 'InvalidAMIID.Malformed':
                print('Malformed AMI ID')

                continue
            elif exc.response['Error']['Code'] == 'InvalidAMIID.NotFound':
                print('AMI not found or inaccessible')

                continue

            print('Invalid AMI ID: {0}'.format(exc))

            continue

        break

    # query default compute instance type
    for instance_type in (DEFAULT_INSTANCE_TYPE, 'm4.large', 'm3.large'):
        if instance_type != DEFAULT_INSTANCE_TYPE:
            print(colorama.Style.BRIGHT + colorama.Fore.YELLOW +
                  'Falling back to [' + colorama.Style.RESET_ALL +
                  instance_type + colorama.Style.BRIGHT +
                  colorama.Fore.YELLOW + ']...')

            sys.stdout.flush()

        print(colorama.Style.BRIGHT + colorama.Fore.GREEN +
              'Attempting to validate instance type [' +
              colorama.Style.RESET_ALL + instance_type +
              colorama.Style.BRIGHT + colorama.Fore.GREEN + ']... ' +
              colorama.Style.RESET_ALL,
              end='')

        sys.stdout.flush()

        # validate user-provided instance type
        result = validate_instance_type(ec2,
                                        instance_type,
                                        ami_id,
                                        subnet_id,
                                        debug=debug)
        if result:
            print(colorama.Style.BRIGHT + colorama.Fore.GREEN + 'done.' +
                  colorama.Style.RESET_ALL)
            break

        print(colorama.Style.BRIGHT + colorama.Fore.RED + 'failed.' +
              colorama.Style.RESET_ALL)
    else:
        # unable to determine valid instance type
        error_message('\nUnable to determine valid instance type')

        sys.exit(1)

    # determine which bootstrap/cloud-init script template to use
    cloud_init_script_template = None
    user_data_script_template = None

    aws_adapter_cfg = os.path.join(ConfigManager().getKitConfigBase(), 'aws',
                                   'adapter.ini')

    tags = ''

    if os.path.exists(aws_adapter_cfg):
        cfg = configparser.ConfigParser()
        cfg.read(aws_adapter_cfg)
        if cfg.has_section('aws'):
            if cfg.has_option('aws', 'cloud_init_script_template'):
                cloud_init_script_template = cfg.get(
                    'aws', 'cloud_init_script_template')
            elif cfg.has_option('aws', 'user_data_script_template'):
                user_data_script_template = cfg.get(
                    'aws', 'user_data_script_template')

            if cfg.has_option('aws', 'tags'):
                tags = cfg.get('aws', 'tags')

    if not user_data_script_template and not cloud_init_script_template:
        user_data_script_template = 'bootstrap.tmpl'

    adapter_cfg = {
        'associate_public_ip_address': 'true',
    }

    if user_data_script_template:
        adapter_cfg['user_data_script_template'] = user_data_script_template
    elif cloud_init_script_template:
        adapter_cfg['cloud_init_script_template'] = cloud_init_script_template

    if access_key and secret_key:
        adapter_cfg['awsaccesskey'] = access_key
        adapter_cfg['awssecretkey'] = secret_key

    # parse tags to determine if 'Name' has been defined
    tag_parser = settings.TagListSetting()
    parsed_tags = tag_parser.dump(tags)
    if 'Name' not in parsed_tags:
        parsed_tags['Name'] = 'Tortuga compute node'
        parsed_tag_list = []
        for k, v in parsed_tags.items():
            parsed_tag_list.append('{}={}'.format(k, v))
        # join the formatted key=value strings, not the dict (joining
        # the dict would yield only its keys)
        tags = ','.join(parsed_tag_list)

    override_adapter_cfg = {
        'keypair': keypair,
        'ami': ami_id,
        'instancetype': instance_type,
        'securitygroup': ','.join(group_id.split('\n')),
        'subnet_id': subnet_id,
        'tags': tags,
        'region': region,
    }

    adapter_cfg.update(override_adapter_cfg)

    _update_resource_adapter_configuration(adapter_cfg, profile)

    print_statement('Resource adapter configuration completed successfully.')
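
The region validation above leans on boto3's bundled endpoint catalogue rather than a network call, so it works before any credentials are configured. The pattern in isolation:

import boto3

# boto3 ships a static endpoint catalogue; no credentials or network
# access are needed for this lookup
session = boto3.session.Session()
valid_regions = session.get_available_regions('ec2')

region = 'us-east-1'
if region not in valid_regions:
    raise SystemExit('invalid AWS region: {}'.format(region))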
Code example #4
File: listen.py Project: tprestegard/tortuga
class WebsocketClient:
    """
    Websocket client class.

    """
    def __init__(self,
                 token: Optional[str] = None,
                 username: Optional[str] = None,
                 password: Optional[str] = None,
                 url: Optional[str] = None,
                 verify: bool = True):
        self._token = token
        self._username = username
        self._password = password
        self._url = url
        self._verify = verify
        self._websocket = None
        self._cm = ConfigManager()

    async def start(self):
        """
        Initializes the websocket and starts the event loop.

        """
        if self._url.startswith('wss:'):
            ssl_context = ssl.SSLContext()
            if self._verify:
                ssl_context.load_verify_locations(self._cm.getCaBundle())
            else:
                ssl_context.check_hostname = False
                ssl_context.verify_mode = ssl.CERT_NONE
        else:
            ssl_context = None

        async with websockets.connect(self._url, ssl=ssl_context) as ws:
            await self.send_receive(ws)

    async def send_receive(self, ws: websockets.WebSocketClientProtocol):
        """
        The main loop that sends/receives data.

        :param ws: the web socket client

        """
        while True:
            msg = await ws.recv()

            data = json.loads(msg)
            pretty_print(data)

            if data['type'] == 'message':
                if data['name'] == 'authentication-required':
                    await self.send_auth(ws)

                if data['name'] == 'authentication-succeeded':
                    await self.send_subscribe(ws)

    async def send_auth(self, ws: websockets.WebSocketClientProtocol):
        """
        Sends an authentication request.

        :param ws: the web socket client

        """
        if self._token:
            data = {
                'action': 'authenticate',
                'method': 'jwt',
                'data': {
                    'token': self._token
                }
            }
        else:
            data = {
                'action': 'authenticate',
                'method': 'password',
                'data': {
                    'username': self._username,
                    'password': self._password
                }
            }

        await ws.send(json.dumps(data))

    async def send_subscribe(self, ws: websockets.WebSocketClientProtocol):
        """
        Sends a subscription request.

        :param ws: the web socket client

        """
        data = {'action': 'subscribe'}

        await ws.send(json.dumps(data))
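
Driving the client just requires an event loop. A minimal sketch (hedged: the URL and token are placeholders, and verify=False is only appropriate for a self-signed test endpoint):

import asyncio

client = WebsocketClient(
    token='<jwt-token>',                      # placeholder
    url='wss://installer.example.com:9443',   # placeholder
    verify=False,  # skip CA verification, e.g. self-signed test cert
)

asyncio.run(client.start())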
Code example #5
class SoftwareProfileManager(TortugaObjectManager):
    # pylint: disable=too-many-public-methods

    BASE_KIT_NAME = 'base'

    def __init__(self):
        super(SoftwareProfileManager, self).__init__()
        self._sp_db_api = SoftwareProfileDbApi()
        self._node_db_api = NodeDbApi()
        self._component_db_api = ComponentDbApi()
        self._global_param_db_api = GlobalParameterDbApi()
        self._kit_db_api = KitDbApi()
        self._config_manager = ConfigManager()
        self._logger = logging.getLogger(SOFTWARE_PROFILE_NAMESPACE)

    def getSoftwareProfileList(self, session: Session, tags=None):
        """Return all of the softwareprofiles with referenced components
        in this softwareprofile
        """

        results = self._sp_db_api.getSoftwareProfileList(session, tags=tags)

        for software_profile_obj in results:
            # load any available software profile metadata
            software_profile_obj.setMetadata(
                self.get_software_profile_metadata(
                    session, software_profile_obj.getName()))

        return results

    def addAdmin(self, session: Session, softwareProfileName, adminUsername):
        """
        Add an admin as an authorized user.

            Returns:
                None
            Throws:
                TortugaException
                AdminNotFound
                SoftwareProfileNotFound
        """
        return self._sp_db_api.addAdmin(session, softwareProfileName,
                                        adminUsername)

    def deleteAdmin(self, session: Session, softwareProfileName,
                    adminUsername):
        """
        Remove an admin as an authorized user.

            Returns:
                None
            Throws:
                TortugaException
                AdminNotFound
                SoftwareProfileNotFound
        """
        return self._sp_db_api.deleteAdmin(session, softwareProfileName,
                                           adminUsername)

    def updateSoftwareProfile(self, session: Session, softwareProfileObject):
        self._logger.debug('Updating software profile: %s' %
                           (softwareProfileObject.getName()))
        #
        # First get the object from the db we are updating
        #
        existing_swp = self.getSoftwareProfileById(
            session, softwareProfileObject.getId())
        #
        # Set parameters that we will not allow updating
        #
        softwareProfileObject.setOsInfo(existing_swp.getOsInfo())
        softwareProfileObject.setOsId(existing_swp.getOsId())
        softwareProfileObject.setType(existing_swp.getType())
        #
        # Do the DB update
        #
        self._sp_db_api.updateSoftwareProfile(session, softwareProfileObject)
        #
        # Get the new version
        #
        new_swp = self.getSoftwareProfileById(session,
                                              softwareProfileObject.getId())
        #
        # If the tags have changed, fire the tags changed event
        #
        if existing_swp.getTags() != new_swp.getTags():
            SoftwareProfileTagsChanged.fire(
                softwareprofile_id=str(new_swp.getId()),
                softwareprofile_name=new_swp.getName(),
                tags=new_swp.getTags(),
                previous_tags=existing_swp.getTags())

    def getSoftwareProfile(
            self,
            session: Session,
            name: str,
            optionDict: Optional[Dict[str, bool]] = None) -> SoftwareProfile:
        """
        Retrieve software profile by name

        """
        software_profile_obj: SoftwareProfile = \
            self._sp_db_api.getSoftwareProfile(
                session, name, optionDict=optionDict)

        # load any available software profile metadata
        software_profile_obj.setMetadata(
            self.get_software_profile_metadata(session, name))

        return software_profile_obj

    def getSoftwareProfileById(
            self,
            session: Session,
            id_: int,
            optionDict: Optional[Dict[str, bool]] = None) -> SoftwareProfile:
        """
        Retrieve software profile by id

        """
        software_profile_obj: SoftwareProfile = \
            self._sp_db_api.getSoftwareProfileById(
                session, id_, optionDict=optionDict)

        # load any available software profile metadata
        software_profile_obj.setMetadata(
            self.get_software_profile_metadata(session,
                                               software_profile_obj.getName()))

        return software_profile_obj

    def _getCoreComponentForOsInfo(self, session: Session, osInfo):
        # Find core component

        baseKit = None

        for baseKit in self._kit_db_api.getKitList(session):
            if baseKit.getName() != self.BASE_KIT_NAME:
                continue

            break
        else:
            raise KitNotFound('Kit [%s] not found.' % (self.BASE_KIT_NAME))

        baseComp = None

        for baseComp in baseKit.getComponentList():
            if baseComp.getName() != 'core':
                continue

            break
        else:
            raise ComponentNotFound('Component [%s] not found in kit [%s]' %
                                    ('core', baseKit.getName()))

        comp = self._component_db_api.getBestMatchComponent(
            session, baseComp.getName(), baseComp.getVersion(), osInfo,
            baseKit.getId())

        comp.setKit(baseKit)

        return comp

    def _getOsInfo(self, session: Session, bOsMediaRequired: bool):
        if not bOsMediaRequired:
            # As a placeholder, use the same OS as the installer

            # Find installer node entry
            node = self._node_db_api.getNode(session,
                                             ConfigManager().getInstaller(),
                                             {'softwareprofile': True})

            return node.getSoftwareProfile().getOsInfo()

        # Use available operating system kit; raise exception if
        # multiple available

        os_kits = self._kit_db_api.getKitList(session, os_kits_only=True)
        if not os_kits:
            raise KitNotFound('No operating system kit installed')

        if len(os_kits) > 1:
            raise KitNotFound(
                'Multiple OS kits defined; use --os option to specify'
                ' operating system')

        kit = self._kit_db_api.getKit(session, os_kits[0].getName(),
                                      os_kits[0].getVersion(), '0')

        components = kit.getComponentList()

        if not components:
            raise ComponentNotFound('Malformed operating system kit [%s]' %
                                    (os_kits))

        osinfo_list = components[0].getOsInfoList()
        if len(osinfo_list) > 1:
            raise ComponentNotFound(
                'Multiple operating system components for kit [%s];'
                ' use --os argument to specify operating system' %
                (os_kits[0]))

        return osinfo_list[0]

    def createSoftwareProfile(self,
                              session: Session,
                              swProfileSpec,
                              settingsDict=None):
        """
        Exceptions:
            ConfigurationError
            NetworkNotFound
            ComponentNotFound
            KitNotFound
            OSError
        """

        if settingsDict is None:
            settingsDict = {}

        bOsMediaRequired = settingsDict.get('bOsMediaRequired', True)
        unmanagedProfile = settingsDict.get('unmanagedProfile', False)

        # Validate software profile name
        validation.validateProfileName(swProfileSpec.getName())

        # Insert default description for software profile
        if swProfileSpec.getDescription() is None:
            swProfileSpec.setDescription('%s Nodes' %
                                         (swProfileSpec.getName()))

        self._logger.debug('Creating software profile [%s]' % (swProfileSpec))

        osInfo = swProfileSpec.getOsInfo() \
            if swProfileSpec.getOsInfo() else self._getOsInfo(
                session, bOsMediaRequired)

        # If we're creating an unmanaged software profile (no
        # DHCP/PXE/kickstart/OS) just create it now and we're done
        if unmanagedProfile:
            self._sp_db_api.addSoftwareProfile(session, swProfileSpec)

        else:
            if bOsMediaRequired and swProfileSpec.getOsInfo():
                try:
                    self._kit_db_api.getKit(
                        session,
                        swProfileSpec.getOsInfo().getName(),
                        swProfileSpec.getOsInfo().getVersion(), '0')
                except KitNotFound:
                    self._logger.error('OS kit for [%s] not found' %
                                       (swProfileSpec.getOsInfo()))

                    raise
            else:
                swProfileSpec.setOsInfo(osInfo)

            # Get component manager for appropriate OS family
            osConfig = osHelper.getOsInfo(osInfo.getName(),
                                          osInfo.getVersion(),
                                          osInfo.getArch())

            osObjFactory = osUtility.getOsObjectFactory(
                osConfig.getOsFamilyInfo().getName())

            # Need to be fancy with components
            spComponents = swProfileSpec.getComponents()
            swProfileSpec.setComponents(TortugaObjectList())

            bFoundOsComponent = False
            bFoundCoreComponent = False
            components = []

            # Iterate over components, adding them to the software profile
            for c in spComponents:
                cobj = self._component_db_api.getBestMatchComponent(
                    session, c.getName(), c.getVersion(), osInfo,
                    c.getKit().getId())

                k = cobj.getKit()

                if k.getIsOs():
                    # This component is a member of the OS kit, set the flag
                    bFoundOsComponent = True
                elif k.getName() == 'base' and c.getName() == 'core':
                    # Found the 'core' component in 'base' kit
                    bFoundCoreComponent = True

                components.append(cobj)

            # If the operating system is undefined for this software
            # profile, use the same OS as the installer.
            if bOsMediaRequired and not bFoundOsComponent:
                # Find OS component
                osCompName = '%s-%s-%s' % (
                    osInfo.getName(), osInfo.getVersion(), osInfo.getArch())

                self._logger.debug('Automatically adding OS component [%s]'
                                   ' (not specified in template)' %
                                   (osCompName))

                try:
                    osComponent = self._component_db_api.getComponent(
                        session, osCompName, osInfo.getVersion(), osInfo,
                        {'kit': True})

                    components.append(osComponent)
                except ComponentNotFound:
                    # Cannot find OS component, don't freak out
                    pass

            # Ensure 'core' component is enabled
            if not bFoundCoreComponent:
                # Attempt to automatically add the core component, only
                # if one exists for this OS

                try:
                    comp = self._getCoreComponentForOsInfo(session, osInfo)

                    self._logger.debug('Automatically adding [core] component'
                                       ' (not specified in template)')

                    components.append(comp)
                except ComponentNotFound:
                    self._logger.warning(
                        'OS [{}] does not have a compatible \'core\''
                        ' component'.format(osInfo))

                # Initialize values for kernel, kernelParams, and initrd
                if not swProfileSpec.getKernel():
                    swProfileSpec.setKernel(
                        osObjFactory.getOsSysManager().getKernel(osInfo))

                if not swProfileSpec.getInitrd():
                    swProfileSpec.setInitrd(
                        osObjFactory.getOsSysManager().getInitrd(osInfo))

            # Add the software profile
            self._sp_db_api.addSoftwareProfile(session, swProfileSpec)

            # Enable components in one fell swoop
            for comp in components:
                self._logger.debug('Enabling component [%s]' %
                                   (comp.getName()))

                if comp.getKit().getIsOs():
                    # Don't use enableComponent() on OS kit
                    self._component_db_api.addComponentToSoftwareProfile(
                        session, comp.getId(), swProfileSpec.getId())

                    continue

                self.enableComponent(session, swProfileSpec.getName(),
                                     comp.getKit().getName(),
                                     comp.getKit().getVersion(),
                                     comp.getKit().getIteration(),
                                     comp.getName(), comp.getVersion())

        #
        # Fire the tags changed event for all creates that have tags
        #
        # Get the latest version from the db in case the create method
        # added some embellishments
        #
        swp = self.getSoftwareProfile(session, swProfileSpec.getName())
        if swp.getTags():
            SoftwareProfileTagsChanged.fire(softwareprofile_id=str(
                swp.getId()),
                                            softwareprofile_name=swp.getName(),
                                            tags=swp.getTags(),
                                            previous_tags={})

    def _getComponent(self, kit, compName, compVersion):
        # pylint: disable=no-self-use

        # Iterate over component list, looking for a match
        comp = None

        for comp in kit.getComponentList():
            if comp.getName() == compName and \
                    comp.getVersion() == compVersion:
                break
        else:
            raise ComponentNotFound("Component [%s-%s] not found in kit [%s]" %
                                    (compName, compVersion, kit))

        return comp

    def _get_kit_by_component(self,
                              session: Session,
                              comp_name,
                              comp_version=None):
        """
        Gets a kit by component name/version.
        :param comp_name:    the name of the component
        :param comp_version: the version of the component

        :raises KitNotFound:
        :raises ComponentNotFound:

        """
        kit_list = self._kit_db_api.getKitList(session)
        kits = [
            kit for kit in kit_list for component in kit.getComponentList()
            if component.getName() == comp_name and (
                comp_version is None or component.getVersion() == comp_version)
        ]
        if not kits:
            raise KitNotFound('Kit containing component [%s] not found' %
                              (comp_name))

        if len(kits) > 1:
            raise ComponentNotFound(
                'Kit name must be specified, multiple kits contain '
                'component: {}'.format(comp_name))

        return kits[0]

    def enableComponent(self,
                        session: Session,
                        software_profile_name: str,
                        kit_name: str,
                        kit_version: str,
                        kit_iteration: str,
                        comp_name: str,
                        comp_version: Optional[str] = None):
        """
        Enable a component on a software profile.

        :param software_profile_name: the name of the software profile
        :param kit_name:              the name of the kit
        :param kit_version:           the version of the kit
        :param kit_iteration:         the iteration of the kit
        :param comp_name:             the name of the component
        :param comp_version:          the version of the component

        :raises KitNotFound:
        :raises SoftwareProfileNotFound:
        :raises ComponentNotFound:

        """
        kit, comp_version = self._get_kit_and_component_version(
            session, kit_name, kit_version, kit_iteration, comp_name,
            comp_version)

        software_profile = self.getSoftwareProfile(session,
                                                   software_profile_name,
                                                   {'os': True})

        if kit.getIsOs():
            best_match_component = self._enable_os_kit_component(
                session, kit, comp_name, comp_version, software_profile)
        else:
            best_match_component = self._enable_kit_component(
                session, kit, comp_name, comp_version, software_profile)

        if not best_match_component:
            self._logger.info('Component not enabled: {}'.format(comp_name))
        else:
            self._logger.info(
                'Enabled component on software profile: {} -> {}'.format(
                    best_match_component, software_profile))

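    # Usage sketch (not from the source): with an SQLAlchemy Session
    # from the application's database layer, a caller can enable the
    # 'core' component without naming the kit version or iteration;
    # _get_kit_and_component_version() below resolves a unique match
    # among the installed kits:
    #
    #     SoftwareProfileManager().enableComponent(
    #         session, 'Compute', 'base', None, None, 'core')
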
    def _get_kit_and_component_version(self,
                                       session: Session,
                                       kit_name,
                                       kit_version,
                                       kit_iteration,
                                       comp_name,
                                       comp_version=None):
        """
        Gets a Kit instance and component version.

        :param kit_name:      the name of the kit
        :param kit_version:   the version of the kit
        :param kit_iteration: the iteration of the kit
        :param comp_name:     the component name
        :param comp_version:  the component version (optional)

        :return: a tuple, consisting of (Kit, component_version)

        """
        kit = None
        if kit_name is None:
            kit = self._get_kit_by_component(session,
                                             comp_name,
                                             comp_version=comp_version)
            #
            # Get component version if required
            #
            if comp_version is None:
                for component in kit.getComponentList():
                    if component.getName() == comp_name:
                        comp_version = component.getVersion()
                        break
        elif kit_version is None or kit_iteration is None:
            kits_found = 0
            for k in self._kit_db_api.getKitList(session):
                if k.getName() == kit_name and \
                        (kit_version is None or
                         k.getVersion() == kit_version) and \
                        (kit_iteration is None or
                         k.getIteration() == kit_iteration):
                    kit = k
                    kits_found += 1

            if kits_found > 1:
                if kit_version is not None:
                    raise KitNotFound('Multiple kits found: {}-{}'.format(
                        kit_name, kit_version))
                else:
                    raise KitNotFound(
                        'Multiple kits found {}'.format(kit_name))
        else:
            kit = self._kit_db_api.getKit(session, kit_name, kit_version,
                                          kit_iteration)

        if kit is None:
            raise KitNotFound('Kit [%s] not found' %
                              (Kit(kit_name, kit_version, kit_iteration)))

        return kit, comp_version

    def _enable_kit_component(self, session: Session, kit, comp_name,
                              comp_version, software_profile):
        """
        Enables a regular kit component on a specific software profile.

        :param kit:              the Kit instance, whose component is being
                                 enabled
        :param comp_name:        the name of the component to enable
        :param comp_version:     the version of the component to enable
        :param software_profile: the software profile on which the component
                                 will be enabled

        :return:                 the Component instance that was enabled

        """
        kit_spec = (kit.getName(), kit.getVersion(), kit.getIteration())

        installer = get_kit_installer(kit_spec)()
        installer.session = session
        comp_installer = installer.get_component_installer(comp_name)

        if comp_installer is None:
            raise ComponentNotFound('Component [%s] not found in kit [%s]' %
                                    (comp_name, kit))

        if not comp_installer.is_enableable(software_profile):
            self._logger.warning('Component cannot be enabled: {}'.format(
                comp_installer.spec))
            return None
        comp_installer.run_action('pre_enable', software_profile.getName())

        best_match_component = self._add_component_to_software_profile(
            session, kit, comp_name, comp_version, software_profile)

        comp_installer.run_action('enable', software_profile.getName())
        comp_installer.run_action('post_enable', software_profile.getName())

        return best_match_component

    def _enable_os_kit_component(self, session: Session, kit, comp_name,
                                 comp_version, software_profile):
        """
        Enables an OS kit component on a specific software profile.

        :param kit:              the OS Kit instance, whose component is being
                                 enabled
        :param comp_name:        the name of the component to enable
        :param comp_version:     the version of the component to enable
        :param software_profile: the software profile on which the component
                                 will be enabled

        :return:                 the Component instance that was enabled

        """
        return self._add_component_to_software_profile(session, kit, comp_name,
                                                       comp_version,
                                                       software_profile)

    def _add_component_to_software_profile(self, session: Session, kit,
                                           comp_name, comp_version,
                                           software_profile):
        """
        Adds a component to a software profile. This is a data-only operation,
        as no pre/post enable actions are called.

        :param kit:              the OS Kit instance, whose component is being
                                 added
        :param comp_name:        the name of the component to add
        :param comp_version:     the version of the component to add
        :param software_profile: the software profile to which the component
                                 will be added

        :return:                 the Component instance that was added

        """
        best_match_component = \
            self._component_db_api.getBestMatchComponent(
                session,
                comp_name, comp_version, software_profile.getOsInfo(),
                kit.getId())

        self._component_db_api.addComponentToSoftwareProfile(
            session, best_match_component.getId(), software_profile.getId())

        return best_match_component

    def disableComponent(self,
                         session: Session,
                         software_profile_name,
                         kit_name,
                         kit_version,
                         kit_iteration,
                         comp_name,
                         comp_version=None):
        # pylint: disable=unused-argument
        """
        Disables a component on a software profile.

        :param software_profile_name: the name of the software profile
        :param kit_name:              the name of the kit
        :param kit_version:           the version of the kit
        :param kit_iteration:         the iteration of the kit
        :param comp_name:             the name of the component
        :param comp_version:          the version of the component

        :raises KitNotFound:
        :raises SoftwareProfileNotFound:
        :raises ComponentNotFound:

        """
        kit, comp_version = self._get_kit_and_component_version(
            session, kit_name, kit_version, kit_iteration, comp_name)

        software_profile = self.getSoftwareProfile(session,
                                                   software_profile_name,
                                                   {'os': True})

        if kit.getIsOs():
            best_match_component = self._disable_os_kit_component(
                session, kit, comp_name, comp_version, software_profile)
        else:
            best_match_component = self._disable_kit_component(
                session, kit, comp_name, comp_version, software_profile)

        self._logger.info(
            'Disabled component on software profile: {} -> {}'.format(
                best_match_component, software_profile))

    def _disable_kit_component(self, session, kit, comp_name, comp_version,
                               software_profile):
        """
        Disables a regular kit component on a specific software profile.

        :param kit:              the Kit instance, whose component is being
                                 disabled
        :param comp_name:        the name of the component to disable
        :param comp_version:     the version of the component to disable
        :param software_profile: the software profile on which the component
                                 will be disabled

        :return:                 the Component instance that was disabled

        """
        kit_spec = (kit.getName(), kit.getVersion(), kit.getIteration())

        installer = get_kit_installer(kit_spec)()
        installer.session = session

        comp_installer = installer.get_component_installer(comp_name)

        if comp_installer is None:
            raise ComponentNotFound('Component [%s] not found in kit [%s]' %
                                    (comp_name, kit))

        comp_installer.run_action('pre_disable', software_profile.getName())
        comp_installer.run_action('disable', software_profile.getName())

        best_match_component = \
            self._remove_component_from_software_profile(
                session, kit, comp_name, comp_version, software_profile)

        comp_installer.run_action('post_disable', software_profile.getName())

        return best_match_component

    def _disable_os_kit_component(self, session, kit, comp_name, comp_version,
                                  software_profile):
        """
        Disables an OS kit component on a specific software profile.

        :param kit:              the OS Kit instance, whose component is being
                                 disabled
        :param comp_name:        the name of the component to disable
        :param comp_version:     the version of the component to disable
        :param software_profile: the software profile on which the component
                                 will be disabled

        :return:                 the Component instance that was disabled

        """
        return self._remove_component_from_software_profile(
            session, kit, comp_name, comp_version, software_profile)

    def _remove_component_from_software_profile(self, session: Session, kit,
                                                comp_name, comp_version,
                                                software_profile):
        """
        Removes a component from a software profile. This is a data-only
        operation; no pre/post disable actions are called.

        :param kit:              the Kit instance whose component is being
                                 removed
        :param comp_name:        the name of the component to remove
        :param comp_version:     the version of the component to remove
        :param software_profile: the software profile from which the component
                                 will be removed

        :return:                 the Component instance that was removed

        """
        best_match_component = self._component_db_api.getBestMatchComponent(
            session, comp_name, comp_version, software_profile.getOsInfo(),
            kit.getId())

        self._component_db_api.deleteComponentFromSoftwareProfile(
            session, best_match_component.getId(), software_profile.getId())

        return best_match_component

    def deleteSoftwareProfile(self, session: Session, name):
        """
        Delete software profile by name

        Raises:
            SoftwareProfileNotFound
        """

        self._sp_db_api.deleteSoftwareProfile(session, name)

        # Remove all flags for software profile
        swProfileFlagPath = os.path.join(self._config_manager.getRoot(),
                                         'var/run/actions/%s' % (name))
        if os.path.exists(swProfileFlagPath):
            shutil.rmtree(swProfileFlagPath)

        self._logger.info('Deleted software profile [%s]' % (name))

    def getNodeList(self, session: Session, softwareProfileName):
        return self._sp_db_api.getNodeList(session, softwareProfileName)

    def getEnabledComponentList(self, session: Session, name):
        """ Get the list of enabled components """
        return self._sp_db_api.getEnabledComponentList(session, name)

    def getPartitionList(self, session: Session, softwareProfileName):
        """ Get list of partitions. """
        return self._sp_db_api.getPartitionList(session, softwareProfileName)

    def addUsableHardwareProfileToSoftwareProfile(
            self, session: Session, hardwareProfileName: str,
            softwareProfileName: str) -> None:
        """
        Map software profile to hardware profile
        """

        self._logger.info(
            'Mapping hardware profile [%s] to software profile [%s]',
            hardwareProfileName, softwareProfileName)

        self._sp_db_api.addUsableHardwareProfileToSoftwareProfile(
            session, hardwareProfileName, softwareProfileName)

    def deleteUsableHardwareProfileFromSoftwareProfile(self, session: Session,
                                                       hardwareProfileName,
                                                       softwareProfileName):
        return self._sp_db_api.deleteUsableHardwareProfileFromSoftwareProfile(
            session, hardwareProfileName, softwareProfileName)

    def copySoftwareProfile(self, session: Session, srcSoftwareProfileName,
                            dstSoftwareProfileName):
        validation.validateProfileName(dstSoftwareProfileName)
        self._logger.info('Copying software profile [%s] to [%s]',
                          srcSoftwareProfileName, dstSoftwareProfileName)
        self._sp_db_api.copySoftwareProfile(session, srcSoftwareProfileName,
                                            dstSoftwareProfileName)
        #
        # Fire the tags changed event for all copies that have tags
        #
        swp = self.getSoftwareProfile(session, dstSoftwareProfileName)
        if swp.getTags():
            SoftwareProfileTagsChanged.fire(
                softwareprofile_id=str(swp.getId()),
                softwareprofile_name=swp.getName(),
                tags=swp.getTags(),
                previous_tags={})

    def getUsableNodes(self, session: Session, softwareProfileName):
        return self._sp_db_api.getUsableNodes(session, softwareProfileName)

    def get_software_profile_metadata(self, session: Session,
                                      name: str) -> Dict[str, str]:
        """
        Call action_get_metadata() method for all kits
        """

        self._logger.debug('Retrieving metadata for software profile [%s]',
                           name)

        metadata: Dict[str, str] = {}

        for kit in self._kit_db_api.getKitList(session):
            if kit.getIsOs():
                # ignore OS kits
                continue

            kit_installer = get_kit_installer(
                (kit.getName(), kit.getVersion(), kit.getIteration()))()
            kit_installer.session = session

            # we are only interested in software profile metadata
            item = kit_installer.action_get_metadata(
                software_profile_name=name)

            if item:
                metadata.update(item)

        return metadata
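
The enable path above runs pre_enable, adds the component in the database, then runs enable and post_enable; the disable path mirrors it, and OS kit components skip the installer hooks in both directions. A minimal usage sketch follows; the session handling is an assumption, and enableComponent is assumed to mirror the disableComponent signature shown above.

# Hypothetical usage sketch -- not part of the original listing.
from sqlalchemy.orm import Session

def toggle_dhcpd(manager, session: Session, enable: bool) -> None:
    # Enable or disable the 'dhcpd' component of kit base-7.0.1-0 on the
    # 'Compute' software profile; pre/post actions fire only for non-OS
    # kits. The kit spec and profile name are illustrative.
    if enable:
        manager.enableComponent(
            session, 'Compute', 'base', '7.0.1', '0', 'dhcpd')
    else:
        manager.disableComponent(
            session, 'Compute', 'base', '7.0.1', '0', 'dhcpd')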
Code example #6
class ComponentInstaller(ComponentInstallerBase):
    """
    Tortuga DHCP component.

    """
    name = 'dhcpd'
    version = '7.0.1'
    os_list = [
        {
            'family': 'rhel',
            'version': '6',
            'arch': 'x86_64'
        },
        {
            'family': 'rhel',
            'version': '7',
            'arch': 'x86_64'
        },
    ]
    installer_only = True

    def __init__(self, kit):
        """
        Initialise the parent class and set up the DHCP provider,
        manager, and configuration.
        """
        super().__init__(kit)

        self._provider = DhcpdDhcpProvider(self)
        self._manager = self._get_os_dhcpd_manager('dhcpd')
        self._config = ConfigManager()

    def _get_os_dhcpd_manager(self, name):
        """
        Get dhcpd manager for the appropriate os.

        :param name: the name of the dhcpd manager to get
        :returns:    the dhcpd manager instance

        """
        dir_name = '{}/util'.format(self.kit_installer.kit_path)
        dhcpd_manager = \
            getOsObjectFactory().getOsKitApplicationManager(name, dir_name)
        return dhcpd_manager

    def _get_provisioning_networks(self):
        """
        Get provisioning networks.

        :returns: generator of provisioning networks
        """
        for network in NetworksDbHandler().getNetworkList(self.session):
            if network.type == 'provision':
                yield network

    def _get_provisioning_nics(self, node):
        """
        Get provisioning nics.

        :param node: Node object
        :returns: generator of provisioning NICs
        """
        for nic in node.getNics():
            if nic.getNetwork().getType() == 'provision':
                yield nic

    def _get_provisioning_nics_ip(self, node):
        """
        Get provisioning nics IP addresses.

        :param node: Node object
        :returns: generator of IPv4Address objects
        """
        for nic in self._get_provisioning_nics(node):
            yield ipaddress.IPv4Address(nic.getIp())

    @staticmethod
    def _get_local_nics(nics):
        """
        Get NICs that are bootable and have a MAC address.

        :returns: generator of NICs
        """
        for nic in nics:
            if nic.boot and nic.mac:
                yield nic

    def _get_installer_ip(self, network_id):
        """
        Return IP address of provisioning interface on installer

        :raises NicNotFound:

        """

        installer_node = NodeApi().getInstallerNode(self.session)

        prov_nics = self._get_provisioning_nics(installer_node)
        for prov_nic in prov_nics:
            if prov_nic.getNetwork().getId() == network_id:
                return ipaddress.IPv4Address(prov_nic.getIp())
        raise NicNotFound(
            'Network has no corresponding provisioning NIC on installer')

    def _dhcp_subnets(self):
        """
        Build the DHCP subnet dictionary.

        :returns: dict mapping IPv4Network subnet addresses to subnet
                  configuration dicts
        """
        subnets = {}

        for network in self._get_provisioning_networks():
            subnet = {'nodes': []}
            installer_ip = self._get_installer_ip(network.id)
            subnet['installerIp'] = installer_ip

            if not network.gateway:
                logger.info(
                    '[dhcpd] Gateway not defined for network [{}/{}], using'
                    ' IP [{}]'.format(network.address, network.netmask,
                                      installer_ip))

                subnet['gateway'] = installer_ip
            else:
                subnet['gateway'] = network.gateway

            for nic in self._get_local_nics(network.nics):
                node = nic.node
                if node.hardwareprofile.location != 'local' \
                        or node.state == 'Deleted' \
                        or node.name == self._config.getInstaller():
                    continue

                # Build a static DHCP host entry; use a separate name so
                # the node object itself is not shadowed.
                node_entry = {
                    'ip': nic.ip,
                    'mac': nic.mac,
                    'fqdn': node.name,
                    'hostname': node.name.split('.', 1)[0],
                    'unmanaged': False
                }

                subnet['nodes'].append(node_entry)

            subnet_address = ipaddress.IPv4Network('{}/{}'.format(
                network.address, network.netmask))

            subnets[subnet_address] = subnet

        return subnets

    @property
    def _get_kit_settings_dictionary(self):
        """
        :returns: dict of kit settings read from tortuga.ini
        """
        settings = {}

        config = configparser.ConfigParser()
        config.read(
            os.path.join(self._config.getKitConfigBase(), 'tortuga.ini'))

        if config.has_section('tortuga_kit_base'):
            if config.has_option('tortuga_kit_base', 'disable_services'):
                settings['disable_services'] = \
                    config.get('tortuga_kit_base', 'disable_services') \
                    .split(' ')

        return settings

    def _configure(self, softwareProfileName, fd, *args, **kwargs):
        """
        Shim for unused arguments.

        :param softwareProfileName:
        :param fd:
        :param *args:
        :param **kwargs:
        :returns: None
        """
        self.action_configure(softwareProfileName, *args, **kwargs)

    def action_configure(self, _, *args, **kwargs):
        """
        Configure.

        :param _: Unused
        :param *args: Unused
        :param **kwargs: Unused
        :returns: None
        """

        try:
            result = GlobalParameterDbApi().getParameter(
                self.session, 'DHCPLeaseTime')

            dhcp_lease_time = int(result.getValue())
        except ParameterNotFound:
            dhcp_lease_time = 2400

        try:
            result = GlobalParameterDbApi().getParameter(
                self.session, 'DNSZone')

            dns_zone = result.getValue()
        except ParameterNotFound:
            dns_zone = ''

        installer_node = NodeApi().getInstallerNode(self.session)

        self._manager.configure(dhcp_lease_time,
                                dns_zone,
                                self._get_provisioning_nics_ip(installer_node),
                                self._dhcp_subnets(),
                                installerNode=installer_node,
                                bUpdateSysconfig=kwargs.get(
                                    'bUpdateSysconfig', True),
                                kit_settings=self._get_kit_settings_dictionary)

    def action_post_install(self, *args, **kwargs):
        """
        Triggered post install.

        :param *args: List Objects
        :param **kwargs: Dictionary Objects
        :returns: None
        """
        self._provider.write()
        self.action_configure(None, args, kwargs, bUpdateSysconfig=True)

    def action_add_host(self, hardware_profile_name, software_profile_name,
                        nodes, *args, **kwargs):
        """
        Triggered at add host.

        :returns: None
        """
        self.action_configure(software_profile_name,
                              None,
                              args,
                              kwargs,
                              bUpdateSysconfig=False)

    def action_delete_host(self, hardware_profile_name, software_profile_name,
                           nodes, *args, **kwargs):
        """
        Triggered at delete host.

        :returns: None
        """
        self.action_configure(software_profile_name,
                              None,
                              args,
                              kwargs,
                              bUpdateSysconfig=False)
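
For reference, the mapping that _dhcp_subnets() hands to the manager's configure() call has the shape sketched below; the keys come straight from the code above, while the concrete addresses are invented for illustration.

import ipaddress

# Illustrative _dhcp_subnets() return value -- the values are made up.
subnets = {
    ipaddress.IPv4Network('10.0.0.0/24'): {
        'installerIp': ipaddress.IPv4Address('10.0.0.1'),
        # The gateway falls back to the installer IP when the network
        # defines none.
        'gateway': ipaddress.IPv4Address('10.0.0.1'),
        'nodes': [
            {
                'ip': '10.0.0.10',
                'mac': '52:54:00:12:34:56',
                'fqdn': 'compute-01.cluster.local',
                'hostname': 'compute-01',
                'unmanaged': False,
            },
        ],
    },
}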
Code example #7
class SyncManager(TortugaObjectManager):
    """Class for cluster sync management"""

    __instanceLock = threading.RLock()

    # update delay increase (seconds)
    CLUSTER_UPDATE_DELAY_INCREASE = 30

    # after this limit is reached, warning will be logged
    CLUSTER_UPDATE_WARNING_LIMIT = 10

    def __init__(self):
        super(SyncManager, self).__init__()

        self._isUpdateScheduled = False
        self._isUpdateRunning = False
        self._cm = ConfigManager()
        self._logger = logging.getLogger(SYNC_NAMESPACE)

    def __runClusterUpdate(self, opts=None):
        """ Run cluster update. """
        opts = opts or {}  # avoid a shared mutable default argument

        self._logger.debug('Update timer running, opts={}'.format(opts))

        updateCmd = os.path.join(self._cm.getBinDir(), 'run_cluster_update.sh')

        delay = 0
        updateCnt = 0
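        # Linear backoff: the first pass runs immediately; each additional
        # pass on the same timer waits CLUSTER_UPDATE_DELAY_INCREASE
        # seconds longer than the previous one.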
        while self.__resetIsUpdateScheduled():
            self._isUpdateRunning = True

            self._logger.debug('New cluster update delay: %s seconds' %
                               (delay))

            time.sleep(delay)
            delay += SyncManager.CLUSTER_UPDATE_DELAY_INCREASE

            # Log warning if timer has been running for too many times.
            updateCnt += 1
            self._logger.debug('Cluster update timer count: %s' % (updateCnt))

            if updateCnt > SyncManager.CLUSTER_UPDATE_WARNING_LIMIT:
                self._logger.warning(
                    'Cluster updated more than %s times using the same'
                    ' timer (possible configuration problem)' %
                    (SyncManager.CLUSTER_UPDATE_WARNING_LIMIT))

            self._logger.debug('Starting cluster update using: %s' %
                               (updateCmd))

            # Since we might sleep for a while, we need to
            # reset update flag just before we run update to avoid
            # unnecessary syncs.

            self.__resetIsUpdateScheduled()

            # Tag-update payloads reach the update script through Facter
            # environment variables so the Puppet run can consume them.
            if 'node' in opts:
                node_update = opts['node']
                env = {
                    **os.environ, 'FACTER_node_tags_update':
                    json.dumps(node_update)
                }
                self._logger.debug('FACTER_node_tags_update={}'.format(
                    env['FACTER_node_tags_update']))
                p = TortugaSubprocess(updateCmd, env=env)
            elif 'software_profile' in opts:
                sp_update = opts['software_profile']
                env = {
                    **os.environ, 'FACTER_softwareprofile_tags_update':
                    json.dumps(sp_update)
                }
                self._logger.debug(
                    'FACTER_softwareprofile_tags_update={}'.format(
                        env['FACTER_softwareprofile_tags_update']))
                p = TortugaSubprocess(updateCmd, env=env)
            else:
                p = TortugaSubprocess(updateCmd)

            try:
                p.run()
                self._logger.debug('Cluster update successful')
                self._logger.debug('stdout: {}'.format(
                    p.getStdOut().decode().rstrip()))
                self._logger.debug('stderr: {}'.format(
                    p.getStdErr().decode().rstrip()))
            except CommandFailed:
                if p.getExitStatus() == tortugaStatus.\
                        TORTUGA_ANOTHER_INSTANCE_OWNS_LOCK_ERROR:
                    self._logger.debug(
                        'Another cluster update is already running, will'
                        ' try to reschedule it')

                    self._isUpdateRunning = False

                    self.scheduleClusterUpdate(
                        updateReason='another update already running',
                        delay=60,
                        opts=opts)

                    break
                else:
                    self._logger.error(
                        'Update command "%s" failed (exit status: %s):'
                        ' %s' % (updateCmd, p.getExitStatus(), p.getStdErr()))

            self._logger.debug('Done with cluster update')

        self._isUpdateRunning = False

        self._logger.debug('Update timer exiting')

    def __resetIsUpdateScheduled(self):
        """ Reset cluster update flag, return old flag value. """
        SyncManager.__instanceLock.acquire()
        try:
            flag = self._isUpdateScheduled
            self._isUpdateScheduled = False
            return flag
        finally:
            SyncManager.__instanceLock.release()

    def scheduleClusterUpdate(self, updateReason=None, delay=5, opts=None):
        """ Schedule cluster update. """
        opts = opts or {}  # avoid a shared mutable default argument
        SyncManager.__instanceLock.acquire()
        try:
            if self._isUpdateScheduled:
                # Already scheduled.
                return

            # Start update timer if needed.
            self._isUpdateScheduled = True
            if not self._isUpdateRunning:
                self._logger.debug('Scheduling cluster update in %s seconds,'
                                   ' reason: %s, opts: %s' %
                                   (delay, updateReason, opts))

                t = threading.Timer(delay,
                                    self.__runClusterUpdate,
                                    kwargs=dict(opts=opts))

                t.start()
            else:
                self._logger.debug(
                    'Will not schedule new update timer while the old'
                    ' timer is running')
        finally:
            SyncManager.__instanceLock.release()

    def getUpdateStatus(self):  # pylint: disable=no-self-use
        """ Check cluster update flag. """
        return RunManager().checkLock('cfmsync')
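
A sketch of how a caller might hand tag changes to the sync layer; the opts payload shape is an assumption inferred from the FACTER_node_tags_update handling above, not confirmed elsewhere in the listing.

# Hypothetical caller -- not part of the original listing.
sync_manager = SyncManager()

sync_manager.scheduleClusterUpdate(
    updateReason='node tags changed',
    opts={
        'node': {
            # Assumed payload shape; it is serialized verbatim into the
            # FACTER_node_tags_update environment variable.
            'name': 'compute-01.cluster.local',
            'tags': {'role': 'worker'},
            'previous_tags': {},
        },
    },
)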
Code example #8
class RuleEngine(RuleEngineInterface):
    def __init__(self, minTriggerInterval=60):
        self._cm = ConfigManager()
        self._lock = threading.RLock()
        self._processingLock = threading.RLock()
        self._minTriggerInterval = minTriggerInterval
        self._ruleDict = {}
        self._disabledRuleDict = {}  # Used for rules in the disabled state
        self._eventRuleDict = {}  # used for "event" type monitoring
        self._pollTimerDict = {}  # used for "poll" monitoring
        self._receiveRuleDict = {}  # used for "receive" type monitoring
        self._receiveQ = queue.Queue(0)  # infinite size FIFO queue
        self._rulesDir = self._cm.getRulesDir()
        self._logger = logging.getLogger('tortuga.rule.%s' %
                                         self.__class__.__name__)
        self._logger.addHandler(logging.NullHandler())
        self.__initRules()

        # the following are used for "receive" type monitoring
        self._processingTimer = None
        self._processingTimerRunning = False

    def __getRuleDirName(self, applicationName):
        return '%s/%s' % (self._rulesDir, applicationName)

    def __getRuleFileName(self, applicationName, ruleName):
        return '%s/%s.xml' % (self.__getRuleDirName(applicationName), ruleName)

    def __readRuleFile(self, applicationName, ruleName):
        with open(self.__getRuleFileName(applicationName,
                                         ruleName)) as ruleFile:
            content = ruleFile.read()

        return content

    def __writeRuleFile(self, rule):
        ruleDir = self.__getRuleDirName(rule.getApplicationName())

        if not os.path.exists(ruleDir):
            os.makedirs(ruleDir)

        with open(
                self.__getRuleFileName(rule.getApplicationName(),
                                       rule.getName()), 'w') as ruleFile:
            ruleFile.write('%s\n' % (rule.getXmlRep()))

    def __getRuleId(self, applicationName, ruleName):
        # pylint: disable=no-self-use
        return '%s/%s' % (applicationName, ruleName)

    def __checkRuleExists(self, ruleId):
        if ruleId not in self._ruleDict:
            raise RuleNotFound('Rule [%s] not found.' % ruleId)

    def __checkRuleDoesNotExist(self, ruleId):
        """
        Raises:
            RuleAlreadyExists
        """

        if ruleId in self._ruleDict:
            raise RuleAlreadyExists('Rule [%s] already exists.' % ruleId)

    def __checkRuleEnabled(self, ruleId):
        """
        Raises:
            RuleNotFound
            RuleDisabled
        """

        self.__checkRuleExists(ruleId)

        if ruleId in self._disabledRuleDict:
            raise RuleDisabled('Rule [%s] is disabled.' % ruleId)

    def __initRules(self):
        """ Initialize all known rules. """

        self._logger.debug('[%s] Initializing known rules' %
                           (self.__class__.__name__))

        fileList = osUtility.findFiles(self._rulesDir)

        parser = RuleXmlParser()

        for f in fileList:
            try:
                rule = parser.parse(f)
                ruleId = self.__getRuleId(rule.getApplicationName(),
                                          rule.getName())

                self._logger.debug('[%s] Found rule [%s]' %
                                   (self.__class__.__name__, ruleId))

                self.addRule(rule)
            except Exception as ex:
                self._logger.error('[%s] Invalid rule file [%s] (Error: %s)' %
                                   (self.__class__.__name__, f, ex))

    def __evaluateNumbers(self, metric, operator, triggerValue):
        # NB: exec() cannot rebind a local variable in Python 3, so the
        # comparison is evaluated with eval() instead.
        try:
            triggerString = '%s %s %s' % (metric, operator, triggerValue)

            self._logger.debug('[%s] Evaluating as numbers: %s' %
                               (self.__class__.__name__, triggerString))

            return eval(triggerString)  # pylint: disable=eval-used
        except Exception as ex:
            self._logger.debug('[%s] Could not evaluate as numbers: %s' %
                               (self.__class__.__name__, ex))

        return None

    def __evaluateStrings(self, metric, operator, triggerValue):
        # See __evaluateNumbers(): eval() is used because exec() cannot
        # rebind a local variable in Python 3.
        try:
            triggerString = '"%s" %s "%s"' % (metric, operator, triggerValue)

            self._logger.debug('[%s] Evaluating as strings: %s' %
                               (self.__class__.__name__, triggerString))

            return eval(triggerString)  # pylint: disable=eval-used
        except Exception as ex:
            self._logger.debug('[%s] Could not evaluate as strings: %s' %
                               (self.__class__.__name__, ex))

        return None

    def __parseMonitorData(self, monitorData=''):
        if not monitorData:
            return None

        self._logger.debug('[%s] Parsing data: %s' %
                           (self.__class__.__name__, monitorData))

        try:
            return libxml2.parseDoc(monitorData)
        except Exception as ex:
            self._logger.error('[%s] Could not parse data: %s' %
                               (self.__class__.__name__, ex))

        return None

    def __evaluateConditions(self,
                             rule,
                             monitorXmlDoc=None,
                             xPathReplacementDict=None):
        # Return True if all rule conditions were satisfied.
        triggerAction = False

        try:
            if monitorXmlDoc is not None:
                triggerAction = True
                for condition in rule.getConditionList():
                    self._logger.debug('[%s] Evaluating: [%s]' %
                                       (self.__class__.__name__, condition))

                    metricXPath = condition.getMetricXPath()

                    metric = self.__replaceXPathVariables(
                        metricXPath, xPathReplacementDict or {})

                    if metric == metricXPath:
                        # No replacement was done, try to evaluate xpath.
                        metric = monitorXmlDoc.xpathEval('%s' % metricXPath)

                    self._logger.debug('[%s] Got metric: [%s]' %
                                       (self.__class__.__name__, metric))

                    if metric == "" or metric == "nan":
                        self._logger.debug(
                            '[%s] Metric is not defined, will not trigger'
                            ' action' % (self.__class__.__name__))

                        triggerAction = False

                        break

                    operator = condition.getEvaluationOperator()

                    triggerValue = self.__replaceXPathVariables(
                        condition.getTriggerValue(), xPathReplacementDict
                        or {})

                    trigger = self.__evaluateNumbers(metric, operator,
                                                     triggerValue)

                    if trigger is None:
                        trigger = self.__evaluateStrings(
                            metric, operator, triggerValue)

                    self._logger.debug('[%s] Evaluation result: [%s]' %
                                       (self.__class__.__name__, trigger))

                    if not trigger:
                        triggerAction = False
                        break
            else:
                self._logger.debug(
                    '[%s] No monitor xml doc, will not trigger action' %
                    (self.__class__.__name__))
        except Exception as ex:
            self._logger.error('[%s] Could not evaluate data: %s' %
                               (self.__class__.__name__, ex))

            self._logger.debug('[%s] Will not trigger action' %
                               (self.__class__.__name__))

        self._logger.debug('[%s] Returning trigger action flag: [%s]' %
                           (self.__class__.__name__, triggerAction))

        return triggerAction

    def __evaluateXPathVariables(self, xmlDoc, xPathVariableList):
        resultDict = {}

        if not xmlDoc:
            return resultDict

        self._logger.debug('[%s] xPath variable list: %s' %
                           (self.__class__.__name__, xPathVariableList))

        for v in xPathVariableList:
            name = v.getName()

            value = ''

            try:
                self._logger.debug(
                    '[%s] Evaluating xPath variable %s: %s' %
                    (self.__class__.__name__, name, v.getXPath()))

                value = xmlDoc.xpathEval('%s' % v.getXPath())
            except Exception as ex:
                self._logger.error(
                    '[%s] Could not evaluate xPath variable [%s]: %s' %
                    (self.__class__.__name__, name, ex))

                self._logger.debug('[%s] Will replace it with empty string' %
                                   (self.__class__.__name__))

            resultDict[name] = value

        self._logger.debug('[%s] XPath variable replacement dictionary: %s' %
                           (self.__class__.__name__, resultDict))

        return resultDict

    def __replaceXPathVariables(self, inputString, xPathReplacementDict):
        # pylint: disable=no-self-use

        outputString = inputString

        #self._logger.debug('Original string: %s' % inputString)

        for key in xPathReplacementDict:
            #self._logger.debug('Replacing: %s' % key)

            outputString = outputString.replace(
                key, '%s' % xPathReplacementDict[key])

        #self._logger.debug('New string: %s' % outputString)

        return outputString

    def __poll(self, rule):
        ruleId = self.__getRuleId(rule.getApplicationName(), rule.getName())

        self._logger.debug('[%s] Begin poll timer for [%s]' %
                           (self.__class__.__name__, ruleId))

        if not self.hasRule(ruleId):
            self._logger.debug('[%s] Timer execution cancelled for [%s]' %
                               (self.__class__.__name__, ruleId))

            return

        rule.ruleInvoked()

        self._logger.debug('[%s] Timer execution started for [%s]' %
                           (self.__class__.__name__, ruleId))

        appMonitor = rule.getApplicationMonitor()

        queryCmd = appMonitor.getQueryCommand()

        self._logger.debug('[%s] Query command: %s' %
                           (self.__class__.__name__, queryCmd))

        actionCmd = appMonitor.getActionCommand()

        self._logger.debug('[%s] Action command: %s' %
                           (self.__class__.__name__, actionCmd))

        xPathReplacementDict = {}

        try:
            invokeAction = True
            queryStdOut = None

            if queryCmd:
                self._logger.debug('[%s] About to invoke: [%s]' %
                                   (self.__class__.__name__, queryCmd))

                try:
                    p = tortugaSubprocess.executeCommand(
                        'source %s/tortuga.sh && ' % (self._cm.getEtcDir()) +
                        queryCmd)

                    queryStdOut = p.getStdOut()

                    appMonitor.queryInvocationSucceeded()
                except Exception as ex:
                    appMonitor.queryInvocationFailed()
                    raise

                monitorXmlDoc = self.__parseMonitorData(queryStdOut)

                xPathReplacementDict = self.__evaluateXPathVariables(
                    monitorXmlDoc, rule.getXPathVariableList())

                invokeAction = self.__evaluateConditions(
                    rule, monitorXmlDoc, xPathReplacementDict)

            if invokeAction:
                try:
                    actionCmd = self.__replaceXPathVariables(
                        actionCmd, xPathReplacementDict)

                    self._logger.debug('[%s] About to invoke: [%s]' %
                                       (self.__class__.__name__, actionCmd))

                    p = tortugaSubprocess.executeCommand(
                        'source %s/tortuga.sh && ' % (self._cm.getEtcDir()) +
                        actionCmd)

                    appMonitor.actionInvocationSucceeded()

                    self._logger.debug('[%s] Done with command: [%s]' %
                                       (self.__class__.__name__, actionCmd))
                except Exception as ex:
                    appMonitor.actionInvocationFailed()
                    raise
            else:
                self._logger.debug('[%s] Will skip action: [%s]' %
                                   (self.__class__.__name__, actionCmd))
        except TortugaException as ex:
            self._logger.error('[%s] %s' % (self.__class__.__name__, ex))

        scheduleTimer = True

        if self.hasRule(ruleId):
            # Check if we need to stop invoking this rule.
            maxActionInvocations = appMonitor.getMaxActionInvocations()

            successfulActionInvocations = \
                appMonitor.getSuccessfulActionInvocations()

            if maxActionInvocations:
                if int(maxActionInvocations) <= successfulActionInvocations:
                    # Rule must be disabled.
                    self._logger.debug(
                        '[%s] Max. number of successful invocations (%s)'
                        ' reached for rule [%s]' %
                        (self.__class__.__name__, maxActionInvocations,
                         ruleId))

                    scheduleTimer = False
                    self.disableRule(rule.getApplicationName(), rule.getName())
        else:
            # Rule is already deleted.
            scheduleTimer = False

        if scheduleTimer:
            pollPeriod = float(appMonitor.getPollPeriod())

            # Make sure we do not fire too often.
            lastSuccessfulActionTime = \
                appMonitor.getLastSuccessfulActionInvocationTime()

            if lastSuccessfulActionTime:
                now = time.time()

                possibleNewSuccessfulActionTime = \
                    now + pollPeriod - lastSuccessfulActionTime

                if possibleNewSuccessfulActionTime < self._minTriggerInterval:
                    pollPeriod = self._minTriggerInterval

                    self._logger.debug(
                        '[%s] Increasing poll period to [%s] for'
                        ' rule [%s]' %
                        (self.__class__.__name__, pollPeriod, ruleId))

            self._logger.debug('[%s] Scheduling new timer for rule [%s] in'
                               ' [%s] seconds' %
                               (self.__class__.__name__, ruleId, pollPeriod))

            t = threading.Timer(pollPeriod, self.__poll, args=[rule])

            t.daemon = True

            self.__runPollTimer(ruleId, t)
        else:
            self._logger.debug(
                '[%s] Will not schedule new timer for rule [%s]' %
                (self.__class__.__name__, rule))

    def __runPollTimer(self, ruleId, pollTimer):
        self._pollTimerDict[ruleId] = pollTimer

        self._logger.debug('[%s] Starting poll timer for [%s]' %
                           (self.__class__.__name__, ruleId))

        pollTimer.start()

    def __cancelPollTimer(self, ruleId):
        if ruleId not in self._pollTimerDict:
            self._logger.debug('[%s] No poll timer for [%s]' %
                               (self.__class__.__name__, ruleId))

            return

        pollTimer = self._pollTimerDict[ruleId]

        self._logger.debug('[%s] Stopping poll timer for [%s]' %
                           (self.__class__.__name__, ruleId))

        pollTimer.cancel()

        del self._pollTimerDict[ruleId]

    def __process(self):
        self._logger.debug('[%s] Begin processing timer' %
                           (self.__class__.__name__))

        while True:
            qSize = self._receiveQ.qsize()

            self._logger.debug('[%s] Current receive Q size: %s' %
                               (self.__class__.__name__, qSize))

            if qSize == 0:
                break

            applicationName, applicationData = self._receiveQ.get()

            self._logger.debug('[%s] Processing data for [%s]' %
                               (self.__class__.__name__, applicationName))

            monitorXmlDoc = self.__parseMonitorData(applicationData)

            # Iterate over a snapshot of the keys: disableRule() may remove
            # entries from _receiveRuleDict during processing, and mutating
            # a dict while iterating it raises RuntimeError in Python 3.
            for ruleId in list(self._receiveRuleDict.keys()):
                rule = self._receiveRuleDict.get(ruleId)

                # Rule might have been cancelled before we use it.
                if not rule:
                    continue

                # Check if this is appropriate for the data.
                if rule.getApplicationName() != applicationName:
                    continue

                self._logger.debug('[%s] Processing data using rule [%s]' %
                                   (self.__class__.__name__, ruleId))

                rule.ruleInvoked()

                appMonitor = rule.getApplicationMonitor()

                actionCmd = appMonitor.getActionCommand()

                self._logger.debug('[%s] Action command: [%s]' %
                                   (self.__class__.__name__, actionCmd))

                try:
                    xPathReplacementDict = self.__evaluateXPathVariables(
                        monitorXmlDoc, rule.getXPathVariableList())

                    invokeAction = self.__evaluateConditions(
                        rule, monitorXmlDoc, xPathReplacementDict)

                    if invokeAction:
                        try:
                            actionCmd = self.__replaceXPathVariables(
                                actionCmd, xPathReplacementDict)

                            self._logger.debug(
                                '[%s] About to invoke: [%s]' %
                                (self.__class__.__name__, actionCmd))

                            tortugaSubprocess.executeCommand(
                                'source %s/tortuga.sh && ' %
                                (self._cm.getEtcDir()) + actionCmd)

                            appMonitor.actionInvocationSucceeded()

                            self._logger.debug(
                                '[%s] Done with command: [%s]' %
                                (self.__class__.__name__, actionCmd))

                            maxActionInvocations = \
                                appMonitor.getMaxActionInvocations()

                            successfulActionInvocations = \
                                appMonitor.getSuccessfulActionInvocations()

                            if maxActionInvocations:
                                if int(maxActionInvocations) <= \
                                        successfulActionInvocations:
                                    # Rule must be disabled.
                                    self._logger.debug(
                                        '[%s] Max. number of successful'
                                        ' invocations (%s) reached for'
                                        ' rule [%s]' %
                                        (self.__class__.__name__,
                                         maxActionInvocations, ruleId))

                                    self.disableRule(rule.getApplicationName(),
                                                     rule.getName())
                        except Exception as ex:
                            # Log the failure rather than swallowing it
                            # silently.
                            self._logger.error(
                                '[%s] Action command failed: %s' %
                                (self.__class__.__name__, ex))
                            appMonitor.actionInvocationFailed()
                    else:
                        self._logger.debug(
                            '[%s] Will skip action: [%s]' %
                            (self.__class__.__name__, actionCmd))
                except TortugaException as ex:
                    self._logger.error('[%s] %s' %
                                       (self.__class__.__name__, ex))

            self._logger.debug('[%s] No more rules appropriate for [%s]' %
                               (self.__class__.__name__, applicationName))

        # No more data to process, exit timer.
        self._logger.debug('[%s] No more data to process' %
                           (self.__class__.__name__))

        self.__cancelProcessingTimer()

    def __runProcessingTimer(self):
        self._processingLock.acquire()

        try:
            if not self._processingTimerRunning:
                self._logger.debug('[%s] Starting processing timer' %
                                   (self.__class__.__name__))

                self._processingTimer = threading.Timer(5, self.__process)
                self._processingTimer.daemon = True
                self._processingTimer.start()
                self._processingTimerRunning = True
            else:
                self._logger.debug('[%s] Processing timer already running' %
                                   (self.__class__.__name__))
        finally:
            self._processingLock.release()

    def __cancelProcessingTimer(self):
        self._processingLock.acquire()

        try:
            self._processingTimerRunning = False

            self._logger.debug('[%s] Processing timer stopped' %
                               (self.__class__.__name__))
        finally:
            self._processingLock.release()

    def hasRule(self, ruleId):
        return ruleId in self._ruleDict

    def addRule(self, rule):
        self._lock.acquire()

        try:
            return self.__addRule(rule)
        finally:
            self._lock.release()

    def __addRule(self, rule):
        ruleId = self.__getRuleId(rule.getApplicationName(), rule.getName())

        self._logger.debug('[%s] Adding rule: [%s]' %
                           (self.__class__.__name__, ruleId))

        self.__checkRuleDoesNotExist(ruleId)

        # Write rule file.
        self.__writeRuleFile(rule)

        rule.decode()

        self._ruleDict[ruleId] = rule
        if rule.isStatusEnabled():
            self.__enableRule(rule)
        else:
            # Rule is disabled, just put it in the 'disabled' dict
            self._disabledRuleDict[ruleId] = rule

        return ruleId

    def __enableRule(self, rule):
        ruleId = self.__getRuleId(rule.getApplicationName(), rule.getName())

        self._logger.debug('[%s] Enabling rule: [%s]' %
                           (self.__class__.__name__, ruleId))

        appMonitor = rule.getApplicationMonitor()

        monitorType = appMonitor.getType()

        rule.setStatusEnabled()

        if monitorType == 'poll':
            self._logger.debug('[%s] [%s] is poll rule' %
                               (self.__class__.__name__, ruleId))

            pollPeriod = appMonitor.getPollPeriod()

            if not pollPeriod:
                pollPeriod = self._minTriggerInterval

            self._logger.debug(
                '[%s] Preparing poll timer with period %s second(s)' %
                (self.__class__.__name__, pollPeriod))

            t = threading.Timer(float(pollPeriod), self.__poll, args=[rule])
            t.daemon = True
            self.__runPollTimer(ruleId, t)
        elif monitorType == 'receive':
            self._logger.debug('[%s] [%s] is receive rule' %
                               (self.__class__.__name__, ruleId))

            self._receiveRuleDict[ruleId] = rule
        else:
            # assume this is 'event' rule
            self._logger.debug('[%s] [%s] is event rule' %
                               (self.__class__.__name__, ruleId))

            self._eventRuleDict[ruleId] = rule

        if ruleId in self._disabledRuleDict:
            del self._disabledRuleDict[ruleId]

    def enableRule(self, applicationName, ruleName):
        """
        Raises:
            RuleAlreadyEnabled
        """

        self._lock.acquire()

        try:
            ruleId = self.__getRuleId(applicationName, ruleName)

            self.__checkRuleExists(ruleId)

            if ruleId not in self._disabledRuleDict:
                raise RuleAlreadyEnabled('Rule [%s] is already enabled.' %
                                         (ruleId))

            rule = self._ruleDict[ruleId]

            self.__enableRule(rule)

            rule.encode()

            self.__writeRuleFile(rule)

            rule.decode()
        finally:
            self._lock.release()

    def deleteRule(self, applicationName, ruleName):
        self._lock.acquire()
        try:
            return self.__deleteRule(applicationName, ruleName)
        finally:
            self._lock.release()

    def __deleteRule(self, applicationName, ruleName):
        ruleId = self.__getRuleId(applicationName, ruleName)

        self._logger.debug('[%s] Deleting rule %s' %
                           (self.__class__.__name__, ruleId))

        self.__checkRuleExists(ruleId)

        rule = self._ruleDict[ruleId]

        if rule.isStatusEnabled():
            self.__disableRule(rule)

        del self._disabledRuleDict[ruleId]

        del self._ruleDict[ruleId]

        osUtility.removeFile(self.__getRuleFileName(applicationName, ruleName))

    # Put rule in the 'disabled' state.
    def disableRule(self, applicationName, ruleName):
        """
        Raises:
            RuleAlreadyDisabled
        """

        self._lock.acquire()

        try:
            ruleId = self.__getRuleId(applicationName, ruleName)

            self.__checkRuleExists(ruleId)

            if ruleId in self._disabledRuleDict:
                raise RuleAlreadyDisabled('Rule [%s] is already disabled.' %
                                          ruleId)

            rule = self._ruleDict[ruleId]

            self.__disableRule(rule)

            rule.encode()

            self.__writeRuleFile(rule)

            rule.decode()
        finally:
            self._lock.release()

    def __disableRule(self, rule, status='disabled by administrator'):
        ruleId = self.__getRuleId(rule.getApplicationName(), rule.getName())

        self._logger.debug('[%s] Disabling rule [%s]' %
                           (self.__class__.__name__, ruleId))

        appMonitor = rule.getApplicationMonitor()

        monitorType = appMonitor.getType()

        rule.setStatus(status)

        if monitorType == 'poll':
            self.__cancelPollTimer(ruleId)
        elif monitorType == 'receive':
            del self._receiveRuleDict[ruleId]
        else:
            del self._eventRuleDict[ruleId]

        self._disabledRuleDict[ruleId] = rule

    def getRule(self, applicationName, ruleName):
        self._lock.acquire()

        try:
            return self.__getRule(applicationName, ruleName)
        finally:
            self._lock.release()

    def __getRule(self, applicationName, ruleName):
        ruleId = self.__getRuleId(applicationName, ruleName)

        self.__checkRuleExists(ruleId)

        return copy.deepcopy(self._ruleDict[ruleId])

    def getRuleList(self):
        self._lock.acquire()
        try:
            return self.__getRuleList()
        finally:
            self._lock.release()

    def __getRuleList(self):
        ruleList = TortugaObjectList()

        for ruleId in self._ruleDict.keys():
            ruleList.append(copy.deepcopy(self._ruleDict[ruleId]))

        return ruleList

    def receiveApplicationData(self, applicationName, applicationData):
        self._lock.acquire()
        try:
            return self.__receiveApplicationData(applicationName,
                                                 applicationData)
        finally:
            self._lock.release()

    def __receiveApplicationData(self, applicationName, applicationData):
        self._logger.debug('[%s] Received data for [%s]' %
                           (self.__class__.__name__, applicationName))

        self._receiveQ.put((applicationName, applicationData))

        self.__runProcessingTimer()

    def executeRule(self, applicationName, ruleName, applicationData):
        self._lock.acquire()
        try:
            return self.__executeRule(applicationName, ruleName,
                                      applicationData)
        finally:
            self._lock.release()

    def __executeRule(self, applicationName, ruleName, applicationData):
        """
        Raises:
            RuleDisabled
        """

        ruleId = self.__getRuleId(applicationName, ruleName)

        self._logger.debug('[%s] Received request to execute rule [%s]' %
                           (self.__class__.__name__, ruleId))

        self.__checkRuleExists(ruleId)

        if ruleId in self._disabledRuleDict:
            raise RuleDisabled('Rule [%s] is disabled.' % (ruleId))

        rule = self._ruleDict[ruleId]

        appMonitor = rule.getApplicationMonitor()

        monitorType = appMonitor.getType()

        if monitorType == 'poll':
            self._logger.debug('[%s] [%s] is poll rule' %
                               (self.__class__.__name__, ruleId))

            self.__cancelPollTimer(ruleId)

            self.__poll(rule)
        elif monitorType == 'receive':
            self._logger.debug('[%s] [%s] is receive rule' %
                               (self.__class__.__name__, ruleId))

            self._receiveQ.put((applicationName, applicationData))

            self.__runProcessingTimer()
        else:
            # assume this is 'event' rule
            self._logger.debug('[%s] [%s] is event rule' %
                               (self.__class__.__name__, ruleId))

            self.__execute(rule)

    def __execute(self, rule):
        ruleId = self.__getRuleId(rule.getApplicationName(), rule.getName())

        self._logger.debug('[%s] Begin execution for [%s]' %
                           (self.__class__.__name__, ruleId))

        rule.ruleInvoked()

        appMonitor = rule.getApplicationMonitor()

        queryCmd = appMonitor.getQueryCommand()

        self._logger.debug('[%s] Query command: [%s]' %
                           (self.__class__.__name__, queryCmd))

        actionCmd = appMonitor.getActionCommand()

        self._logger.debug('[%s] Action command: [%s]' %
                           (self.__class__.__name__, actionCmd))

        xPathReplacementDict = {}

        try:
            invokeAction = True
            queryStdOut = None

            if queryCmd:
                self._logger.debug('[%s] About to invoke: [%s]' %
                                   (self.__class__.__name__, queryCmd))

                try:
                    p = tortugaSubprocess.executeCommand(
                        'source %s/tortuga.sh && ' % (self._cm.getEtcDir()) +
                        queryCmd)

                    queryStdOut = p.getStdOut()

                    appMonitor.queryInvocationSucceeded()
                except Exception as ex:
                    appMonitor.queryInvocationFailed()
                    raise

                monitorXmlDoc = self.__parseMonitorData(queryStdOut)

                xPathReplacementDict = self.__evaluateXPathVariables(
                    monitorXmlDoc, rule.getXPathVariableList())

                invokeAction = self.__evaluateConditions(
                    rule, monitorXmlDoc, xPathReplacementDict)

            if invokeAction:
                try:
                    actionCmd = self.__replaceXPathVariables(
                        actionCmd, xPathReplacementDict)

                    self._logger.debug('[%s] About to invoke: [%s]' %
                                       (self.__class__.__name__, actionCmd))

                    p = tortugaSubprocess.executeCommand(
                        'source %s/tortuga.sh && ' % (self._cm.getEtcDir()) +
                        actionCmd)

                    appMonitor.actionInvocationSucceeded()

                    self._logger.debug('[%s] Done with command: [%s]' %
                                       (self.__class__.__name__, actionCmd))
                except Exception as ex:
                    appMonitor.actionInvocationFailed()
                    raise
            else:
                self._logger.debug('[%s] Will skip action: [%s]' %
                                   (self.__class__.__name__, actionCmd))
        except TortugaException as ex:
            self._logger.error('[%s] %s' % (self.__class__.__name__, ex))

        if self.hasRule(ruleId):
            # Check if we need to stop invoking this rule.
            maxActionInvocations = appMonitor.getMaxActionInvocations()

            successfulActionInvocations = \
                appMonitor.getSuccessfulActionInvocations()

            if maxActionInvocations:
                if int(maxActionInvocations) <= successfulActionInvocations:
                    # Rule must be disabled.
                    self._logger.debug(
                        '[%s] Max. number of successful invocations (%s)'
                        ' reached for rule [%s]' %
                        (self.__class__.__name__, maxActionInvocations,
                         ruleId))

                    self.disableRule(rule.getApplicationName(), rule.getName())
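
Because trigger conditions are ultimately evaluated with eval(), rule files are trusted input. For purely numeric comparisons, a whitelist of operators avoids string evaluation entirely; the sketch below is an alternative approach, not part of the original code.

import operator

# Whitelisted comparison operators; anything outside this table is
# rejected instead of being handed to eval().
_OPS = {
    '<': operator.lt, '<=': operator.le,
    '>': operator.gt, '>=': operator.ge,
    '==': operator.eq, '!=': operator.ne,
}

def evaluate_numeric_condition(metric, op, trigger_value):
    """Return the comparison result, or None if it cannot be evaluated."""
    fn = _OPS.get(op)
    if fn is None:
        return None
    try:
        return fn(float(metric), float(trigger_value))
    except (TypeError, ValueError):
        return None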
Code example #9
File: script.py  Project: tprestegard/tortuga
class TortugaScriptConfig(Config):
    AUTH_METHOD_PASSWORD = 'password'
    AUTH_METHOD_TOKEN = 'token'

    NAVOPS_CLI = '/opt/navops-launch/bin/navopsctl'
    DEFAULT_FILENAME = os.path.join(os.path.expanduser('~'), '.tortuga',
                                    'config')

    def __init__(self, **kwargs):
        #
        # Internal properties
        #
        self._filename = None
        self._cm = ConfigManager()

        #
        # Determine the username/password to use as default
        #
        default_username = self._cm.getCfmUser()
        default_password = self._cm.getCfmPassword()
        if default_password == 'not-set':
            default_username = None
            default_password = None

        #
        # Check for default navops cli location
        #
        default_navops_cli = self.NAVOPS_CLI
        if not os.path.exists(default_navops_cli):
            default_navops_cli = None

        #
        # Configuration settings
        #
        self.url = kwargs.get('url', self._cm.getInstallerUrl())
        self.token = kwargs.get('token', None)
        self.navops_cli = kwargs.get('navops_cli', default_navops_cli)
        self.username = kwargs.get('username', default_username)
        self.password = kwargs.get('password', default_password)
        self.verify = kwargs.get('verify', True)

    def _load_from_environment(self):
        if os.getenv('TORTUGA_WS_URL'):
            self.url = os.getenv('TORTUGA_WS_URL')
        if os.getenv('TORTUGA_WS_USERNAME'):
            self.username = os.getenv('TORTUGA_WS_USERNAME')
        if os.getenv('TORTUGA_WS_PASSWORD'):
            self.password = os.getenv('TORTUGA_WS_PASSWORD')
        if os.getenv('TORTUGA_WS_TOKEN'):
            self.token = os.getenv('TORTUGA_WS_TOKEN')
        if os.getenv('TORTUGA_WS_NO_VERIFY'):
            self.verify = False

    @classmethod
    def load(cls, filename: str = None) -> 'TortugaScriptConfig':
        #
        # If a file name is provided, then we try to load that first
        #
        if filename:
            config = cls._load_from_file(filename)
        #
        # If no filename is provided, then we have to figure out where to
        # get a configuration
        #
        else:
            #
            # First, check if the user has a config in their home directory
            #
            if os.path.exists(cls.DEFAULT_FILENAME):
                config = cls._load_from_file(cls.DEFAULT_FILENAME)
            #
            # Otherwise, create a new config from scratch
            #
            else:
                config = cls()
        #
        # Override the config with any settings provided from the
        # environment
        #
        config._load_from_environment()

        return config

    @classmethod
    def _load_from_file(cls, filename) -> 'TortugaScriptConfig':
        if not os.path.exists(filename):
            raise ConfigFileNotFoundException(
                'Config file not found: {}'.format(filename))

        with open(filename) as fp:
            try:
                config_data = json.load(fp)
            except json.JSONDecodeError:
                raise ConfigException(
                    'Invalid config file: {}'.format(filename))

        try:
            unmarshalled = TortugaScriptConfigSchema().load(config_data)
        except ValidationError:
            raise ConfigException('Invalid config file: {}'.format(filename))

        return TortugaScriptConfig(**unmarshalled.data)

    def save(self, filename: str = None):
        if not filename:
            if self._filename:
                filename = self._filename
            else:
                filename = TortugaScriptConfig.DEFAULT_FILENAME

        if not os.path.exists(filename):
            os.makedirs(os.path.dirname(filename), exist_ok=True, mode=0o700)

        marshalled = TortugaScriptConfigSchema().dump(self)
        with open(filename, 'w') as fp:
            json.dump(marshalled.data, fp, indent=4)

    def get_auth_method(self) -> str:
        """
        Gets the authentication method that should be used.

        :return str: token or password

        :raises ConfigException: if no auth method is configured

        """
        #
        # For the CFM user, always use password authentication
        #
        if self.username == self._cm.getCfmUser() and self.password:
            return self.AUTH_METHOD_PASSWORD

        #
        # For all other cases, if the navops CLI is present, or there
        # is a token, use token-based authentication
        #
        if self.navops_cli or self.token:
            return self.AUTH_METHOD_TOKEN

        #
        # Otherwise, fall back to password authentication
        #
        if self.username and self.password:
            return self.AUTH_METHOD_PASSWORD

        raise ConfigException('Authentication required. Use "tortuga login".')

    def get_token(self) -> str:
        """
        Gets the current authentication token.

        :return str: the token

        :raises ConfigException: if token is unavailable

        """
        if self.navops_cli:
            try:
                return self._get_navops_token()
            except ConfigException:
                pass

        if self.token:
            return self.token

        raise ConfigException('Authentication required. Use "tortuga login".')

    def _get_navops_token(self) -> str:
        cmd = '{} token'.format(self.navops_cli)

        try:
            p = executeCommand(cmd)
        except Exception as ex:
            logger.info(str(ex))
            raise ConfigException(str(ex))

        if p.getExitStatus() != 0:
            raise ConfigException(p.getStdErr())

        return p.getStdOut().decode()
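
A minimal usage sketch for TortugaScriptConfig, assuming it runs in the context of the module above (load() resolves ~/.tortuga/config, then applies the TORTUGA_WS_* environment overrides):

config = TortugaScriptConfig.load()
print(config.url)

try:
    if config.get_auth_method() == TortugaScriptConfig.AUTH_METHOD_TOKEN:
        print('token auth:', config.get_token())
    else:
        print('password auth as user:', config.username)
except ConfigException:
    # Neither a token nor usable credentials were found
    print('Authentication required. Use "tortuga login".')

config.save()  # persists back to the default config file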
Code example #10
    def createHardwareProfile(self,
                              session: Session,
                              hwProfileSpec: HardwareProfile,
                              settingsDict: Optional[dict] = None):
        bUseDefaults = settingsDict['defaults'] \
            if settingsDict and 'defaults' in settingsDict else False

        osInfo = settingsDict['osInfo'] \
            if settingsDict and 'osInfo' in settingsDict else None

        validation.validateProfileName(hwProfileSpec.getName())

        if hwProfileSpec.getDescription() is None:
            hwProfileSpec.setDescription('%s Nodes' %
                                         (hwProfileSpec.getName()))

        installerNode = self._nodeDbApi.getNode(session,
                                                ConfigManager().getInstaller(),
                                                {'softwareprofile': True})

        if bUseDefaults:
            if not hwProfileSpec.getNetworks():
                # No <network>...</network> entries found in the template,
                # use the default provisioning interface from the primary
                # installer.

                # Find first provisioning network and use it
                for nic in installerNode.getNics():
                    network = nic.getNetwork()
                    if network.getType() == 'provision':
                        # for now set the default interface to be index 0
                        # with the same device
                        networkDevice = fixNetworkDeviceName(
                            nic.getNetworkDevice().getName())

                        network.setNetworkDevice(
                            NetworkDevice(name=networkDevice))

                        hwProfileSpec.getNetworks().append(network)

                        break
                else:
                    raise NetworkNotFound(
                        'Unable to find provisioning network')
            else:
                # Ensure network device is defined
                installerNic = None

                for network in hwProfileSpec.getNetworks():
                    for installerNic in installerNode.getNics():
                        installerNetwork = installerNic.getNetwork()

                        if network.getId() and \
                           network.getId() == installerNetwork.getId():
                            break
                        elif network.getAddress() and \
                            network.getAddress() == \
                            installerNetwork.getAddress() and \
                            network.getNetmask() and \
                            network.getNetmask() == \
                                installerNetwork.getNetmask():
                            break
                    else:
                        # Unable to find network matching specification in
                        # template.

                        raise NetworkNotFound(
                            'Unable to find provisioning network [%s]' %
                            (network))

                    networkDevice = fixNetworkDeviceName(
                        installerNic.getNetworkDevice().getName())

                    network.setNetworkDevice(NetworkDevice(name=networkDevice))

        if not osInfo:
            osInfo = installerNode.getSoftwareProfile().getOsInfo()

        osObjFactory = osUtility.getOsObjectFactory(osInfo.getName())

        if not hwProfileSpec.getKernel():
            hwProfileSpec.setKernel(
                osObjFactory.getOsSysManager().getKernel(osInfo))

        if not hwProfileSpec.getInitrd():
            hwProfileSpec.setInitrd(
                osObjFactory.getOsSysManager().getInitrd(osInfo))

        self._hpDbApi.addHardwareProfile(session, hwProfileSpec)

        # Iterate over all networks in the newly defined hardware profile
        # and build assocations to provisioning NICs
        if bUseDefaults:
            for network in \
                [network for network in hwProfileSpec.getNetworks()
                 if network.getType() == 'provision']:
                # Get provisioning nic for network
                try:
                    provisioningNic = self.getProvisioningNicForNetwork(
                        session, network.getAddress(), network.getNetmask())
                except NicNotFound:
                    # There is currently no provisioning NIC defined for
                    # this network.  This is not a fatal error.
                    continue

                self.setProvisioningNic(session, hwProfileSpec.getName(),
                                        provisioningNic.getId())

        #
        # Fire the tags changed event for all creates that have tags
        #
        # Get the latest version from the db in case the create method
        # added some embellishments
        #
        hwp = self.getHardwareProfile(session, hwProfileSpec.getName())
        if hwp.getTags():
            HardwareProfileTagsChanged.fire(hardwareprofile_id=str(
                hwp.getId()),
                                            hardwareprofile_name=hwp.getName(),
                                            tags=hwp.getTags(),
                                            previous_tags={})
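
A hedged sketch of driving createHardwareProfile(), assuming `hp_manager` is an instance of the (unnamed) manager class this method belongs to, and reusing the DbManager().session() pattern shown in code example #14 below; the profile name is illustrative and setName() follows the getName()/setDescription() accessor style used above:

hw_profile = HardwareProfile()
hw_profile.setName('LocalIron')   # illustrative profile name

with DbManager().session() as session:
    # 'defaults': True picks up the installer's provisioning network
    hp_manager.createHardwareProfile(
        session, hw_profile, settingsDict={'defaults': True})
    session.commit()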
Code example #11
File: tortugaWsApi.py Project: ilumb/tortuga
class TortugaWsApi:
    """
    Base tortuga ws api class.
    """
    def __init__(self, username=None, password=None):
        self._logger = logging.getLogger('tortuga.wsapi.{0}'.format(
            self.__class__.__name__))
        self._logger.addHandler(logging.NullHandler())

        self._cm = ConfigManager()

        if username is None and password is None:
            self._logger.debug('[%s] Using built-in user credentials' %
                               (self.__module__))

            username = self._cm.getCfmUser()
            password = self._cm.getCfmPassword()

        self._username = username
        self._password = password
        self._sm = None

    def _getWsUrl(self, url):
        """Extract scheme and net location from provided url. Use defaults
        if none exist."""

        result = urlparse(url)

        scheme = result.scheme if result.scheme else \
            self._cm.getAdminScheme()

        netloc = result.netloc if result.netloc else \
            '{0}:{1}'.format(self._cm.getInstaller(), self._cm.getAdminPort())

        return '{0}://{1}'.format(scheme, netloc)

    def _getSessionManager(self):
        if not self._sm:
            self._sm = sessionManager.createSession()
        return self._sm

    def getLogger(self):
        """ Get logger for this class. """
        return self._logger

    def getConfigManager(self):
        """ Return configmanager reference """
        return self._cm

    def sendSessionRequest(self,
                           url,
                           method='GET',
                           contentType='application/json',
                           data='',
                           acceptType='application/json'):
        """
        Send authorized session request

        Raises:
            UserNotAuthorized
        """

        sm = self._getSessionManager()

        if not sm.hasSession():
            if self._username is None:
                raise UserNotAuthorized('Username not supplied')

            if self._password is None:
                raise UserNotAuthorized('Password not supplied')

            wsUrl = self._getWsUrl(url)

            # establishSession() sets the 'wsUrl' so the explicit call
            # to setHost() is not required
            sm.establishSession(wsUrl, self._username, self._password)

        return sm.sendRequest(url,
                              method,
                              contentType,
                              data,
                              acceptType=acceptType)

    def sendRequest(self,
                    url,
                    method='GET',
                    contentType='application/json',
                    data='',
                    acceptType='application/json'):
        """ Send unauthorized request. """

        sm = self._getSessionManager()

        # Because there's no call to establishSession(), explicitly call
        # setHost()
        sm.setHost(self._getWsUrl(url))

        return self._getSessionManager().sendRequest(url, method, contentType,
                                                     data, acceptType)
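
A usage sketch for TortugaWsApi, assuming a hypothetical subclass; the endpoint path is illustrative, not a verified tortuga route:

class NodeWsApi(TortugaWsApi):
    """Hypothetical subclass; real subclasses wrap specific endpoints."""

    def getNodeList(self):
        # Relative URL: _getWsUrl() fills in scheme and host from config
        return self.sendSessionRequest('/v1/nodes')


api = NodeWsApi()          # no credentials -> falls back to built-in CFM user
response = api.getNodeList()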
Code example #12
    def __init__(self, osFamilyInfo):
        self._osFamilyInfo = osFamilyInfo
        self._logger = logging.getLogger('tortuga.os')
        self._logger.addHandler(logging.NullHandler())
        self._cm = ConfigManager()
Code example #13
class OsBootHostManagerCommon(OsObjectManager):
    """Methods for manipulating PXE files"""
    def __init__(self):
        OsObjectManager.__init__(self)

        # Cache this for later
        try:
            self.passdata = pwd.getpwnam('apache')
        except KeyError:
            self.passdata = pwd.getpwnam(os.getenv('USER'))

        self._cm = ConfigManager()

    def deletePuppetNodeCert(self, nodeName):
        # Remove the Puppet certificate when the node is reinstalled

        self.getLogger().debug('deletePuppetNodeCert(node=[%s])' % (nodeName))

        puppetSslDir = '/etc/puppetlabs/puppet/ssl'
        puppetReportDir = '/var/lib/puppet/reports'
        puppetYamlDir = '/var/lib/puppet/yaml'

        filesToRemove = [
            os.path.join(puppetSslDir, 'public_keys/%s.pem' % (nodeName)),
            os.path.join(puppetSslDir, 'ca/signed/%s.pem' % (nodeName)),
            os.path.join(puppetSslDir, 'private_keys/%s.pem' % (nodeName)),
            os.path.join(puppetSslDir, 'certs/%s.pem' % (nodeName)),
            os.path.join(puppetYamlDir, 'node/%s.yaml' % (nodeName)),
            os.path.join(puppetYamlDir, 'facts/%s.yaml' % (nodeName)),
        ]

        for fn in filesToRemove:
            try:
                os.unlink(fn)
            except OSError as exc:
                if exc.errno != 2:  # 2 == ENOENT: file already gone
                    self.getLogger().error(
                        'Error attempting to remove %s (reason: %s)' %
                        (fn, exc))

        fn = os.path.join(puppetReportDir, nodeName)
        try:
            shutil.rmtree(fn)
        except OSError as exc:
            if exc.errno != 2:  # 2 == ENOENT: directory already gone
                self.getLogger().error(
                    'Error attempting to remove %s (reason: %s)' % (fn, exc))

    def nodeCleanup(self, nodeName):
        """
        Remove files related to the node
        """

        # Remove 'private' directory
        private_dir = os.path.join(self._cm.getRoot(), 'private', nodeName)

        if os.path.exists(private_dir):
            shutil.rmtree(private_dir)

    def addDhcpLease(self, node, nic):
        # Add DHCP lease to DHCP server
        pass

    def removeDhcpLease(self, nodeName):
        # Remove the DHCP lease from the DHCP server.  This will be
        # a no-op on any platform that doesn't support the operation
        # (ie. any platform not running ISC DHCPD)
        pass

    def setNodeForNetworkBoot(self, dbNode):
        # Update node status to "Expired" and boot from network
        dbNode.state = 'Expired'
        dbNode.bootFrom = 0

        self.deletePuppetNodeCert(dbNode.name)

        # Write the updated file
        self.writePXEFile(dbNode)
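
A short sketch of the cleanup path on an installer host (the node name is illustrative; writePXEFile() is supplied by an OS-specific subclass, so only the cleanup calls defined above are shown):

bhm = OsBootHostManagerCommon()

# Clear Puppet state and per-node files for a node about to be reinstalled
bhm.deletePuppetNodeCert('compute-01.private')
bhm.nodeCleanup('compute-01.private')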
Code example #14
def main():
    cm = ConfigManager()

    p = argparse.ArgumentParser()

    p.add_argument('-f',
                   '--force',
                   dest='force',
                   action='store_true',
                   default=False)

    p.add_argument('name', help='Software profile name')
    p.add_argument('kit', help='Kit descriptor (NAME-VERSION-ITERATION)')
    p.add_argument('component', help='Component descriptor (NAME-VERSION)')

    args = p.parse_args()

    # Split from the right so kit and component names may contain dashes
    kitNameAndVersion, kitIteration = args.kit.rsplit('-', 1)
    kitName, kitVersion = kitNameAndVersion.rsplit('-', 1)

    compName, _ = args.component.rsplit('-', 1)

    flagFile = os.path.join(
        cm.getRoot(), 'var/run/actions/%s/component_%s_%s_post_install' %
        (args.name, args.kit, args.component))

    if os.path.exists(flagFile):
        if not args.force:
            sys.stderr.write(
                'post-install component action for [%s] already run\n' %
                (compName))
            sys.exit(0)

        # Remove the existing flag file, we're forcing a run
        os.unlink(flagFile)

    load_kits()
    kit_spec = (kitName, kitVersion, kitIteration)

    try:
        with DbManager().session() as session:
            kit_installer = get_kit_installer(kit_spec)()
            kit_installer.session = session
            c = kit_installer.get_component_installer(compName)
            if c is None:
                raise ComponentNotFound(
                    'Component [%s] not found in kit [%s]' %
                    (compName, kitName))

            c.run_action('post_install')

        logger.debug(
            'post_install component action run for [%s] from kit [%s]' %
            (args.component, args.kit))

        # Ensure destination directory exists
        if not os.path.exists(os.path.dirname(flagFile)):
            os.makedirs(os.path.dirname(flagFile))

        # touch flagFile
        open(flagFile, 'w').close()

    except Exception as exc:  # noqa pylint: disable=broad-except
        print('Error: {}'.format(exc), file=sys.stderr)
        sys.exit(1)
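
An illustrative invocation of the entry point above, assuming the module context; the profile, kit, and component descriptors are examples only, showing the NAME-VERSION-ITERATION and NAME-VERSION formats the parser expects:

import sys

sys.argv = ['post-install-component',   # hypothetical script name
            'Compute',                  # software profile name
            'base-7.0.3-0',             # kit: NAME-VERSION-ITERATION
            'core-7.0.3']               # component: NAME-VERSION
main()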
Code example #15
    def __init__(self, osFamilyInfo: OsFamilyInfo) -> None:
        self._osFamilyInfo = osFamilyInfo
        self._logger = logging.getLogger(OS_NAMESPACE)
        self._cm = ConfigManager()
Code example #16
File: repoManager.py Project: tprestegard/tortuga
class RepoManager:
    """
    Class for repository management.
    """
    def __init__(self):
        """ Initialize repository manager instance. """
        self._logger = logging.getLogger(REPO_NAMESPACE)
        self._kitArchiveDir = None
        self._cm = ConfigManager()
        self._repoRoot = self._cm.getReposDir()
        self._repoMap = {}
        self.__configure()

    def __configureRepo(self, osInfo, localPath, remoteUrl=None):
        # pylint: disable=unused-argument
        """ Configure repo for a given OS"""
        repo = None
        osName = osInfo.getName()
        factoryModule = '%sObjectFactory' % osName
        factoryClass = '%sObjectFactory' % osName.capitalize()

        _temp = __import__('tortuga.os_utility.{0}'.format(factoryModule),
                           globals(), locals(), [factoryClass], 0)

        Factory = getattr(_temp, factoryClass)

        repo = Factory().getRepo(osInfo, localPath, remoteUrl)

        repo.createRoot()

        self._logger.debug('Configured repo [%s] for [%s]' % (repo, osInfo))

        return repo

    def __configure(self):
        """ Configure all repositories from the config file. """

        self._kitArchiveDir = self._cm.getKitDir()
        configFile = self._cm.getRepoConfigFile()

        if not os.path.exists(configFile):
            self._logger.debug('Repo configuration file [%s] not found' %
                               (configFile))
        else:
            self._logger.debug('Reading repo configuration file [%s]' %
                               (configFile))

        configParser = configparser.ConfigParser()
        configParser.read(configFile)

        try:
            osKeyList = configParser.sections()

            if osKeyList:
                self._logger.debug('Found OS sections [%s]' %
                                   (' '.join(osKeyList)))

            for osKey in osKeyList:
                self._logger.debug('Parsing OS section [%s]' % (osKey))

                osData = osKey.split('__')
                osName = osData[0]
                osVersion = osData[1]
                osArch = osData[2]
                osInfo = OsInfo(osName, osVersion, osArch)

                # Ignore all repos that weren't enabled
                bEnabled = True
                if configParser.has_option(osKey, 'enabled'):
                    value = configParser.get(osKey, 'enabled')
                    bEnabled = value.lower() == 'true'

                if not bEnabled:
                    self._logger.debug('Repo for [%s] is disabled' % (osInfo))

                    continue

                localPath = None
                if configParser.has_option(osKey, 'localPath'):
                    localPath = configParser.get(osKey, 'localPath', raw=True)

                if not localPath:
                    localPath = self._repoRoot

                remoteUrl = None
                if configParser.has_option(osKey, 'remoteUrl'):
                    remoteUrl = configParser.get(osKey, 'remoteUrl', raw=True)

                repo = self.__configureRepo(osInfo, localPath, remoteUrl)

                self._repoMap[osKey] = repo

            if not os.path.exists(self._kitArchiveDir):
                self._logger.debug('Creating kit archive directory [%s]' %
                                   (self._kitArchiveDir))

                os.makedirs(self._kitArchiveDir)

            osInfo = osUtility.getNativeOsInfo()

            repo = self.__configureRepo(osInfo, self._repoRoot)

            osKey = '%s__%s__%s' % (osInfo.getName(), osInfo.getVersion(),
                                    osInfo.getArch())

            # Configure repo for native os if there are no repos configured.
            if osKey not in self._repoMap:
                self._repoMap[osKey] = repo
        except ConfigurationError:
            raise
        except Exception as ex:
            raise ConfigurationError(exception=ex)

    def getRepo(self, osInfo=None):
        """ Return repo given os info. """

        if not osInfo:
            osInfo = osUtility.getNativeOsInfo()

        osKey = '%s__%s__%s' % (osInfo.getName(), osInfo.getVersion(),
                                osInfo.getArch())

        return self._repoMap.get(osKey)

    def getRepoList(self):
        """ Return all repos. """
        return list(self._repoMap.values())

    def getKitArchiveDir(self):
        """ Return kit archive directory. """
        return self._kitArchiveDir
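
A usage sketch, assuming a configured installer host (RepoManager reads the repo configuration file during construction):

repo_mgr = RepoManager()

native_repo = repo_mgr.getRepo()          # repo for the native OS
print(repo_mgr.getKitArchiveDir())

for repo in repo_mgr.getRepoList():
    print(repo)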
Code example #17
File: actionManager.py Project: ilumb/tortuga
    def __init__(self):
        super(ActionManager, self).__init__()

        self._actionBase = "%s%s" % (ConfigManager().getRoot(),
                                     ConfigManager().getActionLog())
Code example #18
class NodeManager(TortugaObjectManager):
    # pylint: disable=too-many-public-methods

    def __init__(self):
        super(NodeManager, self).__init__()

        self._nodeDbApi = NodeDbApi()
        self._hardwareProfileDbApi = HardwareProfileDbApi()
        self._cm = ConfigManager()
        self._san = san.San()

    def __validateHostName(self, hostname: str, name_format: str) -> None:
        """
        Raises:
            ConfigurationError
        """

        bWildcardNameFormat = (name_format == '*')

        if hostname and not bWildcardNameFormat:
            # Host name specified, but hardware profile does not
            # allow setting the host name
            raise ConfigurationError(
                'Hardware profile does not allow setting host names'
                ' of imported nodes')
        elif not hostname and bWildcardNameFormat:
            # Host name not specified but hardware profile expects it
            raise ConfigurationError(
                'Hardware profile requires host names to be set')

    def createNewNode(self,
                      session: Session,
                      addNodeRequest: dict,
                      dbHardwareProfile: HardwareProfiles,
                      dbSoftwareProfile: Optional[SoftwareProfiles] = None,
                      validateIp: bool = True,
                      bGenerateIp: bool = True,
                      dns_zone: Optional[str] = None) -> Nodes:
        """
        Convert the addNodeRequest into a Nodes object

        Raises:
            NicNotFound
        """

        self.getLogger().debug(
            'createNewNode(): session=[%s], addNodeRequest=[%s],'
            ' dbHardwareProfile=[%s], dbSoftwareProfile=[%s],'
            ' validateIp=[%s], bGenerateIp=[%s]' %
            (id(session), addNodeRequest, dbHardwareProfile.name,
             dbSoftwareProfile.name if dbSoftwareProfile else '(none)',
             validateIp, bGenerateIp))

        # This is where the Nodes() object is first created.
        node = Nodes()

        # Set the default node state
        node.state = 'Discovered'

        if 'rack' in addNodeRequest:
            node.rack = addNodeRequest['rack']

        node.addHostSession = addNodeRequest['addHostSession']

        hostname = addNodeRequest['name'] \
            if 'name' in addNodeRequest else None

        # Ensure no conflicting options (ie. specifying host name for
        # hardware profile in which host names are generated)
        self.__validateHostName(hostname, dbHardwareProfile.nameFormat)

        node.name = hostname

        # Complete initialization of new node record
        nic_defs = addNodeRequest['nics'] \
            if 'nics' in addNodeRequest else []

        AddHostServerLocal().initializeNode(session,
                                            node,
                                            dbHardwareProfile,
                                            dbSoftwareProfile,
                                            nic_defs,
                                            bValidateIp=validateIp,
                                            bGenerateIp=bGenerateIp,
                                            dns_zone=dns_zone)

        # Set hardware profile of new node
        node.hardwareProfileId = dbHardwareProfile.id

        # Set software profile of new node; if the software profile is None,
        # attempt to set the software profile to the idle software profile
        # of the associated hardware profile. This may also be None, in
        # which case the software profile is undefined.
        node.softwareprofile = dbSoftwareProfile \
            if dbSoftwareProfile else dbHardwareProfile.idlesoftwareprofile

        node.isIdle = dbSoftwareProfile.isIdle \
            if dbSoftwareProfile else True

        # Return the new node
        return node

    def getNode(self, name, optionDict=None):
        """Get node by name"""

        optionDict_ = optionDict.copy() if optionDict else {}

        optionDict_.update({'hardwareprofile': True})

        node = self._nodeDbApi.getNode(name, optionDict_)

        hwprofile = self._hardwareProfileDbApi.getHardwareProfile(
            node.getHardwareProfile().getName(), {'resourceadapter': True})

        adapter_name = hwprofile.getResourceAdapter().getName() \
            if hwprofile.getResourceAdapter() else 'default'

        # Query vcpus from resource adapter
        ResourceAdapterClass = resourceAdapterFactory.getResourceAdapterClass(
            adapter_name)

        # Update Node object
        node.setVcpus(ResourceAdapterClass().get_node_vcpus(node.getName()))

        return node

    def getNodeById(self, nodeId, optionDict=None):
        """
        Get node by node id

        Raises:
            NodeNotFound
        """
        return self._nodeDbApi.getNodeById(int(nodeId), optionDict)

    def getNodeByIp(self, ip):
        """
        Get node by IP address

        Raises:
            NodeNotFound
        """

        return self._nodeDbApi.getNodeByIp(ip)

    def getNodeList(self, tags=None):
        """Return all nodes"""
        return self._nodeDbApi.getNodeList(tags=tags)

    def updateNode(self, nodeName, updateNodeRequest):
        self.getLogger().debug('updateNode(): name=[{0}]'.format(nodeName))

        session = DbManager().openSession()

        try:
            node = NodesDbHandler().getNode(session, nodeName)

            if 'nics' in updateNodeRequest:
                nic = updateNodeRequest['nics'][0]

                if 'ip' in nic:
                    node.nics[0].ip = nic['ip']
                    node.nics[0].boot = True

            # Call resource adapter
            NodesDbHandler().updateNode(session, node, updateNodeRequest)

            run_post_install = False

            if 'state' in updateNodeRequest:
                run_post_install = node.state == 'Allocated' and \
                    updateNodeRequest['state'] == 'Provisioned'

                node.state = updateNodeRequest['state']

            session.commit()

            if run_post_install:
                self.getLogger().debug(
                    'updateNode(): run-post-install for node [{0}]'.format(
                        node.name))

                self.__scheduleUpdate()
        except Exception:
            session.rollback()

            self.getLogger().exception(
                'Exception updating node [{0}]'.format(nodeName))
        finally:
            DbManager().closeSession()

    def updateNodeStatus(self, nodeName, state=None, bootFrom=None):
        """Update node status

        If both 'state' and 'bootFrom' are None, this operation will
        update only the 'lastUpdated' timestamp.

        Returns:
            bool indicating whether state and/or bootFrom differed from
            current value
        """

        value = 'None' if bootFrom is None else \
            '1 (disk)' if int(bootFrom) == 1 else '0 (network)'

        self.getLogger().debug(
            'updateNodeStatus(): node=[%s], state=[%s], bootFrom=[%s]' %
            (nodeName, state, value))

        session = DbManager().openSession()

        try:
            node = NodesDbHandler().getNode(session, nodeName)

            result = self._updateNodeStatus(node,
                                            state=state,
                                            bootFrom=bootFrom)

            session.commit()

            return result
        finally:
            DbManager().closeSession()

    def _updateNodeStatus(self, dbNode, state=None, bootFrom=None):
        """
        Internal method which takes a 'Nodes' object instead of a node
        name.
        """

        result = NodesDbHandler().updateNodeStatus(dbNode, state, bootFrom)

        # Only change local boot configuration if the hardware profile is
        # not marked as 'remote' and we're not acting on the installer node.
        if dbNode.softwareprofile and \
                dbNode.softwareprofile.type != 'installer' and \
                dbNode.hardwareprofile.location not in \
                ('remote', 'remote-vpn'):
            osUtility.getOsObjectFactory().getOsBootHostManager().\
                writePXEFile(dbNode, localboot=bootFrom)

        return result

    def __process_nodeErrorDict(self, nodeErrorDict):
        result = {}
        nodes_deleted = []

        for key, nodeList in nodeErrorDict.items():
            result[key] = [dbNode.name for dbNode in nodeList]

            if key == 'NodesDeleted':
                for node in nodeList:
                    node_deleted = {
                        'name': node.name,
                        'hardwareprofile': node.hardwareprofile.name,
                        'addHostSession': node.addHostSession,
                    }

                    if node.softwareprofile:
                        node_deleted['softwareprofile'] = \
                            node.softwareprofile.name

                    nodes_deleted.append(node_deleted)

        return result, nodes_deleted

    def deleteNode(self, nodespec):
        """
        Delete node by nodespec

        Raises:
            NodeNotFound
        """

        installer_hostname = socket.getfqdn().split('.', 1)[0]

        session = DbManager().openSession()

        try:
            nodes = []

            for node in self.__expand_nodespec(session, nodespec):
                if node.name.split('.', 1)[0] == installer_hostname:
                    self.getLogger().info(
                        'Ignoring request to delete installer node'
                        ' ([{0}])'.format(node.name))

                    continue

                nodes.append(node)

            if not nodes:
                raise NodeNotFound('No nodes matching nodespec [%s]' %
                                   (nodespec))

            self.__preDeleteHost(nodes)

            nodeErrorDict = NodesDbHandler().deleteNode(session, nodes)

            # REALLY!?!? Convert a list of Nodes objects into a list of
            # node names so we can report the list back to the end-user.
            # This needs to be FIXED!

            result, nodes_deleted = self.__process_nodeErrorDict(nodeErrorDict)

            session.commit()

            # ============================================================
            # Perform actions *after* node deletion(s) have been committed
            # to database.
            # ============================================================

            self.__postDeleteHost(nodes_deleted)

            addHostSessions = {tmpnode['addHostSession']
                               for tmpnode in nodes_deleted}

            if addHostSessions:
                AddHostManager().delete_sessions(addHostSessions)

            bhm = osUtility.getOsObjectFactory().getOsBootHostManager()

            for nodeName in result['NodesDeleted']:
                # Remove the Puppet cert
                bhm.deletePuppetNodeCert(nodeName)

                bhm.nodeCleanup(nodeName)

                self.getLogger().info('Node [%s] deleted' % (nodeName))

            # Schedule a cluster update
            self.__scheduleUpdate()

            return result
        except TortugaException:
            session.rollback()

            raise
        except Exception:
            session.rollback()

            self.getLogger().exception('Exception in NodeManager.deleteNode()')

            raise
        finally:
            DbManager().closeSession()

    def __process_delete_node_result(self, nodeErrorDict):
        # REALLY!?!? Convert a list of Nodes objects into a list of
        # node names so we can report the list back to the end-user.
        # This needs to be FIXED!

        result = {}
        nodes_deleted = []

        for key, nodeList in nodeErrorDict.items():
            result[key] = [dbNode.name for dbNode in nodeList]

            if key == 'NodesDeleted':
                for node in nodeList:
                    node_deleted = {
                        'name': node.name,
                        'hardwareprofile': node.hardwareprofile.name,
                    }

                    if node.softwareprofile:
                        node_deleted['softwareprofile'] = \
                            node.softwareprofile.name

                    nodes_deleted.append(node_deleted)

        return result, nodes_deleted

    def __preDeleteHost(self, nodes):
        self.getLogger().debug('__preDeleteHost(): nodes=[%s]' %
                               (' '.join([node.name for node in nodes])))

        if not nodes:
            self.getLogger().debug('No nodes deleted in this operation')

            return

        kitmgr = KitActionsManager()

        for node in nodes:
            kitmgr.pre_delete_host(
                node.hardwareprofile.name,
                node.softwareprofile.name if node.softwareprofile else None,
                nodes=[node.name])

    def __postDeleteHost(self, nodes_deleted):
        # 'nodes_deleted' is a list of dicts of the following format:
        #
        # {
        #     'name': 'compute-01',
        #     'softwareprofile': 'Compute',
        #     'hardwareprofile': 'LocalIron',
        # }
        #
        # if the node does not have an associated software profile, the
        # dict does not contain the key 'softwareprofile'.

        self.getLogger().debug('__postDeleteHost(): nodes_deleted=[%s]' %
                               (nodes_deleted))

        if not nodes_deleted:
            self.getLogger().debug('No nodes deleted in this operation')

            return

        kitmgr = KitActionsManager()

        for node_dict in nodes_deleted:
            kitmgr.post_delete_host(node_dict['hardwareprofile'],
                                    node_dict['softwareprofile'] if
                                    'softwareprofile' in node_dict else None,
                                    nodes=[node_dict['name']])

    def __scheduleUpdate(self):
        tortugaSubprocess.executeCommand(
            os.path.join(self._cm.getRoot(), 'bin/schedule-update'))

    def getInstallerNode(self, optionDict=None):
        return self._nodeDbApi.getNode(self._cm.getInstaller(),
                                       optionDict=optionDict)

    def getProvisioningInfo(self, nodeName):
        return self._nodeDbApi.getProvisioningInfo(nodeName)

    def getKickstartFile(self, node, hardwareprofile, softwareprofile):
        """
        Generate kickstart file for specified node

        Raises:
            OsNotSupported
        """

        osFamilyName = softwareprofile.os.family.name

        try:
            osSupportModule = __import__('tortuga.os.%s.osSupport' %
                                         (osFamilyName),
                                         fromlist=['OSSupport'])
        except ImportError:
            raise OsNotSupported('Operating system family [%s] not supported' %
                                 (osFamilyName))

        OSSupport = osSupportModule.OSSupport

        tmpOsFamilyInfo = OsFamilyInfo(softwareprofile.os.family.name,
                                       softwareprofile.os.family.version,
                                       softwareprofile.os.family.arch)

        return OSSupport(tmpOsFamilyInfo).getKickstartFileContents(
            node, hardwareprofile, softwareprofile)

    def __transferNodeCommon(self, session, dbDstSoftwareProfile, results):
        # pylint: disable=no-self-use

        # Aggregate list of transferred nodes based on hardware profile
        # to call resource adapter minimal number of times.

        hwProfileMap = {}

        for transferResultDict in results:
            dbNode = transferResultDict['node']
            dbHardwareProfile = dbNode.hardwareprofile

            if dbHardwareProfile not in hwProfileMap:
                hwProfileMap[dbHardwareProfile] = [transferResultDict]
            else:
                hwProfileMap[dbHardwareProfile].append(transferResultDict)

        session.commit()

        nodeTransferDict = {}

        # Kill two birds with one stone... do the resource adapter
        # action as well as populate the nodeTransferDict. This saves
        # having to iterate twice on the same result data.
        for dbHardwareProfile, nodesDict in hwProfileMap.items():
            adapter = resourceAdapterFactory.getApi(
                dbHardwareProfile.resourceadapter.name)

            dbNodeTuples = []

            for nodeDict in nodesDict:
                dbNode = nodeDict['node']
                dbSrcSoftwareProfile = nodeDict['prev_softwareprofile']

                if dbSrcSoftwareProfile.name not in nodeTransferDict:
                    nodeTransferDict[dbSrcSoftwareProfile.name] = {
                        'added': [],
                        'removed': [dbNode],
                    }
                else:
                    nodeTransferDict[dbSrcSoftwareProfile.name]['removed'].\
                        append(dbNode)

                if dbDstSoftwareProfile.name not in nodeTransferDict:
                    nodeTransferDict[dbDstSoftwareProfile.name] = {
                        'added': [dbNode],
                        'removed': [],
                    }
                else:
                    nodeTransferDict[dbDstSoftwareProfile.name]['added'].\
                        append(dbNode)

                # The destination software profile is available through
                # node relationship.
                dbNodeTuples.append((dbNode, dbSrcSoftwareProfile))

            adapter.transferNode(dbNodeTuples, dbDstSoftwareProfile)

            session.commit()

        # Now call the 'refresh' action to all participatory components
        KitActionsManager().refresh(nodeTransferDict)

        return results

    def transferNode(self, nodespec, dstSoftwareProfileName, bForce=False):
        """
        Transfer nodes defined by 'nodespec' to 'dstSoftwareProfile'

        Raises:
            NodeNotFound
            SoftwareProfileNotFound
            NodeTransferNotValid
        """

        session = DbManager().openSession()

        try:
            nodes = self.__expand_nodespec(session, nodespec)

            if not nodes:
                raise NodeNotFound('No nodes matching nodespec [%s]' %
                                   (nodespec))

            dbDstSoftwareProfile = SoftwareProfilesDbHandler().\
                getSoftwareProfile(session, dstSoftwareProfileName)

            results = NodesDbHandler().transferNode(session,
                                                    nodes,
                                                    dbDstSoftwareProfile,
                                                    bForce=bForce)

            return self.__transferNodeCommon(session, dbDstSoftwareProfile,
                                             results)
        finally:
            DbManager().closeSession()

    def transferNodes(self,
                      srcSoftwareProfileName,
                      dstSoftwareProfileName,
                      count,
                      bForce=False):
        """
        Transfer 'count' nodes from 'srcSoftwareProfile' to
        'dstSoftwareProfile'

        Raises:
            SoftwareProfileNotFound
        """

        session = DbManager().openSession()

        try:
            # It is not necessary to specify a source software profile. If
            # not specified, pick any eligible nodes in the hardware profile
            # mapped to the destination software profile. Don't ask me who
            # uses this capability, but it's here if you need it...

            dbSrcSoftwareProfile = SoftwareProfilesDbHandler().\
                getSoftwareProfile(
                    session, srcSoftwareProfileName) \
                if srcSoftwareProfileName else None

            dbDstSoftwareProfile = SoftwareProfilesDbHandler().\
                getSoftwareProfile(session, dstSoftwareProfileName)

            results = NodesDbHandler().transferNodes(session,
                                                     dbSrcSoftwareProfile,
                                                     dbDstSoftwareProfile,
                                                     int(float(count)),
                                                     bForce=bForce)

            return self.__transferNodeCommon(session, dbDstSoftwareProfile,
                                             results)
        finally:
            DbManager().closeSession()

    def idleNode(self, nodespec):
        """
        Raises:
            NodeNotFound
        """

        session = DbManager().openSession()

        try:
            nodes = self.__expand_nodespec(session, nodespec)

            if not nodes:
                raise NodeNotFound('No nodes matching nodespec [%s]' %
                                   (nodespec))

            result = NodesDbHandler().idleNode(session, nodes)

            # Convert list of Nodes to list of node names for providing
            # user feedback.

            result_dict = {}
            for key, dbNodes in result.items():
                result_dict[key] = [dbNode.name for dbNode in dbNodes]

            session.commit()

            # Remove Puppet certificate(s) for idled node(s)
            for node_name in result_dict['success']:
                # Remove Puppet certificate for idled node
                bhm = osUtility.getOsObjectFactory().getOsBootHostManager()
                bhm.deletePuppetNodeCert(node_name)

            # Schedule a cluster update
            self.__scheduleUpdate()

            return result_dict
        except TortugaException:
            session.rollback()

            raise
        except Exception as ex:
            session.rollback()

            self.getLogger().exception('[%s] %s' %
                                       (self.__class__.__name__, ex))

            raise
        finally:
            DbManager().closeSession()

    def __process_activateNode_results(self, tmp_results, dstswprofilename):
        results = {}

        for key, values in tmp_results.items():
            # With the exception of the "ProfileMappingNotAllowed" dict
            # item, all items in the dict are lists of nodes.
            if key != 'ProfileMappingNotAllowed':
                results[key] = [dbNode.name for dbNode in values]
            else:
                results[key] = \
                    [(value[0].name, value[1], value[2])
                     for value in values]

        if tmp_results['success']:
            # Iterate over activated nodes, creating dict keyed on
            # 'addHostSession'
            addHostSessions = {}

            for node in tmp_results['success']:
                # Map each addHostSession to its hardware profile name
                addHostSessions[node.addHostSession] = \
                    node.hardwareprofile.name

            # For each 'addHostSession', call postAddHost()
            for addHostSession, hwprofile in addHostSessions.items():
                AddHostManager().postAddHost(hwprofile, dstswprofilename,
                                             addHostSession)

        return results

    def activateNode(self, nodespec, softwareProfileName):
        """
        Raises:
            SoftwareProfileNotFound
            NodeNotFound
            TortugaException
        """

        session = DbManager().openSession()

        try:
            dbSoftwareProfile = SoftwareProfilesDbHandler().\
                getSoftwareProfile(session, softwareProfileName) \
                if softwareProfileName else None

            dbNodes = self.__expand_nodespec(session, nodespec)

            if not dbNodes:
                raise NodeNotFound('No nodes matching nodespec [%s]' %
                                   (nodespec))

            tmp_results = NodesDbHandler().activateNode(
                session, dbNodes, dbSoftwareProfile)

            results = self.__process_activateNode_results(
                tmp_results, softwareProfileName)

            session.commit()

            # Schedule a cluster update
            self.__scheduleUpdate()

            return results
        except TortugaException:
            session.rollback()
            raise
        except Exception as ex:
            session.rollback()
            self.getLogger().exception('%s' % ex)
            raise
        finally:
            DbManager().closeSession()

    def startupNode(self, nodespec, remainingNodeList=None, bootMethod='n'):
        """
        Raises:
            NodeNotFound
        """

        return self._nodeDbApi.startupNode(nodespec,
                                           remainingNodeList=remainingNodeList
                                           or [],
                                           bootMethod=bootMethod)

    def shutdownNode(self, nodespec, bSoftShutdown=False):
        """
        Raises:
            NodeNotFound
        """

        return self._nodeDbApi.shutdownNode(nodespec, bSoftShutdown)

    def build_node_filterspec(self, nodespec):
        filter_spec = []

        for nodespec_token in nodespec.split(','):
            # Convert shell-style wildcards into SQL wildcards
            if '*' in nodespec_token or '?' in nodespec_token:
                filter_spec.append(
                    nodespec_token.replace('*', '%').replace('?', '_'))

                continue

            if '.' not in nodespec_token:
                filter_spec.append(nodespec_token)
                filter_spec.append(nodespec_token + '.%')

                continue

            # Add nodespec "AS IS"
            filter_spec.append(nodespec_token)

        return filter_spec

    def __expand_nodespec(self, session, nodespec):
        # pylint: disable=no-self-use

        # Expand wildcards in nodespec. Each token in the nodespec can
        # be wildcard that expands into one or more nodes.

        return NodesDbHandler().getNodesByNameFilter(
            session, self.build_node_filterspec(nodespec))

    def rebootNode(self, nodespec, bSoftReset=False, bReinstall=False):
        """
        Raises:
            NodeNotFound
        """

        session = DbManager().openSession()

        try:
            nodes = self.__expand_nodespec(session, nodespec)
            if not nodes:
                raise NodeNotFound('No nodes matching nodespec [%s]' %
                                   (nodespec))

            bhm = osUtility.getOsObjectFactory().getOsBootHostManager()

            if bReinstall:
                for dbNode in nodes:
                    bhm.setNodeForNetworkBoot(dbNode)

            results = NodesDbHandler().rebootNode(session, nodes, bSoftReset)

            session.commit()

            return results
        finally:
            DbManager().closeSession()

    def checkpointNode(self, nodeName):
        return self._nodeDbApi.checkpointNode(nodeName)

    def revertNodeToCheckpoint(self, nodeName):
        return self._nodeDbApi.revertNodeToCheckpoint(nodeName)

    def migrateNode(self, nodeName, remainingNodeList, liveMigrate):
        return self._nodeDbApi.migrateNode(nodeName, remainingNodeList,
                                           liveMigrate)

    def evacuateChildren(self, nodeName):
        self._nodeDbApi.evacuateChildren(nodeName)

    def getChildrenList(self, nodeName):
        return self._nodeDbApi.getChildrenList(nodeName)

    def setParentNode(self, nodeName, parentNodeName):
        self._nodeDbApi.setParentNode(nodeName, parentNodeName)

    def addStorageVolume(self, nodeName, volume, isDirect="DEFAULT"):
        """
        Raises:
            VolumeDoesNotExist
            UnsupportedOperation
        """

        node = self.getNode(nodeName, {'hardwareprofile': True})

        # Only allow persistent volumes to be attached...
        vol = self._san.getVolume(volume)
        if vol is None:
            raise VolumeDoesNotExist('Volume [%s] does not exist' % (volume))

        if not vol.getPersistent():
            raise UnsupportedOperation(
                'Only persistent volumes can be attached')

        api = resourceAdapterFactory.getApi(
            node.getHardwareProfile().getResourceAdapter().getName())

        if isDirect == "DEFAULT":
            return api.addVolumeToNode(node, volume)

        return api.addVolumeToNode(node, volume, isDirect)

    def removeStorageVolume(self, nodeName, volume):
        """
        Raises:
            VolumeDoesNotExist
            UnsupportedOperation
        """

        node = self.getNode(nodeName, {'hardwareprofile': True})

        api = resourceAdapterFactory.getApi(
            node.getHardwareProfile().getResourceAdapter().getName())

        vol = self._san.getVolume(volume)

        if vol is None:
            raise VolumeDoesNotExist('The volume [%s] does not exist' %
                                     (volume))

        if not vol.getPersistent():
            raise UnsupportedOperation(
                'Only persistent volumes can be detached')

        return api.removeVolumeFromNode(node, volume)

    def getStorageVolumes(self, nodeName):
        return self._san.getNodeVolumes(self.getNode(nodeName).getName())

    def getNodesByNodeState(self, state):
        return self._nodeDbApi.getNodesByNodeState(state)

    def getNodesByNameFilter(self, _filter):
        return self._nodeDbApi.getNodesByNameFilter(_filter)
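
A usage sketch for NodeManager; the node names and wildcard nodespec are illustrative:

node_mgr = NodeManager()

node = node_mgr.getNode('compute-01')       # vcpus filled in by the adapter
print(node.getName(), node.getVcpus())

result = node_mgr.deleteNode('compute-0*')  # nodespec supports shell wildcards
print(result['NodesDeleted'])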
Code example #19
File: osSupportBase.py Project: joedborg/tortuga
    def __init__(self, osFamilyInfo: OsFamilyInfo) -> None:
        self._osFamilyInfo = osFamilyInfo
        self._logger = logging.getLogger('tortuga.os')
        self._cm = ConfigManager()
Code example #20
File: osObjectManager.py Project: ilumb/tortuga
    def __init__(self):
        self._logger = logging.getLogger(
            'tortuga.%s' % (self.__class__.__name__))
        self._logger.addHandler(logging.NullHandler())
        self._cm = ConfigManager()
Code example #21
class TortugaCli(metaclass=ABCMeta):
    """
    Base tortuga command line interface class.
    """
    def __init__(self, validArgCount=0):
        self._logger = logging.getLogger(CLI_NAMESPACE)

        self._parser = argparse.ArgumentParser()
        self._args = []
        self._validArgCount = validArgCount
        self._url = None
        self._username = None
        self._password = None
        self._verify = True
        self._optionGroupDict = {}
        self._cm = ConfigManager()

        self.__initializeLocale()

    def __initializeLocale(self):
        """Initialize the gettext domain """
        langdomain = 'tortugaStrings'

        # Locate the Internationalization stuff
        localedir = '../share/locale' \
            if os.path.exists('../share/locale') else \
            os.path.join(self._cm.getRoot(), 'share/locale')

        gettext.install(langdomain, localedir)

    def getParser(self):
        """ Get parser for this class. """
        return self._parser

    def addOption(self, *args, **kwargs):
        """ Add option. """
        self._parser.add_argument(*args, **kwargs)

    def addOptionToGroup(self, groupName, *args, **kwargs):
        """
        Add option for the given group name.
        Group should be created using addOptionGroup().
        """
        group = self._optionGroupDict.get(groupName)
        group.add_argument(*args, **kwargs)

    def addOptionGroup(self, groupName, desc):
        """ Add option group. """
        group = self._parser.add_argument_group(groupName, desc)
        self._optionGroupDict[groupName] = group
        return group

    def parseArgs(self, usage=None):
        """
        Parse args

        Raises:
            InvalidArgument
        """
        common_group = _('Common Tortuga Options')
        self.addOptionGroup(common_group, None)

        self.addOptionToGroup(common_group,
                              '-V',
                              action='store_true',
                              dest='cmdVersion',
                              default=False,
                              help=_('print version and exit'))

        self.addOptionToGroup(common_group,
                              '-d',
                              '--debug',
                              dest='consoleLogLevel',
                              default='warning',
                              help=_('set debug level; valid values are: '
                                     'critical, error, warning, info, debug'))

        self.addOptionToGroup(common_group,
                              '--url',
                              help=_('Tortuga web service URL'))

        self.addOptionToGroup(common_group,
                              '--username',
                              dest='username',
                              help=_('Tortuga web service user name'))

        self.addOptionToGroup(common_group,
                              '--password',
                              dest='password',
                              help=_('Tortuga web service password'))

        self.addOptionToGroup(common_group,
                              '--no-verify',
                              dest='verify',
                              action='store_false',
                              default=True,
                              help=_("Don't verify the API SSL certificate"))

        if usage:
            self._parser.description = usage

        try:
            self._args = self._parser.parse_args()
        except SystemExit as rc:
            sys.stdout.flush()
            sys.stderr.flush()
            sys.exit(int(str(rc)))

        if self._args.cmdVersion:
            print(
                _('{0} version: {1}').format(
                    os.path.basename(sys.argv[0]),
                    self._cm.getTortugaRelease()))
            sys.exit(0)

        self._setup_logging(self._args.consoleLogLevel)

        self._url, self._username, self._password, self._verify = \
            self._get_web_service_options()

        return self._args

    def _setup_logging(self, log_level_name: str):
        """
        Setup logging for the specified log level.

        :param str log_level_name: the name of the log level to use

        """
        log_level_name = log_level_name.upper()
        if log_level_name not in [
                'CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'
        ]:
            print('Invalid debug level: {}'.format(log_level_name))
            sys.exit(1)

        log_level = getattr(logging, log_level_name)

        logger = logging.getLogger(ROOT_NAMESPACE)
        logger.setLevel(log_level)

        ch = logging.StreamHandler()
        ch.setLevel(log_level)
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        ch.setFormatter(formatter)

        logger.addHandler(ch)

    def _get_web_service_options(self):
        """
        Read Tortuga web service credentials from config file, environment,
        or command-line. Command-line overrides either config file or
        environment.

        :return: tuple of (url, username, password, verify)
        """
        username = password = url = None

        cfg_file = os.path.join(os.path.expanduser('~'), '.local', 'tortuga',
                                'credentials')
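        # Expected credentials file format (INI-style):
        #
        #   [default]
        #   username = ...
        #   password = ...
        #   url = ...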

        if os.path.exists(cfg_file):
            cfg = configparser.ConfigParser()

            cfg.read(cfg_file)

            username = cfg.get('default', 'username') \
                if cfg.has_section('default') and \
                cfg.has_option('default', 'username') else None

            password = cfg.get('default', 'password') \
                if cfg.has_section('default') and \
                cfg.has_option('default', 'password') else None

            url = cfg.get('default', 'url') \
                if cfg.has_section('default') and \
                cfg.has_option('default', 'url') else None

        # TORTUGA_WS_URL
        if self._args.url:
            # Command-line '--url' argument overrides env var and
            # setting contained within '/etc/profile.nii'
            url = self._args.url
        elif os.getenv('TORTUGA_WS_URL'):
            url = os.getenv('TORTUGA_WS_URL')

        # TORTUGA_WS_USERNAME
        if self._args.username:
            username = self._args.username
        elif os.getenv('TORTUGA_WS_USERNAME'):
            username = os.getenv('TORTUGA_WS_USERNAME')

        # TORTUGA_WS_PASSWORD
        if self._args.password:
            password = self._args.password
        elif os.getenv('TORTUGA_WS_PASSWORD'):
            password = os.getenv('TORTUGA_WS_PASSWORD')

        #
        # CLI arguments should override the environment variable
        #
        if os.getenv('TORTUGA_WS_NO_VERIFY'):
            verify = False
        else:
            verify = self._args.verify

        return url, username, password, verify

    def usage(self, s=None):
        """
        Print usage information
        """

        if s:
            sys.stderr.write(_('Error: {0}').format(s) + '\n')

        self._parser.print_help()

        sys.exit(1)

    def getArgs(self):
        '''Returns the command line argument list'''
        return self._args

    def getUrl(self):
        return self._url

    def getUsername(self):
        """ Get user name. """
        return self._username

    def getPassword(self):
        """ Get password. """
        return self._password

    @abstractmethod
    def runCommand(self):  # pylint: disable=no-self-use
        """
        This method must be implemented by the derived class.
        """

    def run(self):
        """
        Invoke runCommand() in derivative class and handle exceptions.
        """
        try:
            self.runCommand()
        except TortugaException as ex:
            print(ex.getErrorMessage())
            raise SystemExit(ex.getErrorCode())
        except SystemExit:
            raise
        except Exception as ex:
            print(str(ex))
            raise SystemExit(-1)

    def _parseDiskSize(self, diskSizeParam):  # pylint: disable=no-self-use
        """
        Parses diskSizeParam, returns an int value representing
        number of megabytes

        Raises:
            ValueError
        """
        if diskSizeParam.endswith('TB'):
            return int(float(diskSizeParam[:-2]) * 1000000)

        if diskSizeParam.endswith('GB'):
            return int(float(diskSizeParam[:-2]) * 1000)
        elif diskSizeParam.endswith('MB'):
            # Must be an integer
            return int(diskSizeParam[:-2])

        return int(diskSizeParam)

    def _getDiskSizeDisplayStr(self, volSize):  # pylint: disable=no-self-use

        if volSize < 1000:
            result = '%s MB' % (volSize)
        elif volSize < 1000000:
            result = '%.3f GB' % (float(volSize) / 1000)
        else:
            result = '%.3f TB' % (float(volSize) / 1000000)

        return result
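
A minimal usage sketch of the CLI base class above (the base class name
'TortugaCli', the subclass, and the entry-point wiring are illustrative
assumptions): a concrete command registers its options, implements
runCommand(), and is launched via run(), which maps TortugaException to a
process exit code.

# Hypothetical subclass for illustration; 'TortugaCli' is an assumed name
# for the base class listed above.
class ShowDiskSizeCli(TortugaCli):
    def runCommand(self):
        self.addOption('--size', dest='size', default='10GB',
                       help='Disk size (e.g. 500MB, 10GB, 1.5TB)')

        args = self.parseArgs('Display a disk size in a normalized form.')

        # _parseDiskSize() returns megabytes (decimal units);
        # _getDiskSizeDisplayStr() renders them for display
        print(self._getDiskSizeDisplayStr(self._parseDiskSize(args.size)))


if __name__ == '__main__':
    ShowDiskSizeCli().run()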
Code Example #22
class NodeManager(TortugaObjectManager):
    # pylint: disable=too-many-public-methods

    def __init__(self):
        super(NodeManager, self).__init__()

        self._nodeDbApi = NodeDbApi()
        self._cm = ConfigManager()
        self._bhm = osUtility.getOsObjectFactory().getOsBootHostManager(
            self._cm)
        self._syncApi = SyncApi()
        self._nodesDbHandler = NodesDbHandler()
        self._addHostManager = AddHostManager()
        self._logger = logging.getLogger(NODE_NAMESPACE)

    def __validateHostName(self, hostname: str, name_format: str) -> None:
        """
        Raises:
            ConfigurationError
        """

        bWildcardNameFormat = (name_format == '*')

        if hostname and not bWildcardNameFormat:
            # Host name specified, but hardware profile does not
            # allow setting the host name
            raise ConfigurationError(
                'Hardware profile does not allow setting host names'
                ' of imported nodes')
        elif not hostname and bWildcardNameFormat:
            # Host name not specified but hardware profile expects it
            raise ConfigurationError(
                'Hardware profile requires host names to be set')

    def createNewNode(self,
                      session: Session,
                      addNodeRequest: dict,
                      dbHardwareProfile: HardwareProfileModel,
                      dbSoftwareProfile: Optional[SoftwareProfileModel] = None,
                      validateIp: bool = True,
                      bGenerateIp: bool = True,
                      dns_zone: Optional[str] = None) -> NodeModel:
        """
        Convert the addNodeRequest into a Nodes object

        Raises:
            NicNotFound
        """

        self._logger.debug(
            'createNewNode(): session=[%s], addNodeRequest=[%s],'
            ' dbHardwareProfile=[%s], dbSoftwareProfile=[%s],'
            ' validateIp=[%s], bGenerateIp=[%s]' %
            (id(session), addNodeRequest, dbHardwareProfile.name,
             dbSoftwareProfile.name if dbSoftwareProfile else '(none)',
             validateIp, bGenerateIp))

        hostname = addNodeRequest['name'] \
            if 'name' in addNodeRequest else None

        # Ensure no conflicting options (ie. specifying host name for
        # hardware profile in which host names are generated)
        self.__validateHostName(hostname, dbHardwareProfile.nameFormat)

        node = NodeModel(name=hostname)

        if 'rack' in addNodeRequest:
            node.rack = addNodeRequest['rack']

        node.addHostSession = addNodeRequest['addHostSession']

        # Complete initialization of new node record
        nic_defs = addNodeRequest['nics'] \
            if 'nics' in addNodeRequest else []

        AddHostServerLocal().initializeNode(session,
                                            node,
                                            dbHardwareProfile,
                                            dbSoftwareProfile,
                                            nic_defs,
                                            bValidateIp=validateIp,
                                            bGenerateIp=bGenerateIp,
                                            dns_zone=dns_zone)

        # Set hardware profile of new node
        node.hardwareprofile = dbHardwareProfile

        node.softwareprofile = dbSoftwareProfile

        # Return the new node
        return node

    def getNode(self, session: Session, name, optionDict: OptionDict = None) \
            -> Node:
        """
        Get node by name

        Raises:
            NodeNotFound
        """

        return self.__populate_nodes(session, [
            self._nodeDbApi.getNode(
                session, name, optionDict=get_default_relations(optionDict))
        ])[0]

    def getNodeById(self,
                    session: Session,
                    nodeId: int,
                    optionDict: OptionDict = None) -> Node:
        """
        Get node by node id

        Raises:
            NodeNotFound
        """

        return self.__populate_nodes(session, [
            self._nodeDbApi.getNodeById(
                session,
                int(nodeId),
                optionDict=get_default_relations(optionDict))
        ])[0]

    def getNodeByIp(self,
                    session: Session,
                    ip: str,
                    optionDict: Dict[str, bool] = None) -> Node:
        """
        Get node by IP address

        Raises:
            NodeNotFound
        """

        return self.__populate_nodes(session, [
            self._nodeDbApi.getNodeByIp(
                session, ip, optionDict=get_default_relations(optionDict))
        ])[0]

    def getNodeList(self,
                    session,
                    tags=None,
                    optionDict: Optional[OptionDict] = None) -> List[Node]:
        """
        Return all nodes

        """
        return self.__populate_nodes(
            session,
            self._nodeDbApi.getNodeList(
                session,
                tags=tags,
                optionDict=get_default_relations(optionDict)))

    def __populate_nodes(self, session: Session,
                         nodes: List[Node]) -> List[Node]:
        """
        Expand non-database fields in Node objects

        """
        class SoftwareProfileMetadataCache(defaultdict):
            def __missing__(self, key):
                metadata = \
                    SoftwareProfileManager().get_software_profile_metadata(
                        session, key
                    )

                self[key] = metadata

                return metadata

        swprofile_map = SoftwareProfileMetadataCache()

        for node in nodes:
            if not node.getSoftwareProfile():
                continue

            node.getSoftwareProfile().setMetadata(
                swprofile_map[node.getSoftwareProfile().getName()])

        return nodes

    def updateNode(self, session: Session, nodeName: str,
                   updateNodeRequest: dict) -> None:
        """
        Calls updateNode() method of resource adapter
        """

        self._logger.debug('updateNode(): name=[{0}]'.format(nodeName))

        try:
            node = self._nodesDbHandler.getNode(session, nodeName)

            if 'nics' in updateNodeRequest:
                nic = updateNodeRequest['nics'][0]

                if 'ip' in nic:
                    node.nics[0].ip = nic['ip']
                    node.nics[0].boot = True

            # Call resource adapter
            # self._nodesDbHandler.updateNode(session, node, updateNodeRequest)

            adapter = self.__getResourceAdapter(node.hardwareprofile)

            adapter.updateNode(session, node, updateNodeRequest)

            run_post_install = False

            #
            # Capture previous state and node data as dict for firing the
            # event later on
            #
            previous_state = node.state
            node_dict = Node.getFromDbDict(node.__dict__).getCleanDict()

            if 'state' in updateNodeRequest:
                run_post_install = \
                    node.state == state.NODE_STATE_ALLOCATED and \
                    updateNodeRequest['state'] == state.NODE_STATE_PROVISIONED

                node.state = updateNodeRequest['state']
                node_dict['state'] = updateNodeRequest['state']

            session.commit()

            #
            # If the node state has changed, then fire the node state changed
            # event
            #
            if node_dict['state'] != previous_state:
                NodeStateChanged.fire(node=node_dict,
                                      previous_state=previous_state)

            if run_post_install:
                self._logger.debug(
                    'updateNode(): run-post-install for node [{0}]'.format(
                        node.name))

                self.__scheduleUpdate()
        except Exception:
            session.rollback()

            raise

    def updateNodeStatus(self,
                         session: Session,
                         nodeName: str,
                         node_state: Optional[str] = None,
                         bootFrom: int = None):
        """Update node status

        If both 'node_state' and 'bootFrom' are None, this operation will
        update only the 'lastUpdated' timestamp.

        Returns:
            bool indicating whether state and/or bootFrom differed from
            current value
        """

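        # Render bootFrom as a human-readable string for logging purposes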
        value = 'None' if bootFrom is None else \
            '1 (disk)' if int(bootFrom) == 1 else '0 (network)'

        self._logger.debug(
            'updateNodeStatus(): node=[%s], node_state=[%s],'
            ' bootFrom=[%s]', nodeName, node_state, value)

        dbNode = self._nodesDbHandler.getNode(session, nodeName)

        #
        # Capture previous state and node data in dict form for the
        # event later on
        #
        previous_state = dbNode.state
        node_dict = Node.getFromDbDict(dbNode.__dict__).getCleanDict()

        # Bitfield representing node changes (bit 0 = state change,
        # bit 1 = bootFrom change)
        changed = 0

        if node_state is not None and node_state != dbNode.state:
            # 'state' changed
            changed |= 1

        if bootFrom is not None and bootFrom != dbNode.bootFrom:
            # 'bootFrom' changed
            changed |= 2

        if changed:
            # Create custom log message
            msg = 'Node [%s] state change:' % (dbNode.name)

            if changed & 1:
                msg += ' state: [%s] -> [%s]' % (dbNode.state, node_state)

                dbNode.state = node_state
                node_dict['state'] = node_state

            if changed & 2:
                msg += ' bootFrom: [%d] -> [%d]' % (dbNode.bootFrom, bootFrom)

                dbNode.bootFrom = bootFrom

            self._logger.info(msg)
        else:
            self._logger.info('Updated timestamp for node [%s]' %
                              (dbNode.name))

        dbNode.lastUpdate = time.strftime('%Y-%m-%d %H:%M:%S',
                                          time.localtime(time.time()))

        result = bool(changed)

        # Only change local boot configuration if the hardware profile is
        # not marked as 'remote' and we're not acting on the installer
        # node.
        if dbNode.softwareprofile and \
                dbNode.softwareprofile.type != 'installer' and \
                dbNode.hardwareprofile.location != 'remote':
            # update local boot configuration for on-premise nodes
            self._bhm.writePXEFile(session, dbNode, localboot=bootFrom)

        session.commit()

        #
        # If the node state has changed, fire the node state changed
        # event
        #
        if node_dict['state'] != previous_state:
            NodeStateChanged.fire(node=node_dict,
                                  previous_state=previous_state)

        return result

    def __process_nodeErrorDict(self, nodeErrorDict):
        result = {}
        nodes_deleted = []

        for key, nodeList in nodeErrorDict.items():
            result[key] = [dbNode.name for dbNode in nodeList]

            if key == 'NodesDeleted':
                for node in nodeList:
                    node_deleted = {
                        'name': node.name,
                        'hardwareprofile': node.hardwareprofile.name,
                        'addHostSession': node.addHostSession,
                    }

                    if node.softwareprofile:
                        node_deleted['softwareprofile'] = \
                            node.softwareprofile.name

                    nodes_deleted.append(node_deleted)

        return result, nodes_deleted

    def deleteNode(self, session, nodespec: str, force: bool = False):
        """
        Delete node by nodespec

        Raises:
            NodeNotFound
        """

        kitmgr = KitActionsManager()
        kitmgr.session = session

        try:
            nodes = self._nodesDbHandler.expand_nodespec(
                session, nodespec, include_installer=False)
            if not nodes:
                raise NodeNotFound('No nodes matching nodespec [%s]' %
                                   (nodespec))

            self.__validate_delete_nodes_request(nodes, force)

            self.__preDeleteHost(kitmgr, nodes)

            nodeErrorDict = self.__delete_node(session, nodes)

            # REALLY!?!? Convert a list of Nodes objects into a list of
            # node names so we can report the list back to the end-user.
            # This needs to be FIXED!

            result, nodes_deleted = self.__process_nodeErrorDict(nodeErrorDict)

            session.commit()

            # ============================================================
            # Perform actions *after* node deletion(s) have been committed
            # to database.
            # ============================================================

            self.__postDeleteHost(kitmgr, nodes_deleted)

            addHostSessions = set(
                [tmpnode['addHostSession'] for tmpnode in nodes_deleted])

            if addHostSessions:
                self._addHostManager.delete_sessions(addHostSessions)

            for nodeName in result['NodesDeleted']:
                # Remove the Puppet cert
                self._bhm.deletePuppetNodeCert(nodeName)

                self._bhm.nodeCleanup(nodeName)

                self._logger.info('Node [%s] deleted' % (nodeName))

            # Schedule a cluster update
            self.__scheduleUpdate()

            return result
        except Exception:
            session.rollback()

            raise

    def __validate_delete_nodes_request(self, nodes: List[NodeModel],
                                        force: bool):
        """
        Raises:
            DeleteNodeFailed
        """

        swprofile_distribution: Dict[SoftwareProfileModel, int] = {}

        for node in nodes:
            if node.softwareprofile not in swprofile_distribution:
                swprofile_distribution[node.softwareprofile] = 0

            swprofile_distribution[node.softwareprofile] += 1

        errors: List[str] = []

        for software_profile, num_nodes_deleted in \
                swprofile_distribution.items():
            if software_profile.lockedState == 'HardLocked':
                errors.append(
                    'Nodes cannot be deleted from hard locked software'
                    f' profile [{software_profile.name}]')

                continue

            if software_profile.minNodes and \
                    len(software_profile.nodes) - num_nodes_deleted < \
                        software_profile.minNodes:
                if force and software_profile.lockedState == 'SoftLocked':
                    # allow deletion of nodes when force is set and profile
                    # is soft locked
                    continue

                # do not allow number of software profile nodes to drop
                # below configured minimum
                errors.append(
                    'Software profile [{}] requires minimum of {} nodes;'
                    ' denied request to delete {} node(s)'.format(
                        software_profile.name, software_profile.minNodes,
                        num_nodes_deleted))

                continue

            if software_profile.lockedState == 'SoftLocked' and not force:
                errors.append(
                    'Nodes cannot be deleted from soft locked software'
                    f' profile [{software_profile.name}]')

        if errors:
            raise OperationFailed('\n'.join(errors))

    def __delete_node(self, session: Session, dbNodes: List[NodeModel]) \
            -> Dict[str, List[NodeModel]]:
        """
        Raises:
            DeleteNodeFailed
        """

        result: Dict[str, list] = {
            'NodesDeleted': [],
            'DeleteNodeFailed': [],
            'SoftwareProfileLocked': [],
            'SoftwareProfileHardLocked': [],
        }

        nodes: Dict[HardwareProfileModel, List[NodeModel]] = {}
        events_to_fire: List[dict] = []

        #
        # Mark node states as deleted in the database
        #
        for dbNode in dbNodes:
            #
            # Capture previous state and node data as a dict for firing
            # the event later on
            #
            event_data = {
                'previous_state': dbNode.state,
                'node': Node.getFromDbDict(dbNode.__dict__).getCleanDict()
            }

            dbNode.state = state.NODE_STATE_DELETED
            event_data['node']['state'] = 'Deleted'

            events_to_fire.append(event_data)

            if dbNode.hardwareprofile not in nodes:
                nodes[dbNode.hardwareprofile] = [dbNode]
            else:
                nodes[dbNode.hardwareprofile].append(dbNode)

        session.commit()

        #
        # Fire node state change events
        #
        for event in events_to_fire:
            NodeStateChanged.fire(node=event['node'],
                                  previous_state=event['previous_state'])

        #
        # Call resource adapter with batch(es) of node lists keyed on
        # hardware profile.
        #
        for hwprofile, hwprofile_nodes in nodes.items():
            # Get the ResourceAdapter
            adapter = self.__get_resource_adapter(session, hwprofile)

            # Call the resource adapter
            adapter.deleteNode(hwprofile_nodes)

            # Iterate over all nodes in hardware profile, completing the
            # delete operation.
            for dbNode in hwprofile_nodes:
                for tag in dbNode.tags:
                    if len(tag.nodes) == 1 and \
                            not tag.softwareprofiles and \
                            not tag.hardwareprofiles:
                        session.delete(tag)

                # Delete the Node
                self._logger.debug('Deleting node [%s]' % (dbNode.name))

                session.delete(dbNode)

                result['NodesDeleted'].append(dbNode)

        return result

    def __get_resource_adapter(self, session: Session,
                               hardwareProfile: HardwareProfileModel):
        """
        Raises:
            OperationFailed
        """

        if not hardwareProfile.resourceadapter:
            raise OperationFailed(
                'Hardware profile [%s] does not have an associated'
                ' resource adapter' % (hardwareProfile.name))

        adapter = resourceAdapterFactory.get_api(
            hardwareProfile.resourceadapter.name)

        adapter.session = session

        return adapter

    def __process_delete_node_result(self, nodeErrorDict):
        # REALLY!?!? Convert a list of Nodes objects into a list of
        # node names so we can report the list back to the end-user.
        # This needs to be FIXED!

        result = {}
        nodes_deleted = []

        for key, nodeList in nodeErrorDict.items():
            result[key] = [dbNode.name for dbNode in nodeList]

            if key == 'NodesDeleted':
                for node in nodeList:
                    node_deleted = {
                        'name': node.name,
                        'hardwareprofile': node.hardwareprofile.name,
                    }

                    if node.softwareprofile:
                        node_deleted['softwareprofile'] = \
                            node.softwareprofile.name

                    nodes_deleted.append(node_deleted)

        return result, nodes_deleted

    def __preDeleteHost(self, kitmgr: KitActionsManager, nodes):
        self._logger.debug('__preDeleteHost(): nodes=[%s]' %
                           (' '.join([node.name for node in nodes])))

        for node in nodes:
            kitmgr.pre_delete_host(
                node.hardwareprofile.name,
                node.softwareprofile.name if node.softwareprofile else None,
                nodes=[node.name])

    def __postDeleteHost(self, kitmgr, nodes_deleted):
        # 'nodes_deleted' is a list of dicts of the following format:
        #
        # {
        #     'name': 'compute-01',
        #     'softwareprofile': 'Compute',
        #     'hardwareprofile': 'LocalIron',
        # }
        #
        # if the node does not have an associated software profile, the
        # dict does not contain the key 'softwareprofile'.

        self._logger.debug('__postDeleteHost(): nodes_deleted=[%s]' %
                           (nodes_deleted))

        if not nodes_deleted:
            self._logger.debug('No nodes deleted in this operation')

            return

        for node_dict in nodes_deleted:
            kitmgr.post_delete_host(node_dict['hardwareprofile'],
                                    node_dict['softwareprofile'] if
                                    'softwareprofile' in node_dict else None,
                                    nodes=[node_dict['name']])

    def __scheduleUpdate(self):
        self._syncApi.scheduleClusterUpdate()

    def getInstallerNode(self,
                         session,
                         optionDict: Optional[OptionDict] = None):
        return self._nodeDbApi.getNode(
            session,
            self._cm.getInstaller(),
            optionDict=get_default_relations(optionDict))

    def getProvisioningInfo(self, session: Session, nodeName):
        return self._nodeDbApi.getProvisioningInfo(session, nodeName)

    def startupNode(self,
                    session,
                    nodespec: str,
                    remainingNodeList: List[NodeModel] = None,
                    bootMethod: str = 'n') -> None:
        """
        Raises:
            NodeNotFound
        """

        try:
            nodes = self._nodesDbHandler.expand_nodespec(session, nodespec)

            if not nodes:
                raise NodeNotFound('No matching nodes for nodespec [%s]' %
                                   (nodespec))

            # Break list of nodes into dict keyed on hardware profile
            nodes_dict = self.__processNodeList(nodes)

            for dbHardwareProfile, detailsDict in nodes_dict.items():
                # Get the ResourceAdapter
                adapter = self.__getResourceAdapter(dbHardwareProfile)

                # Call startup action extension
                adapter.startupNode(detailsDict['nodes'],
                                    remainingNodeList=remainingNodeList or [],
                                    tmpBootMethod=bootMethod)

            session.commit()
        except TortugaException:
            session.rollback()
            raise
        except Exception as ex:
            session.rollback()
            self._logger.exception(str(ex))
            raise

    def shutdownNode(self, session, nodespec: str, bSoftShutdown: bool = False) \
            -> None:
        """
        Raises:
            NodeNotFound
        """

        try:
            nodes = self._nodesDbHandler.expand_nodespec(session, nodespec)

            if not nodes:
                raise NodeNotFound('No matching nodes for nodespec [%s]' %
                                   (nodespec))

            d = self.__processNodeList(nodes)

            for dbHardwareProfile, detailsDict in d.items():
                # Get the ResourceAdapter
                adapter = self.__getResourceAdapter(dbHardwareProfile)

                # Call shutdown action extension
                adapter.shutdownNode(detailsDict['nodes'], bSoftShutdown)

            session.commit()
        except TortugaException:
            session.rollback()
            raise
        except Exception as ex:
            session.rollback()
            self._logger.exception(str(ex))
            raise

    def rebootNode(self,
                   session,
                   nodespec: str,
                   bSoftReset: bool = False,
                   bReinstall: bool = False) -> None:
        """
        Raises:
            NodeNotFound
        """

        nodes = self._nodesDbHandler.expand_nodespec(session, nodespec)
        if not nodes:
            raise NodeNotFound('No nodes matching nodespec [%s]' % (nodespec))

        if bReinstall:
            for dbNode in nodes:
                self._bhm.setNodeForNetworkBoot(session, dbNode)

        for dbHardwareProfile, detailsDict in \
                self.__processNodeList(nodes).items():
            # iterate over hardware profile/nodes dict to reboot each
            # node
            adapter = self.__getResourceAdapter(dbHardwareProfile)

            # Call reboot action extension
            adapter.rebootNode(detailsDict['nodes'], bSoftReset)

        session.commit()

    def getNodesByNodeState(self, session, node_state: str,
                            optionDict: Optional[OptionDict] = None) \
            -> TortugaObjectList:
        """
        Get nodes by state
        """

        return self.__populate_nodes(
            session,
            self._nodeDbApi.getNodesByNodeState(
                session,
                node_state,
                optionDict=get_default_relations(optionDict)))

    def getNodesByNameFilter(self, session, nodespec: str,
                             optionDict: OptionDict = None,
                             include_installer: Optional[bool] = True) \
            -> TortugaObjectList:
        """
        Return TortugaObjectList of Node objects matching nodespec
        """

        return self.__populate_nodes(
            session,
            self._nodeDbApi.getNodesByNameFilter(
                session,
                nodespec,
                optionDict=get_default_relations(optionDict),
                include_installer=include_installer))

    def getNodesByAddHostSession(self, session, addHostSession: str,
                                 optionDict: OptionDict = None) \
            -> TortugaObjectList:
        """
        Return TortugaObjectList of Node objects matching add host session
        """

        return self.__populate_nodes(
            session,
            self._nodeDbApi.getNodesByAddHostSession(
                session,
                addHostSession,
                optionDict=get_default_relations(optionDict)))

    def __processNodeList(self, dbNodes: List[NodeModel]) \
            -> Dict[HardwareProfileModel, Dict[str, list]]:
        """
        Returns dict indexed by hardware profile, each with a list of
        nodes in the hardware profile
        """

        d: Dict[HardwareProfileModel, Dict[str, list]] = {}

        for dbNode in dbNodes:
            if dbNode.hardwareprofile not in d:
                d[dbNode.hardwareprofile] = {
                    'nodes': [],
                }

            d[dbNode.hardwareprofile]['nodes'].append(dbNode)

        return d

    def __getResourceAdapter(self, hardwareProfile: HardwareProfileModel):
        """
        Raises:
            OperationFailed
        """

        if not hardwareProfile.resourceadapter:
            raise OperationFailed(
                'Hardware profile [%s] does not have an associated'
                ' resource adapter' % (hardwareProfile.name))

        return resourceAdapterFactory.get_api(
            hardwareProfile.resourceadapter.name)
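
A hedged usage sketch of NodeManager: the DbManager import path matches the
one used in initDatabase() in the next example; the node name, the Node
accessor getName(), and the literal state string are illustrative
assumptions.

# Illustrative driver code; 'Provisioned' is an assumed state value
# (updateNode() compares against state.NODE_STATE_PROVISIONED).
from tortuga.db.dbManager import DbManager

dbm = DbManager()
session = dbm.openSession()

try:
    mgr = NodeManager()

    # Fetch a node by name (raises NodeNotFound if there is no match)
    node = mgr.getNode(session, 'compute-01.private')
    print(node.getName())

    # Update the node state; NodeStateChanged fires if the state differs
    mgr.updateNode(session, 'compute-01.private',
                   {'state': 'Provisioned'})
finally:
    dbm.closeSession()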
Code Example #23
class TortugaDeployer:
    # pylint: disable=too-many-public-methods

    def __init__(self, logger, cmdline_options=None):
        self._cm = ConfigManager()

        self._logger = logger

        self._osObjectFactory = osUtility.getOsObjectFactory()

        self._settings = self.__load_settings(cmdline_options)

        self._settings['installer_software_profile'] = 'Installer'
        self._settings['installer_hardware_profile'] = 'Installer'

        self._settings['eulaAccepted'] = False

        self._settings['fqdn'] = getfqdn()

        self._settings['osInfo'] = getOsInfo()

        self._forceCleaning = False
        self._depotCreated = False

        fsManager = self._osObjectFactory.getOsFileSystemManager()

        self._lockFilePath = os.path.join(fsManager.getOsLockFilePath(),
                                          'tortuga-setup')

        langdomain = 'tortuga-config'

        localedir = os.path.join(self._cm.getRoot(), 'share', 'locale')

        if not os.path.exists(localedir):
            # Try the system path
            localedir = '/usr/share/locale'

        gettext.bindtextdomain(langdomain, localedir)
        gettext.textdomain(langdomain)
        self.gettext = gettext.gettext
        self._ = self.gettext

        self._logger.info('Detected OS: [%s]', self._settings['osInfo'])

    def __load_settings(self, cmdline_options):
        settings = dict(list(cmdline_options.items()))

        default_cfgfile = os.path.join(self._cm.getKitConfigBase(),
                                       'tortuga.ini')

        if 'inifile' in cmdline_options and \
                cmdline_options['inifile'] != default_cfgfile:
            # Copy configuration specified on command-line to
            # $TORTUGA_ROOT/config/tortuga.ini

            self._logger.info('Using configuration file [%s]' %
                              (settings['inifile']))

            self._logger.info('Copying configuration to [%s]' %
                              (default_cfgfile))

            if os.path.exists(default_cfgfile):
                # Back up existing 'tortuga.ini'
                shutil.move(default_cfgfile, default_cfgfile + '.orig')

            shutil.copyfile(cmdline_options['inifile'], default_cfgfile)

        settings['inifile'] = default_cfgfile

        cfg = configparser.ConfigParser()
        cfg.read(settings['inifile'])

        settings['timezone'] = ''
        settings['utc'] = False
        settings['keyboard'] = 'us'
        settings['language'] = 'en_US.UTF-8'

        # Get database setting
        value = cfg.get('database', 'engine') \
            if cfg.has_section('database') and \
            cfg.has_option('database', 'engine') else None

        if value and value not in ('mysql', 'sqlite'):
            raise InvalidArgument('Unsupported database engine [%s]' % (value))

        settings['database'] = {'engine': value if value else 'sqlite'}

        # Get depot directory
        if cfg.has_section('installer') and \
                cfg.has_option('installer', 'depotpath'):
            settings['depotpath'] = cfg.get('installer', 'depotpath')

            # For consistency's sake...
            self._cm.setDepotDir(settings['depotpath'])
        else:
            settings['depotpath'] = self._cm.getDepotDir()

        # Internal web port
        settings['intWebPort'] = cfg.getint('installer', 'intWebPort') \
            if cfg.has_section('installer') and \
            cfg.has_option('installer', 'intWebPort') else \
            self._cm.getIntWebPort()

        self._cm.setIntWebPort(settings['intWebPort'])

        # Admin port
        settings['adminPort'] = cfg.getint('installer', 'adminPort') \
            if cfg.has_section('installer') and \
            cfg.has_option('installer', 'adminPort') else \
            self._cm.getAdminPort()

        self._cm.setAdminPort(settings['adminPort'])

        # IntWebServicePort
        settings['intWebServicePort'] = cfg.getint(
            'installer', 'intWebServicePort') \
            if cfg.has_section('installer') and \
            cfg.has_option('installer', 'intWebServicePort') else \
            self._cm.getIntWebServicePort()

        self._cm.setIntWebServicePort(settings['intWebServicePort'])

        return settings

    def _get_setting(self, name, section=None):
        if section and section in self._settings:
            return self._settings[section][name] \
                if name in self._settings[section] else None

        return self._settings[name] if name in self._settings else None

    def eout(self, message, *args):
        """
        Output messages to STDERR with Internationalization.
        Additional arguments will be used to substitute variables in the
        message output
        """
        if args:
            mesg = self.gettext(message) % args
        else:
            mesg = self.gettext(message)
        sys.stderr.write(mesg)

    def out(self, message, *args):
        """
        Output messages to STDOUT with Internationalization.
        Additional arguments will be used to substitute variables in the
        message output
        """
        if args:
            mesg = self.gettext(message) % args
        else:
            mesg = self.gettext(message)
        sys.stdout.write(mesg)

    def prompt(self,
               default_value,
               auto_answer_default_value,
               text_list,
               question,
               tag=None,
               section=None,
               isPassword=False):
        """Generic user prompting routine"""

        resp_value = None

        bDefaults = self._settings['defaults']

        if tag:
            resp_value = self._get_setting(tag, section=section)
            if not resp_value and bDefaults:
                # Use the default value
                default_value = auto_answer_default_value
        elif bDefaults:
            default_value = auto_answer_default_value

        if text_list:
            self.out('\n')

            for line in text_list:
                self.out(line + '\n')

        if default_value and not isPassword:
            self.out('\n%s [%s]: ' % (question, default_value))
        else:
            self.out('\n%s: ' % (question))

        if bDefaults or resp_value:
            if resp_value:
                value = resp_value
            else:
                value = auto_answer_default_value
            if not isPassword:
                self.out('%s\n' % value)
        else:
            if isPassword:
                import getpass
                value = getpass.getpass('').strip()
            else:
                value = input('').strip()
            if not value:
                value = default_value

        return value

    def checkPreInstallConfiguration(self):  # pylint: disable=no-self-use
        """
        Raises:
            InvalidMachineConfiguration
        """

        # Check for existence of /etc/hosts
        if not os.path.exists('/etc/hosts'):
            raise InvalidMachineConfiguration(
                '/etc/hosts file is missing. Unable to proceed with'
                ' installation')

    def preInstallPrep(self):
        bAcceptEula = self._settings['acceptEula']

        license_file = '%s/LICENSE' % (self._cm.getEtcDir())

        print()

        if bAcceptEula:
            cmd = 'cat %s\n' % (license_file)
            os.system(cmd)
        else:
            cmd = 'more %s\n' % (license_file)

            print("To install Tortuga you must read and agree to "
                  "the following EULA.")

            print("Press 'Enter' to continue...")

            input('')
            os.system(cmd)
            print()
            while True:
                print('Do you agree? [Yes / No]', end=' ')
                answer = input('').lower()

                if answer not in ['yes', 'no', 'y', 'n']:
                    print('Invalid response. Please respond \'Yes\''
                          ' or \'No\'')

                    continue
                break
            if answer[0] == 'n':
                raise EulaAcceptanceRequired(
                    'You must accept the EULA to install Tortuga')

        self._settings['eulaAccepted'] = \
            'Accepted on: %s local machine time' % (time.ctime())

        # Restore resolv.conf if we have a backup
        if osUtility.haveBackupFile('/etc/resolv.conf'):
            osUtility.restoreFile('/etc/resolv.conf')

    def _runCommandWithSpinner(self, cmd, statusMsg, logFileName):
        self._logger.debug(
            '_runCommandWithSpinner(cmd=[%s], logFileName=[%s])' %
            (cmd, logFileName))

        self.out(statusMsg + '  ')

        # Open the log file in unbuffered mode
        fpOut = open(logFileName, 'wb', 0)

        p = subprocess.Popen(cmd,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             bufsize=1,
                             close_fds=True)

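        # Draw a spinner: one glyph per line of child output, erasing the
        # previous glyph with a backspace on each iteration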
        for i in itertools.cycle(['/', '-', '\\', '|']):
            buf = p.stdout.readline()

            sys.stdout.write('\b')
            sys.stdout.flush()

            if not buf:
                break

            fpOut.write(buf)

            sys.stdout.write(i)
            sys.stdout.flush()

        sys.stdout.write(' ')
        self.out('done.\n')

        retval = p.wait()

        fpOut.close()

        return retval

    def puppetApply(self):
        '''
        Complete the installer configuration by running against the
        previously installed Puppet master.  Display a spinner while Puppet
        runs.
        '''

        self._logger.info('Running Puppet for post-configuration')

        logFileName = '/tmp/tortuga_setup.log'

        cmd = ('/opt/puppetlabs/bin/puppet agent --color false --onetime'
               ' --no-daemonize --detailed-exitcodes --verbose 2>&1')

        retval = self._runCommandWithSpinner(
            cmd,
            statusMsg=('\nCompleting installer configuration.'
                       ' Please wait...'),
            logFileName=logFileName)

        if retval not in (0, 2):
            # Puppet can return a non-zero return code, even if it was
            # successful.

            errmsg = 'Puppet post-configuration failed (see log file %s)' % (
                logFileName)

            self._logger.error(errmsg)

            self.out(errmsg + '\n')

            raise Exception(errmsg)

        self._logger.info('Puppet post-configuration completed')

    def startSetup(self):
        # If force was specified clean first and then run...
        bForce = self._settings['force']

        if bForce:
            self._forceCleaning = True

            self.out('--force option specified. Cleaning previous'
                     ' installation.\n')

            self.cleanup()

            self._forceCleaning = False

        if os.path.exists(self._lockFilePath):
            raise SoftwareAlreadyDeployed(
                "\ntortuga-setup has already been run.\n\n"
                "Use --force option to force reinstallation.")

        open(self._lockFilePath, 'w').close()

        self.out('Tortuga Setup\n')

    def getClusterConfig(self):
        sysManager = self._osObjectFactory.getOsSysManager()

        self._settings['timezone'], self._settings['utc'] = \
            sysManager.findTimeInfo()

        self._settings['keyboard'] = sysManager.findKeyboard()

        self._settings['language'] = sysManager.findLanguage()

        self.out(_('\nStarting Tortuga setup...\n'))

        # Ports configuration
        if not self._settings['defaults']:
            intWebPort, adminPort, intWebServicePort = self.configurePorts()

            self._cm.setIntWebPort(intWebPort)
            self._cm.setAdminPort(adminPort)
            self._cm.setIntWebServicePort(intWebServicePort)

            self._settings['intWebPort'] = intWebPort
            self._settings['adminPort'] = adminPort
            self._settings['intWebServicePort'] = intWebServicePort

        # Admin username and password
        self._settings['adminUsername'], \
            self._settings['adminPassword'] = self.promptForAdminCredentials()

    def prepDepot(self):
        depotpath = None

        if not self._settings['defaults']:
            self.out(
                _('Tortuga requires a directory for storage of OS'
                  ' distribution media and other files required for'
                  ' node provisioning.\n\n'))

        while not depotpath:
            if self._settings['defaults']:
                response = self._settings['depotpath']
            else:
                try:
                    response = input(
                        'Please enter a depot path (Ctrl-C to interrupt)'
                        ' [%s]: ' % (self._settings['depotpath']))
                except KeyboardInterrupt:
                    raise InvalidArgument(_('Aborted by user.'))

                if not response:
                    response = self._settings['depotpath']

            if not response.startswith('/'):
                errmsg = 'Depot path must be fully-qualified'

                if not self._settings['defaults']:
                    self.out('Error: %s\n' % (errmsg))

                    continue

                raise InvalidArgument(errmsg)

            if response == '/':
                errmsg = 'Depot path cannot be system root directory'

                if not self._settings['defaults']:
                    self.out(_('Error: %s\n' % (errmsg)))

                    continue

                raise InvalidArgument(errmsg)

            if os.path.exists(response):
                if not self._settings['force']:
                    if not self._settings['defaults']:
                        self.out(
                            _('Directory [%s] already exists. Do you wish to'
                              ' remove it [N/y]? ') % (response))

                        remove_response = input('')

                        if not remove_response or \
                                remove_response[0].lower() == 'n':
                            continue_response = input(
                                'Do you wish to continue [N/y]? ')

                            if continue_response and \
                                    continue_response[0].lower() == 'y':
                                continue

                            raise InvalidArgument(_('Aborted by user.'))
                    else:
                        raise InvalidArgument(
                            _('Existing depot directory [%s] will not be'
                              ' removed.') % (response))
                else:
                    self.out(
                        _('\nRemoving existing depot directory [%s]... ') %
                        (response))

                    depotpath = response

                    tortugaSubprocess.executeCommand('rm -rf %s/*' %
                                                     (depotpath))

                    self.out(_('done.\n'))
            else:
                depotpath = response

        self._settings['depotpath'] = depotpath

        self._cm.setDepotDir(self._settings['depotpath'])

    def _portPrompt(self, promptStr, defaultValue):
        while True:
            tmpPort = self.prompt(defaultValue, defaultValue, None, promptStr)

            try:
                tmpPort = int(tmpPort)

                if tmpPort <= 0 or tmpPort > 65535:
                    raise ValueError('Port must be between 1 and 65535')

                # Success
                break
            except ValueError as ex:
                self.out('Error: ' + str(ex) + '\n')

        return tmpPort

    def configurePorts(self):
        reconfigurePorts = self.prompt(
            'N', 'N', [
                'The following ports will be used by Tortuga:',
                '', '    +-----------------------------+-------+',
                '    | Description                 | Port  |',
                '    +-----------------------------+-------+',
                '    | Internal webserver          | %5d |' %
                (self._settings['intWebPort']),
                '    | SSL webservice daemon       | %5d |' %
                (self._settings['adminPort']),
                '    | Local webservice daemon     | %5d |' %
                (self._settings['intWebServicePort']),
                '    +-----------------------------+-------+'
            ], 'Do you wish to change the default configuration [N/y]?')

        if not reconfigurePorts or reconfigurePorts[0].lower() == 'n':
            return self._settings['intWebPort'], \
                self._settings['adminPort'], \
                self._settings['intWebServicePort']

        # Internal web server port
        intWebPort = self._portPrompt('Enter port for internal webserver',
                                      self._settings['intWebPort'])

        # SSL webservice daemon port
        adminPort = self._portPrompt('Enter port for SSL webservice daemon',
                                     self._settings['adminPort'])

        # Local webservice daemon port
        intWebServicePort = self._portPrompt(
            'Enter port for local webservice daemon',
            self._settings['intWebServicePort'])

        return intWebPort, adminPort, intWebServicePort

    def _removePackageSources(self):
        pkgManager = self._osObjectFactory.getOsPackageManager()
        for pkgSrcName in pkgManager.getPackageSourceNames():
            self._logger.info('Removing package source [%s]' % (pkgSrcName))
            pkgManager.removePackageSource(pkgSrcName)

    def _disableTortugaws(self):
        self.out('  * Disabling Tortuga webservice\n')

        _tortugaWsManager = self._osObjectFactory.getTortugawsManager()
        serviceName = _tortugaWsManager.getServiceName()
        _osServiceManager = getOsObjectFactory().getOsServiceManager()

        try:
            _osServiceManager.stop(serviceName)
        except CommandFailed:
            pass

    def cleanup(self):
        # If possible, remove any package sources we added
        self._removePackageSources()

        osUtility.removeFile(self._lockFilePath)

        osUtility.removeFile(self._cm.getProfileNiiFile())

        # Turn off the webservice daemon
        self._disableTortugaws()

        # Restore resolv.conf
        if osUtility.haveBackupFile('/etc/resolv.conf'):
            osUtility.restoreFile('/etc/resolv.conf')

        # Drop database
        dbManager = self._osObjectFactory.getOsApplicationManager(
            self._settings['database']['engine'])

        try:
            dbSchema = self._cm.getDbSchema()

            self.out('  * Removing database [%s]\n' % (dbSchema))

            dbManager.destroyDb(dbSchema)
        except Exception as ex:  # pylint: disable=broad-except
            self._logger.exception(
                'Could not destroy existing db: {}'.format(ex))

        # Remove DB password file
        osUtility.removeFile(self._cm.getDbPasswordFile())

        # Remove CFM secret
        cfmSecretFile = self._cm.getCfmSecretFile()
        if os.path.exists(cfmSecretFile):
            osUtility.removeFile(self._cm.getCfmSecretFile())

        # Generic cleanup
        osUtility.removeLink('/etc/tortuga-release')

        # Cleanup or remove depot directory
        errmsg = 'Removing contents of [%s]' % (self._settings['depotpath'])

        self._logger.debug(errmsg)

        if self._depotCreated:
            self.out('  * %s\n' % (errmsg))

            osUtility.removeDir(self._settings['depotpath'])
        else:
            if self._settings['depotpath']:
                self.out('  * %s\n' % (errmsg))

                tortugaSubprocess.executeCommand('rm -rf %s/*' %
                                                 (self._settings['depotpath']))

                self.out('\n')

        if not self._forceCleaning:
            self.out('Consult log(s) for further details.\n')

            self._logger.error('Installation failed')

    def runSetup(self):
        """ Installer setup. """
        self.checkPreInstallConfiguration()

        # Do not run cleanup if this fails.
        self.startSetup()

        try:
            self.preInstallPrep()

            self.getClusterConfig()

            self.prepDepot()

            self.preConfig()

            self.pre_init_db()

            self.puppetBootstrap()

            dbm, session = self.initDatabase()

            try:

                self.createAdminUser(session, self._settings['adminUsername'],
                                     self._settings['adminPassword'])

                self.installKits(dbm)

                self.enableComponents(session)
            finally:
                dbm.closeSession()

                self.puppetApply()

            self.out('\nTortuga installation completed successfully!\n\n')

            print('Run \"exec -l $SHELL\" to initialize Tortuga environment\n')
        except Exception:  # pylint: disable=broad-except
            self._logger.exception('Fatal error occurred during setup')

            raise TortugaException('Installation failed')

    def _generate_db_password(self):
        """
        Generate a database password.

        """
        #
        # Because Apache httpd server is not installed at the time this
        # runs, we cannot set the ownership of this file to be 'apache'
        # (which is necessary for the Tortuga webservice).
        #
        # Set ownership of file to root:puppet.
        #
        # When the Puppet bootstrap runs, it changes the ownership to
        # 'apache:puppet' and everybody is happy!
        #
        puppet_user = pwd.getpwnam('puppet')
        gid = puppet_user[3]
        self._generate_password_file(self._cm.getDbPasswordFile(), gid=gid)

    def _generate_redis_password(self):
        """
        Generate a password for Redis.

        """
        #
        # Puppet needs read access to this file so that it can use it for
        # writing the redis config file.
        #
        puppet_user = pwd.getpwnam('puppet')
        gid = puppet_user[3]
        self._generate_password_file(self._cm.getRedisPasswordFile(), gid=gid)

    def _generate_password_file(self,
                                file_name: str,
                                password_length: int = 32,
                                uid: int = 0,
                                gid: int = 0,
                                mode: int = 0o440):
        """
        Generate a password in a file.

        :param file_name:       the name of the file in which the password
                                will be stored
        :param password_length: the length of the password, default = 32
        :param uid:             the uid (owner) of the file, default = 0
        :param gid:             the gid (group) of the file, default = 0
        :param mode:            the file perms, default 0440

        """
        password = self._generate_password(password_length)

        with open(file_name, 'w') as fp:
            fp.write(password)

        os.chown(file_name, uid, gid)
        os.chmod(file_name, mode)

    def _generate_password(self, length: int = 8) -> str:
        """
        Generate a password.

        :param length: the length of the password

        :return:       the generated password

        """
        chars = string.ascii_letters + string.digits

        return ''.join([random.choice(chars) for _ in range(length)])

    def preConfig(self):
        # Create default hieradata directory
        hieraDataDir = '/etc/puppetlabs/code/environments/production/data'
        if not os.path.exists(hieraDataDir):
            os.makedirs(hieraDataDir)

        # Derive host name of puppet master from FQDN
        fqdn = self._settings['fqdn']

        configDict = {
            'version': 5,
            'DNSZone': 'private',
            'puppet_server': fqdn,
            'depot': self._settings['depotpath'],
        }

        with open(os.path.join(hieraDataDir, 'tortuga-common.yaml'),
                  'wb') as fp:
            fp.write(
                yaml.safe_dump(configDict,
                               explicit_start=True,
                               default_flow_style=False).encode())

        self._generate_db_password()
        self._generate_redis_password()

    def pre_init_db(self):
        # If using 'mysql' as the database backend, we need to install the
        # puppetlabs-mysql Puppet module prior to bootstrapping. This used
        # to be done in 'install-tortuga.sh'

        if self._settings['database']['engine'] == 'mysql':
            print('\nUsing MySQL as backing database.')

            puppet_module = 'puppetlabs-mysql'

            logmsg = f'Installing \'{puppet_module}\' module'

            self._logger.debug(logmsg)

            print(f'\n{logmsg}...', end='')

            cmd = ('/opt/puppetlabs/bin/puppet module install'
                   f' --color false {puppet_module}')
            tortugaSubprocess.executeCommand(cmd)

            print('done.')

    def puppetBootstrap(self):
        localPuppetRoot = os.path.join(self._cm.getEtcDir(), 'puppet')

        logFileName = '/tmp/bootstrap.log'

        puppet_server = self._settings['fqdn']

        # Bootstrap using Puppet
        cmd = ('/opt/puppetlabs/bin/puppet apply --verbose'
               ' --detailed-exitcodes'
               ' --execute "class { \'tortuga::installer\':'
               ' puppet_server => \'%s\','
               '}"' % (puppet_server))

        retval = self._runCommandWithSpinner(
            cmd,
            '\nPerforming pre-configuration... Please wait...',
            logFileName=logFileName)

        if retval not in (0, 2):
            # With --detailed-exitcodes, Puppet exits 0 (no changes) or 2
            # (changes applied successfully); any other exit code
            # indicates failure.
            self._logger.debug('Puppet pre-configuration returned non-zero'
                               ' return code [%d]' % (retval))

            errmsg = 'Puppet bootstrap failed (see log file %s)' % (
                logFileName)

            self._logger.error(errmsg)

            raise Exception(errmsg)

        self._logger.debug('Puppet pre-configuration completed')

    def initDatabase(self) -> Tuple[Any, Session]:
        msg = _('Initializing database')

        self._logger.info(msg)

        print_('\n' + msg + '... ', end='')

        # This cannot be a global import since the database configuration
        # may be set in this script.
        from tortuga.db.dbManager import DbManager

        dbm = DbManager()

        # create database
        dbm.init_database()

        session = dbm.openSession()

        # Prime the database previously created as part of the bootstrap
        try:
            dbUtility.primeDb(session, self._settings)

            dbUtility.init_global_parameters(session, self._settings)

            print_(_('done'))

            session.commit()
        except Exception as exc:  # pylint: disable=broad-except
            session.rollback()

            print_(_('failed.'))

            print_(_('Exception raised initializing database:') +
                   ' {0}'.format(exc),
                   file=sys.stderr)

        self._logger.debug('Done initializing database')

        return dbm, session

    def installKits(self, dbm):
        self._logger.info('Installing kits')

        self.out('\n' + _('Installing kits') + '...\n')

        kitApi = KitApi()

        # Iterate over the glob of 'kit-*.tar.bz2'
        kitFileGlob = '%s/kits/kit-*.tar.bz2' % (self._cm.getRoot())

        # Split comma-separated list of kits to skip installing. Sorry, you
        # cannot skip installing the base kit.
        val = self._settings['skip_kits'] \
            if 'skip_kits' in self._settings else ''

        skip_kits = set([
            item for item in val.split(',') if item != 'base']) \
            if val else set()
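
        # For example (editorial note): skip_kits='uge,base' yields
        # {'uge'}; 'base' is always filtered out and installed.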

        for kitPackage in glob.glob(kitFileGlob):
            try:
                kit = get_metadata_from_archive(kitPackage)
            except KitNotFound:
                msg = 'Kit [%s] is malformed/invalid. Skipping.' % (
                    os.path.basename(kitPackage))

                self._logger.error(msg)

                self.out('   %s\n' % (msg))

                continue

            if kit['name'] in skip_kits:
                msg = 'Kit [%s] installation skipped.' % (kit['name'])

                self.out('   %s\n' % (msg))

                self._logger.info(msg)

                continue

            try:
                kitApi.installKitPackage(dbm, kitPackage)
            except EulaAcceptanceRequired:
                msg = 'Kit [%s] requires EULA acceptance. Skipping.' % (
                    kitPackage)

                self.out('   %s\n' % (msg))

                self._logger.info(msg)

                continue

            self.out('   - %s installed.\n' % (kit['name']))

            self._logger.info('Kit [%s] installed' % (kit['name']))

        self._logger.info('Done installing kits')

        load_kits()

    def enableComponents(self, session: Session):
        """
        Raises:
            ConfigurationError
        """

        self._logger.info('Enabling \'installer\' component')

        base_kit = KitApi().getKit(session, 'base')

        enabledComponents = ['installer']

        # get list of components from 'base' kit
        components = [
            c for c in base_kit.getComponentList()
            if c.getName() in enabledComponents
        ]

        installerNode = NodeApi().getInstallerNode(session)

        for component in components:
            SoftwareProfileApi().enableComponent(
                session,
                installerNode.getSoftwareProfile().getName(),
                base_kit.getName(),
                base_kit.getVersion(),
                base_kit.getIteration(),
                component.getName(),
                compVersion=component.getVersion(),
            )

    def promptForAdminCredentials(self):
        # Get admin username and password for use with web service

        if self._settings['defaults']:
            self.out(_('\nUsing default Tortuga admin user name/password.\n'))

            return 'admin', 'password'

        username = password = None

        # Administrator username
        while True:
            username = self.prompt('admin', 'admin', [
                'Enter name for Tortuga admin user.',
                'This user is not associated with any system user.'
            ], 'Admin user name')

            if len(username) > 3:
                break

            self.out('Admin user name must be at least 4 characters.')

        # Administrator password
        while True:
            password = self.prompt('', 'password',
                                   ['Enter password for Tortuga admin user.'],
                                   'Admin password', None, None, True)

            if len(password) < 4:
                self.out('Admin password must be at least 4 characters.')
                continue

            confirmPassword = self.prompt('', 'password',
                                          ['Confirm admin password.'],
                                          'Confirm password', None, None, True)

            if confirmPassword == password:
                self.out('\n')
                break

            self.out('Passwords did not match.')

        return username, password

    def createAdminUser(self, session: Session, username, password):
        msg = _('Adding administrative user')

        self._logger.info(msg)

        self.out('\n' + msg + '... ')

        AdminApi().addAdmin(session,
                            username,
                            password,
                            False,
                            description='Added by tortuga-setup')

        self.out(_('done.') + '\n')
Code example #24
class OsBootHostManagerCommon(OsObjectManager):
    """Methods for manipulating PXE files"""
    def __init__(self):
        OsObjectManager.__init__(self)

        # Cache this for later
        try:
            self.passdata = pwd.getpwnam('apache')
        except KeyError:
            self.passdata = pwd.getpwnam(os.getenv('USER'))

        self.hardwareProfileDbApi = HardwareProfileDbApi()
        self.softwareProfileDbApi = SoftwareProfileDbApi()

        self._nodeApi = nodeApi.NodeApi()

        self._cm = ConfigManager()

    def __getActualSoftwareProfile(self, node, softwareProfileName):
        """
        Determine the actual software profile for the node: the explicitly
        requested profile, the idle profile, or the node's active profile.
        """
        self.getLogger().debug('__getActualSoftwareProfile(): node=[%s],'
                               ' softwareProfileName=[%s]' %
                               (node.name, softwareProfileName))

        softwareProfile = None

        if node.isIdle:
            # Use the software profile that was passed in if there is one,
            if softwareProfileName:
                softwareProfile = self.softwareProfileDbApi.\
                    getSoftwareProfile(softwareProfileName, {'os': True})
            else:
                # ELSE use the default idle software profile
                hardwareProfile = node.getHardwareProfile()

                idleSoftwareProfileId = hardwareProfile.\
                    getIdleSoftwareProfileId()

                if idleSoftwareProfileId:
                    softwareProfile = self.softwareProfileDbApi.\
                        getSoftwareProfileById(idleSoftwareProfileId,
                                               {'os': True})
        else:
            # Use active software profile
            if softwareProfileName is None:
                softwareProfile = node.getSoftwareProfile()
            else:
                softwareProfile = self.softwareProfileDbApi.\
                    getSoftwareProfile(softwareProfileName, {'os': True})

        return softwareProfile

    def deletePuppetNodeCert(self, nodeName):
        # Remove the Puppet certificate when the node is reinstalled

        self.getLogger().debug('deletePuppetNodeCert(node=[%s])' % (nodeName))

        puppetSslDir = '/etc/puppetlabs/puppet/ssl'
        puppetReportDir = '/var/lib/puppet/reports'
        puppetYamlDir = '/var/lib/puppet/yaml'

        filesToRemove = [
            os.path.join(puppetSslDir, 'public_keys/%s.pem' % (nodeName)),
            os.path.join(puppetSslDir, 'ca/signed/%s.pem' % (nodeName)),
            os.path.join(puppetSslDir, 'private_keys/%s.pem' % (nodeName)),
            os.path.join(puppetSslDir, 'certs/%s.pem' % (nodeName)),
            os.path.join(puppetYamlDir, 'node/%s.yaml' % (nodeName)),
            os.path.join(puppetYamlDir, 'facts/%s.yaml' % (nodeName)),
        ]

        for fn in filesToRemove:
            try:
                os.unlink(fn)
            except OSError as exc:
                if exc.errno != 2:  # 2 == ENOENT; file already gone
                    self.getLogger().error(
                        'Error attempting to remove %s (reason: %s)' %
                        (fn, exc))

        fn = os.path.join(puppetReportDir, nodeName)
        try:
            shutil.rmtree(fn)
        except OSError as exc:
            if exc.errno != 2:  # 2 == ENOENT; directory already gone
                self.getLogger().error(
                    'Error attempting to remove %s (reason: %s)' % (fn, exc))

    def nodeCleanup(self, nodeName):
        """
        Remove files related to the node
        """

        # Remove 'private' directory
        private_dir = os.path.join(self._cm.getRoot(), 'private', nodeName)

        if os.path.exists(private_dir):
            shutil.rmtree(private_dir)

    def addDhcpLease(self, node, nic):
        # Add DHCP lease to DHCP server
        pass

    def removeDhcpLease(self, nodeName):
        # Remove the DHCP lease from the DHCP server.  This will be
        # a no-op on any platform that doesn't support the operation
        # (ie. any platform not running ISC DHCPD)
        pass

    def setNodeForNetworkBoot(self, dbNode):
        # Update node status to "Expired" and boot from network
        dbNode.state = 'Expired'
        dbNode.bootFrom = 0

        self.deletePuppetNodeCert(dbNode.name)

        # Write the updated file
        self.writePXEFile(dbNode)
Code example #25
File: tortugaCli.py, Project: ilumb/tortuga
class TortugaCli(object):
    """
    Base tortuga command line interface class.
    """
    def __init__(self, validArgCount=0):
        self._logger = logging.getLogger('tortuga.cli.%s' %
                                         (self.__class__.__name__))
        self._logger.addHandler(logging.NullHandler())

        self._parser = OptionParser(add_help_option=False)
        self._options = None
        self._args = []
        self._validArgCount = validArgCount
        self._username = None
        self._password = None
        self._optionGroupDict = {}
        self._cm = ConfigManager()

        self.__initializeLocale()

        commonGroup = _('Common Tortuga Options')
        self.addOptionGroup(commonGroup, None)

        self.addOptionToGroup(commonGroup,
                              '-h',
                              '--help',
                              action='help',
                              help=_('show this help message and exit'))

        self.addOptionToGroup(commonGroup,
                              '-?',
                              '',
                              action='help',
                              help=_('show this help message and exit'))

        self.addOptionToGroup(commonGroup,
                              '-V',
                              '',
                              action='store_true',
                              dest='cmdVersion',
                              default=False,
                              help=_('print version and exit'))

        self.addOptionToGroup(
            commonGroup,
            '-d',
            '--debug',
            dest='consoleLogLevel',
            help=_('set debug level; valid values are: critical, error,'
                   ' warning, info, debug'))

        self.addOptionToGroup(
            commonGroup,
            '--username',
            dest='username',
            help=_('Credential to use when not running as root on the'
                   ' installer.'))

        self.addOptionToGroup(
            commonGroup,
            '--password',
            dest='password',
            help=_('Credential to use when not running as root on the'
                   ' installer.'))

    def getLogger(self):
        """ Get logger for this class. """
        return self._logger

    def __initializeLocale(self):
        """Initialize the gettext domain """
        langdomain = 'tortugaStrings'

        # Locate the Internationalization stuff
        localedir = '../share/locale' \
            if os.path.exists('../share/locale') else \
            os.path.join(self._cm.getRoot(), 'share/locale')

        gettext.install(langdomain, localedir)

    def getParser(self):
        """ Get parser for this class. """
        return self._parser

    def addOption(self, *args, **kwargs):
        """ Add option. """
        self._parser.add_option(*args, **kwargs)

    def addOptionToGroup(self, groupName, *args, **kwargs):
        """
        Add option for the given group name.
        Group should be created using addOptionGroup().
        """
        group = self._optionGroupDict.get(groupName)
        group.add_option(*args, **kwargs)

    def addOptionGroup(self, groupName, desc):
        """ Add option group. """
        group = OptionGroup(self._parser, groupName, desc)
        self._parser.add_option_group(group)
        self._optionGroupDict[groupName] = group

    def parseArgs(self, usage=None):
        """
        Parse args

        Raises:
            InvalidArgument
        """

        if usage:
            self._parser.usage = usage

        try:
            self._options, self._args = self._parser.parse_args()
        except SystemExit as rc:
            sys.stdout.flush()
            sys.stderr.flush()
            sys.exit(rc.code)

        if self._validArgCount < len(self._args):
            # More positional arguments were supplied than are allowed
            msg = _("Invalid Argument(s):")
            for arg in self._args[self._validArgCount:]:
                msg += " " + arg

            raise InvalidArgument(msg)

        optDict = self._options.__dict__
        if optDict.get('cmdVersion'):
            print(
                _('{0} version: {1}').format(os.path.basename(sys.argv[0]),
                                             self._cm.getTortugaRelease()))

            sys.exit(0)

        # Log level.
        consoleLogLevel = optDict.get('consoleLogLevel', None)
        if consoleLogLevel:
            # logManager.setConsoleLogLevel(consoleLogLevel)

            logger = logging.getLogger('tortuga')

            logger.setLevel(logging.DEBUG)

            # create console handler and set level to debug
            ch = logging.StreamHandler()
            ch.setLevel(logging.DEBUG)

            # create formatter
            formatter = logging.Formatter(
                '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

            # add formatter to ch
            ch.setFormatter(formatter)

            # add ch to logger
            logger.addHandler(ch)

        # Promote options to attributes

        self._username = self._options.username
        self._password = self._options.password

        return self._options, self._args

    def usage(self, s=None):
        '''Print the help provided by optparse'''

        if s:
            sys.stderr.write(_('Error: {0}').format(s) + '\n')

        self._parser.print_help()

        sys.exit(1)

    def getOptions(self):
        '''Returns the command line options'''
        return self._options

    def getNArgs(self):
        '''Returns the number of command line arguments'''
        return len(self._args)

    def getArgs(self):
        '''Returns the command line argument list'''
        return self._args

    def getArg(self, i):
        '''Returns the i-th command line argument'''
        return self._args[i]

    def getUsername(self):
        """ Get user name. """
        return self._username

    def getPassword(self):
        """ Get password. """
        return self._password

    def runCommand(self):         \
            # pylint: disable=no-self-use
        """ This method must be implemented by the derived class. """

        raise AbstractMethod(
            _('runCommand() has to be overridden in the derived class.'))

    def run(self):
        """
        Invoke runCommand() in derivative class and handle exceptions.
        """
        try:
            self.runCommand()
        except TortugaException as ex:
            print('%s' % (ex.getErrorMessage()))
            raise SystemExit(ex.getErrorCode())
        except SystemExit:
            raise
        except Exception as ex:
            print('%s' % (ex))
            raise SystemExit(-1)

    def getParam(self,
                 xtype,
                 options,
                 oname,
                 config,
                 section,
                 cname,
                 default=None):
        '''
        Get the value of a configurable parameter.
        First look at command line options. Return it if there.
        Then look in the configFile. Return it if there.
        Otherwise return the default.
        '''

        value = self.__getParam2(options, oname, config, section, cname,
                                 default)

        if xtype == int:
            if not value:
                value = 0
            elif type(value) != int:
                value = int(value)

        elif xtype == bool:
            if type(value) == str:
                value = value.lower() == 'true'
            elif type(value) == int:
                value = bool(value)

        return value

    def __getParam2(self, options, oname, config, section, cname, default):         \
            # pylint: disable=no-self-use

        # Command line option takes precedence

        if options and oname in options.__dict__ and \
                options.__dict__[oname] is not None:
            return options.__dict__[oname]

        # Config file is next

        if config and config.has_section(section) and \
                config.has_option(section, cname):
            return config.get(section, cname)

        # Last resort
        return default

    def _parseDiskSize(self, diskSizeParam):         \
            # pylint: disable=no-self-use
        """
        Parses diskSizeParam, returns an int value representing
        number of megabytes

        Raises:
            ValueError
        """
        if diskSizeParam.endswith('TB'):
            return int(float(diskSizeParam[:-2]) * 1000000)

        if diskSizeParam.endswith('GB'):
            return int(float(diskSizeParam[:-2]) * 1000)
        elif diskSizeParam.endswith('MB'):
            # Must be an integer
            return int(diskSizeParam[:-2])

        return int(diskSizeParam)

    def _getDiskSizeDisplayStr(self, volSize):         \
            # pylint: disable=no-self-use

        if volSize < 1000:
            result = '%s MB' % (volSize)
        elif volSize < 1000000:
            result = '%.3f GB' % (float(volSize) / 1000)
        else:
            result = '%.3f TB' % (float(volSize) / 1000000)

        return result
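
The disk-size helpers convert between human-readable strings and megabyte
counts. A few worked values (an editorial sketch; `cli` stands for any
TortugaCli subclass instance):

    cli._parseDiskSize('512MB')       # -> 512
    cli._parseDiskSize('2GB')         # -> 2000
    cli._parseDiskSize('1.5TB')       # -> 1500000
    cli._parseDiskSize('1024')        # -> 1024 (bare values are megabytes)
    cli._getDiskSizeDisplayStr(1500)  # -> '1.500 GB'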
Code example #26
class TortugaProxyConfig(TortugaCli):
    def __init__(self):
        super(TortugaProxyConfig, self).__init__(validArgCount=4)

        self._cm = ConfigManager()
        self._kitApi = KitApi()

    def parseArgs(self, usage=None):
        # TODO: add stuff here
        self.addOption('-f',
                       '--force',
                       action='store_true',
                       default=False,
                       dest='bForce',
                       help='Override built-in sanity checks')

        self.addOption('-n',
                       '--dry-run',
                       action='store_true',
                       dest='bDryRun',
                       default=False,
                       help='Do not write anything to disk')

        super().parseArgs(usage=usage)

    def runCommand(self):
        self.parseArgs()

        if self.getNArgs() < 1:
            self.usage()

            return

        action = self.getArgs()[0]

        if action == 'list':
            self._listProxies()
        elif action == 'add':
            if self.getNArgs() != 3:
                self.usage()
                return

            self._addProxy()
        elif action == 'delete':
            self._deleteProxy()
        else:
            raise InvalidArgument('Unknown directive [%s]' % (action))

    def _getProxyCfg(self):         \
            # pylint: disable=no-self-use

        cfg = configparser.ConfigParser()

        cfg.read('/opt/tortuga/config/base/apache-component.conf')

        return cfg

    def __get_proxy_set(self, cfg):         \
            # pylint: disable=no-self-use

        return set(cfg.get('proxy', 'proxy_list').split(' ')) \
            if cfg.has_option('proxy', 'proxy_list') else set()

    def _getProxyMap(self, cfg):
        proxyMap = {}

        if not cfg.has_section('proxy'):
            return proxyMap

        proxy_option_list = self.__get_proxy_set(cfg)

        for opt in proxy_option_list:
            if not cfg.has_option('proxy', opt):
                continue

            proxyMap[opt] = cfg.get('proxy', opt)

        return proxyMap

    def _writeProxyMap(self, cfg, proxyMap):
        if not cfg.has_section('proxy'):
            cfg.add_section('proxy')

        # Determine differences between what exists on disk and what has
        # just been removed.
        for deleted_option in self.__get_proxy_set(cfg) - set(proxyMap.keys()):
            if not cfg.has_option('proxy', deleted_option):
                continue

            cfg.remove_option('proxy', deleted_option)

        cfg.set('proxy', 'proxy_list', ' '.join(list(proxyMap.keys())))

        for key, value in proxyMap.items():
            cfg.set('proxy', key, value)

        proxyDict = dict(cfg.items('proxy'))

        if self.getOptions().bDryRun:
            print('[dryrun] %s' % (pprint.pformat(proxyDict)))
            return

        with open('/opt/tortuga/config/base/apache-component.conf', 'w') as fp:
            cfg.write(fp)

    def _addProxy(self):
        proxy_from = self.getArgs()[1]
        proxy_to = self.getArgs()[2]

        cfg = self._getProxyCfg()

        proxyMap = self._getProxyMap(cfg)

        if proxy_from in proxyMap:
            if proxy_to == proxyMap[proxy_from]:
                print('Proxy already mapped')

                sys.exit(1)

            if not self.getOptions().bForce:
                print('URI [%s] is already proxied to [%s]' %
                      (proxy_from, proxyMap[proxy_from]))

                sys.exit(1)

        proxyMap[proxy_from] = proxy_to

        self._writeProxyMap(cfg, proxyMap)

    def __find_kit_by_name_and_version(self, os_name, os_version):
        """
        Iterate over list of all installed kits looking for a name and
        version match only.

        Returns Kit object, otherwise None.
        """

        kit = None

        for kit in self._kitApi.getKitList():
            if kit.getName() == os_name and \
               kit.getVersion() == os_version:
                break
        else:
            return None

        return kit

    def __get_existing_kit_by_url(self, proxy_uri):
        """
        Given a proxy URI, determine if the path matches that of an
        installed kit. Returns Kit object or None.
        """

        uri_parts = proxy_uri.split('/')

        if len(uri_parts) != 5:
            # Short-circuit any check if the URI is longer/shorter than
            # a properly formatted Tortuga kit URL.
            return None

        # Check if this URI is formatted like valid OS kit URL

        os_name = uri_parts[2]
        os_version = uri_parts[3]
        os_arch = uri_parts[4]

        fake_url = self._cm.getYumRootUrl(
            'INSTALLER') + '/%s/%s/%s' % (os_name, os_version, os_arch)

        o = urllib.parse.urlparse(fake_url)

        if o.path != proxy_uri:
            # The paths don't start with the requisite Tortuga path
            return None

        version = os_version.split('-')

        # Check if supplied 'version' element of the path matches the
        # Tortuga convention.

        if len(version) == 1:
            # Possibly an OS kit
            # bOsKit = True if os_arch == 'x86_64' else None
            pass
        elif len(version) == 2:
            # Possibly a non-OS kit. Non-OS kits must have the 'arch'
            # set to 'noarch'
            # bOsKit = False if os_arch == 'noarch' else None
            pass
        else:
            # version element doesn't match Tortuga format
            return None

        return self.__find_kit_by_name_and_version(os_name, os_version)

    def _deleteProxy(self):
        proxy_path = self.getArgs()[1]

        cfg = self._getProxyCfg()

        proxyMap = self._getProxyMap(cfg)

        if proxy_path in proxyMap:
            existingKit = self.__get_existing_kit_by_url(proxy_path)

            if existingKit and not self.getOptions().bForce:
                print('WARNING: an installed %s [%s] matches this URL.'
                      ' Use \'--force\' to override this sanity check.' %
                      ('OS kit' if existingKit.getIsOs() else 'kit',
                       existingKit))

                sys.exit(1)

        if proxy_path not in proxyMap:
            print('Error: proxy path [%s] not found' % (proxy_path))

            sys.exit(1)

        del proxyMap[proxy_path]

        self._writeProxyMap(cfg, proxyMap)

    def _listProxies(self):
        cfg = self._getProxyCfg()

        for key, value in self._getProxyMap(cfg).items():
            print('%s -> %s' % (key, value))
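
Assuming the class is wired up as a console entry point (the command name
tortuga-proxy-config below is illustrative, not from the source), typical
invocations would look like:

    tortuga-proxy-config list
    tortuga-proxy-config add /tortuga/repos http://repo.example.com/tortuga
    tortuga-proxy-config -n delete /tortuga/repos   # dry run; prints the map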
Code example #27
class DbManager(TortugaObjectManager):
    """
    Class for db management.

    :param engine: a SQLAlchemy database engine instance; if omitted, an
                   engine is created from the settings in tortuga.ini

    """
    def __init__(self, engine=None):
        super().__init__()

        if not engine:
            self._cm = ConfigManager()

            self._dbConfig = self._refreshDbConfig()

            engineURI = self.__getDbEngineURI()

            if self._dbConfig['engine'] == 'sqlite' and \
                    not os.path.exists(self._dbConfig['path']):
                # Ensure SQLite database file is created with proper permissions
                fd = os.open(
                    self._dbConfig['path'], os.O_CREAT, mode=0o600)

                os.close(fd)

            self._engine = sqlalchemy.create_engine(engineURI)
        else:
            self._engine = engine

        self.Session = sqlalchemy.orm.scoped_session(
            sqlalchemy.orm.sessionmaker(bind=self.engine))

    def _register_database_tables(self):
        for kit_installer_class in get_all_kit_installers():
            kit_installer = kit_installer_class()
            kit_installer.register_database_tables()

    @property
    def engine(self):
        """
        SQLAlchemy Engine object property
        """
        self._register_database_tables()
        return self._engine

    def session(self):
        """
        Database session context manager
        """
        return SessionContextManager(self)

    def init_database(self):
        #
        # Create tables
        #
        self._register_database_tables()
        try:
            ModelBase.metadata.create_all(self.engine)
        except Exception:
            self._logger.exception('SQLAlchemy raised exception')
            raise DbError('Check database settings or credentials')

    @property
    def metadata(self):
        return self._metadata

    def __getDbEngineURI(self):
        dbPort = self._dbConfig['port']
        dbHost = self._dbConfig['host']
        engine = self._dbConfig['engine']
        dbUser = self._dbConfig['username']
        dbPassword = self._dbConfig['password']

        if engine == 'sqlite':
            engineURI = 'sqlite:///%s' % (self._dbConfig['path'])
        else:
            if dbUser is not None:
                if dbPassword is not None:
                    userspec = '%s:%s' % (dbUser, dbPassword)
                else:
                    userspec = dbUser
            else:
                userspec = None

            if dbPort is not None:
                hostspec = '%s:%s' % (dbHost, dbPort)
            else:
                hostspec = dbHost

            engineURI = f'{engine}+pymysql' if engine == 'mysql' else engine
            engineURI += '://'

            if userspec is not None:
                engineURI += f'{userspec}@'

            engineURI += f'{hostspec}/{self._cm.getDbSchema()}'

        return engineURI
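
    # Examples of generated URIs (editorial; the paths and credentials
    # are illustrative):
    #
    #     sqlite:////opt/tortuga/etc/tortuga.sqlite
    #     mysql+pymysql://tortuga:secret@localhost:3306/tortuga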

    def _getDefaultDbEngine(self): \
            # pylint: disable=no-self-use
        return 'sqlite'

    def _getDefaultDbHost(self): \
            # pylint: disable=no-self-use
        return 'localhost'

    def _getDefaultDbPort(self, engine): \
            # pylint: disable=no-self-use
        # MySQL default port
        if engine == 'mysql':
            return 3306

        return None

    def _getDefaultDbUserName(self):
        return self._cm.getDbUser()

    def _getDefaultDbPassword(self):
        if os.path.exists(self._cm.getDbPasswordFile()):
            with open(self._cm.getDbPasswordFile()) as fp:
                dbPassword = fp.read()
        else:
            dbPassword = None

        return dbPassword

    def _refreshDbConfig(self, cfg=None):
        dbConfig = {}

        if cfg is None:
            cfg = configparser.ConfigParser()

            cfg.read(os.path.join(self._cm.getKitConfigBase(), 'tortuga.ini'))

        # Database engine
        val = cfg.get('database', 'engine').strip().lower() \
            if cfg.has_option('database', 'engine') else \
            self._getDefaultDbEngine()

        dbConfig['engine'] = val

        if dbConfig['engine'] == 'sqlite':
            # If database is sqlite, read the path
            dbConfig['path'] = cfg.get('database', 'path') \
                if cfg.has_section('database') and \
                cfg.has_option('database', 'path') else \
                os.path.join(self._cm.getEtcDir(),
                             self._cm.getDbSchema() + '.sqlite')

        # Database host
        val = cfg.get('database', 'host') \
            if cfg.has_option('database', 'host') else \
            self._getDefaultDbHost()

        dbConfig['host'] = val

        # Database port
        val = cfg.get('database', 'port') \
            if cfg.has_option('database', 'port') else None

        dbConfig['port'] = val if val else self._getDefaultDbPort(
            engine=dbConfig['engine'])

        # Database username
        val = cfg.get('database', 'username') \
            if cfg.has_option('database', 'username') \
            else self._getDefaultDbUserName()

        dbConfig['username'] = val

        # Database password
        val = cfg.get('database', 'password') \
            if cfg.has_option('database', 'password') \
            else self._getDefaultDbPassword()

        dbConfig['password'] = val

        return dbConfig
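
    # An illustrative [database] section from tortuga.ini (editorial;
    # every option is optional and falls back to the defaults above):
    #
    #     [database]
    #     engine = mysql
    #     host = localhost
    #     port = 3306
    #     username = tortuga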

    def get_backend_opts(self): \
            # pylint: disable=no-self-use
        return {
            'mysql_engine': 'InnoDB',
        }

    def getMetadataTable(self, table):
        return self._metadata.tables[table]

    def openSession(self):
        """ Open db session. """

        return self.Session()

    def closeSession(self):
        """Close scoped_session."""

        self.Session.remove()
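
A minimal usage sketch (editorial; it assumes SessionContextManager, whose
definition is not shown here, closes the session on exit):

    dbm = DbManager()
    dbm.init_database()

    with dbm.session() as session:
        ...  # query through the SQLAlchemy session

    # or manage the scoped session explicitly:
    session = dbm.openSession()
    try:
        ...  # work with the session
    finally:
        dbm.closeSession()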
Code example #29
File: hardwareProfileDbApi.py, Project: ilumb/tortuga
    def __getInstallerNode(self, session):
        return self._nodesDbHandler.getNode(session,
                                            ConfigManager().getInstaller())
Code example #30
def get_puppet_node_yaml(session, nodeName):
    _cm = ConfigManager()

    publicInstallerFQDN = _cm.getInstaller().lower()
    primaryInstallerHostName = publicInstallerFQDN.split('.', 1)[0]

    try:
        dnsZone = GlobalParametersDbHandler().getParameter(
            session, 'DNSZone').value.lower()
    except ParameterNotFound:
        dnsZone = None

    try:
        depot_path = GlobalParametersDbHandler().getParameter(
            session, 'depot').value.lower()

        _cm.setDepotDir(depot_path)
    except ParameterNotFound:
        pass

    bInstaller = primaryInstallerHostName == nodeName.split('.', 1)[0]

    try:
        dbNode = NodesDbHandler().getNode(session, nodeName)
    except NodeNotFound:
        sys.exit(1)

    data = None
    try:
        from tortuga.db.dataRequestsDbHandler import DataRequestsDbHandler
        dbDataRequest = DataRequestsDbHandler().get_by_addHostSession(
            session, dbNode.addHostSession)
        if dbDataRequest:
            data = dbDataRequest.request
    except Exception:  # pylint: disable=broad-except
        # data requests are optional; ignore lookup failures
        pass

    if dbNode.hardwareprofile.nics:
        privateInstallerFQDN = '%s%s%s' % (
            primaryInstallerHostName,
            get_installer_hostname_suffix(
                dbNode.hardwareprofile.nics[0],
                enable_interface_aliases=None),
            '.%s' % (dnsZone) if dnsZone else '')
    else:
        privateInstallerFQDN = '%s%s' % (
            primaryInstallerHostName,
            '.%s' % (dnsZone) if dnsZone else '')

    if not bInstaller and dbNode.hardwareprofile.location == 'local':
        # If the hardware profile does not have an associated provisioning
        # NIC, use the public installer FQDN by default. This can happen if
        # the user has added their own "public" nodes to a local hardware
        # profile.

        if not dbNode.hardwareprofile.nics:
            installerHostName = publicInstallerFQDN
        else:
            installerHostName = privateInstallerFQDN
    else:
        # If the specified node is the installer itself or a node
        # accessing the installer through its public interface, use the
        # public host name.
        installerHostName = publicInstallerFQDN

    puppet_classes = {}

    enabledKits = set()

    if dbNode.softwareprofile:

        for dbComponent in dbNode.softwareprofile.components:

            if not dbComponent.kit.isOs:
                #
                # Load the kit and component installers
                #
                kit_spec = (dbComponent.kit.name, dbComponent.kit.version,
                            dbComponent.kit.iteration)
                kit_installer = get_kit_installer(kit_spec)()
                kit_installer.session = session
                _component = kit_installer.get_component_installer(
                    dbComponent.name)

                #
                # Get the puppet args for the component
                #
                try:
                    puppet_class_args = _component.run_action(
                        'get_puppet_args',
                        dbNode.softwareprofile,
                        dbNode.hardwareprofile,
                        data=data)
                    if puppet_class_args is not None:
                        puppet_classes[_component.puppet_class] = \
                            puppet_class_args
                except Exception:  # noqa pylint: disable=broad-except
                    # suppress exception if unable to get Puppet args
                    puppet_classes[_component.puppet_class] = {}

            else:
                #
                # OS kit component is omitted on installer. The installer
                # is assumed to have a pre-existing OS repository
                # configuration.
                #
                if bInstaller:
                    continue

            enabledKits.add(dbComponent.kit)

    dataDict = {}

    if puppet_classes:
        dataDict['classes'] = puppet_classes

    parametersDict = {}
    dataDict['parameters'] = parametersDict

    # software profile
    if dbNode.softwareprofile:
        parametersDict['swprofilename'] = dbNode.softwareprofile.name

    # hardware profile
    parametersDict['hwprofilename'] = dbNode.hardwareprofile.name

    # installer hostname
    parametersDict['primary_installer_hostname'] = installerHostName

    # Local repos directory
    repodir = os.path.join(_cm.getDepotDir(), 'kits')

    # Build YUM repository entries only if we have kits associated with
    # the software profile.
    if enabledKits:
        repourl = _cm.getIntWebRootUrl(installerHostName) + '/repos' \
            if not bInstaller else 'file://{0}'.format(repodir)

        repo_type = None

        if dbNode.softwareprofile.os.family.name == 'rhel':
            repo_type = 'yum'
        # elif dbNode.softwareprofile.os.family == 'ubuntu':
        #     repo_type = 'apt'

        if repo_type:
            # Only add 'repos' entries for supported operating system
            # families.

            repos_dict = {}

            for kit in enabledKits:
                if kit.isOs:
                    verstr = str(kit.version)
                    arch = kit.components[0].os[0].arch
                else:
                    verstr = '%s-%s' % (kit.version, kit.iteration)
                    arch = 'noarch'

                for dbKitSource in dbNode.softwareprofile.kitsources:
                    if dbKitSource in kit.sources:
                        baseurl = dbKitSource.url
                        break
                else:
                    subpath = '%s/%s/%s' % (kit.name, verstr, arch)

                    if not kit.isOs and not os.path.exists(
                            os.path.join(repodir, subpath,
                                         'repodata/repomd.xml')):
                        continue

                    baseurl = '%s/%s' % (repourl, subpath)

                    # [TODO] temporary workaround for handling RHEL media
                    # path.
                    #
                    # This code is duplicated from tortuga.boot.distro
                    if kit.isOs and \
                       dbNode.softwareprofile.os.name == 'rhel' and \
                       dbNode.softwareprofile.os.family.version != '7':
                        subpath += '/Server'

                if repo_type == 'yum':
                    if dbNode.hardwareprofile.location == 'remote':
                        cost = 1200
                    else:
                        cost = 1000

                    repos_dict['uc-kit-%s' % (kit.name)] = {
                        'type': repo_type,
                        'baseurl': baseurl,
                        'cost': cost,
                    }

            if repos_dict:
                parametersDict['repos'] = repos_dict

    # Enable '3rdparty' repo
    if dbNode.softwareprofile:
        third_party_repo_subpath = '3rdparty/%s/%s/%s' % (
            dbNode.softwareprofile.os.family.name,
            dbNode.softwareprofile.os.family.version,
            dbNode.softwareprofile.os.arch)

        local_repos_path = os.path.join(repodir, third_party_repo_subpath)

        # Check for existence of repository metadata to validate existence
        if enabledKits and os.path.exists(
                os.path.join(local_repos_path, 'repodata', 'repomd.xml')):
            third_party_repo_dict = {
                'tortuga-third-party': {
                    'type': 'yum',
                    'baseurl': os.path.join(repourl, third_party_repo_subpath),
                },
            }

            if 'repos' not in parametersDict:
                parametersDict['repos'] = third_party_repo_dict
            else:
                parametersDict['repos'] = dict(
                    list(parametersDict['repos'].items()) +
                    list(third_party_repo_dict.items()))

    # environment
    dataDict['environment'] = 'production'

    sys.stdout.write(
        yaml.safe_dump(dataDict, default_flow_style=False,
                       explicit_start=True))
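
For reference, the emitted ENC document has this general shape (an
editorial illustration; the class name and parameter values are made up):

    ---
    classes:
      tortuga::installer: {}
    environment: production
    parameters:
      hwprofilename: Installer
      primary_installer_hostname: installer.example.com
      swprofilename: Installer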