def get_full_pillar(self):
    """
    Build the pillar dict from every associated formula's default
    SPECFILE properties, with the global orchestration properties
    merged in last so they take precedence.
    """
    pillar = {}

    # Merge each unique formula's default SPECFILE pillar data.
    for formula in self.get_formulas():
        # Resolve the pinned version for this formula, falling back to
        # the formula's default when no explicit pin exists.
        try:
            pinned = self.formula_versions.get(formula=formula)
            version = pinned.version
        except FormulaVersion.DoesNotExist:
            version = formula.default_version

        # Refresh the local gitfs checkout before reading properties.
        formula.get_gitfs().update()

        recursive_update(pillar, formula.properties(version))

    # Properties supplied at creation time win over formula defaults.
    recursive_update(pillar, self.global_orchestration_properties)

    return pillar
def get_full_pillar(self): users = [] # pull the create_ssh_users property from the stackd.io config file. # If it's False, we won't create ssh users on the box. if self.create_users: user_permissions_map = get_users_with_perms( self, attach_perms=True, with_superusers=True, with_group_users=True ) for user, perms in user_permissions_map.items(): if 'ssh_environment' in perms: if user.settings.public_key: logger.debug('Granting {0} ssh permission to environment: {1}'.format( user.username, self.name, )) users.append({ 'username': user.username, 'public_key': user.settings.public_key, 'id': user.id, }) else: logger.debug( 'User {0} has ssh permission for environment {1}, ' 'but has no public key. Skipping.'.format( user.username, self.name, ) ) pillar_props = { '__stackdio__': { 'users': users } } # If any of the formulas we're using have default pillar # data defined in its corresponding SPECFILE, we need to pull # that into our environment pillar file. # for each unique formula, pull the properties from the SPECFILE for formula_version in self.formula_versions.all(): formula = formula_version.formula version = formula_version.version # Update the formula formula.get_gitfs().update() # Add it to the rest of the pillar recursive_update(pillar_props, formula.properties(version)) # Add in properties that were supplied via the blueprint and during # environment creation recursive_update(pillar_props, self.properties) return pillar_props
def ext_pillar(minion_id, pillar, *args, **kwargs):
    """
    Basically, we need to provide additional pillar data to our states
    but only the pillar data defined for a stack. The user should have
    the ability to do this from the UI and the pillar file used will
    be located in the grains.
    """
    new_pillar = {}

    # First try the environment
    # (always do this regardless of whether there's we're in global orchestration or not)
    if 'env' in __grains__:
        # The env grain looks like '<prefix>.<env_name>'; take everything
        # after the first dot as the environment name.
        _, _, env_name = __grains__['env'].partition('.')
        try:
            environment = Environment.objects.get(name=env_name)
            recursive_update(new_pillar, environment.get_full_pillar())
        except Environment.DoesNotExist:
            logger.info('Environment {} was specified in the grains '
                        'but was not found.'.format(env_name))

    global_orch = __grains__.get('global_orchestration', False)

    # Then the cloud account (but only if we ARE in global orchestration)
    if global_orch and 'cloud_account' in __grains__:
        try:
            account = CloudAccount.objects.get(
                slug=__grains__['cloud_account'])
            recursive_update(new_pillar, account.get_full_pillar())
        except CloudAccount.DoesNotExist:
            logger.info('Cloud account {} not found'.format(
                __grains__['cloud_account']))

    # Then the stack (but only if we ARE NOT in global orchestration)
    if not global_orch and 'stack_id' in __grains__ and isinstance(
            __grains__['stack_id'], int):
        try:
            stack = Stack.objects.get(id=__grains__['stack_id'])
            recursive_update(new_pillar, stack.get_full_pillar())
        except Stack.DoesNotExist:
            logger.info('Stack {} not found'.format(__grains__['stack_id']))

    # This is the old way, try it too for compatibility purposes.
    # Make it last so it has the highest precedence.
    if 'stack_pillar_file' in __grains__:
        # load the stack_pillar_file, rendered as yaml, and add it into the return dict
        try:
            with open(__grains__['stack_pillar_file'], 'r') as f:
                loaded_pillar = yaml.safe_load(f)
                recursive_update(new_pillar, loaded_pillar)
        except Exception as e:
            # Deliberate broad catch: a bad legacy pillar file must not
            # break pillar rendering for the whole minion.
            logger.exception(e)
            logger.critical('Unable to load/render stack_pillar_file. Is the YAML '
                            'properly formatted?')

    return new_pillar
def generate_global_pillar_file(self, update_formulas=False):
    """
    Render and write the global-orchestration pillar file for this stack.

    Collects the formulas used by every cloud account backing this
    stack's hosts, optionally refreshes those formulas, merges their
    default properties with each account's global orchestration
    properties, and writes the result as YAML to
    ``self.global_pillar_file``.

    :param update_formulas: when True, pull the latest copy of each
        non-private formula before reading its properties.
    """
    # Import here to not cause circular imports
    from stackdio.api.formulas.models import FormulaVersion
    from stackdio.api.formulas.tasks import update_formula

    pillar_props = {}

    # Find all of the cloud accounts backing this stack's hosts
    accounts = {host.cloud_image.account for host in self.hosts.all()}

    # Collect the globally used formulas, deduplicated up front so the
    # same formula is never updated or merged more than once (multiple
    # accounts may share a formula).
    global_formulas = set()
    for account in accounts:
        global_formulas.update(account.get_formulas())

    # Update the formulas if requested
    if update_formulas:
        for formula in global_formulas:
            # Update the formula, and fail silently if there was an error.
            if formula.private_git_repo:
                logger.debug('Skipping private formula: {0}'.format(formula.uri))
                continue

            # Use the pinned version for this formula, falling back to
            # the formula's default version.
            try:
                version = self.formula_versions.get(formula=formula).version
            except FormulaVersion.DoesNotExist:
                version = formula.default_version

            # Run the update task synchronously; raise_exception=False
            # keeps a failed pull from aborting pillar generation.
            update_formula.si(formula.id, None, version, raise_exception=False)()

    # Add the global formulas into the props.
    # NOTE(review): `formula.properties` is read as an attribute here but
    # called as `formula.properties(version)` elsewhere in this file —
    # confirm which API the Formula model actually exposes.
    for formula in global_formulas:
        recursive_update(pillar_props, formula.properties)

    # Account properties are merged last so they take precedence.
    for account in accounts:
        recursive_update(pillar_props, account.global_orchestration_properties)

    pillar_file_yaml = yaml.safe_dump(pillar_props, default_flow_style=False)

    # Create the file if it doesn't exist yet, otherwise overwrite in place.
    if not self.global_pillar_file:
        self.global_pillar_file.save('stack.global_pillar', ContentFile(pillar_file_yaml))
    else:
        with open(self.global_pillar_file.path, 'w') as f:
            f.write(pillar_file_yaml)
def create(self, **kwargs):
    """
    Create a new stack inside a single transaction.

    Pops 'properties' from kwargs, creates the stack row, merges the
    supplied properties over the blueprint's default properties, and
    creates the stack's security group and host objects.
    """
    new_properties = kwargs.pop('properties', {})

    with transaction.atomic(using=self.db):
        stack = super(StackQuerySet, self).create(**kwargs)

        # manage the properties
        # NOTE(review): recursive_update appears to merge in place, so this
        # mutates the dict obtained from stack.blueprint.properties —
        # confirm that accessor returns a copy, otherwise the blueprint's
        # in-memory properties are modified as a side effect.
        properties = stack.blueprint.properties
        recursive_update(properties, new_properties)
        stack.properties = properties

        # Create the appropriate hosts & security group objects
        stack.create_security_groups()
        stack.create_hosts()

    return stack
def ext_pillar(minion_id, pillar, *args, **kwargs):
    """
    Basically, we need to provide additional pillar data to our states
    but only the pillar data defined for a stack. The user should have
    the ability to do this from the UI and the pillar file used will
    be located in the grains.
    """
    new_pillar = {}

    # First try the environment
    # (always do this regardless of whether there's we're in global orchestration or not)
    if 'env' in __grains__:
        # Grain value has the form '<prefix>.<env_name>'; the name is
        # everything after the first dot.
        _, _, env_name = __grains__['env'].partition('.')
        try:
            environment = Environment.objects.get(name=env_name)
            recursive_update(new_pillar, environment.get_full_pillar())
        except Environment.DoesNotExist:
            logger.info('Environment {} was specified in the grains '
                        'but was not found.'.format(env_name))

    global_orch = __grains__.get('global_orchestration', False)

    # Then the cloud account (but only if we ARE in global orchestration)
    if global_orch and 'cloud_account' in __grains__:
        try:
            account = CloudAccount.objects.get(slug=__grains__['cloud_account'])
            recursive_update(new_pillar, account.get_full_pillar())
        except CloudAccount.DoesNotExist:
            logger.info('Cloud account {} not found'.format(__grains__['cloud_account']))

    # Then the stack (but only if we ARE NOT in global orchestration)
    if not global_orch and 'stack_id' in __grains__ and isinstance(__grains__['stack_id'], int):
        try:
            stack = Stack.objects.get(id=__grains__['stack_id'])
            recursive_update(new_pillar, stack.get_full_pillar())
        except Stack.DoesNotExist:
            logger.info('Stack {} not found'.format(__grains__['stack_id']))

    # This is the old way, try it too for compatibility purposes.
    # Make it last so it has the highest precedence.
    if 'stack_pillar_file' in __grains__:
        # load the stack_pillar_file, rendered as yaml, and add it into the return dict
        try:
            with open(__grains__['stack_pillar_file'], 'r') as f:
                loaded_pillar = yaml.safe_load(f)
                recursive_update(new_pillar, loaded_pillar)
        except Exception as e:
            # Deliberate broad catch: a broken legacy pillar file should
            # not break pillar rendering for the minion.
            logger.exception(e)
            logger.critical('Unable to load/render stack_pillar_file. Is the YAML '
                            'properly formatted?')

    return new_pillar
def update(self, instance, validated_data):
    """
    Override so PATCH merges options while PUT replaces them wholesale.
    """
    if self.partial:
        # PATCH: deep-merge the incoming options over the stored ones.
        incoming = validated_data.get('options', {})
        merged = recursive_update(instance.options, incoming)
        validated_data['options'] = merged

    # Defer the actual save to DRF's standard update machinery.
    return super(NotificationHandlerSerializer, self).update(instance, validated_data)
def update(self, stack, validated_data):
    """Persist new properties on the stack (PATCH merges, PUT replaces)."""
    if not self.partial:
        # PUT: the payload replaces the properties outright.
        stack.properties = validated_data
    else:
        # PATCH: deep-merge the payload into the existing properties.
        stack.properties = recursive_update(stack.properties, validated_data)

    # Persist the change before handing the instance back.
    stack.save()

    return stack
def update(self, account, validated_data):
    """Persist global orchestration properties (PATCH merges, PUT replaces)."""
    if not self.partial:
        # PUT replaces the whole property document.
        account.global_orchestration_properties = validated_data
    else:
        # PATCH deep-merges the payload into what is already stored.
        merged = recursive_update(account.global_orchestration_properties,
                                  validated_data)
        account.global_orchestration_properties = merged

    # Persist the account before returning it.
    account.save()

    return account
def update(self, instance, validated_data):
    """
    Custom PUT/PATCH handling: PATCH deep-merges options, PUT replaces them.
    """
    if self.partial:
        # Fold the incoming options over the handler's stored options.
        validated_data['options'] = recursive_update(
            instance.options,
            validated_data.get('options', {}),
        )

    # Hand off to DRF's normal update path for the actual save.
    return super(NotificationHandlerSerializer, self).update(instance, validated_data)
def update(self, instance, validated_data):
    """Write new properties to the blueprint (PATCH merges, PUT replaces)."""
    if self.partial:
        # PATCH: fold the incoming data into the current properties.
        instance.properties = recursive_update(instance.properties, validated_data)
    else:
        # PUT: the incoming data becomes the properties wholesale.
        instance.properties = validated_data

    # Persist before returning.
    instance.save()

    return instance
def update(self, instance, validated_data):
    """Save updated properties (PATCH deep-merges, PUT overwrites)."""
    if not self.partial:
        # PUT: replace the properties with the payload as-is.
        instance.properties = validated_data
    else:
        # PATCH: merge the payload into the existing properties.
        instance.properties = utils.recursive_update(instance.properties,
                                                     validated_data)

    # Persist the instance with its new properties.
    instance.save()

    return instance
def update(self, account, validated_data):
    """Store new global orchestration properties (PATCH merges, PUT replaces)."""
    if self.partial:
        # PATCH request: deep-merge into the existing property document.
        account.global_orchestration_properties = recursive_update(
            account.global_orchestration_properties, validated_data)
    else:
        # PUT request: overwrite the property document entirely.
        account.global_orchestration_properties = validated_data

    # Persist the account.
    account.save()

    return account
def generate_pillar_file(self, update_formulas=False):
    """
    Render and write the pillar file for this stack.

    Builds the pillar dict from: an ``__stackdio__.users`` list of SSH
    users (when ``self.create_users`` is truthy), each formula's default
    SPECFILE properties, and ``self.properties`` (merged last, so it
    takes precedence). The result is dumped as YAML to
    ``self.pillar_file``.

    :param update_formulas: when True, pull the latest copy of each
        non-private formula before reading its properties.
    """
    # Import here to not cause circular imports
    from stackdio.api.formulas.models import FormulaVersion
    from stackdio.api.formulas.tasks import update_formula

    users = []
    # pull the create_ssh_users property from the stackd.io config file.
    # If it's False, we won't create ssh users on the box.
    if self.create_users:
        # NOTE(review): presumably a guardian-style helper returning
        # {user: [perm, ...]} — verify against its definition.
        user_permissions_map = get_users_with_perms(
            self, attach_perms=True, with_superusers=True, with_group_users=True
        )

        for user, perms in user_permissions_map.items():
            if 'ssh_stack' in perms:
                # Only users who have uploaded a public key can be
                # provisioned as SSH users on the box.
                if user.settings.public_key:
                    logger.debug('Granting {0} ssh permission to stack: {1}'.format(
                        user.username,
                        self.title,
                    ))
                    users.append({
                        'username': user.username,
                        'public_key': user.settings.public_key,
                        'id': user.id,
                    })
                else:
                    logger.debug(
                        'User {0} has ssh permission for stack {1}, but has no public key. '
                        'Skipping.'.format(
                            user.username,
                            self.title,
                        )
                    )

    pillar_props = {
        '__stackdio__': {
            'users': users
        }
    }

    # If any of the formulas we're using have default pillar
    # data defined in its corresponding SPECFILE, we need to pull
    # that into our stack pillar file.

    # First get the unique set of formulas
    formulas = set()
    for host in self.hosts.all():
        formulas.update([c.formula for c in host.formula_components.all()])

    # Update the formulas if requested
    if update_formulas:
        for formula in formulas:
            # Update the formula, and fail silently if there was an error.
            if formula.private_git_repo:
                logger.debug('Skipping private formula: {0}'.format(formula.uri))
                continue

            # Use the pinned version if one exists, else the default.
            try:
                version = self.formula_versions.get(formula=formula).version
            except FormulaVersion.DoesNotExist:
                version = formula.default_version

            # Run the update task synchronously; raise_exception=False
            # keeps a failed pull from aborting pillar generation.
            update_formula.si(formula.id, None, version, raise_exception=False)()

    # for each unique formula, pull the properties from the SPECFILE
    # NOTE(review): `formula.properties` is read as an attribute here but
    # called as `formula.properties(version)` elsewhere in this file —
    # confirm which API the Formula model actually exposes.
    for formula in formulas:
        recursive_update(pillar_props, formula.properties)

    # Add in properties that were supplied via the blueprint and during
    # stack creation
    recursive_update(pillar_props, self.properties)

    pillar_file_yaml = yaml.safe_dump(pillar_props, default_flow_style=False)

    # Create the file if it doesn't exist yet, otherwise overwrite in place.
    if not self.pillar_file:
        self.pillar_file.save('stack.pillar', ContentFile(pillar_file_yaml))
    else:
        with open(self.pillar_file.path, 'w') as f:
            f.write(pillar_file_yaml)