def remove_tags_from_resource(owner, resource_obj, tags, *args, **kwargs):
    """
    This function gets a list of tag keys, e.g. ['joe', 'schmoe'], and
    deletes the corresponding tags from the resource
    :param owner: the resource owner
    :param resource_obj: the resource object from which the tags will be
                         removed
    :param tags: list of tag keys to be deleted
    """
    # ensure there are no duplicate tag keys because mongoengine will
    # raise exception for duplicates in query
    key_list = list(set(tags))

    # create a query that will return all the tags with the given keys
    query = reduce(lambda q1, q2: q1.__or__(q2),
                   map(lambda key: Q(key=key), key_list))

    Tag.objects(Q(owner=owner) & Q(resource=resource_obj) & query).delete()

    # SEC
    owner.mapper.update(resource_obj)

    rtype = resource_obj._meta["collection"]
    trigger_session_update(owner,
                           [rtype + 's' if not rtype.endswith('s') else rtype])
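A minimal usage sketch, assuming the function is called with plain tag keys (as the set()/Q(key=...) usage implies) and that `owner` and `machine` are already-loaded mongoengine documents; the tag keys shown are made up:

# Hypothetical example: strip the 'env' and 'team' tags from a machine.
remove_tags_from_resource(owner, machine, ['env', 'team'])

# Afterwards, Tag.objects(owner=owner, resource=machine) no longer contains
# documents with key 'env' or 'team', the owner's RBAC mapper has been
# refreshed for the machine, and a session update for 'machines' is sent.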
def delete(self):
    if self.reminder:
        self.reminder.delete()
    super(Schedule, self).delete()
    Tag.objects(resource_id=self.id, resource_type='schedule').delete()
    self.owner.mapper.remove(self)
    if self.owned_by:
        self.owned_by.get_ownership_mapper(self.owner).remove(self)
def delete(self):
    super(Cloud, self).delete()
    Tag.objects(resource=self).delete()
    try:
        self.owner.mapper.remove(self)
    except Exception as exc:
        log.error("Got error %r while removing cloud %s", exc, self.id)
    try:
        if self.owned_by:
            self.owned_by.get_ownership_mapper(self.owner).remove(self)
    except Exception as exc:
        log.error("Got error %r while removing ownership mapping of "
                  "cloud %s", exc, self.id)
def tags(self): """Return the tags of this network.""" return { tag.key: tag.value for tag in Tag.objects(resource_id=self.id, resource_type='network') }
def as_dict(self):
    cdict = {
        'id': self.id,
        'title': self.title,
        'provider': self.ctl.provider,
        'enabled': self.enabled,
        'dns_enabled': self.dns_enabled,
        'state': 'online' if self.enabled else 'offline',
        'polling_interval': self.polling_interval,
        'tags': [
            {'key': tag.key, 'value': tag.value}
            for tag in Tag.objects(owner=self.owner,
                                   resource=self).only('key', 'value')
        ],
        'owned_by': self.owned_by.id if self.owned_by else '',
        'created_by': self.created_by.id if self.created_by else '',
    }
    cdict.update({
        key: getattr(self, key) for key in self._cloud_specific_fields
        if key not in self._private_fields
    })
    return cdict
def modify_security_tags(auth_context, tags, resource=None):
    """
    This method splits the resource's tags into security and non-security
    groups. Security tags are part of team policies and should only be
    modified by organization owners, in order to enforce those policies.
    If a team member attempts to edit a security tag, an UnauthorizedError
    will be thrown
    :param tags: the new tags dict
    :param resource: the resource on which the tags are going to be applied
    :return: False if a security tag would be modified in the new tags dict
             by someone other than the organization owner, otherwise True
    """
    # private context
    if auth_context.org is None:
        return True
    if auth_context.is_owner():
        return True
    else:
        rtags = Tag.objects(owner=auth_context.owner.id,
                            resource=resource).only('key', 'value')
        rtags = {rtag.key: rtag.value for rtag in rtags}
        security_tags = auth_context.get_security_tags()
        # check whether the new tags tend to modify any of the security_tags
        for security_tag in security_tags:
            for key, value in security_tag.items():
                if key not in rtags.keys():
                    if key in tags.keys():
                        return False
                else:
                    if key not in tags.keys():
                        return False
                    elif value != tags[key]:
                        return False
    return True
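A hedged sketch of how a caller might consume the boolean result; `auth_context`, `machine` and the raised UnauthorizedError follow the docstring's description and are assumptions here, and the tag values are invented:

# Hypothetical caller-side check before applying new tags to a resource.
new_tags = {'env': 'prod', 'cost_per_hour': '0.5'}
if not modify_security_tags(auth_context, new_tags, resource=machine):
    # A non-owner tried to add, change or drop a security tag.
    raise UnauthorizedError("You may not modify security tags")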
def q(self):
    rtype = self._instance.condition_resource_cls._meta["collection"]
    ids = set()
    for key, value in self.tags.iteritems():
        query = {
            'owner': self._instance.owner,
            'resource_type': rtype,
            'key': key,
        }
        if value:
            query['value'] = value
        ids |= set(tag.resource.id for tag in Tag.objects(**query))
    return me.Q(id__in=ids)
def add_tags_to_resource(owner, resource_obj, tags, *args, **kwargs):
    """
    This function gets a list of (key, value) pairs, e.g.
    [('joe', 'schmoe')], scans the list, updates all the tags whose keys
    are present but whose values differ, and adds all the missing ones
    :param owner: the resource owner
    :param resource_obj: the resource object where the tags will be added
    :param tags: list of tags to be added
    """
    # merge all the tags in the list into one dict. this will also make sure
    # that if there are duplicates they will be cleaned up
    tag_dict = dict(tags)
    for tag_obj in Tag.objects(owner=owner, resource=resource_obj):
        # if any of the tag keys is already present, check whether its value
        # should be changed and remove it from the tag_dict
        if tag_obj.key in tag_dict:
            if tag_obj.value != tag_dict[tag_obj.key]:
                tag_obj.value = tag_dict[tag_obj.key]
                tag_obj.save()
            del tag_dict[tag_obj.key]

    # remaining tags in tag_dict have not been found in the db so add them now
    for key, value in tag_dict.iteritems():
        Tag(owner=owner, resource=resource_obj, key=key, value=value).save()

    # SEC
    owner.mapper.update(resource_obj)

    # FIXME: The fact that a session update is triggered at this point may
    # result in re-updating the RBAC Mappings twice for the given resource
    # for no f*** reason.
    rtype = resource_obj._meta["collection"]
    trigger_session_update(owner,
                           [rtype + 's' if not rtype.endswith('s') else rtype])
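A minimal usage sketch, assuming (as dict(tags) implies) that tags arrive as (key, value) pairs and that `owner` and `machine` are pre-loaded documents; the keys and values are made up:

# First call creates both tags on the machine.
add_tags_to_resource(owner, machine, [('env', 'staging'), ('team', 'sre')])

# A second call with a changed value only rewrites the differing tag;
# unchanged tags are left untouched and previously unseen keys are created.
add_tags_to_resource(owner, machine, [('env', 'prod')])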
def q(self):
    rtype = self._instance.selector_resource_cls._meta[
        "collection"].rstrip('s')
    ids = set()
    for key, value in self.include.items():
        query = {
            'owner': self._instance.owner,
            'resource_type': rtype,
            'key': key,
        }
        if value:
            query['value'] = value
        ids |= set(tag.resource_id for tag in Tag.objects(**query))
    # TODO: exclude items
    return me.Q(id__in=ids)
def _decide_machine_cost(machine, tags=None, cost=(0, 0)):
    """Decide what the monthly and hourly machine cost is

    Params:
        machine: Machine model instance
        tags:    Optional machine tags dict, if not provided it will be
                 queried.
        cost:    Optional two-tuple of hourly/monthly cost, such as that
                 returned by cloud provider. Any cost-specific tags take
                 precedence.
    """
    def parse_num(num):
        try:
            return float(num or 0)
        except (ValueError, TypeError):
            log.warning("Can't parse %r as float.", num)
            return 0

    now = datetime.datetime.utcnow()
    month_days = calendar.monthrange(now.year, now.month)[1]

    # Get machine tags from db
    tags = tags or {tag.key: tag.value
                    for tag in Tag.objects(owner=machine.cloud.owner,
                                           resource=machine)}

    try:
        cph = parse_num(tags.get('cost_per_hour'))
        cpm = parse_num(tags.get('cost_per_month'))
        if not (cph or cpm) or cph > 100 or cpm > 100 * 24 * 31:
            log.warning("Invalid cost tags for machine %s", machine)
            cph, cpm = map(parse_num, cost)
        if not cph:
            cph = float(cpm) / month_days / 24
        elif not cpm:
            cpm = cph * 24 * month_days
    except Exception:
        log.exception("Error while deciding cost for machine %s", machine)

    machine.cost.hourly = cph
    machine.cost.monthly = cpm
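A worked example of the fallback arithmetic above, using illustrative numbers and assuming a 30-day month:

# With only a cost_per_hour tag of 0.12, the monthly figure is derived as
#   cpm = 0.12 * 24 * 30 = 86.4
# With only a cost_per_month tag of 72.0, the hourly figure is derived as
#   cph = 72.0 / 30 / 24 = 0.1
# If neither tag is set, or the tag values exceed the sanity limits
# (cph > 100 or cpm > 100 * 24 * 31), the provider-reported `cost` tuple
# is used instead.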
def tags(self): """Return the tags of this record.""" return {tag.key: tag.value for tag in Tag.objects(resource_id=self.id, resource_type='record')}
def delete(self):
    super(Schedule, self).delete()
    Tag.objects(resource=self).delete()
    self.owner.mapper.remove(self)
    if self.owned_by:
        self.owned_by.get_ownership_mapper(self.owner).remove(self)
def delete(self):
    super(Network, self).delete()
    self.owner.mapper.remove(self)
    Tag.objects(resource_id=self.id, resource_type='network').delete()
    if self.owned_by:
        self.owned_by.get_ownership_mapper(self.owner).remove(self)
def delete(self):
    super(Subnet, self).delete()
    Tag.objects(resource_id=self.id, resource_type='subnet').delete()
def get_tag_objects_for_resource(owner, resource_obj, *args, **kwargs):
    return Tag.objects(
        owner=owner,
        resource_type=resource_obj.to_dbref().collection.rstrip('s'),
        resource_id=resource_obj.id)
def delete(self):
    super(Cloud, self).delete()
    Tag.objects(resource=self).delete()
    self.owner.mapper.remove(self)
def delete(self):
    super(Record, self).delete()
    Tag.objects(resource=self).delete()
    self.zone.owner.mapper.remove(self)
def tags(self): """Return the tags of this subnet.""" return [{ 'key': tag.key, 'value': tag.value } for tag in Tag.objects(resource=self)]
def delete(self):
    super(Zone, self).delete()
    Tag.objects(resource_id=self.id, resource_type='zone').delete()
    self.owner.mapper.remove(self)
    if self.owned_by:
        self.owned_by.get_ownership_mapper(self.owner).remove(self)
def delete(self):
    super(Subnet, self).delete()
    Tag.objects(resource=self).delete()
def list_machines(self):
    """Return list of machines for cloud

    A list of nodes is fetched from libcloud, the data is processed, stored
    on machine models, and a list of machine models is returned.

    Subclasses SHOULD NOT override or extend this method.

    There are instead a number of methods that are called from this method,
    to allow subclasses to modify the data according to the specifics of
    their cloud type. These methods currently are:

        `self._list_machines__fetch_machines`
        `self._list_machines__machine_actions`
        `self._list_machines__postparse_machine`
        `self._list_machines__cost_machine`
        `self._list_machines__fetch_generic_machines`

    Subclasses that require special handling should override these, by
    default, dummy methods.
    """
    # Try to query list of machines from provider API.
    try:
        nodes = self._list_machines__fetch_machines()
        log.info("List nodes returned %d results for %s.",
                 len(nodes), self.cloud)
    except InvalidCredsError as exc:
        log.warning("Invalid creds on running list_nodes on %s: %s",
                    self.cloud, exc)
        raise CloudUnauthorizedError(msg=exc.message)
    except ssl.SSLError as exc:
        log.error("SSLError on running list_nodes on %s: %s",
                  self.cloud, exc)
        raise SSLError(exc=exc)
    except Exception as exc:
        log.exception("Error while running list_nodes on %s", self.cloud)
        raise CloudUnavailableError(exc=exc)

    machines = []
    now = datetime.datetime.utcnow()

    # Process each machine in returned list.
    # Store previously unseen machines separately.
    new_machines = []
    for node in nodes:
        # Fetch machine mongoengine model from db, or initialize one.
        try:
            machine = Machine.objects.get(cloud=self.cloud,
                                          machine_id=node.id)
        except Machine.DoesNotExist:
            machine = Machine(cloud=self.cloud, machine_id=node.id).save()
            new_machines.append(machine)

        # Update machine_model's last_seen fields.
        machine.last_seen = now
        machine.missing_since = None

        # Get misc libcloud metadata.
        image_id = str(node.image or node.extra.get('imageId') or
                       node.extra.get('image_id') or
                       node.extra.get('image') or '')
        size = (node.size or node.extra.get('flavorId') or
                node.extra.get('instancetype'))

        machine.name = node.name
        machine.image_id = image_id
        machine.size = size
        machine.state = config.STATES[node.state]
        machine.private_ips = node.private_ips
        machine.public_ips = node.public_ips

        # Set machine extra dict.
        # Make sure we don't meet any surprises when we try to json encode
        # later on in the HTTP response.
        extra = self._list_machines__get_machine_extra(machine, node)
        for key, val in extra.items():
            try:
                json.dumps(val)
            except TypeError:
                extra[key] = str(val)
        machine.extra = extra

        # Set machine hostname
        if machine.extra.get('dns_name'):
            machine.hostname = machine.extra['dns_name']
        else:
            ips = machine.public_ips + machine.private_ips
            if not ips:
                ips = []
            for ip in ips:
                if ip and ':' not in ip:
                    machine.hostname = ip
                    break

        # Get machine tags from db
        tags = {tag.key: tag.value
                for tag in Tag.objects(owner=self.cloud.owner,
                                       resource=machine).only('key', 'value')}

        # Get machine creation date.
        try:
            created = self._list_machines__machine_creation_date(machine,
                                                                 node)
            if created:
                machine.created = get_datetime(created)
        except Exception as exc:
            log.exception("Error finding creation date for %s in %s.",
                          machine, self.cloud)
        # TODO: Consider if we should fall back to using current date.
        # if not machine_model.created:
        #     machine_model.created = datetime.datetime.utcnow()

        # Update with available machine actions.
        try:
            self._list_machines__machine_actions(machine, node)
        except Exception as exc:
            log.exception("Error while finding machine actions "
                          "for machine %s:%s for %s",
                          machine.id, node.name, self.cloud)

        # Apply any cloud/provider specific post processing.
        try:
            self._list_machines__postparse_machine(machine, node)
        except Exception as exc:
            log.exception("Error while post parsing machine %s:%s for %s",
                          machine.id, node.name, self.cloud)

        # Apply any cloud/provider cost reporting.
        try:
            def parse_num(num):
                try:
                    return float(num or 0)
                except (ValueError, TypeError):
                    log.warning("Can't parse %r as float.", num)
                    return 0

            month_days = calendar.monthrange(now.year, now.month)[1]

            cph = parse_num(tags.get('cost_per_hour'))
            cpm = parse_num(tags.get('cost_per_month'))
            if not (cph or cpm) or cph > 100 or cpm > 100 * 24 * 31:
                cph, cpm = map(parse_num,
                               self._list_machines__cost_machine(machine,
                                                                 node))
            if not cph:
                cph = float(cpm) / month_days / 24
            elif not cpm:
                cpm = cph * 24 * month_days
            machine.cost.hourly = cph
            machine.cost.monthly = cpm
        except Exception as exc:
            log.exception("Error while calculating cost "
                          "for machine %s:%s for %s",
                          machine.id, node.name, self.cloud)
        if node.state.lower() == 'terminated':
            machine.cost.hourly = 0
            machine.cost.monthly = 0

        # Save all changes to machine model on the database.
        try:
            machine.save()
        except me.ValidationError as exc:
            log.error("Error adding %s: %s", machine.name, exc.to_dict())
            raise BadRequestError({"msg": exc.message,
                                   "errors": exc.to_dict()})
        except me.NotUniqueError as exc:
            log.error("Machine %s not unique error: %s", machine.name, exc)
            raise ConflictError("Machine with this name already exists")

        machines.append(machine)

    # Append generic-type machines, which aren't handled by libcloud.
    for machine in self._list_machines__fetch_generic_machines():
        machine.last_seen = now
        machine.missing_since = None
        machine.state = config.STATES[NodeState.UNKNOWN]
        for action in ('start', 'stop', 'reboot', 'destroy', 'rename',
                       'resume', 'suspend', 'undefine'):
            setattr(machine.actions, action, False)
        machine.actions.tag = True
        # allow reboot action for bare metal with key associated
        if machine.key_associations:
            machine.actions.reboot = True
        machine.save()
        machines.append(machine)

    # Set missing_since on machine models we didn't see just now.
    Machine.objects(cloud=self.cloud,
                    id__nin=[m.id for m in machines],
                    missing_since=None).update(missing_since=now)

    # Update RBAC Mappings given the list of nodes seen for the first time.
    self.cloud.owner.mapper.update(new_machines)

    # Update machine counts on cloud and org.
    # FIXME: resolve circular import issues
    from mist.api.clouds.models import Cloud
    self.cloud.machine_count = len(machines)
    self.cloud.save()
    self.cloud.owner.total_machine_count = sum(
        cloud.machine_count for cloud in Cloud.objects(
            owner=self.cloud.owner, deleted=None).only('machine_count'))
    self.cloud.owner.save()

    # Close libcloud connection
    try:
        self.disconnect()
    except Exception as exc:
        log.warning("Error while closing connection: %r", exc)

    return machines
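As the docstring notes, provider-specific behaviour belongs in the dummy hook methods rather than in list_machines itself. A minimal sketch of what such an override might look like; the subclass name, the base class spelling and the flat 0.05/hour rate are assumptions for illustration only:

class ExampleComputeController(BaseComputeController):  # hypothetical subclass
    def _list_machines__fetch_machines(self):
        # Return the provider's libcloud-style node objects.
        return self.connection.list_nodes()

    def _list_machines__cost_machine(self, machine, node):
        # Report a made-up flat hourly rate; the missing monthly figure
        # is derived by the caller from month length.
        return 0.05, 0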
def get_tags_for_resource(owner, resource_obj, *args, **kwargs):
    return [{'key': tag.key, 'value': tag.value}
            for tag in Tag.objects(owner=owner, resource=resource_obj)]