Example #1
    def do(self, action, from_kind, from_id, to_kind, to_id):
        from_klass = core.kind_to_class(from_kind)
        to_klass = core.kind_to_class(to_kind)
        from_entity = from_klass.get_by_id(from_id)
        to_entity = to_klass.get_by_id(to_id)
        logging.info("handler action {}".format(action))
        from_entity = self.api.unassociate(action, from_entity, to_entity)
        return {'success': True, 'data': from_entity.to_dict()}
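
Every example in this listing leans on core.kind_to_class, which is not included here. Judging only by how it is called (a lowercase pegasus kind string goes in, a datastore model class comes out, and Example #3 catches AttributeError when the lookup fails), a minimal sketch might look like the following. The CamelCase conversion and the module-level lookup are assumptions, not the project's actual implementation.

    # core.py -- hypothetical sketch, inferred from the call sites only
    import sys

    def kind_to_class(kind):
        """Map a pegasus kind such as 'stratifier_history' to the model
        class StratifierHistory defined in this module."""
        class_name = ''.join(part.capitalize() for part in kind.split('_'))
        # getattr raises AttributeError for unknown kinds, which callers
        # like Example #3 catch and skip.
        return getattr(sys.modules[__name__], class_name)
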
Example #2
    def do(self, action, from_kind, from_id, to_kind, to_id):
        from_klass = core.kind_to_class(from_kind)
        to_klass = core.kind_to_class(to_kind)
        from_entity = from_klass.get_by_id(from_id)
        to_entity = to_klass.get_by_id(to_id)
        from_entity = self.api.associate(action, from_entity, to_entity)

        # We'll want to return this at the end.
        data = from_entity.to_dict()

        # Special case for activity management: when teachers associate with a
        # cohort or classroom for the first time, activity entities need to be
        # created for them.

        init_activities = (
            from_kind == 'user' and from_entity.user_type == 'teacher'
            and ((to_kind == 'cohort' and action == 'associate') or
                 (to_kind == 'classroom' and action == 'set_owner')))
        if init_activities:
            # To simulate a fresh call, refresh the user in the Api object.
            # This only applies when a user is associating *themselves* with a
            # cohort or classroom. Without this refresh, the new associations
            # created just above won't be there and permissions to associate
            # the new activities will be denied.
            if (self.api.user.id == from_id):
                self.api = Api(from_entity)

            # If the classroom or cohort being associated to is a testing
            # entity, then these activities should also be.
            kwargs = {'is_test': to_entity.is_test}
            program_id = to_entity.assc_program_list[0]
            if to_kind == 'cohort':
                kwargs['cohort_id'] = to_entity.id
                user_type = 'teacher'
            if to_kind == 'classroom':
                kwargs['cohort_id'] = to_entity.assc_cohort_list[0]
                kwargs['classroom_id'] = to_entity.id
                user_type = 'student'
            teacher_id = from_entity.id
            activities = self.api.init_activities(user_type, teacher_id,
                                                  program_id, **kwargs)

            # If these activities are being created FOR the teacher by an admin
            # or researcher, we need to do extra work to make sure those
            # activities are owned by the teacher.
            if self.get_current_user() != from_entity:
                for a in activities:
                    self.api.associate('set_owner', from_entity, a)

            # Include the created activities with the modified entity so the
            # client gets them immediately. This allows client views to update
            # immediately if necessary.
            data['_teacher_activity_list'] = [a.to_dict() for a in activities]

        return {'success': True, 'data': data}
Example #3
    def get_changed_entities(self):

        # get classes
        # not blacklisted
        klass_names = [
            k.kind_name for k in Kind.all()
            if k.kind_name not in self.blacklist
        ]

        # check for cases where the Klass cannot be converted (AttributeError)
        # this happens in production for reasons I don't understand
        # bmh 2013
        Klasses = []
        for k in klass_names:
            try:
                Klass = core.kind_to_class(k)
            except AttributeError:
                pass
            else:
                Klasses.append(Klass)

        # get entities
        if not self.last_check:
            self.last_check = 0

        entities = [
            e for Klass in Klasses
            for e in Klass.all().filter("modified >", self.last_check).order(
                "modified").fetch(self.max_entities_to_index)
        ]

        return entities
Example #4
    def _get_children(self, kind, id, filters=[], exclude_kinds=[]):
        """Returns a list of the requested entity and all its children. What
        'children' means is defined by config.children_cascade."""
        # Confusingly enough, a pegasus kind is not the same as an app engine
        # kind. Example: class StratifierHistory:
        # pegasus kind (used in api urls): 'stratifier_history'
        # app engine kind (used in keys): 'StratifierHistory'
        klass = core.kind_to_class(kind)
        entity = klass.get_by_id(id)
        results = [entity]

        # children-fetching differs based on user type
        if kind == 'user':
            if entity.user_type in ['god']:
                raise Exception()
            kind = entity.user_type

        # Depending on the current kind, we need to get children of other
        # kinds. Exactly which kinds and filters apply is defined in
        # config.children_cascade. For instance, activities with user type
        # 'teacher' are children of a user with user type 'teacher', but
        # activities with user type 'student' are not (those are children of a
        # classroom). Since this structure doesn't map perfectly onto pure
        # kinds, we need a little fanciness to achieve the needed flexibility.
        if kind in config.children_cascade:
            for info in config.children_cascade[kind]:
                loop_filters = filters[:]
                # activities need some extra filtering
                if info.kind == 'teacher_activity':
                    loop_filters.append(('user_type =', 'teacher'))
                    child_kind = 'activity'
                elif info.kind == 'student_activity':
                    loop_filters.append(('user_type =', 'student'))
                    child_kind = 'activity'
                else:
                    child_kind = info.kind

                if child_kind not in exclude_kinds:
                    child_klass = core.kind_to_class(child_kind)
                    q = child_klass.all().filter(info.property + ' =', id)
                    for filter_tuple in loop_filters:
                        q.filter(*filter_tuple)
                    for child in q.run():
                        results += self._get_children(child_kind, child.id,
                                                      filters, exclude_kinds)

        return results
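
The exact contents of config.children_cascade are not shown; the loop above only requires a dict keyed by kind (or user_type) whose values are objects exposing .kind and .property. Under that assumption, an entry might be shaped like the sketch below; the specific kinds and property names are illustrative, not the project's real configuration.

    # Hypothetical shape of config.children_cascade, inferred from the
    # attributes the loop reads (info.kind and info.property).
    import collections

    CascadeInfo = collections.namedtuple('CascadeInfo', ['kind', 'property'])

    children_cascade = {
        'teacher': [
            # activities owned by a teacher; names are illustrative
            CascadeInfo(kind='teacher_activity', property='assc_user_list'),
        ],
        'classroom': [
            CascadeInfo(kind='student_activity',
                        property='assc_classroom_list'),
        ],
    }
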
Example #5
    def see(self, kind, kwargs):
        if 'n' in kwargs:
            n = kwargs['n']
            del kwargs['n']
        else:
            n = 1000

        # Although we almost always want to 'see' the entity's name, sometimes
        # we want to specify a different property, like email. Allow this via
        # the 'see' keyword.
        if 'see' in kwargs:
            projection = kwargs['see']
            del kwargs['see']
        else:
            projection = 'name'

        permissions_filters = self.user.get_permission_filters(kind, 'see')
        # request_filters = [(k + ' =', v) for k, v in kwargs.items()]
        request_filters = []
        for k, v in kwargs.items():
            operator = ' IN' if type(v) is list else ' ='
            request_filters.append((k + operator, v))

        logging.info('Api.see(kind={}, kwargs={})'.format(kind, kwargs))
        logging.info('permission filters: {}'.format(permissions_filters))
        logging.info('request filters: {}'.format(request_filters))

        filters = permissions_filters + request_filters
        klass = core.kind_to_class(kind)
        safe_filters, unsafe_filters = Api.limit_subqueries(filters)

        # Projection queries are nice and efficient for 'see' b/c they only
        # return what you're looking for (name and id), but they won't work if
        # you are filtering on the same thing you're projecting (see
        # https://developers.google.com/appengine/docs/python/datastore/projectionqueries#Python_Limitations_on_projections)
        # so fork into one of two modes: projection when not filtering by name,
        # and regular otherwise.
        # Also, post processing on projection results doesn't work because
        # python can't introspect the entity's properties.
        if 'name' in kwargs or len(unsafe_filters) > 0:
            # regular-type query
            query = klass.all().filter('deleted =', False)
        else:
            # projection query
            query = db.Query(klass, projection=[projection])
            query.filter('deleted =', False)
        for filter_tuple in safe_filters:
            query.filter(*filter_tuple)
        results = query.fetch(n)

        if len(unsafe_filters) > 0:
            results = Api.post_process(results, unsafe_filters)

        # Fill in the id property.
        for e in results:
            setattr(e, 'id', e.key().name())

        return results
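
Api.limit_subqueries and Api.post_process are not part of this listing. The old db API expands each 'IN' filter into one subquery per list element and caps the number of subqueries per query (30), so a plausible reading is that filters which would blow past that cap are held back as "unsafe" and applied in memory afterwards. A rough sketch under that assumption, not the project's actual code:

    # Hypothetical split of filters into datastore-safe and in-memory sets.
    MAX_SUBQUERIES = 30

    def limit_subqueries(filters):
        safe, unsafe, subqueries = [], [], 1
        for prop_op, value in filters:
            expansion = len(value) if isinstance(value, list) else 1
            if subqueries * expansion <= MAX_SUBQUERIES:
                subqueries *= expansion
                safe.append((prop_op, value))
            else:
                unsafe.append((prop_op, value))
        return safe, unsafe

    def post_process(results, unsafe_filters):
        # Apply the deferred filters in Python. Only '=' and 'IN' are
        # handled here; the real method may cover more operators.
        for prop_op, value in unsafe_filters:
            prop = prop_op.split()[0]
            allowed = value if isinstance(value, list) else [value]
            results = [e for e in results
                       if getattr(e, prop, None) in allowed]
        return results
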
Example #6
    def do(self, kind):
        params = util.get_request_dictionary(self.request)
        ancestor = None
        # If an ancestor is specified, look it up by id and pass it in.
        if 'ancestor' in params:
            ancestor_kind = core.get_kind(params['ancestor'])
            ancestor_klass = core.kind_to_class(ancestor_kind)
            ancestor = ancestor_klass.get_by_id(params['ancestor'])
            del params['ancestor']
        results = self.api.get(kind, params, ancestor=ancestor)
        return {'success': True, 'data': [e.to_dict() for e in results]}
Example #7
    def do(self, kind):
        # kind must be one of these
        if kind not in ['cohort', 'classroom']:
            raise Exception("Invalid kind: {}".format(kind))

        params = util.get_request_dictionary(self.request)

        # Don't run the map reduce job, just show a sample of what it
        # would do.
        if 'preview' in params and params['preview'] is True:
            n = int(params['n']) if 'n' in params else 1

            # Set up a fake job context for the mapper
            conf = map_module.cache_contents(kind, submit_job=False)
            context = map_module.get_fake_context(conf)

            # This function will cache rosters and schedules via
            # api.get_roster()
            mapper = map_module.CacheContentsMapper()

            # Get some entities to preview.
            klass = core.kind_to_class(kind)
            query = klass.all()
            sample = query.fetch(n)
            [mapper(context, e) for e in sample]

            return {
                'success': True,
                'preview': True,
                'n': n,
                'data': {
                    'entities_processed': [e.id for e in sample]
                },
                'message': (
                    "Warning: this is the result of a limited preview. No "
                    "system-wide mapreduce job has been run."),
            }

        # Run it for real
        else:
            # Actually have to run THREE mapreduce jobs, one for each kind.
            conf = map_module.cache_contents(kind)
            return {'success': True, 'data': conf.job_id}
Example #8
    def do(self, mapper_name):
        params = util.get_request_dictionary(self.request)

        kind, mapper, params = getattr(self, mapper_name)(params)
        klass = core.kind_to_class(kind)

        # Don't run the map reduce job, just show a sample of what it
        # would do.
        if 'preview' in params and params['preview'] is True:
            # Get some entities to preview.
            n = int(params['n']) if 'n' in params else 10
            query = klass.all()
            for k, v in params.items():
                if k not in ['n', 'preview']:
                    query.filter(k + ' =', v)
            sample = query.fetch(n)
            before = [e.to_dict() for e in sample]

            # Set up a fake job context for the mapper and run it on each
            # entity.
            context = self.get_fake_context(kind, mapper)
            results = [mapper().do(context, e) for e in sample]
            after = [e.to_dict() for e in sample]

            return {
                'success': True,
                'preview': True,
                'n': n,
                'data': {
                    'before': before,
                    'after': after,
                },
                'message': (
                    "Warning: the results returned here are the result of a "
                    "simple query-and-modify, not a true map reduce job. "
                    "Also, no changes have been saved."),
            }

        # Run it for real
        else:
            job_config = map_module.modify_kind(kind, mapper)
            return {'success': True, 'data': job_config.job_id}
Example #9
    def get_changed(self, kind):
        """Get all entities of a kind which have been modified recently."""
        # Fetch in smaller chunks to prevent the process from being too slow.
        fetch_size = 50
        if kind not in self.last_check or not self.last_check[kind]:
            self.last_check[kind] = datetime.datetime(1970, 1, 1, 0, 0)

        # Check for updated entities of specified kind.
        klass = core.kind_to_class(kind)

        query = klass.all().filter('is_test =', False)
        # Do NOT filter by deleted, b/c we want recently-deleted entities
        # to come up in this query as "changed", forcing their parent to
        # update their totals downward.
        if kind == 'pd':
            # The exception for this is pd, which uses deletion differently.
            # We never have a reason to pay attention to deleted pd (except
            # for manual debugging and data analysis). And, in fact, we never
            # want pd progress values to decrease.
            query.filter('deleted =', False)

        # This only applies to pds for now, which need public = True.
        if kind in config.kinds_with_get_filters:
            for filter_tuple in klass.get_filters():
                query.filter(*filter_tuple)

        query.filter('modified >', self.last_check[kind])
        query.order('modified')

        result = query.fetch(fetch_size)

        # Set last_check for where most recent update left off.
        if len(result) > 0:
            self.last_check[kind] = result[-1].modified
        else:
            self.last_check[kind] = datetime.datetime.now()

        return result
Example #10
    def get(self, kind, kwargs, ancestor=None):
        """Query entities in the datastore.

        Specify an ancestor to make an "ancestor query": a query limited to
        one entity group which is strongly consistent.

        * Applies query filters based on what permissions the user has.
        * Works around App Engine limitations for complex queries.
        * Calls class startup methods, allowing on-instantiation code
          execution.
        """
        if 'n' in kwargs:
            n = int(kwargs['n'])
            del kwargs['n']
        else:
            n = 1000

        if 'order' in kwargs:
            order = kwargs['order']
            del kwargs['order']
        else:
            order = None

        permissions_filters = self.user.get_permission_filters(kind, 'get')
        # request_filters = [(k + ' =', v) for k, v in kwargs.items()]
        request_filters = []
        for k, v in kwargs.items():
            operator = ' IN' if type(v) is list else ' ='
            request_filters.append((k + operator, v))

        logging.info('Api.get(kind={}, kwargs={}, ancestor={})'.format(
            kind, kwargs, ancestor))
        logging.info('permission filters: {}'.format(permissions_filters))
        logging.info('request filters: {}'.format(request_filters))

        filters = permissions_filters + request_filters
        klass = core.kind_to_class(kind)
        query = klass.all().filter('deleted =', False)

        if order:
            query.order(order)

        if isinstance(ancestor, core.Model):
            query.ancestor(ancestor)

        if kind in config.kinds_with_get_filters:
            filters = filters + klass.get_filters()

        safe_filters, unsafe_filters = Api.limit_subqueries(filters)

        # build the query
        for filter_tuple in safe_filters:
            query.filter(*filter_tuple)
        # get full, in-memory entities
        results = query.fetch(n)
        # post-processing, if necessary
        if len(unsafe_filters) > 0:
            results = Api.post_process(results, unsafe_filters)

        # run custom startup code, if such behavior is defined
        for e in results:
            if hasattr(e, 'startup'):
                e.startup()

        return results
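
A call site for this get() might look like the sketch below. The Api(user) constructor is inferred from Example #2, and the kind, filter property, and id are purely illustrative.

    import logging

    # current_user is whatever user entity the surrounding request handler
    # already has in hand; the Api(user) constructor is inferred from
    # Example #2.
    api = Api(current_user)
    classrooms = api.get('classroom', {
        'assc_cohort_list': 'Cohort_ABC123',  # illustrative id
        'order': 'name',
        'n': 50,
    })
    logging.info('fetched {} classrooms'.format(len(classrooms)))
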
Example #11
    def create(self, kind, kwargs):
        logging.info('Api.create(kind={}, kwargs={})'.format(kind, kwargs))
        logging.info("Api.create is in transction: {}".format(
            db.is_in_transaction()))

        # check permissions

        # can this user create this type of object?
        if not self.user.can_create(kind):
            raise PermissionDenied("User type {} cannot create {}".format(
                self.user.user_type, kind))
        # if creating a user, can this user create this TYPE of user
        if kind == 'user':
            if not self.user.can_put_user_type(kwargs['user_type']):
                raise PermissionDenied(
                    "{} cannot create users of type {}.".format(
                        self.user.user_type, kwargs['user_type']))

        # create the object

        klass = core.kind_to_class(kind)
        # some updates require additional validity checks
        if kind in config.custom_create:
            # These put and associate themselves; the user is sent in so custom
            # code can check permissions.
            entity = klass.create(self.user, **kwargs)
            return entity
        else:
            # non-custom creates require more work
            entity = klass.create(**kwargs)

        if kind in config.kinds_requiring_put_validation:
            entity.validate_put(kwargs)

        # create initial relationships with the creating user

        action = config.creator_relationships.get(kind, None)
        if action is not None:
            if self.user.user_type == 'public':
                raise Exception(
                    "We should never be associating with the public user.")
            # associate, but don't put the created entity yet, there's more
            # work to do
            self.user = self.associate(action, self.user, entity, put=False)
            self.user.put()  # do put the changes to the creator

        # create required relationships between the created entity and existing
        # non-user entities

        # different types of users have different required relationships
        k = kind if kind != 'user' else entity.user_type
        for kind_to_associate in config.required_associations.get(k, []):
            target_klass = core.kind_to_class(kind_to_associate)
            # the id of the entity to associate must have been passed in
            target = target_klass.get_by_id(kwargs[kind_to_associate])
            entity = self.associate('associate', entity, target, put=False)
        if k in config.optional_associations:
            for kind_to_associate in config.optional_associations[k]:
                # they're optional, so check if the id has been passed in
                if kind_to_associate in kwargs:
                    # if it was, do the association
                    target_klass = core.kind_to_class(kind_to_associate)
                    target = target_klass.get_by_id(kwargs[kind_to_associate])
                    entity = self.associate('associate',
                                            entity,
                                            target,
                                            put=False)

        # At one point we created qualtrics link pds for students here. Now
        # that happens in the program app via the getQualtricsLinks functional
        # node.

        # now we're done, so we can put all the changes to the new entity
        entity.put()

        return entity
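
Api.create drives most of its behavior from config, which is not included in this listing. Only the shapes are implied by the code: custom_create and kinds_requiring_put_validation act as collections of kinds, creator_relationships maps a kind to an association action, and required_associations / optional_associations map a kind (or user_type) to lists of kinds whose ids arrive in kwargs. A sketch of those shapes, with invented contents:

    # Hypothetical config entries consulted by Api.create. Every kind and
    # action named here is illustrative, not the project's configuration.
    custom_create = ['user']
    kinds_requiring_put_validation = ['pd']

    # kind of the created entity -> how the creating user relates to it
    creator_relationships = {
        'cohort': 'associate',
        'classroom': 'set_owner',
    }

    # kind (or user_type, for users) -> kinds whose ids must be in kwargs
    required_associations = {
        'classroom': ['cohort'],
    }

    # kind (or user_type) -> kinds whose ids may optionally be in kwargs
    optional_associations = {
        'student': ['classroom'],
    }
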
Example #12
    def do(self, kind):
        # kind must be one of these
        if kind not in ['user', 'activity', 'cohort']:
            raise Exception("Invalid kind: {}".format(kind))

        params = util.get_request_dictionary(self.request)

        # Params not in this list will be used to filter previews.
        expected_keys = ['n', 'preview']

        # Don't run the map reduce job, just show a sample of what it
        # would do.
        if 'preview' in params and params['preview'] is True:
            n = int(params['n']) if 'n' in params else 100

            # Set up a fake job context for the mapper
            conf = map_module.fix_aggregation_json(kind, submit_job=False)
            context = map_module.get_fake_context(conf)

            # This function will modify the entity by copying aggregation data
            # to a new string property.
            mapper = map_module.AggregationJsonMapper()

            def summarize_entity(entity):
                return {
                    'id': entity.id,
                    'aggregation_data': entity.aggregation_data,
                    'aggregation_json': entity.aggregation_json
                }

            # Get some entities to preview.
            klass = core.kind_to_class(kind)
            query = klass.all()
            for k, v in params.items():
                if k not in expected_keys:
                    query.filter(k + ' =', v)
            sample = query.fetch(n)
            before = [summarize_entity(e) for e in sample]

            results = [mapper.do(context, e) for e in sample]

            after = [summarize_entity(e) for e in results]

            return {
                'success': True,
                'preview': True,
                'n': n,
                'data': {
                    'before': before,
                    'after': after,
                },
                'message': (
                    "Warning: the results returned here are the result of a "
                    "simple query-and-modify, not a true map reduce job. "
                    "Also, no changes have been saved."),
            }

        # Run it for real
        else:
            # Actually have to run THREE mapreduce jobs, one for each kind.
            conf = map_module.fix_aggregation_json(kind)
            return {'success': True, 'data': conf.job_id}
Example #13
    def do(self, update_name):
        """The update name defines what kind of update to run. This will be
        used to:
        - Create a timestamp to track progress.
        - Query for entities based on configuration in
          config.systematic_update_settings, which must be defined.
        - Execute the method of this class by the same name on each entity
          returned from the query. It must be defined. The method should take
          an entity and return either None (if the entity should not be
          updated) or a modified entity.

        Accepts parameters in request string:
        fetch_size (int) - how many entities to process at once
        start_time (str) - what "created" time to start searching for entities;
                           this overrides the normal "systematic" behavior
        preview (bool) - report on results without actually updating
        """
        from google.appengine.ext import db
        from datetime import datetime

        params = util.get_request_dictionary(self.request)

        # Check request string and apply defaults where necessary
        if 'fetch_size' in params:
            fetch_size = params['fetch_size']
        else:
            fetch_size = 100
        if 'start_time' in params:
            start_time = params['start_time']
        else:
            # Look up / create a timestamp to track progress
            ts = Timestamp.get_or_insert(update_name)
            start_time = ts.timestamp
        if 'preview' in params:
            preview = params['preview']
        else:
            preview = False

        conf = config.systematic_update_settings[update_name]

        # Query for entities
        klass = core.kind_to_class(conf['kind'])
        query = klass.all()
        query.filter('created >', start_time)
        query.order('created')
        entity_list = query.fetch(fetch_size)

        before_snapshot = [e.to_dict() for e in entity_list]

        # Look up related method
        method = getattr(self, update_name)
        if not util.is_function(method):
            raise Exception("Invalid update name: method isn't callable.")

        # Execute the method on each entity
        modified_entities = []
        for entity in entity_list:
            # Check if this systematic update has been performed before
            if update_name in entity.systematic_updates:
                raise Exception(
                    "{} has already been performed on entity {}.".format(
                        update_name, entity.id))
            else:
                entity.systematic_updates.append(update_name)
            updated_entity = method(entity)
            if updated_entity is not None:
                modified_entities.append(updated_entity)

        # Use the last entity fetched to save our spot for next time. If the
        # query returned nothing, leave the timestamp where it started.
        end_time = entity_list[-1].created if entity_list else start_time

        after_snapshot = [e.to_dict() for e in modified_entities]

        if not preview:
            db.put(modified_entities)

        if 'start_time' not in params:
            # Save progress
            ts.timestamp = end_time
            ts.put()

        return {
            'success': True,
            'data': {
                'entities_queried': len(entity_list),
                'entities_modified': len(modified_entities),
                'start_time': start_time,
                'end_time': end_time,
                'entities before update': before_snapshot,
                'entities after update': after_snapshot,
            }
        }
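
The docstring's contract for an update method is simple: take an entity, return None to leave it alone, or return the modified entity so it gets put. A hypothetical method and its matching config entry might look like the sketch below; the update name, kind, and property are invented for illustration only.

    # In config: systematic_update_settings = {
    #     'lowercase_login_email': {'kind': 'user'},
    # }

    def lowercase_login_email(self, user):
        """Hypothetical systematic update: normalize a user's email."""
        if not user.login_email:
            return None  # nothing to change; entity won't be re-put
        user.login_email = user.login_email.lower()
        return user
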