Esempio n. 1
0
    def mock_enrollment_data(self, has_enrollments):
        """Build a mocked EventEnrollmentManager whose filter()/values()
        chain yields canned enrollment rows.

        :param has_enrollments: when True the mocked values() returns the
            generated rows; otherwise it returns an empty list.
        :return: the mocked manager
        """
        manager = EventEnrollmentManager()

        total_events = TestApi.NUM_DAILY_EVENTS + TestApi.NUM_WEEKLY_EVENTS

        # One enrollment (all on roster 1) per scheduled event.
        rows = [
            {'enrollment_id': idx + 10, 'event_id': idx, 'roster_id': 1}
            for idx in range(total_events)
        ]
        # One extra enrollment per daily event, with ids offset past the
        # block above.
        rows.extend(
            {'enrollment_id': total_events + idx + 10,
             'event_id': total_events + idx,
             'roster_id': 1}
            for idx in range(TestApi.NUM_DAILY_EVENTS)
        )

        fake_queryset = QuerySet()
        manager.filter = MagicMock(return_value=fake_queryset)
        fake_queryset.values = MagicMock(
            return_value=rows if has_enrollments else [])

        return manager
 def get_dates(decks: QuerySet) -> List[date]:
     """
     Collect the distinct creation dates of the given decks.
     :param decks: The queryset of decks to read the dates from
     :return: The list of unique dates
     """
     distinct_rows = decks.values('date_created').distinct()
     return [row['date_created'] for row in distinct_rows]
Esempio n. 3
0
    def mock_event_data(self):
        """Build a mocked EventManager whose filter()/values() chain yields
        canned event rows: one per designated event in the scheduled year,
        plus one per daily event dated in the earlier year.

        :return: the mocked manager
        """
        manager = EventManager()

        def make_event(event_id, year):
            # A single fake event row; link and section are placeholders.
            return {
                'event_id': event_id,
                'event_name': "Event " + str(event_id),
                'meeting_link': "https://www.google.fake",
                'section': "fake",
                'year': year,
            }

        total_events = TestApi.NUM_DAILY_EVENTS + TestApi.NUM_WEEKLY_EVENTS

        # Make a 'class' for each class designated (scheduled year).
        rows = [make_event(idx, TestApi.SCHEDULED_YEAR)
                for idx in range(total_events)]
        # Make a 'class' for each daily designated class (previous year),
        # with event ids continuing past the block above.
        rows.extend(make_event(total_events + idx, TestApi.EARLY_YEAR)
                    for idx in range(TestApi.NUM_DAILY_EVENTS))

        fake_queryset = QuerySet()
        manager.filter = MagicMock(return_value=fake_queryset)
        fake_queryset.values = MagicMock(return_value=rows)

        return manager
Esempio n. 4
0
 def date_counts(self):
     """
     Returns a dictionary mapping {item_date: count}.
     """
     from django.db.models.query import QuerySet
     # Call QuerySet.values with an explicit `self` so any values()
     # override on this subclass is bypassed.
     counted = QuerySet.values(self, 'item_date').annotate(count=models.Count('id'))
     # Turn off ordering, as that breaks Count; see https://docs.djangoproject.com/en/dev/topics/db/aggregation/#interaction-with-default-ordering-or-order-by
     counted = counted.order_by()
     return {row['item_date']: row['count'] for row in counted}
Esempio n. 5
0
 def date_counts(self):
     """
     Returns a dictionary mapping {item_date: count}.
     """
     # TODO: values + annotate doesn't seem to play nice with GeoQuerySet
     # at the moment. This is the changeset where it broke:
     # http://code.djangoproject.com/changeset/10326
     from django.db.models.query import QuerySet
     # Plain QuerySet.values is called with an explicit `self` on purpose,
     # sidestepping the subclass override mentioned above.
     rows = QuerySet.values(self, 'item_date').annotate(count=models.Count('id'))
     return {row['item_date']: row['count'] for row in rows}
Esempio n. 6
0
def query_set_to_html(q_set: QuerySet):
    """
    Render the given queryset as a simple HTML table.
    note: this is a temporary solution, in future version the same functionality
    will be carried out via django templates
    TODO : rebuild this via django templates
    :return: tuple of (html table string, list of the 'id' column values)
    """
    frame = pd.DataFrame(q_set.values())
    row_ids = list(frame['id'])
    return frame.to_html(notebook=True), row_ids
Esempio n. 7
0
 def date_counts(self):
     """
     Returns a dictionary mapping {item_date: count}.
     """
     # TODO: values + annotate doesn't seem to play nice with GeoQuerySet
     # at the moment. This is the changeset where it broke:
     # http://code.djangoproject.com/changeset/10326
     from django.db.models.query import QuerySet
     # NOTE(review): QuerySet.values is called with an explicit `self`,
     # which bypasses any values() override on this queryset subclass —
     # presumably the workaround for the changeset above; confirm.
     qs = QuerySet.values(self,
                          'item_date').annotate(count=models.Count('id'))
     return dict([(v['item_date'], v['count']) for v in qs])
Esempio n. 8
0
    def mock_roster_data(self):
        """Build a mocked EventRosterParticipantManager whose
        filter()/values() chain yields one participant row per event,
        all attached to roster 1.

        :return: the mocked manager
        """
        manager = EventRosterParticipantManager()

        total_events = TestApi.NUM_DAILY_EVENTS + TestApi.NUM_WEEKLY_EVENTS
        rows = [{'roster_id': 1, 'participant_id': participant}
                for participant in range(total_events)]

        fake_queryset = QuerySet()
        manager.filter = MagicMock(return_value=fake_queryset)
        fake_queryset.values = MagicMock(return_value=rows)

        return manager
Esempio n. 9
0
    def mock_event_schedule_data(self):
        """Build a mocked EventScheduleManager whose filter()/values()
        chain yields canned schedule rows: one weekly slot per weekly
        event, then two batches of daily slots (weekday -1 marks daily).

        :return: the mocked manager
        """
        manager = EventScheduleManager()

        rows = []

        def as_time_string(hour):
            # Schedule times are stored as "HH:MM:SS" strings.
            return str(datetime.time(hour=hour, minute=0, second=0))

        # Make a 'class' for each class designated (weekly): one-hour
        # slots spaced two hours apart, wrapping before 20:00.
        last_used = 0
        for weekly_idx in range(TestApi.NUM_WEEKLY_EVENTS):
            begin_hour = (6 + weekly_idx * 2) % 20
            rows.append({
                'event_id': weekly_idx,
                'schedule_id': weekly_idx + 20,
                'weekday': weekly_idx % 5,
                'start_time': as_time_string(begin_hour),
                'end_time': as_time_string(begin_hour + 1),
            })
            last_used = weekly_idx

        # Make a 'class' for each class designated (daily), twice: once
        # for the current year and once for the previous year. The counter
        # is bumped before use, so daily event ids advance by two.
        for _ in range(2):
            for daily_idx in range(TestApi.NUM_DAILY_EVENTS):
                last_used = last_used + 1
                event_id = daily_idx + last_used
                begin_hour = (6 + event_id * 2) % 20
                rows.append({
                    'event_id': event_id,
                    'schedule_id': event_id + 20,
                    'weekday': -1,
                    'start_time': as_time_string(begin_hour),
                    'end_time': as_time_string(begin_hour + 1),
                })

        fake_queryset = QuerySet()
        manager.filter = MagicMock(return_value=fake_queryset)
        fake_queryset.values = MagicMock(return_value=rows)

        return manager
Esempio n. 10
0
def buy_offers(account: BankAccount, password: str,
               shopping_offers: QuerySet) -> dict:
    """Buy the given offers, one store at a time.

    The offers are partitioned by their store so each store gets a single
    OfferBuyer transaction; the per-store message dicts are merged into
    one result dict.

    :param account: the buyer's bank account
    :param password: password forwarded to OfferBuyer
    :param shopping_offers: queryset of rows, each linking an Offer (with
        its Store) and an Amount
    :return: merged dict {message_key: [messages...]}
    """
    global_message = {}
    # One purchase round per distinct store id appearing in the queryset.
    for shop_id in set(x['Offer__Store__id']
                       for x in shopping_offers.values('Offer__Store__id')):
        shop_offers = shopping_offers.filter(Offer__Store__id=shop_id)
        # Accumulate this store's offers and the amount for each.
        offers = Offer.objects.none()
        amounts = []
        for i, shop_off in enumerate(shop_offers):
            amounts.append(shop_off.Amount)
            offers |= Offer.objects.filter(id=shop_off.Offer.id)
        # NOTE(review): `bank` is not defined in this function — presumably
        # a module-level global; confirm it is initialised before this runs.
        buyer = OfferBuyer(offers, amounts, account, bank, password,
                           offers.first().Store.Bank_Account.Account)
        message = buyer.buy_offers()
        # Merge this store's messages into the global result, extending
        # the list for keys that already exist.
        for x in message:
            if x in global_message:
                global_message[x].extend(message[x])
            else:
                global_message[x] = message[x]
    return global_message
Esempio n. 11
0
def stickiness_format_intervals(events: QuerySet,
                                filter: StickinessFilter) -> QuerySet:
    """Keep only people whose events cover exactly the selected number of
    distinct truncated-timestamp intervals."""
    per_person = events.values("person_id")
    with_interval_counts = per_person.annotate(
        day_count=Count(filter.trunc_func("timestamp"), distinct=True))
    return with_interval_counts.filter(day_count=filter.selected_interval)
Esempio n. 12
0
def qs_to_result(result: QuerySet,
                 stride: int = 1,
                 group: bool = False,
                 shuffle: bool = False,
                 deterministic_order: bool = False,
                 custom_order_by_id: List[int] = None,
                 frame_major: bool = True,
                 show_count: bool = False,
                 limit: int = 100,
                 color: str = "red") -> Dict:
    """Materialize a queryset into the grouped result dict.

    Dispatches on the model class of ``result`` (Frame subclasses, the
    Face/Object/Pose family, Track subclasses, Video subclasses) and emits
    ``{'result': groups, 'count': count, 'type': model_name}``.

    :param result: queryset to materialize
    :param stride: keep every ``stride``-th row
    :param group: group elements per video when True
    :param shuffle: randomize ordering via ``order_by('?')``
    :param deterministic_order: stable ordering when not shuffling
    :param custom_order_by_id: explicit id order to sort results by
    :param frame_major: for the Face/Object family, emit one element per
        frame (carrying all its objects) instead of one per instance
    :param show_count: run ``result.count()`` and include it in the output
    :param limit: maximum number of elements emitted
    :param color: color tag attached to grouped segments
    :raises Exception: for model classes not handled below
    """

    count = result.count() if show_count else 0

    if shuffle:
        result = result.order_by('?')

    materialized_result = []
    cls = result.model
    bases = cls.__bases__
    # Frame-like models: one element per frame, no attached objects.
    if bases[0] is base_models.Frame:
        if custom_order_by_id is not None:
            result = sorted(result, key=lambda x: custom_order_by_id.index(x.id))
        elif not shuffle and deterministic_order:
            result = result.order_by('video', 'number')
        for frame in result[:limit * stride:stride]:
            materialized_result.append({
                'video': frame.video.id,
                'min_frame': frame.number,
                'objects': []
            })

    elif (cls is Face or cls is FaceGender or cls is FaceIdentity
            or cls is Object or cls is Pose or cls is FaceLandmarks):
        # Derived face models reach the frame through their face row.
        if cls is FaceGender or cls is FaceIdentity or cls is FaceLandmarks:
            frame_path = 'face__frame'
            if cls is FaceGender:
                result = result.select_related('face', 'gender')
            elif cls is FaceIdentity:
                result = result.select_related('face', 'identity')
            else:
                result = result.select_related('face')
        else:
            frame_path = 'frame'
        result = result.select_related(frame_path)

        if not shuffle and deterministic_order:
            result = result.order_by(frame_path + '__video', frame_path + '__number')

        # Pick the per-instance serializer for this model class.
        # NOTE(review): `fn` is only bound for the classes listed above;
        # the enclosing elif guarantees one branch matches.
        if cls is Face:
            fn = bbox_to_dict
        elif cls is Object:
            fn = object_to_dict
        elif cls is FaceGender:
            fn = gender_to_dict
        elif cls is FaceIdentity:
            fn = identity_to_dict
        elif cls is Pose:
            fn = pose_to_dict
        elif cls is FaceLandmarks:
            fn = face_landmarks_to_dict

        if frame_major:
            # Frame-major: first select a page of distinct frames, then
            # fetch every matching instance on those frames.
            frame_ids = set()

            def get_all_results():
                # Group all instances on the selected frames by frame id.
                all_results = collect(
                    list(result.filter(**{frame_path + '__in': list(frame_ids)})),
                    lambda t: access(t, frame_path + '__id'))
                return all_results

            if custom_order_by_id is None:
                frames = set()
                # `m=F('id') % stride` keeps every stride-th instance.
                for inst in list(
                        result.values(
                            frame_path + '__video', frame_path + '__number',
                            frame_path + '__id').annotate(m=F('id') % stride).filter(m=0)[:limit]):
                    frames.add((inst[frame_path + '__video'], inst[frame_path + '__number'],
                                inst[frame_path + '__id']))
                    frame_ids.add(inst[frame_path + '__id'])

                all_results = get_all_results()
                frames = list(frames)
                frames.sort(key=itemgetter(0, 1))

            else:
                # Order frames by the earliest position any of their
                # instances has in custom_order_by_id.
                frames = {}
                id_to_position = defaultdict(lambda: float('inf'))
                for i, id_ in enumerate(custom_order_by_id):
                    id_to_position[id_] = i
                for inst in list(
                        result.values(
                            'id', frame_path + '__video', frame_path + '__number',
                            frame_path + '__id').annotate(m=F('id') % stride).filter(m=0)):
                    frame_key = (inst[frame_path + '__video'], inst[frame_path + '__number'],
                                 inst[frame_path + '__id'])
                    frames[frame_key] = min(id_to_position[inst['id']], frames[frame_key]
                                            if frame_key in frames else float('inf'))
                    frame_ids.add(inst[frame_path + '__id'])
                all_results = get_all_results()
                frames = sorted([x for x in frames.items()], key=lambda x: x[1])
                frames = [x[0] for x in frames[:limit]]

            for (video, frame_num, frame_id) in frames:
                materialized_result.append({
                    'video': video,
                    'min_frame': frame_num,
                    'objects': [fn(inst) for inst in all_results[frame_id]]
                })


        else:
            # Instance-major: one element per instance.
            # NOTE(review): uses inst.frame directly, so this path looks
            # wrong for the face__frame models — confirm callers never use
            # frame_major=False with FaceGender/FaceIdentity/FaceLandmarks.
            for inst in result[:limit * stride:stride]:
                r = {
                    'video': inst.frame.video.id,
                    'min_frame': inst.frame.number,
                    'objects': [fn(inst)]
                }
                materialized_result.append(r)

    elif bases[0] is base_models.Track:
        if not shuffle and deterministic_order:
            result = result.order_by('video', 'min_frame')

        # Zero-length tracks are filtered out via the duration annotation.
        # NOTE(review): `result` is rebound to a dict inside this loop,
        # shadowing the queryset name; harmless because the iterator was
        # already created, but fragile if more code is added after it.
        for t in result.annotate(duration=Track.duration_expr()).filter(duration__gt=0)[:limit]:
            result = {
                'video': t.video.id,
                'track': t.id,
                'min_frame': t.min_frame,
                'max_frame': t.max_frame,
            }

            materialized_result.append(result)
        if custom_order_by_id is not None:
            materialized_result.sort(key=lambda x: custom_order_by_id.index(x['track']))
        else:
            materialized_result.sort(key=itemgetter('video', 'min_frame'))

    elif bases[0] is base_models.Video:
        if custom_order_by_id is not None:
            raise NotImplementedError()

        if not shuffle and deterministic_order:
            result = result.order_by('id')

        # One whole-video element per row.
        for v in result[:limit]:
            materialized_result.append({
                'video': v.id,
                'min_frame': 0,
                'max_frame': v.num_frames})

    else:
        raise Exception("Unsupported class")

    ty_name = cls.__name__
    if group:
        # Group per video; each group carries its video's frame count.
        by_video = collect(materialized_result, itemgetter('video'))
        videos = collect(Video.objects.filter(id__in=by_video.keys()).all(),
                attrgetter('id'))
        groups = [{
            'type': 'contiguous',
            'label': video,
            'num_frames': videos[video][0].num_frames,
            'elements': [{
                'video': video,
                'segments': sorted(by_video[video], key=itemgetter('min_frame')),
                'color': color
            }]
        } for video in sorted(by_video.keys())]
    else:
        groups = [{'type': 'flat', 'label': '', 'elements': [r]} for r in materialized_result]

    return {'result': groups, 'count': count, 'type': ty_name}
Esempio n. 13
0
class QueryBuilder:
    """
        Name  : QueryBuilder
        Input : Model and request
        Desc  : Generic builder; takes a model and a request, applies
                filter, group and aggregation, and returns the row list
                when the build method is called.
    """
    def __init__(self, model, request):
        """Initialize the builder from the request's GET parameters."""
        # GET params arrive as strings; default to '' (not []) so the
        # truthiness/split logic below degrades gracefully when absent.
        self.agg = request.GET.get('agg', '')
        self.group = request.GET.get('group', '')
        self.where = request.GET.get('where', {})
        self.sort = request.GET.get('sort', '')
        self.model = model
        self.qs = QuerySet(model)
        # Leading sigil on an agg column selects the aggregate function.
        self.agg_literal_map = {'+': Sum, '-': Avg, '*': Count}
        self.agg_map = {}

    def _get_agg_col_mapping(self, column):
        """
            Input : column
            Output:
                If the first char is one of the sigils below, the mapped
                aggregation is applied; otherwise Sum is used by default.
                    + -> Sum
                    - -> Avg
                    * -> Count
            e.g.:
                +col1 -> Sum(col1)
        """
        _aggregation = Sum
        _column = column

        if (column[0] in self.agg_literal_map):
            _aggregation = self.agg_literal_map[column[0]]
            _column = column[1:]

        self.agg_map[_column] = _aggregation(_column)

    def _get_agg(self):
        """
            Splits the agg param from the request and invokes
            _get_agg_col_mapping to construct the aggregation map.
        """
        # BUG FIX: the former `is not None` check let an empty/missing
        # param through and crashed on .split(); an empty param now simply
        # yields an empty aggregation map.
        if self.agg:
            columns = self.agg.split(",")
            for column in columns:
                self._get_agg_col_mapping(column)

        return self.agg_map

    def _get_group(self):
        """
            Splits the group param from the request, otherwise returns an
            empty list.
        """
        if len(self.group):
            return [field for field in self.group.split(",")]
        return EMPTY_LIST

    def _get_where(self):
        """
            Converts the JSON-type string where clause to a dict.
            Customisation is done only for date_to and date_from, which
            are mapped to the out-of-the-box date__gte and date__lte
            fields.
        """
        if self.where:
            json_where_clause = json.loads(self.where)
            if 'date_from' in json_where_clause:
                json_where_clause["date__gte"] = json_where_clause["date_from"]
                del json_where_clause["date_from"]
            if 'date_to' in json_where_clause:
                json_where_clause["date__lte"] = json_where_clause["date_to"]
                del json_where_clause["date_to"]
            return json_where_clause
        return EMPTY_DICT

    def _get_order_by(self):
        """
            If the sort param is present then splits it, otherwise returns
            an empty list.
        """
        if self.sort:
            return self.sort.split(",")
        return EMPTY_LIST

    def build(self):
        """
            From the queryset, applies filter, group, sort and agg
            accordingly and returns the resulting rows as a list.
        """
        is_group_by = False

        # Calculated field CPI (spend per install).
        cpi = ExpressionWrapper(F('spend') / F('installs'),
                                output_field=FloatField())
        self.qs = self.qs.annotate(cpi=cpi)

        # Aggregations are applied here
        if len(self._get_group()) > 0 and len(self._get_agg()) > 0:
            is_group_by = True
            self.qs = self.qs.values(*self._get_group()) \
                             .annotate(**self._get_agg())

        # Where clause is applied here
        if len(self._get_where()) > 0:
            self.qs = self.qs.filter(**self._get_where())

        # Sorting is applied
        if len(self._get_order_by()) > 0:
            self.qs = self.qs.order_by(*self._get_order_by())

        # Return the list of performance-metric rows
        if is_group_by:
            return list(self.qs)
        else:
            self.qs = self.qs.values()
            return list(self.qs)
Esempio n. 14
0
def query_set_to_df(q_set: QuerySet):
    """
    Convert the given django queryset into a pandas DataFrame.
    """
    rows = q_set.values()
    return pd.DataFrame(rows)