def _cohort_stream_transform(source, stream, start, end, transform,
                             grouping_key, unit):
  """Build the per-cohort query plan for a single stream.

  Reads `stream` from `source` between `start` and `end`, applies the
  optional `transform`, buckets each event's timestamp into `unit`-sized
  intervals anchored at `start` (the 'date' property), and keeps only the
  earliest event per (grouping_key, bucket) pair.
  """
  events = KronosSource(source, stream, start, end)
  if transform:
    events = transform(events)
  bucket_width = Constant(DateUnit.unit_to_kronos_time(unit))
  bucketed = Project(
      events,
      [Property(TIMESTAMP_FIELD, alias=TIMESTAMP_FIELD),
       Property(grouping_key, alias=grouping_key),
       Floor([Property(TIMESTAMP_FIELD), bucket_width, Constant(start)],
             alias='date')])
  # Collapse to a single event per (user, unit time) pair, keeping the
  # first time the user performed the event in that bucket.
  return Aggregate(
      bucketed,
      GroupBy([Property(grouping_key, alias=grouping_key),
               Property('date', alias='date')]),
      [Min([Property(TIMESTAMP_FIELD)], alias=TIMESTAMP_FIELD)])
def test_aggregate(self):
  """Exercise Aggregate (Count/Sum/Min/Max/Avg) grouped on floored time."""
  expected_sums = defaultdict(int)
  # Insert 200 events with a random 'a' in {0, 1, 2}; track the expected
  # per-50-tick bucket sums as we go.
  for ts in xrange(200):
    value = random.randint(0, 2)
    self.kronos_client.put(
        {self.stream: [{
          constants.TIMESTAMP_FIELD: ts,
          'a': value
          }]})
    expected_sums[50 * (ts / 50)] += value

  floored = Project(
      KronosSource('kronos', self.stream, 0, 1000),
      [Floor([Property(constants.TIMESTAMP_FIELD), Constant(50)],
             alias=constants.TIMESTAMP_FIELD)],
      merge=True)
  aggregates = [
    Count([], alias='count'),
    Sum([Property('a')], alias='sum'),
    Min([Property('a')], alias='min'),
    Max([Property('a')], alias='max'),
    Avg([Property('a')], alias='avg')
    ]
  events = self.query(
      Aggregate(
          floored,
          GroupBy(
              Property(constants.TIMESTAMP_FIELD,
                       alias=constants.TIMESTAMP_FIELD)),
          aggregates).to_dict())

  self.assertEqual(len(events), 200 / 50)
  for event in events:
    bucket = event[constants.TIMESTAMP_FIELD]
    self.assertEqual(bucket % 50, 0)
    self.assertEqual(event['count'], 50)
    self.assertEqual(event['min'], 0)
    self.assertEqual(event['max'], 2)
    self.assertEqual(event['sum'], expected_sums[bucket])
    # avg * count should match sum up to float error.
    self.assertTrue(event['avg'] * 50 > event['sum'] - 0.1)
    self.assertTrue(event['avg'] * 50 < event['sum'] + 0.1)

  # Grouping directly on a Floor expression should bucket identically.
  events = self.query(
      Aggregate(
          KronosSource('kronos', self.stream, 0, 1000),
          GroupBy(
              Floor([Property(constants.TIMESTAMP_FIELD), Constant(50)],
                    alias=constants.TIMESTAMP_FIELD)),
          [Count([], alias='count')]).to_dict())
  self.assertEqual(len(events), 200 / 50)
def aggregate(query_plan, operands):
  """Wrap `query_plan` in an Aggregate built from the request `operands`.

  `operands['aggregates']` describes the aggregation operators
  (type, operand CPF, alias); `operands['groups']` describes the
  group-by fields.
  """
  aggregates = [agg_op(spec['agg_type'], cpf(spec['agg_on']), spec['alias'])
                for spec in operands['aggregates']]
  group_fields = [cpf(spec['field'], spec['alias'])
                  for spec in operands['groups']]
  return Aggregate(query_plan, GroupBy(group_fields), aggregates)
def aggregate(query_plan, operands):
  """Wrap `query_plan` in an Aggregate built from the request `operands`.

  An aggregate's `agg_on` CPF may be effectively absent (e.g. for Count):
  it is treated as empty when the relevant property name / constant value
  is missing or an empty string, in which case `None` is passed to
  `agg_op` instead of a CPF.
  """
  aggregates = []
  for agg in operands['aggregates']:
    agg_on = agg['agg_on']
    cpf_type = agg_on['cpf_type']
    property_name = agg_on.get('property_name')
    constant_value = agg_on.get('constant_value')
    # Only missing or empty-string values mean "no operand". The previous
    # truthiness test (`not constant_value`) wrongly discarded legitimate
    # falsy constants such as 0 or False.
    empty = ((cpf_type == 'property' and property_name in (None, '')) or
             (cpf_type == 'constant' and constant_value in (None, '')))
    agg_on_cpf = None if empty else cpf(agg_on)
    aggregates.append(agg_op(agg['agg_type'], agg_on_cpf, agg['alias']))
  groups = [cpf(group['field'], group['alias'])
            for group in operands['groups']]
  return Aggregate(query_plan, GroupBy(groups), aggregates)
def cohort_queryplan(plan):
  """
  Input: {
   'source': 'kronos',                     # Name of data source from settings.
   'cohort':
    {'stream': CohortTest.EMAIL_STREAM,    # Kronos stream to define cohort
                                           # from.
     'transform': lambda x: x,             # Transformations on the kstream.
     'start': date.now(),                  # The day of the first cohort.
     'unit': DateUnit.XX,                  # Users are in the same cohort if
                                           # they are in the same day/week.
     'cohorts': 5,                         # How many cohorts
                                           # (days/weeks/months) to track.
     'grouping_key': 'user'},              # What key in an event should we
                                           # tie to a key in the action
                                           # stream?
   'action':
    {'stream': CohortTest.FRONTPAGE_STREAM, # Stream users take actions on.
     'transform': lambda x: x,             # Transformations on the stream.
     'unit': DateUnit.XX,                  # Track events in day/week/months.
     'repetitions': 14,                    # How many days/weeks/months to
                                           # track.
     'grouping_key': 'user_id'}            # What key in an event should we
                                           # tie to a key in the action
                                           # stream?
  }

  Output: A metis-compatible query plan to return a cohort analysis.
  """
  cohort = plan['cohort']
  action = plan['action']
  source = plan['source']

  # Calculate the start and end dates, in Kronos time, of the
  # beginning and end of the cohort and action streams that will be
  # relevant.
  cohort_start = datetime_to_kronos_time(_date_to_datetime(cohort['start']))
  cohort_span = timedelta(**{cohort['unit']: cohort['cohorts']})
  cohort_end = cohort['start'] + cohort_span
  action_span = timedelta(**{action['unit']: action['repetitions']})
  # NOTE: `action_end` must be derived while `cohort_end` is still a
  # date/datetime; both are only converted to (exclusive, hence the +1)
  # Kronos timestamps afterwards.
  action_end = cohort_end + action_span
  cohort_end = datetime_to_kronos_time(_date_to_datetime(cohort_end)) + 1
  action_end = datetime_to_kronos_time(_date_to_datetime(action_end)) + 1

  # One event per (user, time bucket) for each stream; the cohort side
  # also spans the action window's start so joins below line up.
  left = _cohort_stream_transform(source,
                                  cohort['stream'], cohort_start, cohort_end,
                                  cohort.get('transform'),
                                  cohort['grouping_key'], cohort['unit'])
  right = _cohort_stream_transform(source,
                                   action['stream'], cohort_start, action_end,
                                   action.get('transform'),
                                   action['grouping_key'], action['unit'])

  additional_action_time = (DateUnit.unit_to_kronos_time(action['unit']) *
                            action['repetitions'])

  left.alias = 'cohort'
  right.alias = 'action'

  # Pair each cohort event with the same user's action events that occur
  # at or after the cohort event but within the tracked repetition window.
  joined = Join(left,
                right,
                (Condition(Condition.Op.EQ,
                           Property('cohort.%s' % cohort['grouping_key']),
                           Property('action.%s' % action['grouping_key'])) &
                 Condition(Condition.Op.GTE,
                           Property('action.%s' % TIMESTAMP_FIELD),
                           Property('cohort.%s' % TIMESTAMP_FIELD)) &
                 Condition(Condition.Op.LT,
                           Property('action.%s' % TIMESTAMP_FIELD),
                           Add([Property('cohort.%s' % TIMESTAMP_FIELD),
                                Constant(additional_action_time)]))))

  # First aggregate per (cohort date, user, action step): how many actions
  # each user performed in each step after joining their cohort...
  user_aggregated = Aggregate(
      joined,
      GroupBy([Property('cohort.date', alias=TIMESTAMP_FIELD),
               Property('cohort.%s' % cohort['grouping_key'], alias='group'),
               Floor([Subtract([Property('action.%s' % TIMESTAMP_FIELD),
                                Property('cohort.%s' % TIMESTAMP_FIELD)]),
                      Constant(DateUnit.unit_to_kronos_time(action['unit']))],
                     alias='action_step')]),
      [Count([], alias='count')])

  # ...then collapse across users to get total actions per
  # (cohort date, action step).
  aggregated = Aggregate(
      user_aggregated,
      GroupBy([Property(TIMESTAMP_FIELD, alias=TIMESTAMP_FIELD),
               Property('action_step', alias='action_step')]),
      [Count([], alias='cohort_actions')])

  # TODO(marcua): Also sum up the cohort sizes, join with the plan.
  return aggregated.to_dict()