def export_retention_activity_raw_log(request):
    """
	Exports retention activity log for analysis
	"""
    defender_id = request.user.id
    is_defender, is_super_defender = in_defenders(defender_id,
                                                  return_super_status=True)
    if is_super_defender:
        data_to_write_to_csv = retrieve_retention_activity_raw_records()
        if data_to_write_to_csv:
            filename = 'retention_activity.csv'
            import csv  # imported locally, as the other exporters in this module do
            with open(filename, 'wb') as f:
                wtr = csv.writer(f)
                columns = ['User ID', 'Which Cohort', 'Activity Time', 'Which Day', 'Activity Time (Human)',
                           'HTTP Method', 'Activity Type', 'Page #', 'Input Text', 'Img URL', 'Caption',
                           'Target User ID', 'Which Topic', 'Object Type', 'Num 1on1s']
                wtr.writerow(columns)
                # WATCH OUT: 'M5.e' or 'S5.e' values in 'Activity Type' are errors which, if encountered, could be the main culprit behind var2's demise. This error was observed between the 'mhb11' and 'lonnie_smith' IDs
                for activity_dict, cohort_id, user_id, which_day, activity_time in data_to_write_to_csv:
                    to_write = [user_id, cohort_id, int(activity_time), which_day, exact_date(activity_time),
                                activity_dict['m'], activity_dict['act'], activity_dict.get('pg', ''),
                                activity_dict.get('tx', '').encode('utf-8'), activity_dict.get('pi', ''),
                                activity_dict.get('pc', '').encode('utf-8'), activity_dict.get('tuid', ''),
                                activity_dict.get('url', '').encode('utf-8'), activity_dict.get('ot', ''),
                                activity_dict.get('nf', '')]
                    wtr.writerow(to_write)
    raise Http404("Completed ;)")
def export_logged_actions(request):
    """
	Exports all logged actions into a CSV file for analysis
	"""
    own_id = request.user.id
    is_defender, is_super_defender = in_defenders(own_id,
                                                  return_super_status=True)
    if is_super_defender:
        data_to_write_to_csv = retrieve_all_logged_actions(
        )  # list of ':'-delimited payload strings (format documented below)
        if data_to_write_to_csv:
            import csv  # imported locally, as the other exporters in this module do
            filename = 'action_data.csv'
            with open(filename, 'wb') as f:
                wtr = csv.writer(f)
                #  payload = str(user_id)+":"+user_age+":"+action_categ+":"+action_sub_categ+":"+action_liq+":"+str(time_of_action)
                columns = [
                    "User ID", "hours since segment init", "parent category",
                    "sub category", "action liquidity", "time of logging"
                ]
                wtr.writerow(columns)
                for action_data in data_to_write_to_csv:
                    data_list = action_data.split(":")
                    user_id, hrs_since_segment_init, action_categ, action_sub_categ, liquidity, time_of_action = data_list[0],\
                    data_list[1],data_list[2],data_list[3], data_list[4], data_list[5]
                    to_write = [
                        user_id, hrs_since_segment_init, action_categ,
                        action_sub_categ, liquidity,
                        exact_date(float(time_of_action))
                    ]
                    wtr.writerow(to_write)
    raise Http404("Completed logging ;)")
Example 3
def construct_administrative_activity(punisher_id, target_id, time_now, group_id, history_type, reply_id=None):
	"""
	Logs 'hide' and 'unhide' activity in the public mehfil administrative history

	A more involved version of document_administrative_activity() since it constructs the main sentence itself
	In the end, it calls document_administrative_activity() to get the job done
	"""    
	username_dictionary = retrieve_bulk_unames([punisher_id,target_id],decode=True)
	text = retrieve_single_group_submission(group_id, reply_id, text_only=True)
	if history_type not in ('hide', 'unhide'):
		raise ValueError("history_type must be 'hide' or 'unhide'")
	partial_sentence = username_dictionary[punisher_id]+" ne "+username_dictionary[int(target_id)]+\
	" ki baat "+history_type+" ki at {0}".format(exact_date(time_now))
	main_sentence = partial_sentence+". Text: "+text if text else partial_sentence
	document_administrative_activity(group_id, main_sentence, history_type)
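The persistence helper document_administrative_activity() is not part of this listing. A loudly hypothetical sketch of what it might do, assuming the same redis connection pool used elsewhere in this module — the key name and trim length are invented for illustration; only the (group_id, main_sentence, history_type) signature comes from the call above:

def document_administrative_activity(group_id, main_sentence, history_type):
	# Hypothetical: append the sentence to a capped per-group history list.
	# Key name and the cap of 100 entries are assumptions, not the real schema.
	# history_type could select a different list; it is ignored in this sketch.
	my_server = redis.Redis(connection_pool=POOL)
	key = "administrative_history:" + str(group_id)
	my_server.lpush(key, main_sentence)
	my_server.ltrim(key, 0, 99)  # keep only the most recent 100 entries
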
Example 4
def user_vote_history(request,vote):
	"""
	Renders the voting history of the user

	Shows all upvotes or downvotes (in separate pages) cast within last 1 month
	"""
	if vote in ('upvote', 'downvote'):
		own_id, page_num = request.user.id, request.GET.get('page', '1')
		upvote_listing = (vote == 'upvote')
		start_index, end_index = get_indices(page_num, VOTE_HISTORY_ITEMS_PER_PAGE)
		voting_data, list_total_size = retrieve_voting_records(voter_id=own_id, start_idx=start_index, end_idx=end_index,
			upvotes=upvote_listing, with_total_votes=True)
		num_pages = list_total_size // VOTE_HISTORY_ITEMS_PER_PAGE
		max_pages = num_pages if list_total_size % VOTE_HISTORY_ITEMS_PER_PAGE == 0 else (num_pages + 1)
		page_num = int(page_num)
		final_data = []
		for data, vote_time in voting_data:
			# data contains voter_id+":"+str(target_user_id)+":"+vote_value+":"+target_obj_tp+":"+target_obj_id
			data_list = data.split(":")
			human_vote_time = exact_date(vote_time)
			final_data.append((data_list[1], data_list[2], data_list[3], data_list[4], human_vote_time))
		return render(request,"voting/voting_history.html",{'data':final_data})
	else:
		raise Http404("No other type of voting exists")
Example 5
def export_voting_records(request):
	"""
	Exports all available voting records into a CSV file for analysis
	"""
	own_id = request.user.id
	is_defender, is_super_defender = in_defenders(own_id, return_super_status=True)
	if is_super_defender:
		data_to_write_to_csv = retrieve_global_voting_records()  # list of (payload string, vote time) tuples
		if data_to_write_to_csv:
			import csv
			filename = 'voting_data.csv'
			with open(filename,'wb') as f:
				wtr = csv.writer(f)
				columns = ["Voting time (human)","voter ID","target user ID","vote value","target obj type","target obj ID"]
				wtr.writerow(columns)
				for vote_data, voting_time in data_to_write_to_csv:
					# vote_data contains voter_id+":"+str(target_user_id)+":"+vote_value+":"+target_obj_tp+":"+target_obj_id
					voter_id, target_user_id, vote_value, target_obj_tp, target_obj_id = vote_data.split(":")[:5]
					to_write = [exact_date(voting_time), voter_id, target_user_id, vote_value, target_obj_tp, target_obj_id]
					wtr.writerow(to_write)
	raise Http404("Completed ;)")
Example 6
def export_video_submissions(request):
    """
	Export submissions into a CSV for viewing
	"""
    own_id = request.user.id
    is_defender, is_super_defender = in_defenders(own_id,
                                                  return_super_status=True)
    if is_super_defender:
        data_to_write_to_csv = retrieve_competition_submissions(
            round_num=COMPETITION_ROUND
        )  # list of dictionaries, one per submission
        if data_to_write_to_csv:
            import csv
            filename = 'competition_round_{}_submissions.csv'.format(
                COMPETITION_ROUND)
            with open(filename, 'wb') as f:
                wtr = csv.writer(f)
                columns = ['User ID', 'Username', 'Submission Time', 'Raw Submission', 'Mobile Number', 'Is Youtube',
                           'YouTube URL', 'Channel Name', 'Passed', 'Partially Passed', 'Rejected', 'Checked By',
                           'Remarks']

                wtr.writerow(columns)
                for data in data_to_write_to_csv:
                    user_id = data.get('user_id', None)
                    epoch_submission_time = data.get('t', None)
                    submission_time = exact_date(float(epoch_submission_time)) if epoch_submission_time else ''
                    raw_video_url = unicode(data['raw_vurl'], "utf-8")
                    youtube_video_id = data.get('yt_video_id', '').encode('utf-8')
                    youtube_url = 'https://www.youtube.com/watch?v={}'.format(youtube_video_id) if youtube_video_id else ''
                    # the trailing empty strings leave the manual-review columns ('Channel Name' through 'Remarks') blank
                    to_write = [user_id, retrieve_uname(user_id, decode=True).encode('utf-8'), submission_time,
                                raw_video_url.encode('utf-8'), data.get('mob_num', ''), data.get('is_youtube', '0'),
                                youtube_url, '', '', '', '', '', '']
                    wtr.writerow(to_write)
    raise Http404("Completed ;)")
Example 7
def report_section_wise_retention(which_var):
    """
	Reporting: calculates (via raw data captured by set_variation_wise_retention()) and presents the daily retention data of a given section
	
	We report 12 days' worth of data - the calculated 'd1' retention is the respective 'd1' averaged out across all these days
	"""
    my_server = redis.Redis(connection_pool=POOL)
    cached_data = my_server.get(EXP[which_var + 'cr'])
    if cached_data:
        cohort_data = json.loads(cached_data)
    else:
        time_now = time.time()
        cohort_id_today = retrieve_cohort(time_now)

        cohorts_to_display = range(cohort_id_today - 11, cohort_id_today + 1)
        stringified_cohorts = map(str, cohorts_to_display)
        cohort_names = ['11 days ago', '10 days ago', '9 days ago', '8 days ago', '7 days ago', '6 days ago',
                        '5 days ago', '4 days ago', '3 days ago', '2 days ago', 'Yesterday', 'Today']
        cohort_names_dict = dict(zip(stringified_cohorts, cohort_names))
        cohort_dates = exact_date(cohorts_to_display, in_bulk=True)
        cohort_dates_dict = dict(zip(stringified_cohorts, cohort_dates))
        days_dict = {'d%d' % i: i for i in range(12)}  # 'd0' -> 0 ... 'd11' -> 11

        # extracting logged cohort-based data from redis DB
        pipeline1 = my_server.pipeline()
        for cohort_id in stringified_cohorts:
            pipeline1.hgetall(EXP[which_var + 'r'] + cohort_id)
        cohorts = pipeline1.execute()

        # re-sort retrieved cohorts according to days (i.e. 'd0' ought to be first, 'd1' second, and so forth)
        sorted_cohorts = []
        for cohort in cohorts:
            if cohort:
                sorted_cohort = sorted(cohort.items(),
                                       key=lambda x: days_dict[x[0]])
                sorted_cohorts.append(sorted_cohort)
            else:
                sorted_cohorts.append(cohort)

        # enrich retention raw numbers with '% of d0' metrics
        final_retention_data = []
        for cohort_retention_data in sorted_cohorts:
            try:
                num_visitors_in_d0_of_that_cohort = int(
                    cohort_retention_data[0]
                    [1]) if cohort_retention_data else 0
            except IndexError:
                num_visitors_in_d0_of_that_cohort = 0
            enriched_retention_data = []
            if cohort_retention_data:
                for retention_type, num_visitors in cohort_retention_data:
                    num_visitors = int(
                        num_visitors) if num_visitors else num_visitors
                    if num_visitors_in_d0_of_that_cohort:
                        enriched_retention_data.append(
                            (retention_type, num_visitors,
                             format((100.0 * num_visitors /
                                     num_visitors_in_d0_of_that_cohort),
                                    '.2f')))
                    else:
                        enriched_retention_data.append(
                            (retention_type, num_visitors, '0.00'))
            else:
                enriched_retention_data.append(())  #empty tuple is appended
            final_retention_data.append(enriched_retention_data)

        # prepare data for the template
        cohort_data = []
        for counter, cohort_id in enumerate(stringified_cohorts):
            cohort_data.append(
                (cohort_id, cohort_dates_dict[cohort_id],
                 cohort_names_dict[cohort_id], final_retention_data[counter]))

        # legacy redis-py argument order: setex(name, value, time)
        my_server.setex(EXP[which_var + 'cr'], json.dumps(cohort_data),
                        ONE_HOUR)
    return cohort_data
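The raw data this reporter reads is written by set_variation_wise_retention(), which is named in the docstring but not included in this listing. A hypothetical sketch of it, inferred purely from the hash layout read above (one hash per cohort, fields 'd0' through 'd11'); the signature and the dedup handling are assumptions:

def set_variation_wise_retention(which_var, joining_cohort_id):
    # Hypothetical: on a user visit, bump the 'dN' counter in the hash of the
    # cohort the user joined, where N is days elapsed since joining.
    # Counting each user at most once per day is omitted from this sketch.
    my_server = redis.Redis(connection_pool=POOL)
    current_cohort_id = retrieve_cohort(time.time())
    days_since_joining = current_cohort_id - joining_cohort_id
    if 0 <= days_since_joining <= 11:
        my_server.hincrby(EXP[which_var + 'r'] + str(joining_cohort_id),
                          'd%d' % days_since_joining, 1)
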
Example 8
def export_survey_results(request):
    """
	Exports all survey results into a CSV file for analysis
	"""
    own_id = request.user.id
    is_defender, is_super_defender = in_defenders(own_id,
                                                  return_super_status=True)
    if is_super_defender:
        data_to_write_to_csv = retrieve_survey_records(
        )  # list of JSON strings, one per survey record
        if data_to_write_to_csv:
            import csv
            filename = 'survey_data.csv'
            with open(filename, 'wb') as f:
                wtr = csv.writer(f)
                columns = ['Submission time', 'Skipped survey', 'Joining time', 'via FBS', 'Num followers',
                           'World age', 'Verified', 'Username', 'Gender', 'Age bracket', 'Do you watch talent vids?',
                           'I watch comedy', 'I watch singing', 'I watch dancing', 'I watch physical stunts',
                           'I watch magic tricks', 'I watch pranks', 'I watch parody', 'I watch roasting',
                           'I watch none of these', 'I watch other genre(s)', 'Do you create talent vids?',
                           'I create comedy', 'I create singing', 'I create dancing', 'I create physical stunts',
                           'I create magic tricks', 'I create pranks', 'I create parody', 'I create roasting',
                           'I create none of these', 'I create other genre(s)', 'Frequency of video consumption',
                           'Most used app for videos', 'Optional talent video link']

                wtr.writerow(columns)
                for json_data in data_to_write_to_csv:
                    data = json.loads(json_data)
                    verified = 'Yes' if data['verif'] else 'No'
                    if data['skipped'] == '0':
                        # survey was taken
                        gender = 'Male' if data['ans4'] == '1' else 'Female'
                        # a dict lookup with a '-' default guards against unexpected answer codes
                        age_bracket = {'1': '12-17', '2': '18-22', '3': '22 se zyada'}.get(data['ans5'], '-')
                        ans1_string = 'Yes' if data['ans1'] == '1' else 'No'
                        watch_none = watch_comedy = watch_singing = watch_dancing = watch_stunts = \
                            watch_magic = watch_pranks = watch_parody = watch_roasting = '-'
                        for ans in data['ans2']:
                            if ans == '1':
                                watch_none = 'Yes'
                            elif ans == '2':
                                watch_comedy = 'Yes'
                            elif ans == '3':
                                watch_singing = 'Yes'
                            elif ans == '4':
                                watch_dancing = 'Yes'
                            elif ans == '5':
                                watch_stunts = 'Yes'
                            elif ans == '6':
                                watch_magic = 'Yes'
                            elif ans == '7':
                                watch_pranks = 'Yes'
                            elif ans == '8':
                                watch_parody = 'Yes'
                            elif ans == '9':
                                watch_roasting = 'Yes'
                        create_none = create_comedy = create_singing = create_dancing = create_stunts = \
                            create_magic = create_pranks = create_parody = create_roasting = '-'
                        ans3_string = 'No'  # default in case no 'I create...' option was ticked
                        for ans in data['ans3']:
                            if ans == '1':
                                create_none = 'Yes'
                                ans3_string = 'No'
                            elif ans == '2':
                                create_comedy = 'Yes'
                                ans3_string = 'Yes'
                            elif ans == '3':
                                create_singing = 'Yes'
                                ans3_string = 'Yes'
                            elif ans == '4':
                                create_dancing = 'Yes'
                                ans3_string = 'Yes'
                            elif ans == '5':
                                create_stunts = 'Yes'
                                ans3_string = 'Yes'
                            elif ans == '6':
                                create_magic = 'Yes'
                                ans3_string = 'Yes'
                            elif ans == '7':
                                create_pranks = 'Yes'
                                ans3_string = 'Yes'
                            elif ans == '8':
                                create_parody = 'Yes'
                                ans3_string = 'Yes'
                            elif ans == '9':
                                create_roasting = 'Yes'
                                ans3_string = 'Yes'
                        ans3_string = 'Yes' if data['ans3b'] else ans3_string
                        # '-' defaults guard against unexpected answer codes
                        freq_of_vid_consumption = {'1': 'Takreeban rozaana', '2': 'Har 2-3 days baad',
                                                   '3': 'Har 1 week baad', '4': 'Buhut kamm'}.get(data['ans6'], '-')
                        app_name = {'1': 'Youtube', '2': 'TikTok', '3': 'Facebook', '4': 'Likee', '5': 'Helo',
                                    '6': 'Kwai', '7': 'VidMate', '8': 'Instagram',
                                    '9': 'I dont use any'}.get(data['ans7'], '-')
                        vid_link = data.get('ans8', '-')
                        to_write = [exact_date(data['submission_time']), 'No', exact_date(data['join_date']),
                                    data['on_fbs'], data['num_followers'], data['world_age'], verified,
                                    data['username'].encode('utf-8'), gender, age_bracket, ans1_string,
                                    watch_comedy, watch_singing, watch_dancing, watch_stunts, watch_magic,
                                    watch_pranks, watch_parody, watch_roasting, watch_none,
                                    data['ans2b'].encode('utf-8'), ans3_string, create_comedy, create_singing,
                                    create_dancing, create_stunts, create_magic, create_pranks, create_parody,
                                    create_roasting, create_none, data['ans3b'].encode('utf-8'),
                                    freq_of_vid_consumption, app_name,
                                    vid_link.encode('utf-8') if vid_link else '-']
                    else:
                        # survey was skipped: dash out the 27 survey-answer columns
                        to_write = [exact_date(data['submission_time']), 'Yes', exact_date(data['join_date']),
                                    data['on_fbs'], data['num_followers'], data['world_age'], verified,
                                    data['username'].encode('utf-8')] + ['-'] * 27

                    wtr.writerow(to_write)
    raise Http404("Completed ;)")