def get_context_data(self, **kwargs):
    """Build the template context via the parent class, logging it before returning."""
    ctx = super().get_context_data(**kwargs)
    log(self.module, 'get_context_data', 'context=%r' % ctx, file=__file__)
    return ctx
def csv_to_model(path='dashboard/apps/core/management/commands/target_groups_w2_updated.csv'):
    """Load target-group rows from *path* and bulk-insert them as TargetGroup records.

    Rows with a usable ``date`` cell (``DD/MM/YYYY``) are stored with that
    date; rows whose date is absent are stored without one, and the omission
    is logged.

    :param path: CSV file with columns group/count/percentage and optionally date.
    """
    tmp_data = pd.read_csv(path, sep=',')
    groups = []
    for index in tmp_data.index:
        date = None
        try:
            date = tmp_data['date'][index]
        except KeyError:
            # The CSV has no 'date' column at all.
            log("csv_to_model", 'csv_to_model', "couldn't get date", file=__file__)
        # A present column can still hold an empty cell, which pandas reads
        # as NaN; NaN is not None, and strptime() would crash on it, so it
        # must be treated as a missing date too.
        if date is not None and not pd.isna(date):
            groups.append(TargetGroup(
                group=tmp_data['group'][index],
                count=tmp_data['count'][index],
                percentage=tmp_data['percentage'][index],
                # Needs to be ISO datetime
                date=datetime.strptime(date, '%d/%m/%Y').date(),
            ))
        else:
            log("csv_to_model", 'csv_to_model',
                "added without date {group}".format(group=tmp_data['group'][index]),
                file=__file__)
            groups.append(TargetGroup(
                group=tmp_data['group'][index],
                count=tmp_data['count'][index],
                percentage=tmp_data['percentage'][index]
            ))
    TargetGroup.objects.bulk_create(groups)
def csv_to_model(
        path='dashboard/apps/core/management/commands/wordcloud_w1.csv'):
    """Load word-cloud rows from *path* and bulk-insert them as WordCloud records.

    The ``week`` column (``DD/MM/YYYY``) becomes the record date; rows with
    no usable date are stored without one.

    :param path: CSV file with columns word/count and optionally week.
    """
    # TODO: make this path dynamic
    tmp_data = pd.read_csv(path, sep=',')
    users = []
    for index in tmp_data.index:
        date = None
        try:
            date = tmp_data['week'][index]
        except KeyError:
            # The CSV has no 'week' column at all.
            log("csv_to_model", 'csv_to_model', "couldn't get date", file=__file__)
        # An empty cell is read by pandas as NaN (not None); strptime()
        # would crash on it, so treat NaN as a missing date as well.
        if date is not None and not pd.isna(date):
            users.append(
                WordCloud(word=tmp_data['word'][index],
                          count=tmp_data['count'][index],
                          date=datetime.strptime(date, '%d/%m/%Y').date()
                          ))
        else:
            users.append(
                WordCloud(
                    word=tmp_data['word'][index],
                    count=tmp_data['count'][index],
                ))
    WordCloud.objects.bulk_create(users)
def csv_to_model(
        path='dashboard/apps/core/management/commands/malicioususers_week1.csv'
):
    """Load malicious-user rows from *path* and bulk-insert MaliciousUser records.

    The ``week`` column (``DD/MM/YYYY``) becomes the record date; rows with
    no usable date are stored without one. All other columns are copied
    field-for-field (note the CSV column ``page_rank`` maps to the model
    field ``pagerank``).

    :param path: CSV file with the malicious-user metric columns.
    """
    # TODO: make this path dynamic
    tmp_data = pd.read_csv(path, sep=',')
    users = []
    for index in tmp_data.index:
        date = None
        try:
            date = tmp_data['week'][index]
        except KeyError:
            # The CSV has no 'week' column at all.
            log("csv_to_model", 'csv_to_model', "couldn't get date", file=__file__)
        # Fields shared by both the dated and the undated variants —
        # previously duplicated verbatim in two constructor calls.
        fields = dict(
            user_id=tmp_data['user_id'][index],
            hs_freq=tmp_data['hs_freq'][index],
            postfreq=tmp_data['postfreq'][index],
            hsratio=tmp_data['hsratio'][index],
            av_overperforming=tmp_data['av_overperforming'][index],
            degree_centrality=tmp_data['degree_centrality'][index],
            betweenness_centrality=tmp_data['betweenness_centrality'][index],
            eigenvector_centrality=tmp_data['eigenvector_centrality'][index],
            pagerank=tmp_data['page_rank'][index],
            malicious_score=tmp_data['malicious_score'][index],
        )
        # Empty cells read as NaN (not None); strptime() would crash on
        # them, so only attach a date when the value is actually usable.
        if date is not None and not pd.isna(date):
            fields['date'] = datetime.strptime(date, '%d/%m/%Y').date()
        users.append(MaliciousUser(**fields))
    MaliciousUser.objects.bulk_create(users)
def get_queryset(self):
    """Return the data already attached to this view, logging the access."""
    queryset = self.data
    log(self.module, 'get_queryset', file=__file__)
    return queryset
def _csv_to_model(path='dashboard/apps/core/management/commands/window_w1.csv'):
    """Import data-window rows from *path* into DataWindow, saving in batches.

    Records are bulk-inserted 500 at a time to bound memory use, with any
    remainder flushed after the loop. ``date`` (``DD/MM/YYYY``) and
    ``comment_id`` are optional per row. A malformed row is logged and then
    re-raised, aborting the import.

    :param path: CSV file whose header names match the DataWindow columns.
    """
    data_windows = []
    with open(path, mode='r') as csv_file:
        csv_reader = csv.DictReader(csv_file)
        # NOTE: DictReader already consumes the header line to build its
        # fieldnames, so no extra next() is needed here — the previous
        # next(csv_reader) silently dropped the first data row.
        count = 0
        for row in csv_reader:
            date = None
            try:
                date = row['date']
            except KeyError:
                # No 'date' column in this file.
                log("csv_to_model", 'csv_to_model', "couldn't get date", file=__file__)
            try:
                data_window = DataWindow(
                    post_url=row['post_url'],
                    reply_id=row['reply_id'],
                    profile_id=str_to_int(row['profile_id']),
                    user_name=row['user_name'],
                    comment=row['comment'],
                    likes=str_to_int(row['likes']),
                    user_id=row['user_id'],
                    post_type=row['post_type'],
                    row_id=row['row_id'],
                    hate_speech=str_to_bool(row['hate_speech']),
                    page_name=row['page_name'],
                    page_user_name=row['page_user_name'],
                    page_likes_at_posting=str_to_int(row['page_likes_at_posting']),
                    media_type=row['media_type'],
                    post_likes=str_to_int(row['post_likes']),
                    comments=str_to_int(row['comments']),
                    shares=str_to_int(row['shares']),
                    angry_reactions=str_to_int(row['angry_reactions']),
                    media_link=row['media_link'],
                    overperforming_score=str_to_float(row['overperforming_score']),
                    hate_speech_item1=row['hate_speech_item1'],
                    hate_speech_item2=row['hate_speech_item2'],
                    hate_speech_item3=row['hate_speech_item3'],
                    hate_speech_item4=row['hate_speech_item4'],
                    targeted_group1=row['targeted_group1'],
                    targeted_group2=row['targeted_group2'],
                    targeted_group3=row['targeted_group3'],
                    targeted_group4=row['targeted_group4'],
                    election_topic_hs=str_to_bool(row['election_topic_hs']),
                    number_of_posts=str_to_int(row['number_of_posts']),
                    election_topic=str_to_bool(row['election_topic']),
                    election_topic_keyword=str_to_bool(row['election_topic_keyword']),
                    double_comment=str_to_bool(row['double_comment']),
                )
                # comment_id may be serialized in float notation (e.g.
                # '1.23e+15'), hence the int(float(...)) round-trip; skip
                # the empty cell.
                if row['comment_id'] is not None and row['comment_id'] != '':
                    data_window.comment_id = int(float(row['comment_id']))
                if date is not None and date != '':
                    data_window.date = datetime.strptime(date, '%d/%m/%Y').date()
                data_windows.append(data_window)
            except Exception as e:
                # Log the offending row's error, then propagate — a bad row
                # aborts the import rather than being silently skipped.
                log("csv_to_model", 'csv_to_model', e, file=__file__)
                raise
            count += 1
            if count == 500:
                log("csv_to_model", 'csv_to_model', 'saved 500 items', file=__file__)
                DataWindow.objects.bulk_create(data_windows)
                data_windows = []
                count = 0
    # Flush the final partial batch.
    DataWindow.objects.bulk_create(data_windows)