Beispiel #1
0
    def get(self, request, *args, **kwargs):
        """Return cold/normal/warm summaries for one Twitter user.

        Reads ``user_id`` from the query string, fetches the user's tweets
        once, and returns the three summary views of that data.
        """
        user_id = request.GET.get('user_id', None)

        # Fetch the tweets exactly once and reuse the result — the original
        # called twitter_user(user_id) three times for the same argument,
        # tripling the (presumably network-bound) work.
        user_tweets = twitter_user(user_id)

        return Response(
            data={
                "cold": cold_def(user_tweets),
                "normal": normal_def(user_tweets),
                "warm": warm_def(user_tweets),
            },
            status=status.HTTP_201_CREATED,
        )
Beispiel #2
0
def tweet_sentiment(name):
    """Score each tweet of user *name*, returning one formatted score per tweet.

    Each score combines Google Cloud Natural Language sentiment (its score is
    in [-1, 1], mapped to [0, 50] via ``(score + 1) * 25``) with a local model
    prediction scaled by 50, formatted to one decimal place as a string.

    Returns:
        list[str]: one score string per tweet, in tweet order.
    """
    client = language_v1.LanguageServiceClient()

    # twitter_user returns (tweet_list, dataframe); only the list is used here.
    tweet_list, _df = twitter_user(name)

    # Append per tweet instead of pre-filling a list with '\0' placeholders
    # and assigning by index, as the original did.
    score_list = []
    for tweet in tweet_list:
        sentence_str = str(tweet)

        document = language_v1.Document(
            content=sentence_str, type_=language_v1.Document.Type.PLAIN_TEXT)

        sentiment = client.analyze_sentiment(request={
            'document': document
        }).document_sentiment

        # Cloud sentiment contributes 0-50, the local model contributes 0-50,
        # giving a combined 0-100 scale.
        score_list.append(format(
            (sentiment.score + 1) * 25 + sentiment_predict(sentence_str) * 50,
            ".1f"))

    return score_list
def model_sentiment(name):
    """Return the local model's sentiment prediction for each tweet of *name*.

    Returns:
        list: one ``sentiment_predict`` result per tweet, in tweet order.
    """
    # twitter_user returns (tweet_list, dataframe); only the list is used here.
    tweet_list, _df = twitter_user(name)

    # Comprehension replaces the original '\0'-prefilled list that was
    # assigned by index inside a range(len(...)) loop.
    return [sentiment_predict(str(tweet)) for tweet in tweet_list]
Beispiel #4
0
    def get(self, request, *args, **kwargs):
        """Persist per-tweet sentiment rows for a user, then return them.

        Reads ``user_id`` from the query string, scores the user's tweets,
        upserts one ``Sentence`` row per tweet, and responds with the user's
        rows plus overall and per-month score averages.
        """
        user_id = request.GET.get('user_id', None)

        # twitter_user returns (tweet_list, dataframe); only the frame is
        # used here — the tweet texts come from df['Tweets'].
        _tweet_list, df = twitter_user(user_id)
        score = tweet_sentiment(user_id)

        # Single pass: the original built an intermediate (tweet, date) list
        # in one loop and consumed it in a second loop over the same range.
        for i in range(len(df)):
            tweet, date = df['Tweets'][i], df['Dates'][i]
            Sentence.objects.get_or_create(user_id=user_id,
                                           sentence=tweet,
                                           date=date,
                                           title=score[i],
                                           date_m=date.strftime('%Y-%m'))

        # Filter to this user's rows (SQL: WHERE user_id = <user_id>).
        # Built once and reused — the original rebuilt the same filtered
        # queryset three times.
        queryset = self.get_queryset().filter(user_id=user_id)

        serializer_class = self.get_serializer_class()
        serializer = serializer_class(queryset, many=True)

        # Aggregates: row count, overall average score, and per-month average
        # (grouped on the precomputed 'YYYY-MM' date_m column).
        total_sen = queryset.count()
        avg_sen = queryset.aggregate(Avg('title'))
        avg_sen['total_sen'] = total_sen
        avg_month = queryset.values('date_m').annotate(Avg('title'))

        page = self.paginate_queryset(queryset)
        if page is not None:
            serializer = self.get_serializer(page, many=True)
            return self.get_paginated_response(serializer.data)

        return Response(
            data={
                "avg": avg_sen,
                "m_avg": avg_month,
                "data": serializer.data,
            },
            status=status.HTTP_201_CREATED,
        )
Beispiel #5
0
    def get(self, request, *args, **kwargs):
        """Store word-frequency rows for a user's tweets, then return them.

        Reads ``user_id`` from the query string, counts words across the
        user's tweets, creates one ``Word`` row per distinct word, and
        responds with the user's word rows (paginated when applicable).
        """
        user_id = request.GET.get('user_id', None)

        # Filter to this user's word rows (SQL: WHERE word_user_id = <user_id>).
        # NOTE(review): Django querysets are lazy, so this is evaluated only
        # when serialized below — after the Word rows have been created.
        queryset = self.get_queryset().filter(word_user_id=user_id)

        # twitter_user returns (tweet_list, dataframe); only the frame is
        # needed for counting. count_word also returns a noun/adj/adv token
        # list that this endpoint does not use.
        _tweet_list, df = twitter_user(user_id)
        words_dic, _noun_adj_adv_list = count_word(df)

        # One row per distinct word with its occurrence count.
        for word, count in words_dic.items():
            Word.objects.create(word_user_id=user_id, text=word, value=count)

        serializer_class = self.get_serializer_class()
        serializer = serializer_class(queryset, many=True)

        page = self.paginate_queryset(queryset)
        if page is not None:
            serializer = self.get_serializer(page, many=True)
            return self.get_paginated_response(serializer.data)

        return Response(
            data={
                "data": serializer.data,
            },
            status=status.HTTP_201_CREATED,
        )