Example #1
def calculate(request):
    if request.method == 'POST':
        try:
            request_json = json.loads(request.body)
            webrtc_room_id = request_json["webrtc_room_id"]
            all_texts = Text.objects.all().filter(webrtc_room_id=webrtc_room_id)
            t = Tokenizer()

            relax_count = 0
            all_count = len(all_texts)
            if all_count == 0:
                return JsonResponse(data={"relaxing_topics": ["明日の天気", "昨日のテレビ"], "relax_rate": 0.6})

            # relaxing_topics → the set of all (or some) of the texts where is_relax is true
            relaxing_topics = []
            di = {}
            for text in all_texts:
                print(type(text.watson_response))
                print((text.watson_response))
                watson_response = json.loads(text.watson_response)
                watson_response = watson_response["classes"]
                postive = 0
                negative = 0
                score = 0
                for c in watson_response:
                    if c["class_name"] == "postive":
                        postive = c["confidence"]
                    if c["class_name"] == "negative":
                        negative = c["confidence"]
                scale = 0
                if postive > negative:
                    score = postive
                    if text.is_relax:
                        scale = 2
                    else:
                        scale = -2

                else:
                    score = negative
                    if text.is_relax:
                        scale = 1
                    else:
                        scale = -1

                for word in t.tokenize(text.text):
                    if "名詞" in word.part_of_speech:
                        if word.surface not in di:
                            di[word.surface] = score * scale
                        else:
                            di[word.surface] = di[word.surface] + score * scale

                if text.is_relax:
                    relax_count += 1
            relaxing_rate = (relax_count + 1)/(all_count + 1)
            print(di)
            di = sorted(di.items(), key=lambda x: -x[1])
            top_word = di[0][0]
            second_word = di[1][0]
            relaxing_topics = [top_word, second_word]
            if len(relaxing_topics) == 0:  # For now, fall back to a default topic when no suitable topic was found
                relaxing_topics.append("お天気")

            return JsonResponse(data={"relaxing_topics": relaxing_topics, "relax_rate": relaxing_rate})
        except Exception:
            return JsonResponse(data={"relaxing_topics": ["最近ハマってること", "昨日のテレビ"], "relax_rate": 0.6})

    else:
        return JsonResponse(data={"message": "only POST is acceptalbe"}, status=400)
Example #2
from janome.tokenizer import Tokenizer
import re

tokenizer = Tokenizer()


class BaseTokenizer:
    @classmethod
    def tokenize(cls, text):
        raise NotImplementedError


class JanomeTokenizer(BaseTokenizer):
    @classmethod
    def tokenize(cls, text):
        return (t for t in tokenizer.tokenize(text))
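A minimal usage sketch for the wrapper above (the sample sentence is illustrative):

# Illustrative use of the JanomeTokenizer wrapper defined above.
for token in JanomeTokenizer.tokenize('すもももももももものうち'):
    print(token.surface, token.part_of_speech)  # surface form and POS of each janome Token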
Example #3
 def _get_tokenizer(self):
     if self._t is not None:
         return self._t
     self._t = Tokenizer()
     return self._t
Example #4
 def __init__(self, mecab_args=''):
     self.word_dict = json.load(open(os.path.join(DICT_DIR, 'pn_noun.json')))
     self.wago_dict = json.load(open(os.path.join(DICT_DIR, 'pn_wago.json')))
     self.tagger = Tokenizer()
Example #5
def wakati(text):
    text = text.replace('\n', '')  # remove newlines
    text = text.replace('\r', '')  # remove carriage returns
    t = Tokenizer()
    result = t.tokenize(text, wakati=True)
    return result
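A quick check of wakati() above (illustrative input). Newer janome versions return a generator here rather than a list, so wrapping the result in list() is a safe way to inspect it:

# Illustrative call to wakati(); list() materializes the surface forms
# whether janome returns a list (0.3.x) or a generator (0.4+).
print(list(wakati('すもももももももものうち\n')))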
Example #6
# Import the janome Japanese morphological analyzer - 1
from janome.tokenizer import Tokenizer
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
import json
import os
import requests
import configparser
import numpy as np
import glob
import csv
import os.path

# Create the morphological-analysis object - 2
text = Tokenizer()

out_dir = "text"

files = glob.glob(out_dir + '/*.txt')

for i in range(len(files)):
    if i % 100 == 0:
        print(i)

    file = files[i]

    output = file.replace(out_dir + "/", "m/")

    if os.path.exists(output):
        continue
Example #7
 def __init__(self, path_doc2vec_model: Union[str, Path]) -> None:
     self._doc2vec_model = Doc2Vec.load(str(path_doc2vec_model))
     self._janome_model = Tokenizer()
Example #8
class Dictionary:
    """
    思考エンジンのクラス。

    クラス変数:
    DICT_RANDOM -- ランダム辞書のファイル名。
    DICT_PATTERN -- パターン辞書のファイル名。
    TOKENIZER -- 形態素解析ツールjanomeの分析オブジェクト

    プロパティ:
    random -- ランダム辞書
    pattern -- パターン辞書
    """

    DICT_RANDOM = 'dics/random.txt'
    DICT_PATTERN = 'dics/pattern.txt'

    TOKENIZER = Tokenizer()

    def __init__(self):
        """
        ファイルからの辞書の読み込みを行う。
        """
        with open(Dictionary.DICT_RANDOM, encoding='utf-8') as f:
            self._random = [x for x in f.read().splitlines() if x]

        with open(Dictionary.DICT_PATTERN, encoding='utf-8') as f:
            self._pattern = [
                Dictionary.make_pattern(l) for l in f.read().splitlines() if l
            ]

    def study(self, text):
        """
        ランダム辞書、パターン辞書をメモリに保存する。
        """
        self.study_random(text)
        self.study_pattern(text, Dictionary.analyze(text))

    def study_random(self, text):
        """
        ユーザの発言textをメモリに保存する。
        すでに同じ発言があった場合は何もしない。
        """
        if text not in self._random:
            self._random.append(text)

    def study_pattern(self, text, parts):
        """
        ユーザの発言textを形態素partsに基づいてパターン辞書に保存する。
        """
        for word, part in parts:
            if self.is_keyword(part):  # Learn the word only if it is a noun.
                # Duplicate check:
                # if the word is already registered, add the phrase to its entry;
                # otherwise create a new pattern entry.
                duplicated = next(
                    (p for p in self._pattern if p['pattern'] == word), None)
                if duplicated:
                    if text not in duplicated['phrases']:
                        duplicated['phrases'].append(text)
                else:
                    self._pattern.append({'pattern': word, 'phrases': [text]})

    def save(self):
        """
        メモリ上の辞書をファイルに保存する。
        """
        with open(Dictionary.DICT_RANDOM, mode='w', encoding='utf-8') as f:
            f.write('\n'.join(self.random))

    @staticmethod
    def make_pattern(line):
        """
        文字列lineを\tで分割し、{'pattern':[0], 'pharases':[1]}の形式で返す。
        """
        pattern, phrases = line.split('\t')
        if pattern and phrases:
            return {'pattern': pattern, 'phrases': phrases.split('|')}

    @staticmethod
    def analyze(text):
        """
        文字列を形態素解析し、[(surface, parts)]の形にして返す。
        """
        return [(t.surface, t.part_of_speech)
                for t in Dictionary.TOKENIZER.tokenize(text)]

    @staticmethod
    def pattern_to_line(pattern):
        """
        パターンのハッシュを文字列に変換する。
        """
        return '{}\t{}'.format(pattern['pattern'],
                               '|'.join(pattern['phrases']))

    @staticmethod
    def is_keyword(part):
        """
        品詞partが学習すべきキーワードであるかどうか真偽値で返す。
        """
        return bool(re.match(r'名詞,(一般|代名詞|固有名詞|サ変接続|形容動詞語幹)', part))

    @property
    def random(self):
        """
        ランダム辞書
        """
        return self._random

    @property
    def pattern(self):
        """
        パターン辞書
        """
        return self._pattern
Example #9
 def __init__(self):
     self.tokenizer = Tokenizer(wakati=True)
     self.excludes = ["。", "、", "(", ")"]
     self.exclude_nodes = ["cite", "script", "style"]
Example #10
# -*- coding: utf-8 -*-
from janome.tokenizer import Tokenizer
import sys
from io import open

PY3 = sys.version_info[0] == 3

print(u'Tokenize (stream mode)')
t = Tokenizer(mmap=True)

with open('text_lemon.txt', encoding='utf-8') as f:
    text = f.read()
    if not PY3:
        text = unicode(text)
    for token in t.tokenize(text, stream=True):
        print(token)
Example #11
import pandas as pd

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from janome.tokenizer import Tokenizer


# Reference page / source code:
# https://blog.amedama.jp/entry/tf-idf


# Initialize the tokenizer
janome_tokenizer = Tokenizer()

def text_morpheme(text, part="", part2=""):
    """Split text into morphemes with janome.

    Arguments:
        text {str} -- string to split into morphemes

    Keyword Arguments:
        part {str} -- part of speech to keep (if empty, all tokens are kept)
        part2 {str} -- secondary part of speech, e.g. for suru-verb nouns

    Returns:
        [list] -- the morphemes as a list
    """
    text_list = []
    for token in janome_tokenizer.tokenize(text):
        #print(token.part_of_speech)
Example #12
def doushi(honorific):
    t = Tokenizer()
    tokens = t.tokenize(honorific)
    for token in tokens:
        # Extract the part of speech
        partOfSpeech = token.part_of_speech.split(',')[0]
        if partOfSpeech == "動詞":
            df = pd.read_csv('doushi.csv', encoding='utf_8', names=["見出し語","尊敬語","謙譲語","丁寧語"], usecols=[0,1,2,3],skiprows=[0], skipfooter=0, engine='python')
            df= df.replace({'\n': '<br>'}, regex=True)
            df= df.replace({'\r': ''}, regex=True)
            df = df[df['見出し語']==token.surface]
            # If the headword is not in the CSV (.empty), return the message below
            if df.empty:
                response_empty='<font color="red">ご指定の語句には対応しておりません</font>'
                return response_empty
            
            # if honorific==token.surface:
                
                

            # sonkeigo (respectful form) list
            son=df["尊敬語"].to_string(index=False).replace("\n","").replace("NaN","").replace("'","")
            s=[son]
            # kenjougo (humble form) list
            ken=df["謙譲語"].to_string(index=False).replace("\n","").replace("NaN","")
            k=[ken]
            # teineigo (polite form) list
            tei=df["丁寧語"].to_string(index=False).replace("\n","").replace("NaN","")
            t=[tei]
            return s,k,t

            # response_string=df.drop("見出し語",axis=1).to_string(index=False)
            # response_string={df.drop("見出し語",axis=1).to_string(index=False).replace("尊敬語","").replace("謙譲語","").replace("丁寧語","")}
            # return response_string
            #pprint.pprint(df.drop("見出し語",axis=1).to_string(index=False).replace("尊敬語","").replace("謙譲語","").replace("丁寧語",""))
        
        elif partOfSpeech =='名詞':
            ds = pd.read_csv('meishi.csv', encoding='utf_8', names=["見出し語","尊敬語","謙譲語","丁寧語"], usecols=[0,1,2,3], skiprows=[0], skipfooter=0, engine='python')
            ds= ds.replace({'\n': '<br>'}, regex=True)
            ds= ds.replace({'\r': ''}, regex=True)
            ds=ds[ds['見出し語']==(token.surface)]
            if ds.empty:
                response_empty='<font color="red">ご指定の語句には対応しておりません</font>'
                return response_empty
            # sonkeigo (respectful form) list
            son=ds["尊敬語"].to_string(index=False).replace("\n","").replace("NaN","").replace("'","")
            s=[son]
            # kenjougo (humble form) list
            ken=ds["謙譲語"].to_string(index=False).replace("\n","").replace("NaN","")
            k=[ken]
            # teineigo (polite form) list
            tei=ds["丁寧語"].to_string(index=False).replace("\n","").replace("NaN","")
            t=[tei]
            return s,k,t
            # response_string=ds.drop("見出し語", axis=1).to_string(index=False).replace("尊敬語","").replace("謙譲語","").replace("丁寧語","")
            # return response_string
        
        elif partOfSpeech =='助詞':
            ds = pd.read_csv('zyoshi.csv', encoding='utf_8', names=["見出し語","尊敬語","謙譲語","丁寧語"], usecols=[0,1,2,3], skiprows=[0], skipfooter=0, engine='python')
            ds= ds.replace({'\n': '<br>'}, regex=True)
            ds= ds.replace({'\r': ''}, regex=True)
            ds=ds[ds['見出し語']==(token.surface)]
            if ds.empty:
                response_empty='<font color="red">ご指定の語句には対応しておりません</font>'
                return response_empty

            # sonkeigo (respectful form) list
            son=ds["尊敬語"].to_string(index=False).replace("\n","").replace("NaN","").replace("'","")
            s=[son]
            # kenjougo (humble form) list
            ken=ds["謙譲語"].to_string(index=False).replace("\n","").replace("NaN","")
            k=[ken]
            # teineigo (polite form) list
            tei=ds["丁寧語"].to_string(index=False).replace("\n","").replace("NaN","")
            t=[tei]
            return s,k,t
            # response_string=ds.drop("見出し語", axis=1).to_string(index=False).replace("尊敬語","").replace("謙譲語","").replace("丁寧語","")
            # return response_string

        else:
            if honorific:
                df = pd.read_csv('doushi.csv', encoding='utf_8', names=["見出し語","尊敬語","謙譲語","丁寧語"], usecols=[0,1,2,3],skiprows=[0], skipfooter=0, engine='python')
                df= df.replace({'\n': '<br>'}, regex=True)
                df= df.replace({'\r': ''}, regex=True)
                # Without janome analysis: if the input phrase matches a headword directly, output its respectful/humble/polite forms
                df = df[df['見出し語']==honorific]
                if df.empty:
                    response_empty='<font color="red">ご指定の語句には対応しておりません</font>'
                    return response_empty
                
                son=df["尊敬語"].to_string(index=False).replace("\n","").replace("NaN","").replace("'","")
                s=[son]
                # kenjougo (humble form) list
                ken=df["謙譲語"].to_string(index=False).replace("\n","").replace("NaN","")
                k=[ken]
                # teineigo (polite form) list
                tei=df["丁寧語"].to_string(index=False).replace("\n","").replace("NaN","")
                t=[tei]
                return s,k,t

            else:
                response_error='<font color="red">ご指定の語句には対応しておりません</font>'
                return response_error
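A hedged usage sketch for doushi() above; it assumes doushi.csv, meishi.csv and zyoshi.csv exist next to the script with 見出し語/尊敬語/謙譲語/丁寧語 columns, and the sample word is only an illustration:

# Illustrative call; the CSV files and the sample word are assumptions.
result = doushi('言う')
if isinstance(result, tuple):
    sonkeigo, kenjougo, teineigo = result  # each is a one-element list of strings
    print(sonkeigo, kenjougo, teineigo)
else:
    print(result)  # HTML error message when the word is not covered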
Example #13
""" utils/text_tools.py

テキスト処理のユーティリティ
"""
import re
import unicodedata

from django.core.validators import validate_email

from janome.tokenizer import Tokenizer

DEFAULT_TOKENIZER = Tokenizer()


def shortnate(string, length):
    """ 文字列が既定の長さ以上だった場合に規定の長さまでで残りを省略とする """
    return string if len(string) <= length else string[:length - 4] + '...'


def get_words(text, customdict=None):
    """ 与えられたテキストを形態素解析して、含まれる名詞のリストを返す """

    def _filter(s):
        """ 名詞だけにフィルタリングする """
        reg = re.compile(r'名詞')
        ignore_reg = re.compile(r'非自立')
        if (reg.search(s.part_of_speech) and
                not ignore_reg.search(s.part_of_speech)):
            return True

    if customdict:
Example #14
def morphological_analysis_janome(text):
    t = Tokenizer()
    token = t.tokenize(text) # input unicode
    return token
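A small hedged usage note for the helper above: depending on the janome version, tokenize() returns either a list (0.3.x) or a generator (0.4+), so materializing the result makes it easier to inspect:

# Illustrative call; list() works for both the list and generator return types.
for tok in list(morphological_analysis_janome('青い空と白い雲')):
    print(tok.surface, tok.part_of_speech)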
Example #15
def csv_open(con, filename):
    reader = csv.reader(open(filename, 'r'))
    tokenizer = Tokenizer()
    for row in reader:
        save(con, tokenizer, row[0], row[1])
Example #16
 def __init__(self):
     self._tokenizer = Tokenizer()
Example #17
# coding:utf-8

from janome.tokenizer import Tokenizer
import os, re, json, random

dict_file = "chatbot-data.json"
dic = {}
tokenizer = Tokenizer()  # janome


# Record the words in the dictionary
def register_dic(words):
    global dic
    if len(words) == 0: return
    tmp = ["@"]
    for i in words:
        word = i.surface
        if word in ["", "\r\n", "\n"]: continue
        tmp.append(word)
        if len(tmp) < 3: continue
        if len(tmp) > 3: tmp = tmp[1:]
        set_word3(dic, tmp)
        if word in ["。", "?"]:
            tmp = ["@"]
            continue

    # Save to file every time the dictionary is updated
    json.dump(dic, open(dict_file, "w", encoding="utf-8"))


# Register the three-element list into the dictionary
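register_dic() relies on set_word3, which is cut off after the comment above; a hedged sketch of what such a helper typically does (nest the three-word window as dictionaries and count occurrences) might look like this:

# Hedged sketch of the missing set_word3 helper: store the 3-word window
# as nested dicts, counting how often the third word follows the first two.
def set_word3(dic, s3):
    w1, w2, w3 = s3
    if w1 not in dic:
        dic[w1] = {}
    if w2 not in dic[w1]:
        dic[w1][w2] = {}
    if w3 not in dic[w1][w2]:
        dic[w1][w2][w3] = 0
    dic[w1][w2][w3] += 1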
Example #18
import pandas as pd
from janome.tokenizer import Tokenizer
import re
import math

query = '吾輩は猫である'
query_words = [
    token.surface for token in Tokenizer().tokenize(query)
    if not re.fullmatch(r"[あ-ん]|、|。| ", token.surface)
]
query_file = 'query'

arr = [line.strip().split("\t") for line in open('../index/index2.txt', 'r')]

idf_scores = {a[0]: float(a[3]) for a in arr}
tfidf_scores = {w: {} for w in idf_scores}
for a in arr:
    tfidf_scores[a[0]][a[1]] = float(a[4])
tfidf_table = pd.DataFrame(tfidf_scores).fillna(0)

query_tf = {w: 0 for w in idf_scores}
for w in query_tf:
    for q in query_words:
        if w == q: query_tf[w] += 1
query_tfidf = {
    w: {
        query_file: query_tf[w] * idf_scores[w]
    }
    for w in idf_scores
}
query_table = pd.DataFrame(query_tfidf)
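The snippet stops after building the query vector; a hedged sketch of the natural next step, ranking documents by cosine similarity between query_table and tfidf_table, could be:

# Hedged continuation: cosine similarity between the query and each document.
query_vec = query_table.reindex(columns=tfidf_table.columns).fillna(0).loc[query_file]
query_norm = math.sqrt((query_vec ** 2).sum()) or 1.0

scores = {}
for doc, doc_vec in tfidf_table.iterrows():
    doc_norm = math.sqrt((doc_vec ** 2).sum()) or 1.0
    scores[doc] = float((doc_vec * query_vec).sum()) / (doc_norm * query_norm)

for doc, score in sorted(scores.items(), key=lambda x: -x[1]):
    print(doc, round(score, 4))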
Example #19
 def _get_tokenizer(cls):
     if TFIDF._t is not None:
         return TFIDF._t
     TFIDF._t = Tokenizer()
     return TFIDF._t
Example #20
# -*- coding: utf-8 -*-
from janome.tokenizer import Tokenizer

print(u'Tokenize (system dictionary)')
t = Tokenizer()
for token in t.tokenize(u'すもももももももものうち'):
  print(token)

print('')
print(u'Tokenize (mmap system dictionary)')
t = Tokenizer(mmap=True)
for token in t.tokenize(u'すもももももももものうち'):
  print(token)

print('')
print(u'Tokenize (wakati mode)')
for token in t.tokenize(u'すもももももももものうち', wakati = True):
  print(token)

print('')
print(u'Tokenize with user dictionary')
t = Tokenizer("user_ipadic.csv", udic_enc="utf8")
for token in t.tokenize(u'東京スカイツリーへのお越しは、東武スカイツリーライン「とうきょうスカイツリー駅」が便利です。'):
  print(token)

print('')
print(u'Tokenize with user dictionary (wakati mode)')
t = Tokenizer("user_ipadic.csv", udic_enc="utf8")
for token in t.tokenize(u'東京スカイツリーへのお越しは、東武スカイツリーライン「とうきょうスカイツリー駅」が便利です。', wakati = True):
  print(token)
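For reference, user_ipadic.csv above is a MeCab-IPADIC-format user dictionary. The sample entry below follows the format used in janome's documentation and is an assumption, not part of this example:

東京スカイツリー,1288,1288,4569,名詞,固有名詞,一般,*,*,*,東京スカイツリー,トウキョウスカイツリー,トウキョウスカイツリー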
Example #21
 def __init__(self):
     self.t = Tokenizer()
Example #22
 def __init__(self):
     self.t = Tokenizer()
     self.routine()
Example #23
import sys
import tracemalloc

from janome.tokenizer import Tokenizer

repeat = 10
mmap = True
dump_file = 'memusage.dump'
if len(sys.argv) > 1 and sys.argv[1] == '-nommap':
    mmap = False
    dump_file = 'memusage_nommap.dump'

with open('text_lemon.txt') as f:
    s = f.read()

# Start tracing
tracemalloc.start(10)

# blocks allocated by initializing Tokenizer
t = Tokenizer(mmap=mmap)
snapshot1 = tracemalloc.take_snapshot()
top_stats1 = snapshot1.statistics('lineno')

with open(dump_file, 'w') as f:
    f.write('**Initializing Tokenizer**\n')
    f.write('[Top 10 lines]\n')
    for stat in top_stats1[:10]:
        f.write(str(stat))
        f.write('\n')
    f.write('\n')

# blocks allocated when tokenizing
list(t.tokenize(s))
snapshot2 = tracemalloc.take_snapshot()
top_stats2 = snapshot2.compare_to(snapshot1, 'lineno')
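The snippet is cut off after computing top_stats2; a hedged continuation mirroring the first dump block might append the tokenization comparison like this:

# Hedged continuation: append the tokenization-time comparison to the dump file.
with open(dump_file, 'a') as f:
    f.write('**Tokenizing text**\n')
    f.write('[Top 10 differences]\n')
    for stat in top_stats2[:10]:
        f.write(str(stat))
        f.write('\n')
    f.write('\n')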
Example #24
 def __init__(self):
     self.janome_tokenizer = Tokenizer()
     self.exc_part_of_speech = {"名詞": ["非自立", "代名詞", "数"]}
     self.inc_part_of_speech = {"名詞": ["サ変接続", "一般", "固有名詞"]}
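The include/exclude tables above suggest a noun filter; the standalone sketch below shows one way such tables can be applied (the function name and the way the original class uses them are assumptions):

# Hedged sketch: keep content nouns and drop pronouns, numbers and dependent nouns,
# using include/exclude part-of-speech tables like the ones above.
from janome.tokenizer import Tokenizer

def filter_nouns(text, inc_pos, exc_pos):
    tokenizer = Tokenizer()
    kept = []
    for token in tokenizer.tokenize(text):
        major, minor = token.part_of_speech.split(',')[:2]
        if major in exc_pos and minor in exc_pos[major]:
            continue
        if major in inc_pos and minor in inc_pos[major]:
            kept.append(token.surface)
    return kept

print(filter_nouns('彼は三冊の本を買った',
                   {"名詞": ["サ変接続", "一般", "固有名詞"]},
                   {"名詞": ["非自立", "代名詞", "数"]}))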
Example #25
# A modified version of search_3_keitaiso. It collects words and should help with generating ideas.

import json, config
from requests_oauthlib import OAuth1Session
from janome.tokenizer import Tokenizer  # morphological analysis
from janome.analyzer import Analyzer
from janome.tokenfilter import POSKeepFilter,TokenCountFilter
import random  # random selection

CK = config.CONSUMER_KEY
CS = config.CONSUMER_SECRET
AT = config.ACCESS_TOKEN
ATS = config.ACCESS_TOKEN_SECRET
twitter = OAuth1Session(CK, CS, AT, ATS)
t = Tokenizer()  # tokenizer instance
# list of verbs
dousi_list = []
# list of nouns
meisi_list = ["https","/","//","://",":","|","."]
# list of particles
josi_list = []

#a = Analyzer(token_filters=[POSKeepFilter('動詞')])




url = "https://api.twitter.com/1.1/search/tweets.json"

print("何を調べますか?")
keyword = input('>> ')
Example #26
 def __init__(self, user_dic_path='', user_dic_enc='utf8'):
     self._t = Tokenizer(udic=user_dic_path, udic_enc=user_dic_enc)
Example #27
import csv
from janome.tokenizer import Tokenizer
import re
import nltk
from nltk.corpus import stopwords

import pandas as pd
df = pd.read_csv('syllabus.csv', usecols=[3])
array = df.values.tolist()
print(array)

t = Tokenizer()

stop_words = stopwords.words('english')
stop_words_number = list(range(10))
add_stop_words = [
    '.', '(', ')', '\"', '\'', 'がち', 'テーマ', 'の', '上', '系', 'こと', 'もの', '的',
    'よう', '授業', '前期', '後期', '研究', '演習', 'たち', '目標', 'the', 'The', 'in'
]
stop_words.extend(stop_words_number)
stop_words.extend(add_stop_words)

theme_word_list = []
for row in array:

    if type(row[0]) is str:

        row_ = re.sub('\u3000', '', row[0])  # remove full-width (U+3000) spaces
        print(row_)

        token_list = t.tokenize(row_)
Example #28
 def __init__(self, text):
     self.text = text
     self.t = Tokenizer()
     self.tokens = self.t.tokenize(self.text)
     for i in self.tokens:
         print(i)
Example #29
import re
from janome.tokenizer import Tokenizer

TOKENIZER = Tokenizer()


def analyze(text):
    """文字列textを形態素解析し、[(surface, parts)]の形にして返す。"""
    return [(t.surface, t.part_of_speech) for t in TOKENIZER.tokenize(text)]


def is_keyword(part):
    """品詞partが学習すべきーワードであるかどうかを真偽値で返す。"""
    return bool(re.match(r'名詞,(一般|代名詞|固有名詞|サ変接続|形容動詞語幹)', part))
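A minimal check of the two helpers above (the sentence is illustrative):

# Illustrative use of analyze() and is_keyword(): print only the nouns the regex accepts.
for surface, part in analyze('隣の客はよく柿食う客だ'):
    if is_keyword(part):
        print(surface)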
Example #30
def run_tubuyakiword_analysis(logger, execKinouId, syoriymd, db_connection,
                              db_cursol):

    from janome.tokenizer import Tokenizer

    from ...infr.kbn import C_SyoriKekka
    from ...infr.kbn import C_KousinJyoukyou

    logger.info('|▼tubuyakiword_analysis開始')

    # Initialize work variables
    cnt_syori = 0
    syorikekkakbn = C_SyoriKekka.OK

    # Register into the tweet word-property history table
    # Delete stock master data
    where_values0 = "s_rirekiymd = '" + syoriymd + "'"
    #moduleDao.deleteTbl(logger, db_connection, db_cursol, 't_tubuyakitangoseisiturireki', where_values0)

    resultset0 = moduleDao.getSelectAll(
        logger, db_cursol, 't_tubuyakitangoseisitu',
        'n_tango_id, d_eikyoudo_age, d_eikyoudo_sage, n_update_cnt',
        'n_tango_id')

    # Repeat the following for each record in the tweet word-property table (index: idx1)
    '''
    for tubuyakidata_row_for_rireki in resultset0:
        insertvalue = str(tubuyakidata_row_for_rireki[0])
        insertvalue = insertvalue + ", '" + syoriymd + "'"
        insertvalue = insertvalue + ", " + str(tubuyakidata_row_for_rireki[1])
        insertvalue = insertvalue + ", " + str(tubuyakidata_row_for_rireki[2])
        insertvalue = insertvalue + ", " + str(tubuyakidata_row_for_rireki[3])

        moduleDao.insertTbl(logger, db_connection, db_cursol, 't_tubuyakitangoseisiturireki', "" , insertvalue)
    '''

    where_values3 = "n_eikyou_musi_flg = 1"
    resultset3 = moduleDao.getSelectByKey(logger, db_cursol,
                                          't_tubuyakitangoseisitu', 's_tango',
                                          where_values3)

    musu_tango_list = []
    for tango_musi_row in resultset3:
        musu_tango_list.append(str(tango_musi_row[0]))

    # Repeat the following for each record in the tweet data table (index: idx1)
    where_values = "n_kousin_jyoukyou_kbn = 1"
    resultset = moduleDao.getSelectByKey(
        logger, db_cursol, 't_tubuyaki',
        'n_tubuyaki_id, s_tubuyaki, n_neugokikbn, n_followersuu', where_values)

    t = Tokenizer()

    for tubuyakidata_row in resultset:

        tubuyaki_id = str(tubuyakidata_row[0])
        tubuyaki = str(tubuyakidata_row[1]).replace('[改行]', '◆')
        neugoki_kbn = str(tubuyakidata_row[2])

        eikyoudo_up = 0
        eikyoudo_down = 0
        tubuyai_tango_list = []

        logger.info('||▼つぶやきID=%s', tubuyaki_id)
        logger.info('|||つぶやき文言=%s', tubuyaki)

        # Calculate the price-movement influence value
        neugoki_eikyoudo = getNeugokiEikyoudo(int(neugoki_kbn))
        logger.info('|||値動き区分=%s 値動き影響度=%s', neugoki_kbn, neugoki_eikyoudo)

        for token in t.tokenize(tubuyaki):

            tmp_tango = str(token.base_form)

            if (tmp_tango in musu_tango_list):
                #logger.info('|||無視(単語)=%s', tmp_tango)
                continue
            else:
                pass

            if (tmp_tango in tubuyai_tango_list):
                pass
            else:
                tubuyai_tango_list.append(tmp_tango)

            #tmp_tango_hinsi = str(token.part_of_speech[0])
            #tmp_tango_hinsi = str(token.part_of_speech)
            tmp_tango_hinsi_list = getHinsiKbnList(token.part_of_speech)

            #print(token.base_form + '[' + token.part_of_speech[0] + ']')

            where_values2 = "s_tango = '" + tmp_tango + "'"
            resultset2 = moduleDao.getSelectByKey(
                logger, db_cursol, 't_tubuyakitangoseisitu',
                'n_tango_id, d_eikyoudo_age, d_eikyoudo_sage, n_update_cnt',
                where_values2)

            if len(resultset2) == 0:
                # Register into the tweet word-property table
                insertvalue = "'" + tmp_tango + "'"
                insertvalue = insertvalue + ", " + tmp_tango_hinsi_list

                if neugoki_eikyoudo <= 0:
                    insertvalue = insertvalue + ", " + str(0)
                else:
                    insertvalue = insertvalue + ", " + str(neugoki_eikyoudo)

                if neugoki_eikyoudo >= 0:
                    insertvalue = insertvalue + ", " + str(0)
                else:
                    insertvalue = insertvalue + ", " + str(
                        int(neugoki_eikyoudo) * (-1))

                insertvalue = insertvalue + ", " + str(0)
                insertvalue = insertvalue + ", " + str(0)

                collist = "s_tango, s_hinsikbn_1, s_hinsikbn_2, s_hinsikbn_3, s_hinsikbn_4, d_eikyoudo_age, d_eikyoudo_sage, n_update_cnt, n_eikyou_musi_flg"

                moduleDao.insertTbl(logger, db_connection, db_cursol,
                                    't_tubuyakitangoseisitu', collist,
                                    insertvalue)

            else:
                # Update the tweet word-property table

                if neugoki_eikyoudo <= 0:
                    upadate_setvalue = "d_eikyoudo_sage = " + str(
                        int(resultset2[0][2]) + int(neugoki_eikyoudo) * (-1))
                else:
                    upadate_setvalue = "d_eikyoudo_age = " + str(
                        int(resultset2[0][1]) + int(neugoki_eikyoudo))

                upadate_setvalue = upadate_setvalue + ", n_update_cnt = " + str(
                    int(resultset2[0][3]) + 1)

                #logger.info('|||①%s : ②%s : ③%s : ④%s', str(neugoki_eikyoudo), str(resultset2[0][1]), str(resultset2[0][2]), upadate_setvalue)

                where_values3 = "n_tango_id = " + str(resultset2[0][0])

                moduleDao.updateTbl(logger, db_connection, db_cursol,
                                    't_tubuyakitangoseisitu', upadate_setvalue,
                                    where_values3)

        # select sum(d_eikyoudo_age), sum(d_eikyoudo_sage) from t_tubuyakitangoseisitu where s_tango in ('銘柄', 'ます', 'ない')
        in_values = ""

        for tmp_tubuyaki_tango in tubuyai_tango_list:
            if in_values != "":
                in_values = in_values + ", "

            in_values = in_values + "'" + tmp_tubuyaki_tango + "'"

        where_values5 = "s_tango in (" + in_values + ")"
        resultset5 = moduleDao.getSelectByKey(
            logger, db_cursol, 't_tubuyakitangoseisitu',
            'sum(d_eikyoudo_age), sum(d_eikyoudo_sage)', where_values5)

        # Update the update-status category
        tmp_one_neugoki_eikyousisuu = float(resultset5[0][0]) - float(
            resultset5[0][1])
        tmp_all_neugoki_eikyousisuu = tmp_one_neugoki_eikyousisuu * int(
            tubuyakidata_row[3])

        upadate_setvalue2 = "d_one_neugoki_eikyousisuu = " + str(
            tmp_one_neugoki_eikyousisuu)
        upadate_setvalue2 = upadate_setvalue2 + ", d_all_neugoki_eikyousisuu = " + str(
            tmp_all_neugoki_eikyousisuu)
        upadate_setvalue2 = upadate_setvalue2 + ", n_kousin_jyoukyou_kbn = 2"
        where_values4 = "n_tubuyaki_id = " + str(tubuyaki_id)
        moduleDao.updateTbl(logger, db_connection, db_cursol, 't_tubuyaki',
                            upadate_setvalue2, where_values4)

        cnt_syori = cnt_syori + 1

        logger.info('||▲')

    moduleDao.insertBatchKekkaTbl(logger, execKinouId, syoriymd, syorikekkakbn,
                                  cnt_syori, db_connection, db_cursol)

    logger.info('||正常終了')
    logger.info('|▲tubuyakiword_analysis終了')