Example #1
 def ConvertRawWordsToOrder(self, rawwords, nrange, ordertype="abs"):
     Analyzer = base_analyzer()
     WordRanker = ranker()
     Converter = word_convert()
     # Group the raw words into buckets keyed by word length (1..nrange).
     num_words = Converter.splitwords_bylen(rawwords, nrange)
     for len_word in num_words:
         # Rank the words inside each length bucket in descending order.
         num_words[len_word] = WordRanker.rank_words(num_words[len_word],
                                                     reverse=True)
         #num_words[len_word] = WordRanker.rank_tulple(num_words[len_word], reverse=True)
     # The words of maximum length define the reference ("prime") ordering.
     PrimeWords = [word[0] for word in num_words[nrange]]
     PrimeOrders = {word: i for i, word in enumerate(PrimeWords)}
     OrderWords = {nrange: PrimeOrders}
     start_time = time.time()
     # Convert every shorter length bucket into an order relative to PrimeWords.
     for i in range(1, nrange):
         if ordertype == 'abs':
             OrderWords[i] = self.ConvertWordToNumOrder(
                 [word[0] for word in num_words[i]], PrimeWords, rawwords)
         elif ordertype == 'Set':
             OrderWords[i] = self.ConvertwordToCntOrder(
                 num_words[i], num_words[nrange])
         else:
             OrderWords[i] = Converter.convert_word_order(
                 [word[0] for word in num_words[i]], PrimeWords)
     OrderWords = self.convert_order_to_raw(OrderWords)
     return OrderWords
Example #2
 def __init__(self, messages):
     self.messages = messages
     self.cVerTer = Converter()
     self.freWords = {}
     self.mger = WordsMerger()
     self.analyzer = base_analyzer()
     self.features = None
Example #3
 def __init__(self):
     self.msgLogic = MegSplitLogic()
     self.modbus = ModBusDataTuning()
     self.md = modbus()
     self.anlzer = base_analyzer()
     self.ftp = FTPDataTuning()
     self.ftpPaser = FTPParser()
     self.cmPaser = ComPaser()
     self.cvt = Converter()
     self.mtool = MessageSplitMeasure()
     self.rds = redis_deal()
Example #4
 def split_by_words_type(self, datas, T_max_range):
     fields_set = []
     w_infer = word_infer()
     w_merger = base_merger()
     w_convert = Converter()
     b_analyzer = base_analyzer()
     for i in range(T_max_range):
         # Gather the values observed at position i across the data set.
         lo_datas = get_data_bylo(datas, i)
         w_cnt = w_convert.convert_raw_to_count(lo_datas)
         w_frequent = b_analyzer.convert_num_to_frequent(w_cnt)
         # Infer whether the position holds a (near-)constant word, using a 0.95 frequency threshold.
         w_type = w_infer.is_const_word(w_frequent, 0.95)
         if w_type:
             t_field = loc_field((i, i), 0)
         else:
             t_field = loc_field((i, i), 4)
         fields_set.append(t_field)
     # Merge the per-position fields into words and take each word's start as a candidate border.
     words_f = w_merger.merge_words(fields_set)
     candidate_borders = [w.loc[0] for w in words_f]
     return words_f, candidate_borders
Example #5
 def __init__(self):
     super().__init__()
     self.rank = ranker()
     self.analysist = base_analyzer()
Example #6
 def __init__(self):
     super().__init__()
     self.analyzer = base_analyzer()
Example #7
from log_info.logger import get_logger, vote_pre
from Fields_info.fields_measure import Fields_measure
from Config.modbus import modbus
from common.f_cg import transer
from common.Converter.base_convert import Converter
import time
from Data_base.Data_redis.redis_deal import redis_deal
from common.readdata import read_datas, get_puredatas
from functools import cmp_to_key
from common.analyzer.analyzer_common import base_analyzer
from Config.log_config import log_path
from Config.ve_strategy import ve_strategy

ve_stra_str = ve_strategy().get_strategy_str()

analyzer = base_analyzer()


def cmp_word(word_one, word_two):
    # Comparator for cmp_to_key(): compares by the numeric prefix of the first
    # tuple element (larger start offsets sort first), breaking ties on the
    # second element.
    start_one = int(word_one[0].split(' ')[0])
    start_two = int(word_two[0].split(' ')[0])
    if start_one < start_two:
        return 1
    elif start_one > start_two:
        return -1
    else:
        if word_one[1] >= word_two[1]:
            return 1
        else:
            return 0
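
A minimal usage sketch (the word tuples below are hypothetical, assuming the space-separated "start end" string plus count layout the comparator expects); cmp_to_key, imported from functools above, adapts cmp_word for sorted():

words = [('0 4', 3), ('4 8', 7), ('0 4', 5)]  # hypothetical ("start end", count) tuples
ordered = sorted(words, key=cmp_to_key(cmp_word))
# Larger start offsets compare as smaller, so ('4 8', 7) comes first in ordered.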
Example #8
 def __init__(self):
     self.analyer = base_analyzer()
     self.convert = Converter()
     self.ranker = ranker()