Example #1
def open_file():
    """select input file and show its data"""

    file_name, file_type = QFileDialog.getOpenFileName(MainWindow, '选择文件',  # 'Select file'
                                                       default_path,
                                                       'txt(*.txt)')
    if file_name == '':
        return
    temp_input = input.read_file(file_name)
    try:
        if temp_input.measurement_strategy == '0':
            ui.measurement_strategy.setCurrentIndex(0)
            ui.total_length.setText(temp_input.len_total)
            ui.length_step.setText(temp_input.len_step)
        elif temp_input.measurement_strategy == '1':
            ui.measurement_strategy.setCurrentIndex(1)
            ui.num_of_mea.setText(temp_input.num_of_mea)
        ui.frequency.setText(temp_input.frequency)
        ui.time_step.setText(temp_input.time_step)
        ui.na_average_facotr.setValue(int(temp_input.na_average_factor))  # sic: attribute name as defined in the UI
        ui.multi_measure.setValue(int(temp_input.multi_measure))
        ui.save_directory.setText(temp_input.directory)
        input_parameters.directory = temp_input.directory
        if temp_input.access_sensor_times == '0':
            ui.typein_t.setChecked(True)
            input_parameters.access_sensor_times = 0
            ui.temperature.setText(temp_input.temperature)
            ui.humidity.setText(temp_input.humidity)
        elif temp_input.access_sensor_times == '1':
            ui.measure_t_once.setChecked(True)
            input_parameters.access_sensor_times = 1
        elif temp_input.access_sensor_times == '2':
            ui.measure_t_repeatedly.setChecked(True)
            input_parameters.access_sensor_times = 2
        if temp_input.na_state is not None:
            ui.NA_state.setText(temp_input.na_state)
        input_parameters.motor_comp = temp_input.motor_comp
        input_parameters.sensor_comp = temp_input.sensor_comp
        input_parameters.NA_identifier = temp_input.NA_identifier
    except Exception:
        missing_parameters('文件格式错误,请补充相应数据')  # 'File format error; please supply the missing data'
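The missing_parameters helper is not shown above; a plausible minimal sketch (the use of QMessageBox and the dialog title are assumptions, not from the original):

from PyQt5.QtWidgets import QMessageBox

def missing_parameters(message):
    # Hypothetical sketch: surface the error text in a modal warning box.
    QMessageBox.warning(MainWindow, 'Error', message)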
Example #2
from database import DatabaseArchiver
from database import HTTPArchiver
from algorithm import power_management
from input import read_file
from save_output_txt import write_to_txt
from input_charts import input_charts
from config import read_config
import sys

content = sys.stdin.readlines()
data = read_file(content)
output = power_management(data)
config = read_config()

if config[6] == 0:  # the seventh config field selects the archiver backend
    archiver = DatabaseArchiver('/tmp/BTS.db')
else:
    archiver = HTTPArchiver('http://localhost:5000')

for item in data:
    archiver.save_measurement(item)

for item in output:
    archiver.save_response(item)

archiver.flush()

write_to_txt(output)
input_charts(data)
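
DatabaseArchiver and HTTPArchiver are used interchangeably above, so both evidently expose the same three methods; a minimal sketch of that implied contract (only the method names come from the calls above, the rest is assumed):

class Archiver:
    # Assumed duck-typed interface shared by DatabaseArchiver and HTTPArchiver.
    def save_measurement(self, measurement):
        raise NotImplementedError

    def save_response(self, response):
        raise NotImplementedError

    def flush(self):
        # Persist any buffered records to the backing store.
        raise NotImplementedError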
Example #3
import random

import matplotlib.ticker as ticker
import numpy as np
import torch

from consts import *
from lang import Language
from input import read_file, read_cache
from model import Encoder, Decoder
from training import *

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

if USE_CACHE:
    input_lang, output_lang, pairs = (read_cache(c) for c in CACHE)
else:
    input_lang, output_lang, pairs = read_file(FILE,
                                               INCLUDE_PHRASES,
                                               suffix='i')

random.shuffle(pairs)
train_set = pairs[:int(0.8 * len(pairs))]
test_set = pairs[int(0.8 * len(pairs)):]

validation_set = train_set[int(0.8 * len(train_set)):]
train_set = train_set[:int(0.8 * len(train_set))]

print("{} train   {} validation    {} test".format(len(train_set),
                                                   len(validation_set),
                                                   len(test_set)),
      flush=True)
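
Because the shuffle above is unseeded, the 80/20 splits differ on every run; a seeded variant (an addition, not in the original) makes them reproducible:

# Addition: seed the RNG before shuffling for a reproducible split.
random.seed(0)
random.shuffle(pairs)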

Example #4

# coding: utf-8
from input import read_file, conv_str_to_kana, conv_kana_to_vec, conv_vec_to_kana, calc_accuracy, fix_data
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import LeaveOneOut
import numpy as np
import csv
import pickle

# Read the file and load its contents into arrays
data = read_file('dataset_for.csv')
# Convert the titles to katakana, then to vowel/consonant feature vectors
kana_title = conv_str_to_kana(data[0])
kana_ans = conv_str_to_kana(data[1])
vec_title = conv_kana_to_vec(kana_title, 1, "T")
vec_ans = conv_kana_to_vec(kana_ans, 1, "R")

# Run leave-one-out cross-validation
loo = LeaveOneOut()
lr = LinearRegression()
vec_ans = np.array(vec_ans)
vec_title = np.array(vec_title)
result = []
result_T = []
count = 0
for train_index, test_index in loo.split(vec_title):
    X_train, X_test = vec_title[train_index], vec_title[test_index]
    Y_train, Y_test = vec_ans[train_index], vec_ans[test_index]
    lr.fit(X_train, Y_train)
    Y_pred = lr.predict(X_test)
    Y_pred = Y_pred.tolist()
    Y_test = Y_test.tolist()
    # Collect predictions and ground truth; the original excerpt is cut
    # off here, so these appends are an assumed completion.
    result.append(Y_pred)
    result_T.append(Y_test)
    count += 1
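
The original excerpt stops inside the loop; one way to summarize the collected folds afterwards (this evaluation step is an addition, not part of the original):

# Addition: mean absolute error across all leave-one-out folds.
mae = np.mean(np.abs(np.array(result) - np.array(result_T)))
print('LOO mean absolute error: {:.4f}'.format(mae))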
Example #5

# coding: utf-8
from input import read_file, conv_str_to_kana, conv_kana_to_vec, conv_vec_to_kana
from sklearn import svm
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error
import matplotlib.pyplot as plt
import numpy as np
import csv

data = read_file('dataset_proto.csv')
kana_title, kana_ans = conv_str_to_kana(data[0], data[1])
vec_title = conv_kana_to_vec(kana_title, 1, "T")
vec_ans = conv_kana_to_vec(kana_ans, 1, "R")

"""
for i,kana_title in enumerate(kana_title):
    print(kana_title)
    print(vec_title[i])

clf = svm.SVC(gamma=0.001, C=100)
clf.fit(vec_title[:-1], vec_ans[:-1])

result = clf.predict(data[:-1])
print("実際の答え={0}, 予測結果={1}".format(vec_ans[-1], result))

result = clf.score(data, vec_ans)
print(result)
"""
# Split the dataset into training and test sets
X_train, X_test, Y_train, Y_test = train_test_split(vec_title, vec_ans, train_size=0.8, test_size=0.2, random_state=0)
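
Given the imports in this example, a natural continuation (not in the original excerpt) fits the regressor and reports the held-out error:

# Addition: fit on the training split and evaluate on the test split.
lr = LinearRegression()
lr.fit(X_train, Y_train)
Y_pred = lr.predict(X_test)
print('test MAE:', mean_absolute_error(Y_test, Y_pred))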
Example #6
def read_file_test(self):
    result = input.read_file(self.path)
    print(result[1])
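
This method appears to be torn from a test class; a minimal sketch of how it might be embedded (the class name and fixture path are assumptions). Note that unittest only auto-discovers methods whose names start with test_, which the original read_file_test does not:

import unittest

import input

class ReadFileTest(unittest.TestCase):
    def setUp(self):
        self.path = 'testdata.txt'  # assumed fixture path

    def test_read_file(self):
        result = input.read_file(self.path)
        self.assertIsNotNone(result[1])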
Example #7
from collections import defaultdict

import input  # added: the snippet calls input.read_file below
from gensim import corpora, models, similarities
from pprint import pprint  # pretty-printer

# documents = ["Human machine interface for lab abc computer applications",
#              "A survey of user opinion of computer system response time",
#              "The EPS user interface management system",
#              "System and human system engineering testing of EPS",
#              "Relation of user perceived response time to error measurement",
#              "The generation of random binary unordered trees",
#              "The intersection graph of paths in trees",
#              "Graph minors IV Widths of trees and well quasi ordering",
#              "Graph minors A survey"]

documents = input.read_file("raw-filename.txt", ".txt")

# remove common words and tokenize
stoplist = set('for a of the and to in'.split())
texts = [[word for word in document.lower().split() if word not in stoplist]
         for document in documents]

# remove words that appear only once
frequency = defaultdict(int)
for text in texts:
    for token in text:
        frequency[token] += 1
texts = [[token for token in text if frequency[token] > 1] for text in texts]
# pprint(texts)

dictionary = corpora.Dictionary(texts)
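
A typical next step in the gensim workflow (not part of the original snippet) converts each document into a bag-of-words vector with the dictionary:

# Addition: the usual follow-up once the Dictionary is built.
corpus = [dictionary.doc2bow(text) for text in texts]
tfidf = models.TfidfModel(corpus)  # optionally reweight raw counts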
Example #8
__author__ = 'naveed'

import input
import model
import numpy

file_name = input.read_file()
csv_list = input.list_converter(file_name)

name_list = [i[1] for i in csv_list]
stat_list = [i[2] for i in csv_list]

WORD_LENGTH = 5
VECTOR_SIZE = 80

training_examples = len(name_list)
feature_count = WORD_LENGTH * VECTOR_SIZE
empty_vector = list(numpy.zeros(VECTOR_SIZE))

# Splitting name list and converting character codes to unicode
name_list_split = [i.split(';') for i in name_list]
name_list_dec = [[value[2:] for value in row] for row in name_list_split]
name_list_hex = [[hex(int(value)) for value in row] for row in name_list_dec]
name_list_uni = [[chr(int(value)) for value in row] for row in name_list_dec]  # chr replaces Python 2's unichr

# Truncating names and adding whitespace to make length equal WORD_LENGTH
for i in name_list_uni:
    while len(i) < WORD_LENGTH:
        i.append(' ')
    if len(i) > WORD_LENGTH:
        del i[WORD_LENGTH:]  # the original excerpt is cut off here; truncation assumed from the comment
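
The constants above imply one VECTOR_SIZE-dimensional vector per character and WORD_LENGTH characters per name; a sketch of the resulting feature matrix shape (the actual per-character encoding is not visible in this excerpt, so this is only an assumption):

# Assumption: placeholder for the per-character feature matrix;
# the real encoding step is not shown in the excerpt.
features = numpy.zeros((training_examples, feature_count))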