def __init__(self, FLAGS):
    self.data_set = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
    self.path = FLAGS.path + "_tf_sess.ckpt"
    self.log_dir = FLAGS.log_path
    self.FLAGS = FLAGS

    if FLAGS.load:
        # Load an existing model.
        # self.inference()
        self.load_model()
        self.simple_log = utility.load_json(self.log_dir + "simple_log.json")
        self.current_i = self.simple_log["current_i"]
    else:
        # Build a new model from scratch.
        print("build a new model")
        self.x, self.y = self.build_graph_input()
        self.current_i = 0
        self.simple_log = dict()
        self.inference(self.x, self.y)
        self.sess = tf.InteractiveSession()
        tf.global_variables_initializer().run()
        self.saver = tf.train.Saver()

    utility.check_dir(self.log_dir)
    self.train_writer = tf.summary.FileWriter(self.log_dir + '/train', self.sess.graph)
    self.test_writer = tf.summary.FileWriter(self.log_dir + '/test')
def parse(task):
    """Parse the given task.

    Returns:
        string -- date
        string -- device ID
        string -- time
        dictionary -- data
    """
    pl = utility.load_json(task.payload)
    return pl['date'], pl['devid'], pl['time'], pl['data']
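# Rough usage sketch for parse(). The payload layout is inferred from the keys
# accessed above; utility.load_json is assumed to wrap json.loads for string
# input, and every sample value below is hypothetical.
import json
from types import SimpleNamespace

# Stand-in for the project's utility module under the json.loads assumption.
utility = SimpleNamespace(load_json=json.loads)

task = SimpleNamespace(payload=json.dumps({
    "date": "2020-01-01",
    "devid": "sensor-01",
    "time": "12:00:00",
    "data": {"temp": 21.5},
}))
date, devid, time, data = parse(task)
print(date, devid, time, data)  # 2020-01-01 sensor-01 12:00:00 {'temp': 21.5}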
def read(self):
    """Read from GCS.

    Returns:
        dictionary -- the loaded dictionary
    """
    try:
        self.dic = utility.load_json(model.Storage.read_file(self.name))
        logging.info("OPEN : %s", self.name)
    except storage.NotFoundError:
        self.dic = {}
        logging.info("DIARY WRITE : NEW %s", self.name)
    return self.dic
def get(self):
    """Load JSON from the cache.

    Returns:
        dictionary -- the loaded dictionary
    """
    data = memcache.get(self.name)
    if data is not None:
        data = utility.load_json(data, charset=self.charset)
        logging.debug("Cache.get() : hit %s", self.name)
    else:
        data = {}
        logging.debug("Cache.get() : miss %s", self.name)
    return data
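# The write path is not shown above. A minimal sketch of what a counterpart on
# App Engine memcache might look like, sitting on the same Cache class; the
# method name `put`, the TTL, and the json.dumps serialization are assumptions,
# not the project's actual code.
import json
import logging

from google.appengine.api import memcache

def put(self, data, ttl=3600):
    """Hypothetical counterpart to get(): serialize the dict and cache it."""
    memcache.set(self.name, json.dumps(data), time=ttl)
    logging.debug("Cache.put() : store %s", self.name)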
def main():
    user_args = get_input_args()
    model, class_labels = utility.load_saved_model()
    cat_to_name = utility.load_json(user_args.category_names)
    probs, labels, _ = utility.predict(user_args.input, model, user_args.top_k,
                                       cat_to_name, class_labels, user_args.gpu)

    print("------------------Processing------------------")
    for i in range(len(probs)):
        result_label = labels[i]
        result_prob = probs[i] * 100
        print("The probability of the {} is {:.2f} %.".format(
            result_label, result_prob))
def init_ui(self) -> None:
    """Initialize the connect dialog UI."""
    self.setWindowTitle(f"连接{self.gateway_name}")  # "Connect <gateway>"

    # Default setting provides field name, field data type and field default value.
    default_setting = self.main_engine.get_default_setting(self.gateway_name)

    # Saved setting provides field data used last time.
    loaded_setting = load_json(self.filename)

    # Initialize line edits and form layout based on setting.
    form = QtWidgets.QFormLayout()

    for field_name, field_value in default_setting.items():
        field_type = type(field_value)

        if field_type == list:
            widget = QtWidgets.QComboBox()
            widget.addItems(field_value)

            if field_name in loaded_setting:
                saved_value = loaded_setting[field_name]
                ix = widget.findText(saved_value)
                widget.setCurrentIndex(ix)
        else:
            widget = QtWidgets.QLineEdit(str(field_value))

            if field_name in loaded_setting:
                saved_value = loaded_setting[field_name]
                widget.setText(str(saved_value))

            # "密码" means "password": mask the input field.
            if "密码" in field_name:
                widget.setEchoMode(QtWidgets.QLineEdit.Password)

        form.addRow(f"{field_name} <{field_type.__name__}>", widget)
        self.widgets[field_name] = (widget, field_type)

    button = QtWidgets.QPushButton("连接")  # "Connect"
    button.clicked.connect(self.connect)
    form.addRow(button)

    self.setLayout(form)
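# The dialog stores (widget, field_type) pairs so the click handler can cast
# each text field back to its original type before saving. A plausible sketch
# of that readback, not necessarily the library's exact implementation;
# save_json is assumed to be the counterpart of the load_json call above.
def connect(self) -> None:
    setting = {}
    for field_name, (widget, field_type) in self.widgets.items():
        if field_type == list:
            field_value = str(widget.currentText())   # combo box selection
        else:
            field_value = field_type(widget.text())   # e.g. int("465") -> 465
        setting[field_name] = field_value

    save_json(self.filename, setting)                 # remember for next time
    self.main_engine.connect(setting, self.gateway_name)
    self.accept()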
def __init__(self):
    # Load any previously saved file.
    try:
        self.data_dic = load_json('../data/ssu_notice.json')
        print('Resuming collection from the previously saved notices.')
    except FileNotFoundError:
        self.data_dic = defaultdict(lambda: [])
        print('No saved notices found. Starting a fresh crawl.')

    # '전체' is the "All notices" category key.
    self.title_ls = [dic['title'] for dic in self.data_dic['전체']]

    # Fetch the crawl source.
    self.req = requests.get(
        'http://www.ssu.ac.kr/web/kor/plaza_d_01').content
    self.soup = BeautifulSoup(self.req, 'html.parser')

    # Extract the table rows (tr).
    self.trs = list(self._get_trs())
def test(self):
    # Evaluate the trained model on the test set.
    print("now start evaluating the trained model")
    acc = self.sess.run(self.accuracy, feed_dict={
        self.x: self.data_set.test.images,
        self.y_: self.data_set.test.labels
    })
    print("accuracy is ", acc)

    try:
        self.simple_log = utility.load_json(self.log_dir + "simple_log.json")
    except FileNotFoundError:
        print("simple log does not exist, create a new one")
        self.simple_log = dict()

    self.simple_log["current_i"] = self.current_i
    self.simple_log["acc at " + str(self.current_i)] = float(acc)
    utility.save_json(self.simple_log, self.log_dir + "simple_log.json")
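# In this trainer, load_json takes a file path and save_json writes the dict
# back with (data, path) argument order. A minimal sketch of what the utility
# helpers presumably look like; the real utility module may differ.
import json

def load_json(path):
    # Raises FileNotFoundError when the log file does not exist yet.
    with open(path, "r") as f:
        return json.load(f)

def save_json(data, path):
    with open(path, "w") as f:
        json.dump(data, f, indent=4)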
def init_ui(self) -> None:
    """Initialize the global setting dialog UI."""
    self.setWindowTitle("全局配置")  # "Global configuration"
    self.setMinimumWidth(800)

    settings = copy(SETTINGS)
    settings.update(load_json(SETTING_FILENAME))

    # Initialize line edits and form layout based on setting.
    form = QtWidgets.QFormLayout()

    for field_name, field_value in settings.items():
        field_type = type(field_value)
        widget = QtWidgets.QLineEdit(str(field_value))

        form.addRow(f"{field_name} <{field_type.__name__}>", widget)
        self.widgets[field_name] = (widget, field_type)

    button = QtWidgets.QPushButton("确定")  # "OK"
    button.clicked.connect(self.update_setting)
    form.addRow(button)

    self.setLayout(form)
"log.file": True, "email.server": "smtp.qq.com", "email.port": 465, "email.username": "", "email.password": "", "email.sender": "", "email.receiver": "", "rqdata.username": "******", "rqdata.password": "******", "database.timezone": get_localzone().zone, "database.driver": "mysql", # see database.Driver "database.database": "runoob", # for sqlite, use this as filepath "database.host": "localhost", "database.port": 3306, "database.user": "******", "database.password": "******", "database.authentication_source": "admin", # for mongodb } # Load global setting from json file. SETTING_FILENAME: str = "vt_setting.json" SETTINGS.update(load_json(SETTING_FILENAME)) def get_settings(prefix: str = "") -> Dict[str, Any]: prefix_length = len(prefix) return { k[prefix_length:]: v for k, v in SETTINGS.items() if k.startswith(prefix) }
import os
import sys

import spacy
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import sent_tokenize, word_tokenize
from tqdm import tqdm

import config
from utility import load_json, save_json

# 1. Strip empty lines
CUR_PATH = os.path.abspath('.')

if os.path.exists(config.PATH_STRIP):
    raw_wiki_lines = load_json(config.PATH_STRIP)
else:
    print("Load raw files:")
    raw_wiki_lines = []
    with open('computed/wiki.txt', 'r') as fin:
        for line in fin:
            line = line.strip()
            if len(line) > 0:
                raw_wiki_lines.append(line)
    try:
        save_json(raw_wiki_lines, config.PATH_STRIP)
    except Exception:
        # Drop the partial file so the next run regenerates it, then abort.
        os.remove(config.PATH_STRIP)
        sys.exit(1)

# 2. Segment lines
if os.path.exists(config.PATH_NEWLINE):
    new_lines = load_json(config.PATH_NEWLINE)
from collections import Counter

import matplotlib.pyplot as plt
import spacy
from tqdm import tqdm
from wordcloud import WordCloud

import config
from utility import load_json

nlp = spacy.load('en_core_web_sm')
stop_words = nlp.Defaults.stop_words

try:
    sentences_data = load_json(config.PATH_NO_STOP)

    flatten_data = []
    for sent in tqdm(sentences_data, desc="flat data for Counter", unit="Lines"):
        flatten_data.extend(sent)
    vocab_wikipedia = Counter(flatten_data)

    # Print the top 20 words.
    top_words = vocab_wikipedia.most_common(20)
    print("The top 20 most common words are:")
    print(", ".join([token for token, _ in top_words]))

    new_data = load_json(config.PATH_NEWLINE)
    wc = WordCloud(background_color="white",
                   stopwords=stop_words).generate("\n".join(new_data))
    plt.imshow(wc)
    plt.axis("off")
except FileNotFoundError:
    # Assumed handler: bail out when the preprocessed inputs are missing.
    print("Preprocessed data not found; run the preprocessing script first.")