def setup_options_and_constraints(self):
    # Build the options container incrementally via add(name, value).
    options = option.Options()
    options.add('test', 5)
    options.add('test2', "test")
    self.options = options
    self.constraints = {}
def setup_options_and_constraints(self):
    # Seed option.Options with a dict of defaults in a single call.
    options = {
        'sterics':        1,
        'verbose':        0,
        'frequency':      10,
        'max_node_level': 20,
        'max_steps':      100000000,
        'max_solutions':  10,
        'max_size':       100000000,
        'min_size':       0,
        'accept_score':   10,
    }
    self.options = option.Options(options)
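The two setup methods above imply that option.Options accepts both per-key add(name, value) calls and a dict of defaults at construction. A minimal sketch of a container with that inferred interface (hypothetical; the real option.Options may differ):

class Options(object):
    # Hypothetical stand-in inferred from the usage above; not the real class.
    def __init__(self, defaults=None):
        # Seed the container from an optional dict of defaults.
        self._opts = dict(defaults) if defaults else {}

    def add(self, name, value):
        # Register or overwrite a single named option.
        self._opts[name] = value

    def get(self, name):
        # Look up a previously registered option.
        return self._opts[name]

Usage mirrors both snippets: Options({'sterics': 1}) seeds defaults in one call, while Options() followed by add('test', 5) builds the same state incrementally.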
def __init__(self, mtype=None, mid=None, code=EMPTY, payload='', token=''):
    # A None payload is ambiguous; require an explicit empty string instead.
    if payload is None:
        raise TypeError(
            "Payload must not be None. Use empty string instead.")
    self.version = 1
    self.mtype = mtype
    self.code = code
    self.mid = mid
    self.token = token
    self.opt = option.Options()
    self.payload = payload
    self.remote = None
    self.timeout = MAX_TRANSMIT_WAIT
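This constructor rejects a None payload up front rather than letting it surface later during serialization. A brief usage sketch, assuming Message is the enclosing class and that EMPTY and MAX_TRANSMIT_WAIT are module-level constants defined elsewhere; the mtype and mid values below are placeholders, not real protocol constants:

# Hypothetical usage of the constructor above.
msg = Message(mtype=0, mid=1234, payload='hello')   # placeholder mtype/mid values
print(msg.payload)                                  # 'hello'

try:
    Message(payload=None)                           # rejected explicitly
except TypeError as exc:
    print(exc)  # Payload must not be None. Use empty string instead.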
def setup_options_and_constraints(self):
    options = {'sterics': 1}
    self.options = option.Options(options)
    self.constraints = {}
import pandas as pd
import option as opt
import os
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras import preprocessing

op = opt.Options()
tokenizer = Tokenizer(num_words=1000)


def get_word_index():
    # Build the word index from every CSV under the cleaned-data directories.
    for idx, dirnames in enumerate(op.clean_data_dir):
        for filename in os.listdir(dirnames):
            infilename = dirnames + "/" + filename
            news = pd.read_csv(infilename, header=None, encoding="euc-kr")
            tokenizer.fit_on_texts(news.iloc[:, 0])
    # print("Index of each word: \n", tokenizer.word_index)
    return tokenizer


def make_onehot(t, data):
    # One-hot binary vector representation of the documents.
    one_hot_results = t.texts_to_matrix(data)
    return one_hot_results


def make_word_seq(t, data):
    # Integer word sequences padded/truncated to a fixed length.
    word_seq_results = t.texts_to_sequences(data)
    word_seq_results = preprocessing.sequence.pad_sequences(
        word_seq_results, maxlen=op.max_len)
    return word_seq_results
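A short driver sketch tying the three helpers together, assuming op.clean_data_dir lists directories of EUC-KR CSV files and op.max_len is configured; the sample strings are placeholders:

# Hypothetical usage of the helpers above.
t = get_word_index()                                   # fit tokenizer on all CSV text
docs = ["sample headline one", "sample headline two"]  # placeholder documents
one_hot = make_onehot(t, docs)                         # shape: (2, 1000)
padded = make_word_seq(t, docs)                        # shape: (2, op.max_len)
print(one_hot.shape, padded.shape)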