def __init__(self, query):
    """Load the parser's dictionaries from the 'conf' shelve and store the query.

    Args:
        query: the raw natural-language query string to be parsed.
    """
    # Importing all dicts from conf file.
    # Fix: the original left the shelve open for the process lifetime;
    # shelve.Shelf is a context manager (Python 3.4+), so closing here
    # flushes and releases the underlying database file promptly.
    with shelve.open('conf') as conf:
        for key in ('relations', 'attr_relations', 'replace_attr', 'syn_attr',
                    'syn_common', 'common_attr', 'relations_attr',
                    'replace_contractions', 'replace_operators', 'operator_list',
                    'ant_operators', 'syn_aggregate', 'aggregate_list',
                    'proper_nouns'):
            setattr(self, key, conf[key])

    # Original query, plus a lowercased copy used by the tokenizer.
    self.original_query = query
    self.lowercase_query = self.original_query.lower()

    # Stop words list (provided by the project's Initialization helper).
    init = Initialization()
    self.stop_words = init.initializeStopWords()
def __init__(self, query):
    """Load configuration dictionaries from the 'conf' shelve and keep the query.

    Args:
        query: the raw natural-language query string to be parsed.
    """
    # Importing all dicts from conf file.
    # Fix: the original never closed the shelve handle (resource leak /
    # unflushed db). Using the shelf as a context manager closes it as
    # soon as the dictionaries have been copied onto the instance.
    conf_keys = (
        'relations', 'attr_relations', 'replace_attr', 'syn_attr',
        'syn_common', 'common_attr', 'relations_attr',
        'replace_contractions', 'replace_operators', 'operator_list',
        'ant_operators', 'syn_aggregate', 'aggregate_list', 'proper_nouns',
    )
    with shelve.open('conf') as conf:
        for key in conf_keys:
            setattr(self, key, conf[key])

    # Original query and its lowercase form.
    self.original_query = query
    self.lowercase_query = self.original_query.lower()

    # Stop words list (from the project's Initialization helper).
    init = Initialization()
    self.stop_words = init.initializeStopWords()
def configure(self):
    """Resolve and load the active configuration, then wire up all controls.

    Reads Configurations/config.json to pick a config file (the last
    aircraft entry whose 'aircraft_contains' substring matches the current
    aircraft wins; otherwise the 'default' entry), then loads that file and
    hands its sections to the per-control configure helpers.
    """
    with open('Configurations/config.json') as base_json_file:
        base_data = json.load(base_json_file)

    # Start from the default and let every matching aircraft entry
    # override it — the last match in the list takes effect.
    chosen = base_data['default']
    for entry in base_data['aircraft']:
        needle = entry['aircraft_contains']
        candidate = entry['file']
        if needle in str(self._aircraft):
            chosen = candidate

    self._configure_additional_simvars(base_data)

    if 'automatic_layer_revert' in base_data:
        GlobalStorage().active_layer_changer.enable_layer_revert_timer(
            base_data['automatic_layer_revert'])

    # Add folder prefix before loading the selected file.
    chosen = 'Configurations/' + chosen
    print("Loading config file:", chosen)
    with open(chosen) as json_file:
        data = json.load(json_file)

    self._configure_encoders(data['encoders'])
    self._configure_buttons(data['buttons'])
    self._configure_faders(data['faders'])
    self._configure_triggers(data['triggers'])
    Initialization(data.get('initialization', None))
# --- LINE bot web service: imports and module-level setup ---
from flask import Flask, request, abort
import requests
import re
import random
import configparser
import urllib3
from bs4 import BeautifulSoup
from initialization import Initialization

'''import linebot sdk'''
from linebot import (LineBotApi, WebhookHandler)
from linebot.exceptions import (InvalidSignatureError)
from linebot.models import *

# Silence urllib3 warnings (presumably InsecureRequestWarning from
# unverified HTTPS requests — TODO confirm against the crawling code).
urllib3.disable_warnings()

# initial Line Api Handler and Webhook.
# Initialization() supplies the configured WebhookHandler and LineBotApi.
_initialization = Initialization()
handler = _initialization.handler
line_bot_api = _initialization.line_bot_api

# Crawl targets come from the [TARGET_URL] section of CrawlingSites.ini;
# note ConfigParser.read silently ignores a missing file.
website_config = configparser.ConfigParser()
website_config.read("CrawlingSites.ini")
websites = website_config['TARGET_URL']

# Module-level counter (mutated elsewhere in the file).
ReStart_Counter = 0

app = Flask(__name__)

""" Define Fixed Reply """
REPLY_OK = "OK"
REPLY_FAIL = "SYSTEM_FAIL"
def __init__(self, layers, actvtn_types, initn_weight_type, initn_bias_type,
             cost_type, reg_type, reg_lambda, traing_type, epochs, batch_size,
             learng_eta, groups, traing_x_list, traing_y_list, evaltn_x_list,
             evaltn_y_list, input_log, traing_qtts_log, traing_acts_log,
             traing_cost_log, evaltn_cost_log):
    """Build the network: store hyper-parameters, convert the data to
    arrays, allocate per-layer containers, and initialize weights/biases
    for every non-input layer. Finally attach the monitor and, when
    requested, plot 2-D training input."""

    # --- network description ---
    self.layers = layers
    self.actvtn_types = actvtn_types
    self.initn_weight_type = initn_weight_type
    self.initn_bias_type = initn_bias_type
    self.cost_type = cost_type
    self.reg_type = reg_type
    self.reg_lambda = reg_lambda

    # --- groups ---
    self.groups = groups

    # --- training hyper-parameters ---
    self.traing_type = traing_type
    self.epochs = epochs
    self.batch_size = batch_size
    self.learng_eta = learng_eta

    # --- training and evaluation data, as numpy arrays ---
    self.traing_x = np.array(traing_x_list)
    self.traing_y = np.array(traing_y_list)
    self.evaltn_x = np.array(evaltn_x_list)
    self.evaltn_y = np.array(evaltn_y_list)

    # --- model: one entry per layer (index 0 = input layer) ---
    self.l_number = len(self.layers)
    self.l_sizes = [len(layer) for layer in self.layers]
    self.l_weights = [np.array([]) for _ in range(self.l_number)]
    self.l_biases = [np.array([]) for _ in range(self.l_number)]
    self.l_actvtns = [np.array([]) for _ in range(self.l_number)]

    # Weights and biases initialization — the input layer keeps its
    # empty placeholder arrays.
    for idx in range(1, self.l_number):
        w_shape = (self.l_sizes[idx - 1], self.l_sizes[idx])
        b_shape = (self.l_sizes[idx])
        # scaling: divide the fresh weights by the training-set size
        self.l_weights[idx] = ii.set_weights(self.initn_weight_type, w_shape) / len(self.traing_x)
        self.l_biases[idx] = ii.set_biases(self.initn_bias_type, b_shape)

    # --- monitor flags and monitor object ---
    self.input_log = input_log
    self.traing_qtts_log = traing_qtts_log
    self.traing_acts_log = traing_acts_log
    self.traing_cost_log = traing_cost_log
    self.evaltn_cost_log = evaltn_cost_log
    self.monitor = mm(self)

    # Show input map when logging is on and samples are 2-D points.
    if self.input_log:
        if self.traing_x[0].shape == (2,):
            self.monitor.show_2d_input_map()
def tokenize(self):
    """Split the lowercased query into word/punctuation tokens and cache
    the punctuation and alphabet lists used by later parsing stages."""
    tokenizer = WordPunctTokenizer()
    self.words = tokenizer.tokenize(self.lowercase_query)

    # Punctuation list comes from the project's Initialization helper.
    helper = Initialization()
    self.punct_list = helper.initializePunctList()

    # Single lowercase letters a-z.
    self.alpha_list = list(string.ascii_lowercase)