Example #1
    def __init__(self):
        print("Init")
        # Singleton guard: instances should be obtained through the Bot factory function.
        if self.instance is not None:
            raise ValueError("Did you forget to call the Bot function?")

        self.stemmer = LancasterStemmer()
        data = pickle.load(open(path.getPath('trained_data'), "rb"))
        self.words = data['words']
        self.classes = data['classes']
        train_x = data['train_x']
        train_y = data['train_y']
        with open(path.getJsonPath()) as json_data:
            self.intents = json.load(json_data)
        net = tflearn.input_data(shape=[None, len(train_x[0])])
        net = tflearn.fully_connected(net, 8)
        net = tflearn.fully_connected(net, 8)
        net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')
        net = tflearn.regression(net)
        self.model = tflearn.DNN(net, tensorboard_dir=path.getPath('train_logs'))
        self.model.load(path.getPath('model.tflearn'))
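
Neither snippet shows inference, but everything a bag-of-words classifier needs is loaded above (self.stemmer, self.words, self.classes, self.model). A minimal sketch of how such a method might look, assuming nltk is imported in this module as it is in train.py; classify and threshold are illustrative names, not part of the shown source:

    def classify(self, sentence, threshold=0.25):
        # Preprocess the input exactly the way the training data was prepared.
        tokens = [self.stemmer.stem(w.lower()) for w in nltk.word_tokenize(sentence)]
        # Bag-of-words vector over the vocabulary loaded from the pickle.
        bag = [1 if w in tokens else 0 for w in self.words]
        # tflearn's DNN.predict returns one row of class probabilities per sample.
        probs = self.model.predict([bag])[0]
        results = [(self.classes[i], p) for i, p in enumerate(probs) if p > threshold]
        return sorted(results, key=lambda item: item[1], reverse=True)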
Example #2
File: train.py  Project: yurekliisa/BBC
import nltk
from nltk.stem.lancaster import LancasterStemmer

import numpy as np
import tensorflow as tf
import tflearn
import random
import pickle

from Bot import path
import json

stemmer = LancasterStemmer()
with open(path.getJsonPath()) as json_data:
    intents = json.load(json_data)

words = []
classes = []
documents = []
ignore_words = ['?']
for intent in intents['intents']:  # loop over every intent defined in the JSON file
    for pattern in intent['patterns']:
        w = nltk.word_tokenize(pattern)  # nltk tokenizes each pattern into words
        words.extend(w)
        documents.append((w, intent['tag']))  # keep each tokenized pattern with its intent tag
        if intent['tag'] not in classes:
            classes.append(intent['tag'])

# Stem and lowercase every word so comparisons ignore case differences.
words = [stemmer.stem(w.lower()) for w in words if w not in ignore_words]
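
The fragment ends here, but Example #1 expects the pickle to contain 'words', 'classes', 'train_x' and 'train_y', so the script plausibly continues by deduplicating the vocabulary and encoding each document as a bag-of-words row with a one-hot label. A hedged sketch of that continuation (the actual project code may differ):

words = sorted(set(words))      # deduplicate the stemmed vocabulary
classes = sorted(set(classes))

train_x, train_y = [], []
for doc_words, tag in documents:
    stemmed = [stemmer.stem(w.lower()) for w in doc_words]
    # One bag-of-words row per training sentence...
    train_x.append([1 if w in stemmed else 0 for w in words])
    # ...and a one-hot label row per intent tag.
    label = [0] * len(classes)
    label[classes.index(tag)] = 1
    train_y.append(label)

pickle.dump({'words': words, 'classes': classes,
             'train_x': train_x, 'train_y': train_y},
            open(path.getPath('trained_data'), 'wb'))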