Example #1
import math
import random
from datetime import datetime

import numpy as np
import tensorflow as tf
from keras.models import Model
from keras.layers import Input
from keras.optimizers import Adam, SGD, RMSprop, Adadelta, Nadam
from keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard

import init_data   # local data-loading module assumed by this snippet
import model_art   # local model-definition module assumed by this snippet
from init_data import load_data  # data_split() below calls load_data() unqualified


def main(_):
    BSN = 7  # unused in this snippet
    learn_rate = 0.005
    batchsize = 1024
    epoch = 1000
    # Candidate optimizers; only `opt` (Adam) is passed to compile() below.
    opt = Adam(lr=learn_rate)  # beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False
    opt1 = SGD(lr=learn_rate)  # decay=learn_rate / epoch
    opt2 = RMSprop(lr=learn_rate, rho=0.9, epsilon=None, decay=0.0)
    opt3 = Adadelta(lr=learn_rate, rho=0.95, epsilon=None, decay=0.0)
    opt4 = Nadam(lr=learn_rate, beta_1=0.9, beta_2=0.999, epsilon=None, schedule_decay=0.00004)

    trainX, trainY = init_data.load_data()
    print(np.shape(trainX))
    inputs = Input(shape=(14, 30, 1))  # renamed from `input` to avoid shadowing the builtin
    # inputs = Input(shape=(6, 5, 60, 1))
    predict = model_art.pred_step1(inputs)
    model = Model(inputs=inputs, outputs=predict)
    model.compile(optimizer=opt, loss=mse1, metrics=['accuracy', accuracy1])
    # model.load_weights('savemodel/64train1+1000.hdf5')
    model.summary()
    checkpoint = ModelCheckpoint('savemodel/step1+{epoch:02d}.hdf5',monitor='val_loss',verbose=1,
                                 save_weights_only=True, save_best_only=False, period=200)
    # checkpoint = ModelCheckpoint('savemodel/cnnt+{epoch:02d}.hdf5',monitor='val_loss',verbose=1,
    #                              save_weights_only=True, save_best_only=False, period=10)
    earlystop = EarlyStopping(patience=10, verbose=1)  # defined but never passed to fit() below
    tensorboard = TensorBoard(write_graph=True)        # likewise not in the callbacks list

    # With in-memory NumPy arrays, pass batch_size directly; steps_per_epoch and
    # validation_steps are for generator inputs and conflict with validation_split.
    res = model.fit(trainX, trainY,
                    batch_size=batchsize,
                    epochs=epoch,
                    callbacks=[checkpoint],
                    validation_split=0.2)
    loss_h = res.history['loss']
    np_loss = np.array(loss_h)
    np.savetxt('txt220.txt', np_loss)
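
# The compile() call in main() references a custom `mse1` loss and `accuracy1`
# metric that are not defined anywhere in this snippet. Below is a minimal
# sketch of plausible Keras-backend definitions; the exact behavior (plain MSE,
# tolerance-based accuracy with an assumed 0.5 threshold) is a guess.
import keras.backend as K

def mse1(y_true, y_pred):
    # Assumed: ordinary mean squared error, matching the name.
    return K.mean(K.square(y_pred - y_true), axis=-1)

def accuracy1(y_true, y_pred):
    # Assumed: fraction of predictions within 0.5 of the target value.
    return K.mean(K.cast(K.abs(y_pred - y_true) < 0.5, K.floatx()))
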
def train():
    log_dir = "logs\\fit\\" + datetime.now().strftime("%Y%m%d-%H%M%S")
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                          histogram_freq=1)

    vocab, labels, training, output = init_data.load_data()
    len_x = math.floor(len(training) * 0.8)
    len_y = math.floor(len(output) * 0.8)
    print(len_x)
    print(len_y)
    x_train, y_train = training[:len_x], output[:len_y]
    x_test, y_test = training[len_x:], output[len_y:]

    print(training[0])
    print(output[0])

    # Build a 3-layer model: a 128-neuron dense layer, a 64-neuron dense layer,
    # and an output layer with one neuron per intent, using softmax to predict the intent.
    model = tf.keras.models.Sequential()
    model.add(
        tf.keras.layers.Dense(128,
                              input_shape=(len(training[0]), ),
                              activation='relu'))
    model.add(tf.keras.layers.Dropout(0.5))
    model.add(tf.keras.layers.Dense(64, activation='relu'))
    model.add(tf.keras.layers.Dropout(0.5))
    model.add(tf.keras.layers.Dense(len(output[0]), activation='softmax'))

    sgd = tf.keras.optimizers.SGD(learning_rate=0.01,
                                  decay=1e-6,
                                  momentum=0.9,
                                  nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    hist = model.fit(
        np.array(x_train),
        np.array(y_train),
        epochs=1000,
        batch_size=8,
        verbose=1,
        validation_data=(np.array(x_test), np.array(y_test)),
        callbacks=[tensorboard_callback],
    )
    print("Average test loss: ", np.average(hist.history['loss']))

    model.save("chatbot_model.h5", hist)
def data_split():
    """
    data_split函数的主要作用是将原始数据分为训练数据和测试数据,其中训练数据和测试数据的比例为2:1
    """
    x_data,y_data = load_data()
    x_training = []
    x_test= []
    y_training = []
    y_test = []
    for i in range(len(x_data)):
        if random.random() < 0.67:  # ~2/3 to training, matching the 2:1 ratio in the docstring
            x_training.append(x_data[i])
            y_training.append(y_data[i])
        else:
            x_test.append(x_data[i])
            y_test.append(y_data[i])
    x_training = np.array(x_training)
    y_training = np.array(y_training)
    x_test = np.array(x_test)
    y_test = np.array(y_test)
    return (x_training,x_test,y_training,y_test)
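
# Usage sketch (not in the original): the split is random per call, so the
# sizes only approximate the 2:1 ratio described in the docstring.
x_tr, x_te, y_tr, y_te = data_split()
print(len(x_tr), len(x_te))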
Example #4
from nltk.tokenize import word_tokenize
import json
import random
from keras.models import Sequential, load_model
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import SGD
import tensorflow as tf
import numpy as np
import nltk
from nltk.stem import LancasterStemmer
import os
import argparse
import chatbot_train
import init_data  # provides load_data(), used below

stemmer = LancasterStemmer()

vocab, labels, training, output = init_data.load_data()


def bag_of_words(sentence, vocab):
    # Encode the sentence as a 0/1 vector: bag[i] is 1 if vocabulary word i
    # appears in the sentence after lowercasing and stemming.
    bag = [0 for _ in range(len(vocab))]
    s_words = nltk.word_tokenize(sentence)
    s_words = [stemmer.stem(word.lower()) for word in s_words]
    for se in s_words:
        for i, w in enumerate(vocab):
            if w == se:
                bag[i] = 1
    return np.array(bag)
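
# Usage sketch (not in the original): encode one sentence against the loaded
# vocab; the result is a fixed-length 0/1 numpy vector.
print(bag_of_words("hello there", vocab))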

# picks the highest probability and returns the index.
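# The function that comment describes is missing from this snippet; below is a
# minimal sketch of what it might look like. The name `classify` and returning
# the label (rather than the raw index) are assumptions.
def classify(sentence, model, vocab, labels):
    # Encode the sentence, run the model, and pick the most probable intent.
    probs = model.predict(np.array([bag_of_words(sentence, vocab)]))[0]
    return labels[int(np.argmax(probs))]
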
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Model and predict on the iris dataset using the k-nearest neighbors
# classifier provided by scikit-learn.

from init_data import load_data
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split  # cross_validation was removed; model_selection is the current module

x_data, y_data = load_data()  # load the data
x_training, x_test, y_training, y_test = train_test_split(
    x_data, y_data, random_state=10)  # split into training and test sets
estimator = KNeighborsClassifier()  # build the k-nearest neighbors classifier
estimator.fit(x_training, y_training)  # train the model
y_predicted = estimator.predict(x_test)  # predict with the trained model
accuracy = np.mean(y_test == y_predicted) * 100  # compute prediction accuracy
print('The accuracy is {0:.1f}%'.format(accuracy))
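
# Usage sketch (not in the original): scikit-learn's accuracy_score computes
# the same fraction directly.
from sklearn.metrics import accuracy_score
print('The accuracy is {0:.1f}%'.format(accuracy_score(y_test, y_predicted) * 100))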