Example #1
0
    def train(self):
        """Run the full training pipeline and persist the resulting model."""
        # Gather image filenames, then size the numpy buffers from that list.
        self.build_image_filenames_list()
        self.init_np_variables()

        # Feed both file lists into the TF dataset (cow files labeled 0,
        # non-cow files labeled 1), then finalize the dataset.
        self.add_tf_dataset(self.list_cow_files, 0)
        self.add_tf_dataset(self.list_noncow_files, 1)
        self.process_tf_dataset()

        # Configure image preprocessing and the network topology.
        self.setup_image_preprocessing()
        self.setup_nn_network()

        # Fit a DNN on the prepared tensors, checkpointing along the way.
        model = DNN(self.tf_network, tensorboard_verbose=3,
                    checkpoint_path='model_cows.tfl.ckpt')
        model.fit(self.tf_x, self.tf_y, n_epoch=100, shuffle=True,
                  validation_set=(self.tf_x_test, self.tf_y_test),
                  show_metric=True, batch_size=96,
                  snapshot_epoch=True,
                  run_id='model_cows')

        # Persist the trained weights.
        model.save('model_cows.tflearn')
    def train(
        self,
        X_train,
        Y_train,
        X_val,
        Y_val
    ):
        """Build the CNN, fit it on (X_train, Y_train) with (X_val, Y_val)
        as the validation split, and save the weights to saved_model.bin.

        Inputs are reshaped to (-1, 48, 48, 1) before fitting, so they must
        be flattenable to 48x48 single-channel images.
        """
        with tf.Graph().as_default():
            print("Building Model...........")
            network = build_CNN()
            model = DNN(
                network,
                tensorboard_dir="path_to_logs",
                tensorboard_verbose=0,
                checkpoint_path="path_to_checkpoints",
                max_checkpoints=1
            )

            if self.is_training:
                # Training phase: log the hyper-parameters in use.
                # NOTE(review): the emotion count and learning rate are
                # printed as hard-coded literals (7, 0.016) rather than read
                # from configuration — confirm they match the real settings.
                print("start training...")
                print("  - emotions = {}".format(7))
                print("  - optimizer = '{}'".format(self.optimizer))
                print("  - learning_rate = {}".format(0.016))
                print("  - learning_rate_decay = {}".format(self.learning_rate_decay))
                # Fixed label typo: "otimizer_param" -> "optimizer_param".
                print("  - optimizer_param ({}) = {}".format(self.optimizer, self.optimizer_param))
                print("  - Dropout = {}".format(self.dropout))
                print("  - epochs = {}".format(self.epochs))

            # NOTE(review): fitting runs even when self.is_training is False;
            # only the hyper-parameter logging above is gated on it.
            start_time = time.time()
            model.fit(
                {'input': X_train.reshape(-1, 48, 48, 1)},
                {'output': Y_train},
                validation_set=(
                    {'input': X_val.reshape(-1, 48, 48, 1)},
                    {'output': Y_val},
                ),
                batch_size=128,
                n_epoch=10,
                show_metric=True,
                snapshot_step=100
            )

            training_time = time.time() - start_time
            print("training time = {0:.1f} sec".format(training_time))
            print("saving model...")
            model.save("saved_model.bin")
Example #3
0
def tflearn_cifar():
    """Train a small convolutional network on the CIFAR-10 image set."""
    (X_train, Y_train), (X_test, Y_test) = cifar10.load_data()
    X_train, Y_train = shuffle(X_train, Y_train)
    Y_train = to_categorical(Y_train, nb_classes=10)
    Y_test = to_categorical(Y_test, nb_classes=10)

    # Zero-center and STD-normalize using dataset-wide statistics.
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Augment with random horizontal flips and rotations up to 25 degrees.
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)

    # Network: conv/pool stack -> dense 512 -> softmax over 10 classes.
    net = input_data(shape=(None, 32, 32, 3),
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)
    net = conv_2d(net, 32, 3, activation="relu")
    net = max_pool_2d(net, 2)
    net = conv_2d(net, 64, 3, activation="relu")
    net = conv_2d(net, 64, 3, activation="relu")
    net = max_pool_2d(net, 2)
    net = fully_connected(net, 512, activation="relu")
    net = dropout(net, 0.5)
    net = fully_connected(net, 10, activation="softmax")
    net = regression(net,
                     optimizer="adam",
                     loss="categorical_crossentropy",
                     learning_rate=0.001)

    # Fit for 50 epochs, validating on the held-out test split.
    model = DNN(net, tensorboard_verbose=0)
    model.fit(X_train,
              Y_train,
              n_epoch=50,
              shuffle=True,
              validation_set=(X_test, Y_test),
              show_metric=True,
              batch_size=96,
              run_id="cifar10_cnn")
Example #4
0
File: ready.py Project: matbur/inz
def use_tflearn(x_train, y_train, x_test, y_test):
    """Fit a four-hidden-layer sigmoid MLP, save it, and return the model."""
    net = input_data(shape=[None, x_train.shape[1]], name='input')
    # Four shrinking sigmoid layers: 24 -> 16 -> 12 -> 8 units.
    for width in (24, 16, 12, 8):
        net = fully_connected(net, width, activation='sigmoid', bias_init='normal')
    net = regression(net)
    model = DNN(net,
                tensorboard_dir=TENSORBOARD_DIR.as_posix(),
                tensorboard_verbose=3,
                best_checkpoint_path=CHECKPOINT_PATH.as_posix())
    model.fit(x_train, y_train,
              validation_set=(x_test, y_test),
              n_epoch=100,
              batch_size=10,
              show_metric=True,
              run_id='DNN-4f')
    model.save(MODEL_FILE.as_posix())
    return model
Example #5
0
# -*- coding: utf-8 -*-
"""
Source : https://towardsdatascience.com/tflearn-soving-xor-with-a-2x2x1-feed-forward-neural-network-6c07d88689ed
"""
from tflearn import DNN
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression

# Training examples: the XOR truth table and its labels.
X = [[0,0], [0,1], [1,0], [1,1]]
Y = [[0], [1], [1], [0]]

input_layer = input_data(shape=[None, 2]) #input layer of size 2
hidden_layer = fully_connected(input_layer , 2, activation='tanh') #hidden layer of size 2
output_layer = fully_connected(hidden_layer, 1, activation='tanh') #output layer of size 1

# Use Stochastic Gradient Descent and Binary Crossentropy as loss function.
# Bound to a distinct name instead of rebinding (shadowing) the imported
# `regression` function, as the original did.
xor_net = regression(output_layer , optimizer='sgd', loss='binary_crossentropy', learning_rate=5)
model = DNN(xor_net)

# Fit the model (dropped the stray trailing semicolon).
model.fit(X, Y, n_epoch=5000, show_metric=True)

# Predict all examples: a positive activation is read as True.
print ('Expected:  ', [i[0] > 0 for i in Y])
print ('Predicted: ', [i[0] > 0 for i in model.predict(X)])

# NOTE(review): these reads are side-effect free and their results are
# discarded; kept only for parity with the source tutorial.
model.get_weights(hidden_layer.W)
model.get_weights(output_layer.W)

model.save("tflearn-xor")
Example #6
0
                 loss='binary_crossentropy',
                 learning_rate=0.005)
# NOTE(review): the two lines above are keyword arguments of a call whose
# opening is outside the visible region — this chunk is truncated.
print("After regression : ", net.get_shape().as_list())

testX = trainX[int(0.3 * len(trainY)):]
testY = trainY[int(0.3 * len(trainY)):]
# NOTE(review): testX/testY take everything from the 30% mark onward and
# overlap the data also used for training — presumably meant as a holdout
# split; verify the upstream slicing.

# Training
model = DNN(net, clip_gradients=0., tensorboard_verbose=2)
# Look up the embedding layer's variable so it can be overwritten with
# pre-trained embeddings before fitting.
embeddingWeights = get_layer_variables_by_name('EmbeddingLayer')[0]
# Assign your own weights (for example, a numpy array [input_dim, output_dim])
model.set_weights(embeddingWeights, embeddings)
model.fit(trainX,
          trainY,
          n_epoch=3,
          validation_set=0.1,  # hold out 10% of the training data
          show_metric=True,
          batch_size=32,
          shuffle=True)
#print( model.evaluate(testX, testY) )
predictions = model.predict(testX)
# Convert class probabilities to one-hot predictions.
predictions = prob2Onehot(predictions)
#print("Predictions : ", list(predictions[10]))

##Calculate F1 Score
# Confusion-matrix counters: true/false positives and negatives.
tp = 0
tn = 0
fp = 0
fn = 0
for i in range(predictions.shape[0]):
    if list(testY[i]) == [1, 0]:
# NOTE(review): the loop body is cut off here — the remainder of this
# example is missing from the paste.
def train(optimizer=HYPERPARAMS.optimizer,
          optimizer_param=HYPERPARAMS.optimizer_param,
          learning_rate=HYPERPARAMS.learning_rate,
          keep_prob=HYPERPARAMS.keep_prob,
          learning_rate_decay=HYPERPARAMS.learning_rate_decay,
          decay_step=HYPERPARAMS.decay_step,
          train_model=True):
    """Train or evaluate the emotion-recognition network (Python 2 code).

    With train_model=True: builds the model, fits it on the training data,
    optionally saves it, and returns the validation accuracy. With
    train_model=False: loads the model from TRAINING.save_model_path and
    returns the test accuracy. Defaults come from the module-level
    HYPERPARAMS configuration object.
    """

    print "loading dataset " + DATASET.name + "..."
    if train_model:
        data, validation = load_data(validation=True)
    else:
        # Evaluation also needs the held-out test split.
        data, validation, test = load_data(validation=True, test=True)

    with tf.Graph().as_default():
        print "building model..."
        network = build_model(optimizer, optimizer_param, learning_rate,
                              keep_prob, learning_rate_decay, decay_step)
        model = DNN(network,
                    tensorboard_dir=TRAINING.logs_dir,
                    tensorboard_verbose=0,
                    checkpoint_path=TRAINING.checkpoint_dir,
                    max_checkpoints=TRAINING.max_checkpoints)

        #tflearn.config.init_graph(seed=None, log_device=False, num_cores=6)

        if train_model:
            # Training phase
            # Log the effective hyper-parameters before fitting.
            print "start training..."
            print "  - emotions = {}".format(NETWORK.output_size)
            print "  - optimizer = '{}'".format(optimizer)
            print "  - learning_rate = {}".format(learning_rate)
            print "  - learning_rate_decay = {}".format(learning_rate_decay)
            # NOTE(review): "otimizer_param" below is a typo for
            # "optimizer_param" (left as-is; it is runtime output).
            print "  - otimizer_param ({}) = {}".format(
                'beta1' if optimizer == 'adam' else 'momentum',
                optimizer_param)
            print "  - keep_prob = {}".format(keep_prob)
            print "  - epochs = {}".format(TRAINING.epochs)
            print "  - use landmarks = {}".format(NETWORK.use_landmarks)
            print "  - use hog + landmarks = {}".format(
                NETWORK.use_hog_and_landmarks)
            print "  - use hog sliding window + landmarks = {}".format(
                NETWORK.use_hog_sliding_window_and_landmarks)
            print "  - use batchnorm after conv = {}".format(
                NETWORK.use_batchnorm_after_conv_layers)
            print "  - use batchnorm after fc = {}".format(
                NETWORK.use_batchnorm_after_fully_connected_layers)

            start_time = time.time()
            if NETWORK.use_landmarks:
                # Two-input variant: raw images plus landmark features.
                model.fit([data['X'], data['X2']],
                          data['Y'],
                          validation_set=([validation['X'],
                                           validation['X2']], validation['Y']),
                          snapshot_step=TRAINING.snapshot_step,
                          show_metric=TRAINING.vizualize,
                          batch_size=TRAINING.batch_size,
                          n_epoch=TRAINING.epochs)
            else:
                model.fit(data['X'],
                          data['Y'],
                          validation_set=(validation['X'], validation['Y']),
                          snapshot_step=TRAINING.snapshot_step,
                          show_metric=TRAINING.vizualize,
                          batch_size=TRAINING.batch_size,
                          n_epoch=TRAINING.epochs)
                # No landmark input in this branch; evaluate() below will
                # receive None as the X2 argument.
                validation['X2'] = None
            training_time = time.time() - start_time
            print "training time = {0:.1f} sec".format(training_time)

            if TRAINING.save_model:
                print "saving model..."
                model.save(TRAINING.save_model_path)
                # tflearn may write "<path>.meta"; normalize to the expected
                # filename when the bare path is missing.
                if not(os.path.isfile(TRAINING.save_model_path)) and \
                        os.path.isfile(TRAINING.save_model_path + ".meta"):
                    os.rename(TRAINING.save_model_path + ".meta",
                              TRAINING.save_model_path)

            print "evaluating..."
            validation_accuracy = evaluate(model, validation['X'],
                                           validation['X2'], validation['Y'])
            print "  - validation accuracy = {0:.1f}".format(
                validation_accuracy * 100)
            return validation_accuracy
        else:
            # Testing phase : load saved model and evaluate on test dataset
            print "start evaluation..."
            print "loading pretrained model..."
            if os.path.isfile(TRAINING.save_model_path):
                model.load(TRAINING.save_model_path)
            else:
                print "Error: file '{}' not found".format(
                    TRAINING.save_model_path)
                exit()

            if not NETWORK.use_landmarks:
                # Landmark inputs are not used by this configuration.
                validation['X2'] = None
                test['X2'] = None

            print "--"
            print "Validation samples: {}".format(len(validation['Y']))
            print "Test samples: {}".format(len(test['Y']))
            print "--"
            print "evaluating..."
            start_time = time.time()
            validation_accuracy = evaluate(model, validation['X'],
                                           validation['X2'], validation['Y'])
            print "  - validation accuracy = {0:.1f}".format(
                validation_accuracy * 100)
            test_accuracy = evaluate(model, test['X'], test['X2'], test['Y'])
            print "  - test accuracy = {0:.1f}".format(test_accuracy * 100)
            print "  - evalution time = {0:.1f} sec".format(time.time() -
                                                            start_time)
            return test_accuracy
def tflearn_OneClass_NN_linear(data_train, data_test, labels_train):
    """One-class neural network with linear activations (Python 2 code).

    Alternately re-estimates the margin variable rho as the v-quantile of
    the current training scores and refits for two epochs, 100 times, then
    returns [positive, negative] decision scores for the training and test
    sets relative to the final rho.

    NOTE(review): depends on module-level names (K, v, va, oneClassNN,
    tflearn, tf, np) defined elsewhere in the original module — confirm
    they are in scope before reuse.
    """

    X = data_train
    Y = labels_train

    # Input dimensionality.
    D = X.shape[1]

    No_of_inputNodes = X.shape[1]

    # Clear all the graph variables created in previous run and start fresh
    tf.reset_default_graph()

    # Define the network
    input_layer = input_data(shape=[None,
                                    No_of_inputNodes])  # input layer of size

    np.random.seed(42)
    # Small random initial parameter vector; K comes from module scope.
    theta0 = np.random.normal(0, 1, K + K * D + 1) * 0.0001
    #theta0 = np.random.normal(0, 1, K + K*D + 1) # For linear
    hidden_layer = fully_connected(
        input_layer,
        4,
        bias=False,
        activation='linear',
        name="hiddenLayer_Weights",
        weights_init="normal")  # hidden layer of size 2

    output_layer = fully_connected(
        hidden_layer,
        1,
        bias=False,
        activation='linear',
        name="outputLayer_Weights",
        weights_init="normal")  # output layer of size 1

    # Initialize rho
    value = 0.01
    init = tf.constant_initializer(value)
    rho = va.variable(name='rho', dtype=tf.float32, shape=[], initializer=init)

    rcomputed = []
    auc = []

    sess = tf.Session()
    sess.run(tf.initialize_all_variables())
    # print sess.run(tflearn.get_training_mode()) #False
    tflearn.is_training(True, session=sess)
    print sess.run(tflearn.get_training_mode())  #now True

    temp = theta0[-1]

    # Regression head using the custom one-class loss; v is module-level.
    oneClassNN_Net = oneClassNN(output_layer,
                                v,
                                rho,
                                hidden_layer,
                                output_layer,
                                optimizer='sgd',
                                loss='OneClassNN_Loss',
                                learning_rate=1)

    model = DNN(oneClassNN_Net, tensorboard_verbose=3)

    # Seed both layers from slices of theta0.
    model.set_weights(output_layer.W, theta0[0:K][:, np.newaxis])
    model.set_weights(hidden_layer.W, np.reshape(theta0[K:K + K * D], (D, K)))

    iterStep = 0
    # Alternate: set rho to the v-quantile of current scores, then fit.
    while (iterStep < 100):
        print "Running Iteration :", iterStep
        # Call the cost function
        y_pred = model.predict(data_train)  # Apply some ops
        tflearn.is_training(False, session=sess)
        y_pred_test = model.predict(data_test)  # Apply some ops
        tflearn.is_training(True, session=sess)
        value = np.percentile(y_pred, v * 100)
        tflearn.variables.set_value(rho, value, session=sess)
        rStar = rho
        model.fit(X, Y, n_epoch=2, show_metric=True, batch_size=100)
        iterStep = iterStep + 1
        rcomputed.append(rho)
        temp = tflearn.variables.get_value(rho, session=sess)

    # print "Rho",temp
    # print "y_pred",y_pred
    # print "y_predTest", y_pred_test

    # g = lambda x: x
    # Sigmoid nonlinearity used when scoring (linear variant commented out).
    g = lambda x: 1 / (1 + tf.exp(-x))

    def nnScore(X, w, V, g):
        # Score = g(X @ w) @ V with the learned weight matrices.
        return tf.matmul(g((tf.matmul(X, w))), V)

    # Format the datatype to suite the computation of nnscore
    X = X.astype(np.float32)
    X_test = data_test
    X_test = X_test.astype(np.float32)
    # assign the learnt weights
    # wStar = hidden_layer.W
    # VStar = output_layer.W
    # Get weights values of fc2
    wStar = model.get_weights(hidden_layer.W)
    VStar = model.get_weights(output_layer.W)

    # print "Hideen",wStar
    # print VStar

    train = nnScore(X, wStar, VStar, g)
    test = nnScore(X_test, wStar, VStar, g)

    # Access the value inside the train and test for plotting
    # Create a new session and run the example
    # sess = tf.Session()
    # sess.run(tf.initialize_all_variables())
    arrayTrain = train.eval(session=sess)
    arrayTest = test.eval(session=sess)

    # print "Train Array:",arrayTrain
    # print "Test Array:",arrayTest

    # plt.hist(arrayTrain-temp,  bins = 25,label='Normal');
    # plt.hist(arrayTest-temp, bins = 25, label='Anomalies');
    # plt.legend(loc='upper right')
    # plt.title('r = %1.6f- Sigmoid Activation ' % temp)
    # plt.show()

    # Decision scores relative to the final rho value (temp).
    pos_decisionScore = arrayTrain - temp
    neg_decisionScore = arrayTest - temp

    return [pos_decisionScore, neg_decisionScore]
'''

# NOTE(review): a stray triple-quote two lines above may leave this entire
# example inside an unterminated string literal — confirm against the full
# file and remove it if so.
# This is the set of possible values to feed to XOR
x_train = [[0, 0], [0, 1], [1, 0], [1, 1]]

# Given the values fed, this is the expected output
y_train = [[0], [1], [1], [0]]

# Let's define our DNN.  We've got one input layer with a single node,
# one hidden layer with two nodes, and one output layer with a single node..
input_layer = input_data(shape=[None, 2])
hidden_layer = fully_connected(input_layer, 2, activation='tanh')
output_layer = fully_connected(hidden_layer, 1, activation='tanh')

# Let's define our regression activation function for output layer.
# we define the optimizing function to be stochastic gradient descent
# we define our loss function as binary cross entropy.  We define
# our learning rate to be 1.
# NOTE(review): the assignment below shadows the imported `regression`
# function; harmless in this linear script but worth renaming.
regression = regression(output_layer,
                        optimizer='sgd',
                        loss='binary_crossentropy',
                        learning_rate=1)
model = DNN(regression)

# Now we train the model.
model.fit(x_train, y_train, n_epoch=5000, show_metric=True)

# Let's see how it worked.
print('Expected: ', [i[0] > 0 for i in y_train])
print('Predicted: ', [i[0] > 0 for i in model.predict(x_train)])
Example #10
0
# -*- coding: utf-8 -*-
import datetime

from tflearn import DNN
from tflearn.layers.core import fully_connected
from tflearn.layers.core import input_data
from tflearn.layers.estimator import regression

# XOR truth table: inputs and targets.
X = [[0, 0], [0, 1], [1, 0], [1, 1]]
Y = [[0], [1], [1], [0]]

# Timestamp (HH:MM:SS) used as the TensorBoard run identifier; equivalent
# to slicing the last 8 characters of str(now()) before the microseconds.
date = datetime.datetime.now().strftime("%H:%M:%S")

# 2 -> 2 -> 1 network with tanh activations throughout.
input_layer = input_data(shape=[None, 2])
hidden_layer = fully_connected(input_layer, 2, activation="tanh")
output_layer = fully_connected(hidden_layer, 1, activation="tanh")

# SGD with binary cross-entropy at learning rate 5. Note this rebinds the
# imported `regression` function to the constructed layer.
regression = regression(
    output_layer, optimizer="sgd", loss="binary_crossentropy", learning_rate=5
)
model = DNN(regression, tensorboard_verbose=3)

model.fit(X, Y, n_epoch=2000, show_metric=True, run_id=date)

# A positive activation is read as True.
print([i[0] > 0 for i in model.predict(X)])
Example #11
0
# NOTE(review): truncated fragment — `model`, `theta0`, `K`, `D`, `rho`,
# `sess`, `v`, and the data arrays are defined in the original example
# this paste was cut from.
# Seed the output/hidden layers with slices of the initial parameter vector.
model.set_weights(output_layer.W, theta0[0:K][:, np.newaxis])
model.set_weights(hidden_layer.W, np.reshape(theta0[K:K + K * D], (D, K)))

iterStep = 0
# Alternate between re-estimating rho (the v-quantile of the training
# scores) and fitting the network for two epochs, 100 times (Python 2).
while (iterStep < 100):
    print "Running Iteration :", iterStep
    # Call the cost function
    y_pred = model.predict(data_train)  # Apply some ops
    tflearn.is_training(False, session=sess)
    y_pred_test = model.predict(data_test)  # Apply some ops
    tflearn.is_training(True, session=sess)
    value = np.percentile(y_pred, v * 100)
    tflearn.variables.set_value(rho, value, session=sess)
    rStar = rho
    model.fit(X, Y, n_epoch=2, show_metric=True, batch_size=100)
    iterStep = iterStep + 1
    rcomputed.append(rho)
    temp = tflearn.variables.get_value(rho, session=sess)

    # print "Rho",temp
    # print "y_pred",y_pred
    # print "y_predTest", y_pred_test

# g = lambda x: x
# Sigmoid nonlinearity used by nnScore below.
g = lambda x: 1 / (1 + tf.exp(-x))


def nnScore(X, w, V, g):
    """Score inputs: apply hidden weights w, nonlinearity g, then output weights V."""
    hidden = tf.matmul(X, w)
    return tf.matmul(g(hidden), V)
from tflearn.layers.core import input_data, fully_connected
from tflearn.layers.estimator import regression

from PyGameSnakeNN import retro_snake

# Create the Network: 5-step input window -> 25 ReLU units -> linear output.
network = input_data(shape=[None, 5, 1], name='input')
network = fully_connected(network, 25, activation='relu')
network = fully_connected(network, 1, activation='linear')
network = regression(network, optimizer='adam', learning_rate=1e-2, loss='mean_square', name='target')

# Create the model
model = DNN(network, tensorboard_dir='log')

# Load training data
X = np.array([i[0] for i in x]).reshape(-1, 5, 1)
Y = np.array([i[0] for i in y]).reshape(-1, 1)

# train the NN
NN_filename = "model.h5"
model.fit(X, Y, n_epoch=3, shuffle=True, run_id=NN_filename)

# Load trained model weights. Use NN_filename instead of repeating the
# "model.h5" literal so a rename needs only one change.
# NOTE(review): nothing in this script saves to NN_filename first — the
# load presumably relies on a file produced by an earlier run; confirm.
model.load(NN_filename, weights_only=True)

# Let the NN play a game
s2 = retro_snake(gui=True) # retro_snake is a class which contains the Snake game
input_vect, output_vect = s2.play(testNN=True, _model=model) # Play method runs the game with trained NN model


Example #13
0
# NOTE(review): truncated fragment — `model` (the partially built network),
# the training arrays X/Y, and the imports are defined earlier in the
# original example.
model = conv_2d(model, 60, 5, activation='relu')
model = max_pool_2d(model, 5)

model = fully_connected(model, 1000, activation='relu')
model = dropout(model, 0.9)

# Two-class softmax head trained with categorical cross-entropy.
model = fully_connected(model, 2, activation='softmax')
model = regression(model, loss='categorical_crossentropy', name='targets')
model = DNN(model)

# model.load('ai')
#
# print("Ready")
# while True:
#     num = input()
#     image = cv2.imread('test1/{}.jpg'.format(num))
#     image = cv2.resize(image, (150, 150))
#     image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
#     image = np.float16(np.abs(np.subtract(np.divide(image, 255), 1)))
#     X = np.array(image).reshape(-1, 150, 150, 1)
#     print(model.predict(X))
print("Begin training...")
start = time.time()
# Train for 10 epochs, holding out 15% of the data for validation.
model.fit(X,
          Y,
          n_epoch=10,
          show_metric=True,
          shuffle=True,
          validation_set=0.15)
print("Total time taken:", time.time() - start, 'seconds')
Example #14
0
from tflearn import fully_connected, input_data, regression, DNN
from tflearn.datasets import mnist
import time

# Load MNIST with one-hot labels.
trainX, trainY, testX, testY = mnist.load_data(one_hot=True)

# 784 -> 200 (ReLU) -> 10 (softmax) classifier trained with Adam.
net = input_data([None, 784])
net = fully_connected(net, 200, activation='relu')
net = fully_connected(net, 10, activation='softmax')
net = regression(net, optimizer='adam', loss='categorical_crossentropy')
model = DNN(net)

# Time a 5-epoch training run validated on the test split.
start = time.time()
model.fit(trainX, trainY, n_epoch=5, validation_set=(testX, testY), show_metric=True)
print("Total time taken:", time.time() - start, 'seconds')
Example #15
0
class Bot:
    """Intent-matching chatbot: stems tokenized intent patterns, trains a
    small tflearn softmax classifier over bag-of-words vectors, and writes
    responses into a Qt text widget."""
    def __init__(self):
        # Stemmed vocabulary collected from all intent patterns.
        self.words = []
        # Sorted intent tags.
        self.labels = []
        # Tokenized patterns and their tags (parallel lists).
        self.docs_x = []
        self.docs_y = []
        self.stemmer = LancasterStemmer()
        # Raw intents JSON, bag-of-words rows, and one-hot label rows.
        self.data = []
        self.training = []
        self.output = []
        self.out_empty=[]
        self.model=[]
        self.count=-1
        self.say=""
        self.Network=Network()

    def read(self):
        """Load the intents JSON into self.data."""
        with open("src/models/intents.json") as f:
            self.data=load(f)
    def dump(self):
        """Pickle vocabulary, labels, and training tensors to disk."""
        with open("src/models/data.pickle", "wb") as f:
            dump((self.words, self.labels, self.training, self.output), f)
    def stem(self):
        """Tokenize every intent pattern, collect words and tags, then build
        the sorted, de-duplicated, stemmed vocabulary."""
        for intent in self.data["intents"]:
            for pattern in intent["patterns"]:
                wrds = word_tokenize(pattern)
                self.words.extend(wrds)
                self.docs_x.append(wrds)
                self.docs_y.append(intent["tag"])

            if intent["tag"] not in self.labels:
                self.labels.append(intent["tag"])

        # Stem and lowercase, dropping bare question marks.
        self.words = [self.stemmer.stem(w.lower()) for w in self.words if w != "?"]
        self.words = sorted(list(set(self.words)))
        self.labels = sorted(self.labels)
    def modelsetup(self):
        """Convert tokenized patterns into bag-of-words rows plus one-hot
        label rows, then persist them via dump()."""
        self.out_empty = [0 for _ in range(len(self.labels))]

        for x, doc in enumerate(self.docs_x):
            bag = []

            wrds = [self.stemmer.stem(w.lower()) for w in doc]

            # 1 if the vocabulary word appears in this pattern, else 0.
            for w in self.words:
                if w in wrds:
                    bag.append(1)
                else:
                    bag.append(0)

            output_row = self.out_empty[:]
            output_row[self.labels.index(self.docs_y[x])] = 1
            self.training.append(bag)
            self.output.append(output_row)

        self.training = array(self.training)
        self.output = array(self.output)
        self.dump()

    def setup(self):
        """Build the two-hidden-layer softmax network; load saved weights if
        present, otherwise train for 1000 epochs and save."""
        ops.reset_default_graph()
        net = input_data(shape=[None, len(self.training[0])])
        net = fully_connected(net, 10)
        net = fully_connected(net, 10)
        net = fully_connected(net, len(self.output[0]), activation="softmax")
        net = regression(net)
        self.model = DNN(net)
        if exists("src/models/model.tflearn.index"):
            self.model.load("src/models/model.tflearn")
        else:
            self.model.fit(self.training, self.output, n_epoch=1000, batch_size=8, show_metric=True)
            self.model.save("src/models/model.tflearn")
    def indexWord(self,x,word):
        """Return the last whitespace-delimited token of x that contains
        word as a substring ("" when none matches)."""
        x=x.split(" ")
        ch=""
        for i in x:
            if i.find(word)!=-1:
                ch=i
        return ch
    def bag_of_words(self,s, words):
        """Vectorize sentence s against vocabulary `words`; also return the
        stemmed tokens that are not in the vocabulary."""
        bag = [0 for _ in range(len(words))]
        translate=[]
        s_words = word_tokenize(s)
        s_words = [self.stemmer.stem(word.lower()) for word in s_words]

        for se in s_words:
            for i, w in enumerate(words):
                if w == se:
                    bag[i] = 1
                if se not in words and se not in translate:
                    translate.append(se)

        return array(bag),translate
    def chat(self,x,ui):
        """Classify user input x and write a response into ui.textEdit.

        NOTE(review): if the first try-block fails, `results`/`tag` stay
        undefined, so the second try-block raises NameError which is only
        printed — the user gets no reply in that case.
        """
        try:
            self.count+=1
            predinp,translate=self.bag_of_words(x, self.words)
            if translate:
                # Recover the original (unstemmed) token for lookups.
                translate=self.indexWord(str(x),translate[0])
                print(translate)
            results = self.model.predict([predinp])
            results_index = argmax(results)
            tag = self.labels[results_index]
        except Exception as e:
            print(e)
        try:
            # Only answer when the classifier is reasonably confident.
            if results[0][results_index] > 0.4:
                for tg in self.data["intents"]:
                    if tg['tag'] == tag:
                        responses = tg['responses']
                self.say=choice(responses)
                if self.say=="Looking up":
                    # Delegate lookups to the Network helper.
                    self.say=self.Network.Connect(translate.upper())
                    ui.textEdit.setText(self.say)
                else:
                    ui.textEdit.setText(self.say)
            else:
                self.say="Sorry i can't understand i am still learning try again."
                ui.textEdit.setText(self.say)
        except Exception as e:
            print(e)