Пример #1
0
    def classify(self, sentence, model_output: tflearn.DNN, user_id="default") -> list:
        """Classify *sentence* with the trained model and build a reply payload.

        Returns a 4-element list: [response text, tag, context, confidence].
        When two or more candidate intents score below 0.50 the bot answers
        with a clarification request instead of a normal response.
        """
        raw_scores = model_output.predict([self.bow(sentence, self.words)])[0]

        # Keep only candidates above the threshold, best first.
        scored = [[idx, prob] for idx, prob in enumerate(raw_scores)
                  if prob > self.ERROR_THRESHOLD]
        scored.sort(key=lambda pair: pair[1], reverse=True)

        ranked = [(self.classes[idx], prob) for idx, prob in scored]

        # Collect low-confidence scores (< 0.50) as numpy scalars.
        weak_scores = []
        for _, prob in ranked:
            score = np.array(prob)
            if score < 0.50:
                weak_scores.append(score)

        # More than one weak candidate -> ask the user to rephrase.
        if len(weak_scores) > 1:
            weak_scores.sort(reverse=True)
            return ['Noel masih belajar nih, bisa diperjelas lagi pertanyaan nya?', 'error', '',
                    weak_scores[0].tolist()]

        context = {}
        for intent in self.intents["intents"]:
            if intent["tag"] != ranked[0][0]:
                continue
            context[user_id] = intent["context_set"]

            reply = [random.choice(intent['responses']), intent['tag'], context[user_id]]
            reply.append(ranked[0][1].tolist())
            return reply
Пример #2
0
def chat(model: tflearn.DNN, data: T.Dict[str, T.Any], words: T.List,
         labels: T.List) -> None:
    """
    Initialize a discussion with a user and respond to user input with trained model.

    Reads lines from stdin until the user types "q"/"Q", classifies each line
    with the model's bag-of-words prediction and prints a random response
    drawn from the matching intent in *data*.
    """
    print("Start speaking with me! Enter Q to quit")

    while True:
        inp = input("You: ")
        # BUG FIX: the original also tested `inp.lower() == "Q"`, which can
        # never be true after lower(); one case-folded comparison suffices.
        if inp.lower() == "q":
            break

        results = model.predict([bag_of_words(inp, words)])
        results_index = numpy.argmax(results)
        tag = labels[results_index]

        # BUG FIX: `responses` used to leak across loop iterations — an
        # unmatched tag would either crash (first turn) or silently reuse the
        # previous turn's responses. Reset it and only answer on a match.
        responses = []
        for tg in data["intents"]:
            if tg['tag'] == tag:
                responses = tg['responses']

        if responses:
            print(random.choice(responses))
Пример #3
0
# -*- coding: utf-8 -*-
"""
XOR solved with a 2x2x1 feed-forward network.

Source : https://towardsdatascience.com/tflearn-soving-xor-with-a-2x2x1-feed-forward-neural-network-6c07d88689ed
"""
from tflearn import DNN
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression

# The four XOR input/target pairs.
X = [[0, 0], [0, 1], [1, 0], [1, 1]]
Y = [[0], [1], [1], [0]]

# Topology: 2 inputs -> 2 tanh hidden units -> 1 tanh output unit.
input_layer = input_data(shape=[None, 2])
hidden_layer = fully_connected(input_layer, 2, activation='tanh')
output_layer = fully_connected(hidden_layer, 1, activation='tanh')

# Stochastic gradient descent on binary cross-entropy, learning rate 5.
regression = regression(output_layer, optimizer='sgd',
                        loss='binary_crossentropy', learning_rate=5)
model = DNN(regression)

# Train the network.
model.fit(X, Y, n_epoch=5000, show_metric=True)

# A positive activation is interpreted as logical True.
print('Expected:  ', [i[0] > 0 for i in Y])
print('Predicted: ', [i[0] > 0 for i in model.predict(X)])

# Fetch the learnt weights (return values were discarded in the original too).
model.get_weights(hidden_layer.W)
model.get_weights(output_layer.W)

model.save("tflearn-xor")
Пример #4
0
testY = trainY[int(0.3 * len(trainY)):]

# Training
model = DNN(net, clip_gradients=0., tensorboard_verbose=2)
embeddingWeights = get_layer_variables_by_name('EmbeddingLayer')[0]
# Assign your own weights (for example, a numpy array [input_dim, output_dim])
model.set_weights(embeddingWeights, embeddings)
model.fit(trainX,
          trainY,
          n_epoch=3,
          validation_set=0.1,
          show_metric=True,
          batch_size=32,
          shuffle=True)
#print( model.evaluate(testX, testY) )
predictions = model.predict(testX)
predictions = prob2Onehot(predictions)
#print("Predictions : ", list(predictions[10]))

##Calculate F1 Score
tp = 0
tn = 0
fp = 0
fn = 0
for i in range(predictions.shape[0]):
    if list(testY[i]) == [1, 0]:
        if list(predictions[i]) == [1, 0]:
            tp += 1
        else:
            fn += 1
    else:
Пример #5
0
class FireDetector:
    """Convolutional fire detector built on tflearn.

    FireNet-style layout: three conv / max-pool / local-response-norm
    stages, two dropout-regularised 4096-unit dense layers and a 2-way
    softmax output.
    """

    def __init__(self,
                 height=INPUT_HEIGHT,
                 width=INPUT_WIDTH,
                 n_channels=NUMBER_CHANNELS):
        """Store the expected input geometry and build the CNN graph."""
        self.height = height
        self.width = width
        self.n_channels = n_channels

        self.logger = create_logger('Fire Detector')

        self._build_network()

    def _build_network(self):
        """Assemble the CNN graph and wrap it in a tflearn DNN (self.cnn_)."""
        self.logger.info('Started CNN structure construction')
        # BUG FIX: the channel count was hard-coded to 3 even though the
        # constructor accepts n_channels and _ensure_expected_shape validates
        # against it; use the configured value so both stay consistent.
        network = input_data(shape=[None, self.height, self.width,
                                    self.n_channels],
                             dtype=float32)

        network = conv_2d(network, 64, 5, strides=4, activation='relu')
        network = max_pool_2d(network, 3, strides=2)
        network = local_response_normalization(network)

        network = conv_2d(network, 128, 4, activation='relu')
        network = max_pool_2d(network, 3, strides=2)
        network = local_response_normalization(network)

        network = conv_2d(network, 256, 1, activation='relu')
        network = max_pool_2d(network, 3, strides=2)
        network = local_response_normalization(network)

        # Dense head: two 4096-unit tanh layers with 50% dropout each.
        network = fully_connected(network, 4096, activation='tanh')
        network = dropout(network, 0.5)

        network = fully_connected(network, 4096, activation='tanh')
        network = dropout(network, 0.5)

        network = fully_connected(network, 2, activation='softmax')

        network = regression(network,
                             optimizer='momentum',
                             loss='categorical_crossentropy',
                             learning_rate=0.001)
        self.cnn_ = DNN(network,
                        checkpoint_path='firenet',
                        max_checkpoints=1,
                        tensorboard_verbose=2)
        self.logger.info('Finished CNN structure construction')

    def load_weights(self, weights_path):
        """Load pre-trained weights (weights only, no graph) into the CNN."""
        self.logger.info('Loading weights...')
        self.cnn_.load(weights_path, weights_only=True)
        self.logger.info('Weights loaded successfully')

    def predict(self, images):
        """Run inference on a batch of images.

        Returns one score per image — the first softmax component
        (presumably P(fire); confirm against the training label order).
        """
        images = self._ensure_expected_shape(images)
        predictions = self.cnn_.predict(images)
        predictions = [pred[0] for pred in predictions]
        return predictions

    def _ensure_expected_shape(self, images):
        """Reshape any image that is not (height, width, n_channels)."""
        images_reshaped = []
        expected_shape = (self.height, self.width, self.n_channels)

        for img in images:
            if img.shape != expected_shape:
                img = reshape_image(img, self.height, self.width)
            images_reshaped.append(img)

        return images_reshaped
def tflearn_OneClass_NN_linear(data_train, data_test, labels_train):
    """Train a one-class neural network (OC-NN) and score train/test data.

    Alternating optimisation: the network weights are fitted for a few
    epochs, then the margin variable `rho` is reset to the v-quantile of
    the training scores, and the cycle repeats.

    NOTE(review): this is Python 2 code (print statements) and it relies on
    module-level globals not visible here — `K` (hidden size for theta0),
    `v` (quantile parameter), `va` (presumably tflearn.variables) and
    `oneClassNN` — confirm against the full script.

    Returns [pos_decisionScore, neg_decisionScore]: decision scores minus
    the learnt margin for the training and test sets respectively.
    """

    X = data_train
    Y = labels_train

    D = X.shape[1]

    No_of_inputNodes = X.shape[1]

    # Clear all the graph variables created in previous run and start fresh
    tf.reset_default_graph()

    # Define the network
    input_layer = input_data(shape=[None,
                                    No_of_inputNodes])  # input layer of size
    # Deterministic initial parameter vector: [V (K), w (K*D), rho (1)].
    np.random.seed(42)
    theta0 = np.random.normal(0, 1, K + K * D + 1) * 0.0001
    #theta0 = np.random.normal(0, 1, K + K*D + 1) # For linear
    hidden_layer = fully_connected(
        input_layer,
        4,
        bias=False,
        activation='linear',
        name="hiddenLayer_Weights",
        weights_init="normal")  # hidden layer of size 4 (despite K-based theta0)

    output_layer = fully_connected(
        hidden_layer,
        1,
        bias=False,
        activation='linear',
        name="outputLayer_Weights",
        weights_init="normal")  # output layer of size 1

    # Initialize rho (the OC-NN margin) as a scalar TF variable.
    value = 0.01
    init = tf.constant_initializer(value)
    rho = va.variable(name='rho', dtype=tf.float32, shape=[], initializer=init)

    rcomputed = []
    auc = []

    sess = tf.Session()
    sess.run(tf.initialize_all_variables())
    # print sess.run(tflearn.get_training_mode()) #False
    tflearn.is_training(True, session=sess)
    print sess.run(tflearn.get_training_mode())  #now True

    # Last entry of theta0 is the initial rho estimate.
    temp = theta0[-1]

    # Custom regression op with the one-class loss.
    oneClassNN_Net = oneClassNN(output_layer,
                                v,
                                rho,
                                hidden_layer,
                                output_layer,
                                optimizer='sgd',
                                loss='OneClassNN_Loss',
                                learning_rate=1)

    model = DNN(oneClassNN_Net, tensorboard_verbose=3)

    # Seed the layers from theta0: first K entries -> output weights (K x 1),
    # next K*D entries -> hidden weights reshaped to (D, K).
    model.set_weights(output_layer.W, theta0[0:K][:, np.newaxis])
    model.set_weights(hidden_layer.W, np.reshape(theta0[K:K + K * D], (D, K)))

    # Alternating optimisation loop: fit weights, then reset rho to the
    # v-quantile of the current training scores.
    iterStep = 0
    while (iterStep < 100):
        print "Running Iteration :", iterStep
        # Call the cost function
        y_pred = model.predict(data_train)  # Apply some ops
        tflearn.is_training(False, session=sess)
        y_pred_test = model.predict(data_test)  # Apply some ops
        tflearn.is_training(True, session=sess)
        value = np.percentile(y_pred, v * 100)
        tflearn.variables.set_value(rho, value, session=sess)
        rStar = rho
        model.fit(X, Y, n_epoch=2, show_metric=True, batch_size=100)
        iterStep = iterStep + 1
        rcomputed.append(rho)
        temp = tflearn.variables.get_value(rho, session=sess)

    # print "Rho",temp
    # print "y_pred",y_pred
    # print "y_predTest", y_pred_test

    # Sigmoid link used when scoring (a linear variant is commented out).
    # g = lambda x: x
    g = lambda x: 1 / (1 + tf.exp(-x))

    def nnScore(X, w, V, g):
        # Network score g(X w) V as a TF op.
        return tf.matmul(g((tf.matmul(X, w))), V)

    # Format the datatype to suite the computation of nnscore
    X = X.astype(np.float32)
    X_test = data_test
    X_test = X_test.astype(np.float32)
    # assign the learnt weights
    # wStar = hidden_layer.W
    # VStar = output_layer.W
    # Get weights values of fc2
    wStar = model.get_weights(hidden_layer.W)
    VStar = model.get_weights(output_layer.W)

    # print "Hideen",wStar
    # print VStar

    train = nnScore(X, wStar, VStar, g)
    test = nnScore(X_test, wStar, VStar, g)

    # Access the value inside the train and test for plotting
    # Create a new session and run the example
    # sess = tf.Session()
    # sess.run(tf.initialize_all_variables())
    arrayTrain = train.eval(session=sess)
    arrayTest = test.eval(session=sess)

    # print "Train Array:",arrayTrain
    # print "Test Array:",arrayTest

    # plt.hist(arrayTrain-temp,  bins = 25,label='Normal');
    # plt.hist(arrayTest-temp, bins = 25, label='Anomalies');
    # plt.legend(loc='upper right')
    # plt.title('r = %1.6f- Sigmoid Activation ' % temp)
    # plt.show()

    # Decision scores relative to the final margin rho (temp).
    pos_decisionScore = arrayTrain - temp
    neg_decisionScore = arrayTest - temp

    return [pos_decisionScore, neg_decisionScore]
'''

# Truth-table inputs for XOR and the matching expected outputs.
x_train = [[0, 0], [0, 1], [1, 0], [1, 1]]
y_train = [[0], [1], [1], [0]]

# Topology: an input layer of width 2, one hidden layer with two tanh
# nodes, and a single tanh output node.
input_layer = input_data(shape=[None, 2])
hidden_layer = fully_connected(input_layer, 2, activation='tanh')
output_layer = fully_connected(hidden_layer, 1, activation='tanh')

# Regression head for the output layer: stochastic gradient descent on
# binary cross-entropy with a learning rate of 1.
regression = regression(output_layer,
                        optimizer='sgd',
                        loss='binary_crossentropy',
                        learning_rate=1)
model = DNN(regression)

# Train for 5000 epochs.
model.fit(x_train, y_train, n_epoch=5000, show_metric=True)

# Positive activations are interpreted as logical True.
print('Expected: ', [pred[0] > 0 for pred in y_train])
print('Predicted: ', [pred[0] > 0 for pred in model.predict(x_train)])
Пример #8
0
from tflearn import DNN
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression

## X = Input data, Y = Output data
X = [[0, 0], [0, 1], [1, 0], [1, 1]]
Y = [[0], [1], [1], [0]]

## One input layer, one hidden layer and one output layer
input_layer = input_data(shape=[None, 2])
hidden_layer = fully_connected(input_layer, 2, activation='tanh')
output_layer = fully_connected(hidden_layer, 1, activation='tanh')

# SGD on binary cross-entropy, learning rate 5.
regression = regression(output_layer,
                        optimizer='sgd',
                        loss='binary_crossentropy',
                        learning_rate=5)
model = DNN(regression)

model.fit(X, Y, n_epoch=5000, show_metric=True)

# BUG FIX: the thresholded predictions were computed and then discarded (a
# leftover notebook cell has no visible effect in a script); print them so
# the result is actually shown.
print([i[0] > 0 for i in model.predict(X)])

# Inspect the learnt weights and biases of both layers.
print(model.get_weights(hidden_layer.W), model.get_weights(hidden_layer.b))
print(model.get_weights(output_layer.W), model.get_weights(output_layer.b))
Пример #9
0
                        hidden_layer,
                        output_layer,
                        optimizer='sgd',
                        loss='OneClassNN_Loss',
                        learning_rate=1)

# Wrap the one-class network (whose construction is truncated above) in a
# trainable tflearn DNN.
model = DNN(oneClassNN, tensorboard_verbose=3)

# Seed the layers from theta0: first K entries -> output weights (K x 1),
# next K*D entries -> hidden weights reshaped to (D, K).
# NOTE(review): K, D, theta0, v, rho, sess, X, Y, rcomputed come from the
# surrounding (not shown) scope — confirm against the full script. This is
# Python 2 code (print statement below).
model.set_weights(output_layer.W, theta0[0:K][:, np.newaxis])
model.set_weights(hidden_layer.W, np.reshape(theta0[K:K + K * D], (D, K)))

# Alternating optimisation: fit the network for 2 epochs, then reset the
# margin variable rho to the v-quantile of the current training scores.
iterStep = 0
while (iterStep < 100):
    print "Running Iteration :", iterStep
    # Call the cost function
    y_pred = model.predict(data_train)  # Apply some ops
    tflearn.is_training(False, session=sess)
    y_pred_test = model.predict(data_test)  # Apply some ops
    tflearn.is_training(True, session=sess)
    value = np.percentile(y_pred, v * 100)
    tflearn.variables.set_value(rho, value, session=sess)
    rStar = rho
    model.fit(X, Y, n_epoch=2, show_metric=True, batch_size=100)
    iterStep = iterStep + 1
    rcomputed.append(rho)
    temp = tflearn.variables.get_value(rho, session=sess)

    # print "Rho",temp
    # print "y_pred",y_pred
    # print "y_predTest", y_pred_test
class CowClassifier(object):
    """ Cow classifier: a small CNN that labels 32x32 RGB images as cow / non-cow. """

    def __init__(self):
        """ Build the preprocessing pipeline and the network graph. """
        # Input images are square, 32 pixels on a side.
        self.image_size = 32  # 32x32

        # tflearn graph handles, populated by the setup methods below.
        self.tf_img_prep = None
        self.tf_img_aug = None
        self.tf_network = None
        self.tf_model = None

        # Step 1: image preprocessing; step 2: network topology.
        self.setup_image_preprocessing()
        self.setup_nn_network()

    def setup_image_preprocessing(self):
        """ Configure per-feature normalisation and synthetic augmentation. """
        # Zero-center and std-normalise images feature-wise.
        self.tf_img_prep = ImagePreprocessing()
        self.tf_img_prep.add_featurewise_zero_center()
        self.tf_img_prep.add_featurewise_stdnorm()

        # Extra synthetic data: random horizontal flips and rotations up to 30 deg.
        self.tf_img_aug = ImageAugmentation()
        self.tf_img_aug.add_random_flip_leftright()
        self.tf_img_aug.add_random_rotation(max_angle=30.)

    def setup_nn_network(self):
        """ Assemble the conv/pool/dense graph ending in a 2-way softmax. """
        # 32x32x3 input, routed through the preprocessing and augmentation
        # configured above.
        self.tf_network = input_data(
            shape=[None, self.image_size, self.image_size, 3],
            data_preprocessing=self.tf_img_prep,
            data_augmentation=self.tf_img_aug)

        # Convolutional body: conv(32) -> maxpool -> conv(64) -> conv(64) -> maxpool.
        conv1 = conv_2d(self.tf_network, 32, 3,
                        activation='relu', name='conv_1')
        self.tf_network = max_pool_2d(conv1, 2)

        conv2 = conv_2d(self.tf_network, 64, 3,
                        activation='relu', name='conv_2')
        conv3 = conv_2d(conv2, 64, 3,
                        activation='relu', name='conv_3')
        self.tf_network = max_pool_2d(conv3, 2)

        # Dense head: 512 relu units, 50% dropout, then cow / non-cow softmax.
        self.tf_network = fully_connected(self.tf_network, 512,
                                          activation='relu')
        self.tf_network = dropout(self.tf_network, 0.5)
        self.tf_network = fully_connected(self.tf_network, 2,
                                          activation='softmax')

        # Training configuration: Adam on categorical cross-entropy,
        # tracking accuracy.
        accuracy = Accuracy(name="Accuracy")
        self.tf_network = regression(self.tf_network,
                                     optimizer='adam',
                                     loss='categorical_crossentropy',
                                     learning_rate=0.0005,
                                     metric=accuracy)

    def load_model(self, model_path):
        """ Wrap the graph in a DNN and load trained weights from model_path. """
        self.tf_model = DNN(self.tf_network, tensorboard_verbose=0)
        self.tf_model.load(model_path)

    def predict_image(self, image_path):
        """ Classify a single image file; returns the model's class scores. """
        # NOTE(review): scipy.ndimage.imread and scipy.misc.imresize were
        # removed from modern SciPy — this code requires an old SciPy (+PIL).
        img = scipy.ndimage.imread(image_path, mode="RGB")

        # Resize to the 32x32 network input with bicubic interpolation.
        img = scipy.misc.imresize(img, (32, 32),
                                  interp="bicubic").astype(np.float32,
                                                           casting='unsafe')

        return self.tf_model.predict([img])
Пример #11
0
class Bot:
    """Intent-classification chat bot backed by a small tflearn softmax net.

    Loads intents from src/models/intents.json, builds a bag-of-words
    training set, trains (or reloads) the classifier and answers user
    input through a Qt text widget (``ui.textEdit``).
    """
    def __init__(self):
        self.words = []  # stemmed vocabulary built from all patterns
        self.labels = []  # sorted intent tags
        self.docs_x = []  # tokenised patterns
        self.docs_y = []  # intent tag aligned with each docs_x entry
        self.stemmer = LancasterStemmer()
        self.data = []  # parsed intents.json (a dict once read() runs)
        self.training = []  # bag-of-words rows (numpy array after modelsetup())
        self.output = []  # one-hot label rows (numpy array after modelsetup())
        self.out_empty=[]  # zero template for one-hot rows
        self.model=[]  # tflearn DNN once setup() has run
        self.count=-1  # incremented on every chat() call
        self.say=""  # last reply shown in the UI
        self.Network=Network()  # external lookup helper used for "Looking up" replies

    def read(self):
        """Load the intents definition file into self.data."""
        with open("src/models/intents.json") as f:
            self.data=load(f)
    def dump(self):
        """Pickle (words, labels, training, output) for later reuse."""
        with open("src/models/data.pickle", "wb") as f:
            dump((self.words, self.labels, self.training, self.output), f)
    def stem(self):
        """Tokenise every pattern, building vocabulary, docs and label list."""
        for intent in self.data["intents"]:
            for pattern in intent["patterns"]:
                wrds = word_tokenize(pattern)
                self.words.extend(wrds)
                self.docs_x.append(wrds)
                self.docs_y.append(intent["tag"])

            if intent["tag"] not in self.labels:
                self.labels.append(intent["tag"])

        # Stem, lower-case and de-duplicate the vocabulary ("?" is dropped).
        self.words = [self.stemmer.stem(w.lower()) for w in self.words if w != "?"]
        self.words = sorted(list(set(self.words)))
        self.labels = sorted(self.labels)
    def modelsetup(self):
        """Turn docs into bag-of-words vectors and one-hot labels, then pickle."""
        self.out_empty = [0 for _ in range(len(self.labels))]

        for x, doc in enumerate(self.docs_x):
            bag = []

            wrds = [self.stemmer.stem(w.lower()) for w in doc]

            # 1 if the vocabulary word occurs in this pattern, else 0.
            for w in self.words:
                if w in wrds:
                    bag.append(1)
                else:
                    bag.append(0)

            output_row = self.out_empty[:]
            output_row[self.labels.index(self.docs_y[x])] = 1
            self.training.append(bag)
            self.output.append(output_row)

        self.training = array(self.training)
        self.output = array(self.output)
        self.dump()

    def setup(self):
        """Build the 10x10 fully-connected softmax net; load or train weights."""
        ops.reset_default_graph()
        net = input_data(shape=[None, len(self.training[0])])
        net = fully_connected(net, 10)
        net = fully_connected(net, 10)
        net = fully_connected(net, len(self.output[0]), activation="softmax")
        net = regression(net)
        self.model = DNN(net)
        # Reuse a previously trained model if its checkpoint index exists.
        if exists("src/models/model.tflearn.index"):
            self.model.load("src/models/model.tflearn")
        else:
            self.model.fit(self.training, self.output, n_epoch=1000, batch_size=8, show_metric=True)
            self.model.save("src/models/model.tflearn")
    def indexWord(self,x,word):
        """Return the last whitespace token of *x* containing *word* ('' if none)."""
        x=x.split(" ")
        ch=""
        for i in x:
            if i.find(word)!=-1:
                ch=i
        return ch
    def bag_of_words(self,s, words):
        """Vectorise sentence *s*; also collect stems unknown to the vocabulary."""
        bag = [0 for _ in range(len(words))]
        translate=[]
        s_words = word_tokenize(s)
        s_words = [self.stemmer.stem(word.lower()) for word in s_words]

        for se in s_words:
            for i, w in enumerate(words):
                if w == se:
                    bag[i] = 1
                # Out-of-vocabulary stems are returned so chat() can look them up.
                if se not in words and se not in translate:
                    translate.append(se)

        return array(bag),translate
    def chat(self,x,ui):
        """Classify user input *x* and write the bot's reply into ui.textEdit."""
        try:
            self.count+=1
            predinp,translate=self.bag_of_words(x, self.words)
            if translate:
                # Recover the original (unstemmed) token of the first unknown word.
                translate=self.indexWord(str(x),translate[0])
                print(translate)
            results = self.model.predict([predinp])
            results_index = argmax(results)
            tag = self.labels[results_index]
        except Exception as e:
            # NOTE(review): on failure `results`/`tag` stay unbound and the
            # next block raises again (also swallowed) — errors only print.
            print(e)
        try:
            # Only answer when the classifier is reasonably confident.
            if results[0][results_index] > 0.4:
                for tg in self.data["intents"]:
                    if tg['tag'] == tag:
                        responses = tg['responses']
                self.say=choice(responses)
                if self.say=="Looking up":
                    # Delegate unknown terms to the external Network helper.
                    self.say=self.Network.Connect(translate.upper())
                    ui.textEdit.setText(self.say)
                else:
                    ui.textEdit.setText(self.say)
            else:
                self.say="Sorry i can't understand i am still learning try again."
                ui.textEdit.setText(self.say)
        except Exception as e:
            print(e)
Пример #12
0
# Single tanh output unit on top of the hidden layer defined earlier
# (hidden_layer, X and Y come from the part of the script above this block).
output_layer = fully_connected(hidden_layer, 1,
                               activation='tanh')

# Stochastic gradient descent on binary cross-entropy, learning rate 5.
regression = regression(output_layer, optimizer='sgd',
                        loss='binary_crossentropy', learning_rate=5)
model = DNN(regression, tensorboard_verbose=3)

# Train on the XOR truth table.
model.fit(X, Y, n_epoch=5000, show_metric=True)

# A positive output is read as True, a negative one as False.
print('Expected:  ', [i[0] > 0 for i in Y])
print('Predicted: ', [i[0] > 0 for i in model.predict(X)])

# Sanity checks (from Chris): inspect the raw activations directly.
print("Prints int results ( positive is True, ex 0.99 ,  False is -0.99 )")
for raw in model.predict(X):
    print(raw)

print("")
print("Should be 0")
print(model.predict([[0, 0]]))

print("")
print("Should be 1")
print(model.predict([[1, 0]]))
Пример #13
0
def foo(img_fn, model_fn='../data/model/model_weights'):
    """Read a photo, locate a licence plate, segment its characters and OCR
    them with a small CNN.

    Parameters:
        img_fn: path of the input image (read as grayscale).
        model_fn: path of the trained tflearn model weights.

    Returns:
        str: the recognised characters (A-Z, 0-9) concatenated left to right.
    """
    img = cv2.imread(img_fn, cv2.IMREAD_GRAYSCALE)

    # Detect plate candidates with a Haar cascade; only the first hit is used.
    haar_fn = '../data/haarcascade_russian_plate_number.xml'
    haar = cv2.CascadeClassifier(haar_fn)
    detected = haar.detectMultiScale(img)
    plates = []
    for x, y, w, h in detected:
        obj = img[y:y + h, x:x + w]
        plates.append(obj)

    # Binarise: pixels darker than the threshold become "character" pixels.
    chars = plates[0] < filters.threshold_minimum(plates[0])

    labeled_chars, a = ndi.label(chars)
    labeled_chars = (labeled_chars > 1).astype(np.int8)

    c = measure.find_contours(labeled_chars, .1)

    # Bounding boxes of the contours; boxes narrower than 15 units are noise.
    # NOTE(review): skimage contours are (row, col), so xs here are rows —
    # the x/y names are swapped relative to the usual image convention.
    letters = []
    for i, v in enumerate(c):
        xs, ys = zip(*[i for i in v])
        x = int(min(xs))
        y = int(min(ys))
        w = int(max(xs) - x + 2)
        h = int(max(ys) - y + 2)
        if w < 15:
            continue
        letters.append((y, x, h, w))

    # Tuples start with the column coordinate, so sorting orders boxes
    # left-to-right.
    letters = sorted(letters)

    letters_img = [plates[0][x:x + w, y:y + h] for y, x, h, w in letters]

    # Keep crops whose top-left pixel is bright — assumes a light plate
    # background (presumably uint8 pixel values; confirm).
    letters_img = [i for i in letters_img if i[0, 0] > 127]

    # Drop crops much larger than the median crop size (merged blobs etc.).
    sizes = [image.size for image in letters_img]
    median = np.median(sizes)
    allowed_size = median + median / 4

    letters_img = [image for image in letters_img if image.size < allowed_size]

    # Target side length of the network input.
    size = 64

    # Resize each crop to 64 px height (keeping aspect ratio) and pad with
    # white columns on both sides up to 64 px width.
    normalized_img = []
    for i in letters_img:
        ratio = i.shape[0] / i.shape[1]
        img1 = transform.resize(i, [size, int(size / ratio)], mode='constant')
        width = img1.shape[1]
        missing = (size - width) // 2
        ones = np.ones([size, missing])
        img2 = np.append(ones, img1, 1)
        img3 = np.append(img2, ones, 1)
        # Odd padding total: add one extra column on the right.
        if 2 * missing + width != size:
            one = np.ones([size, 1])
            img4 = np.append(img3, one, 1)
        else:
            img4 = img3
        normalized_img.append(img4 * 255)

    # CNN: three conv/max-pool stages, a 200-unit relu layer with dropout,
    # and a 36-way softmax (26 letters + 10 digits).
    net_input = input_data(shape=[None, 64, 64, 1])

    conv1 = conv_2d(net_input,
                    nb_filter=4,
                    filter_size=5,
                    strides=[1, 1, 1, 1],
                    activation='relu')
    max_pool1 = max_pool_2d(conv1, kernel_size=2)

    conv2 = conv_2d(max_pool1,
                    nb_filter=8,
                    filter_size=5,
                    strides=[1, 2, 2, 1],
                    activation='relu')
    max_pool2 = max_pool_2d(conv2, kernel_size=2)

    conv3 = conv_2d(max_pool2,
                    nb_filter=12,
                    filter_size=4,
                    strides=[1, 1, 1, 1],
                    activation='relu')
    max_pool3 = max_pool_2d(conv3, kernel_size=2)

    fc1 = fully_connected(max_pool3, n_units=200, activation='relu')
    drop1 = dropout(fc1, keep_prob=.5)

    fc2 = fully_connected(drop1, n_units=36, activation='softmax')
    net = regression(fc2)

    model = DNN(network=net)
    model.load(model_file=model_fn)

    # Class index -> character mapping used by the softmax head.
    labels = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')

    # Classify each normalised crop and collect the predicted characters.
    predicted = []
    for i in normalized_img:
        y = model.predict(i.reshape([1, 64, 64, 1]))
        y_pred = np.argmax(y[0])
        predicted.append(labels[y_pred])

    return ''.join(predicted)