Example #1
def run_combo_XOR():
    X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
    Y_nand = [[1.], [1.], [1.], [0.]]
    Y_or = [[0.], [1.], [1.], [1.]]

    g = tflearn.input_data(shape=[None, 2])

    # Nand graph
    g_nand = tflearn.fully_connected(g, 32, activation='linear')
    g_nand = tflearn.fully_connected(g_nand, 32, activation='linear')
    g_nand = tflearn.fully_connected(g_nand, 1, activation='sigmoid')
    g_nand = tflearn.regression(
        g_nand, optimizer='sgd', learning_rate=2., loss='binary_crossentropy')

    # Or graph
    g_or = tflearn.fully_connected(g, 32, activation='linear')
    g_or = tflearn.fully_connected(g_or, 32, activation='linear')
    g_or = tflearn.fully_connected(g_or, 1, activation='sigmoid')
    g_or = tflearn.regression(
        g_or, optimizer='sgd', learning_rate=2., loss='binary_crossentropy')

    g_xor = tflearn.merge([g_nand, g_or], mode='elemwise_mul')

    m = train_model(g_xor, X, [Y_nand, Y_or])
    # sess = tf.Session()  # would be a session separate from the DNN's
    sess = m.session  # reuse the DNN model's own session
    print(
        sess.run(tflearn.merge([Y_nand, Y_or], mode='elemwise_mul')))
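The train_model helper is not defined in this snippet; a minimal sketch consistent with the call above and with the test_core_layers example later on this page (the name, defaults, and body are assumptions):

def train_model(net, X, Y_targets, n_epoch=400):
    # Hypothetical helper: wrap the merged graph in a DNN and fit it
    # against one target list per regression op.
    m = tflearn.DNN(net)
    m.fit(X, Y_targets, n_epoch=n_epoch, snapshot_epoch=False)
    return m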
Example #2
def xor_operation():
    # Function to simulate XOR operation using graph combo of NAND and OR
    X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
    Y_nand = [[1.], [1.], [1.], [0.]]
    Y_or = [[0.], [1.], [1.], [1.]]

    with tf.Graph().as_default():
        graph = tflearn.input_data(shape=[None, 2])
        graph_nand = tflearn.fully_connected(graph, 32, activation='linear')
        graph_nand = tflearn.fully_connected(graph_nand, 32, activation='linear')
        graph_nand = tflearn.fully_connected(graph_nand, 1, activation='sigmoid')
        graph_nand = tflearn.regression(graph_nand, optimizer='sgd', learning_rate=2., loss='binary_crossentropy')

        graph_or = tflearn.fully_connected(graph, 32, activation='linear')
        graph_or = tflearn.fully_connected(graph_or, 32, activation='linear')
        graph_or = tflearn.fully_connected(graph_or, 1, activation='sigmoid')
        graph_or = tflearn.regression(graph_or, optimizer='sgd', learning_rate=2., loss='binary_crossentropy')

        graph_xor = tflearn.merge([graph_nand, graph_or], mode='elemwise_mul')

        # Model training
        model = tflearn.DNN(graph_xor)

        model.fit(X, [Y_nand, Y_or], n_epoch=100, snapshot_epoch=False)
        prediction = model.predict([[0., 1.]])
        print("Prediction: ", prediction)
Example #3
    def test_dnn(self):

        with tf.Graph().as_default():
            X = [3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167, 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1]
            Y = [1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221, 2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3]
            input = tflearn.input_data(shape=[None])
            linear = tflearn.single_unit(input)
            regression = tflearn.regression(linear, optimizer='sgd', loss='mean_square',
                                            metric='R2', learning_rate=0.01)
            m = tflearn.DNN(regression)
            # Testing fit and predict
            m.fit(X, Y, n_epoch=1000, show_metric=True, snapshot_epoch=False)
            res = m.predict([3.2])[0]
            self.assertGreater(res, 1.3, "DNN test (linear regression) failed! with score: " + str(res) + " expected > 1.3")
            self.assertLess(res, 1.8, "DNN test (linear regression) failed! with score: " + str(res) + " expected < 1.8")

            # Testing save method
            m.save("test_dnn.tflearn")
            self.assertTrue(os.path.exists("test_dnn.tflearn"))

        with tf.Graph().as_default():
            input = tflearn.input_data(shape=[None])
            linear = tflearn.single_unit(input)
            regression = tflearn.regression(linear, optimizer='sgd', loss='mean_square',
                                            metric='R2', learning_rate=0.01)
            m = tflearn.DNN(regression)

            # Testing load method
            m.load("test_dnn.tflearn")
            res = m.predict([3.2])[0]
            self.assertGreater(res, 1.3, "DNN test (linear regression) failed after loading model! score: " + str(res) + " expected > 1.3")
            self.assertLess(res, 1.8, "DNN test (linear regression) failed after loading model! score: " + str(res) + " expected < 1.8")
Example #4
    def test_regression_placeholder(self):
        '''
        Check that regression does not duplicate placeholders
        '''

        with tf.Graph().as_default():

            g = tflearn.input_data(shape=[None, 2])
            g_nand = tflearn.fully_connected(g, 1, activation='linear')
            with tf.name_scope("Y"):
                Y_in = tf.placeholder(shape=[None, 1], dtype=tf.float32, name="Y")
            tflearn.regression(g_nand, optimizer='sgd',
                               placeholder=Y_in,
                               learning_rate=2.,
                               loss='binary_crossentropy', 
                               op_name="regression1",
                               name="Y")
            # for this test, just use the same default trainable_vars
            # in practice, this should be different for the two regressions
            tflearn.regression(g_nand, optimizer='adam',
                               placeholder=Y_in,
                               learning_rate=2.,
                               loss='binary_crossentropy', 
                               op_name="regression2",
                               name="Y")

            self.assertEqual(len(tf.get_collection(tf.GraphKeys.TARGETS)), 1)
Example #5
def build_network():
    network = tflearn.input_data(shape=[None, 2])
    network = tflearn.fully_connected(network, 64, activation='relu', regularizer='L2', weight_decay=0.001)
    network = tflearn.fully_connected(network, 1, activation='sigmoid')
    network = tflearn.regression(network, optimizer='sgd', learning_rate=0.3,
                           loss='mean_square')
    return network
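A minimal usage sketch for this network, reusing the XOR-style data from the examples above (the data and epoch count here are illustrative):

X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
Y = [[0.], [1.], [1.], [0.]]
model = tflearn.DNN(build_network())
model.fit(X, Y, n_epoch=100, snapshot_epoch=False)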
Example #6
def do_rnn(trainX, testX, trainY, testY):
    max_document_length = 64
    y_test = testY
    trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
    testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Network building
    net = tflearn.input_data([None, max_document_length])
    net = tflearn.embedding(net, input_dim=10240000, output_dim=64)
    net = tflearn.lstm(net, 64, dropout=0.1)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')

    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0, tensorboard_dir="dga_log")
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
              batch_size=10, run_id="dga", n_epoch=1)

    y_predict_list = model.predict(testX)
    # print(y_predict_list)

    y_predict = []
    for i in y_predict_list:
        print(i[0])
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)

    print(classification_report(y_test, y_predict))
    print(metrics.confusion_matrix(y_test, y_predict))
Example #7
    def __init__(self, s_date, n_frame):
        self.n_epoch = 20
        prev_bd = int(s_date[:6])-1
        prev_ed = int(s_date[9:15])-1
        if prev_bd%100 == 0: prev_bd -= 98
        if prev_ed%100 == 0: prev_ed -= 98
        pred_s_date = "%d01_%d01" % (prev_bd, prev_ed)
        prev_model = '../model/tflearn/reg_l3_bn/big/%s' % pred_s_date
        self.model_dir = '../model/tflearn/reg_l3_bn/big/%s' % s_date

        tf.reset_default_graph()
        tflearn.init_graph(gpu_memory_fraction=0.1)
        input_layer = tflearn.input_data(shape=[None, 23*n_frame], name='input')
        dense1 = tflearn.fully_connected(input_layer, 400, name='dense1', activation='relu')
        dense1n = tflearn.batch_normalization(dense1, name='BN1')
        dense2 = tflearn.fully_connected(dense1n, 100, name='dense2', activation='relu')
        dense2n = tflearn.batch_normalization(dense2, name='BN2')
        dense3 = tflearn.fully_connected(dense2n, 1, name='dense3')
        output = tflearn.single_unit(dense3)
        regression = tflearn.regression(output, optimizer='adam', loss='mean_square',
                                metric='R2', learning_rate=0.001)
        self.estimators = tflearn.DNN(regression)
        if os.path.exists('%s/model.tfl' % prev_model):
            self.estimators.load('%s/model.tfl' % prev_model)
            self.n_epoch = 10
        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)
Example #8
File: 16-3.py  Project: DemonZeros/1book
def do_rnn(trainX, testX, trainY, testY):
    global n_words
    # Data preprocessing
    # Sequence padding
    print "GET n_words embedding %d" % n_words


    trainX = pad_sequences(trainX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    testX = pad_sequences(testX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Network building
    net = tflearn.input_data([None, MAX_DOCUMENT_LENGTH])
    net = tflearn.embedding(net, input_dim=n_words, output_dim=128)
    net = tflearn.lstm(net, 128, dropout=0.8)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')

    # Training
    model = tflearn.DNN(net, tensorboard_verbose=3)
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
              batch_size=32, run_id="maidou")
Example #9
    def test_sequencegenerator(self):

        with tf.Graph().as_default():
            text = "123456789101234567891012345678910123456789101234567891012345678910"
            maxlen = 5

            X, Y, char_idx = \
                tflearn.data_utils.string_to_semi_redundant_sequences(text, seq_maxlen=maxlen, redun_step=3)

            g = tflearn.input_data(shape=[None, maxlen, len(char_idx)])
            g = tflearn.lstm(g, 32)
            g = tflearn.dropout(g, 0.5)
            g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
            g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                                   learning_rate=0.1)

            m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                          seq_maxlen=maxlen,
                                          clip_gradients=5.0)
            m.fit(X, Y, validation_set=0.1, n_epoch=100, snapshot_epoch=False)
            res = m.generate(10, temperature=1., seq_seed="12345")
            self.assertEqual(res, "123456789101234", "SequenceGenerator test failed! Generated sequence: " + res + " expected '123456789101234'")

            # Testing save method
            m.save("test_seqgen.tflearn")
            self.assertTrue(os.path.exists("test_seqgen.tflearn"))

            # Testing load method
            m.load("test_seqgen.tflearn")
            res = m.generate(10, temperature=1., seq_seed="12345")
            self.assertEqual(res, "123456789101234", "SequenceGenerator test failed after loading model! Generated sequence: " + res + " expected '123456789101234'")
Example #10
def retrain(output_filename):
  num_classes = 120

  # Real-time data preprocessing
  img_prep = tflearn.ImagePreprocessing()
  img_prep.add_featurewise_zero_center()
  img_prep.add_featurewise_stdnorm()

  # Real-time data augmentation
  img_aug = tflearn.ImageAugmentation()
  img_aug.add_random_blur(sigma_max=5.)
  img_aug.add_random_crop((224, 224))
  img_aug.add_random_rotation(max_angle=25.)

  softmax = vgg16(softmax_size=num_classes, restore_softmax=False,
                  data_preprocessing=img_prep, data_augmentation=img_aug)
  regression = tflearn.regression(softmax, optimizer='rmsprop',
                                  loss='categorical_crossentropy',
                                  learning_rate=0.001)

  model = tflearn.DNN(regression, checkpoint_path=output_filename,
                      max_checkpoints=3, tensorboard_verbose=3)
  # Load pre-existing model, restoring all weights, except softmax layer ones
  model_file = 'vgg/vgg16.tflearn'
  if not os.path.exists(model_file):
    maybe_download(DATA_URL, 'vgg')
  model.load(model_file)

  # Start fine-tuning
  X, Y = grozi120.load_data()
  model.fit(X, Y, n_epoch=10, validation_set=0.1, shuffle=True,
            show_metric=True, batch_size=64, snapshot_step=200,
            snapshot_epoch=False, run_id=output_filename)

  model.save(output_filename)
Example #11
    def build_cnn_network(self, network):
        """ Build CNN network.

        Args:
            network: base network.

        Returns:
            model: CNN model.

        """
        print('Building CNN network.')
        # Convolutional network building
        network = tflearn.conv_2d(network, 32,
                            self.IMAGE_CHANNEL_NUM,
                          activation='relu')
        network = tflearn.max_pool_2d(network, 2)
        network = tflearn.conv_2d(network, 64,
                          self.IMAGE_CHANNEL_NUM,
                          activation='relu')
        network = tflearn.conv_2d(network, 64,
                          self.IMAGE_CHANNEL_NUM,
                          activation='relu')
        network = tflearn.max_pool_2d(network, 2)
        network = tflearn.fully_connected(
            network, 32 * 32, activation='relu')
        network = tflearn.dropout(network, 0.5)
        # Two category. positive or negative.
        network = tflearn.fully_connected(network, 2,
                                  activation='softmax')
        network = tflearn.regression(network, optimizer='adam',
                             loss='categorical_crossentropy',
                             learning_rate=0.001)
        print("CNN network built.")
        return network
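A hedged usage sketch for the method above; the input shape is an assumption, and `builder` stands for a hypothetical instance of the surrounding class (whose IMAGE_CHANNEL_NUM attribute the method reads):

base = tflearn.input_data(shape=[None, 32, 32, 3])  # assumed input size
network = builder.build_cnn_network(base)  # `builder`: hypothetical class instance
model = tflearn.DNN(network)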
Example #12
def generate_nnet(feats):
    """Generate a neural network.

    Parameters
    ----------
    feats : list with at least one feature vector

    Returns
    -------
    Neural network object
    """
    # Load it here to prevent crash of --help when it's not present
    import tflearn

    tflearn.init_graph(num_cores=2, gpu_memory_fraction=0.6)

    input_shape = (None,
                   feats[0].shape[0],
                   feats[0].shape[1],
                   feats[0].shape[2])
    logging.info("input shape: %s", input_shape)
    net = tflearn.input_data(shape=input_shape)
    net = tflearn.conv_2d(net, 10, 3, activation='relu', regularizer="L2")
    net = tflearn.conv_2d(net, 10, 3, activation='relu', regularizer="L2")
    net = tflearn.fully_connected(net, 2, activation='sigmoid')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.01,
                             loss='categorical_crossentropy', name='target')
    return tflearn.DNN(net)
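A hedged usage sketch for generate_nnet; the feature shape below is an arbitrary assumption, since only feats[0].shape is read:

import numpy as np

feats = [np.zeros((16, 16, 1), dtype=np.float32)]  # hypothetical feature volume
model = generate_nnet(feats)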
Example #13
def do_rnn(x,y):
    global max_document_length
    print "RNN"
    trainX, testX, trainY, testY = train_test_split(x, y, test_size=0.4, random_state=0)
    y_test=testY

    trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
    testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Network building
    net = tflearn.input_data([None, max_document_length])
    net = tflearn.embedding(net, input_dim=10240000, output_dim=128)
    net = tflearn.lstm(net, 128, dropout=0.8)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')

    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(trainX, trainY, validation_set=0.1, show_metric=True,
              batch_size=10, run_id="webshell", n_epoch=5)

    y_predict_list = model.predict(testX)
    y_predict = []
    for i in y_predict_list:
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)

    do_metrics(y_test, y_predict)
Example #14
def run():
    # analogous to a CNN input: the third dimension plays the role of the channel axis
    g = tflearn.input_data(shape=[None, maxlen, len(char_idx)])
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam',
                           loss='categorical_crossentropy',
                           learning_rate=0.001)

    m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                  seq_maxlen=maxlen,
                                  clip_gradients=5.0,
                                  checkpoint_path='models/model_us_cities')

    for i in range(40):
        seed = random_sequence_from_textfile(path, maxlen)
        m.fit(X, Y, validation_set=0.1, batch_size=128,
              n_epoch=1, run_id='us_cities')
        print("-- TESTING...")
        print("-- Test with temperature of 1.2 --")
        print(m.generate(30, temperature=1.2, seq_seed=seed))
        print("-- Test with temperature of 1.0 --")
        print(m.generate(30, temperature=1.0, seq_seed=seed))
        print("-- Test with temperature of 0.5 --")
        print(m.generate(30, temperature=0.5, seq_seed=seed))
Example #15
File: 16-7.py  Project: DemonZeros/1book
def do_rnn(x_train,x_test,y_train,y_test):
    global n_words
    # Data preprocessing
    # Sequence padding
    print "GET n_words embedding %d" % n_words


    #x_train = pad_sequences(x_train, maxlen=100, value=0.)
    #x_test = pad_sequences(x_test, maxlen=100, value=0.)
    # Converting labels to binary vectors
    y_train = to_categorical(y_train, nb_classes=2)
    y_test = to_categorical(y_test, nb_classes=2)

    # Network building
    net = tflearn.input_data(shape=[None, 100, n_words])
    net = tflearn.lstm(net, 10, return_seq=True)
    net = tflearn.lstm(net, 10)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.1, name="output",
                             loss='categorical_crossentropy')

    # Training
    model = tflearn.DNN(net, tensorboard_verbose=3)
    model.fit(x_train, y_train, validation_set=(x_test, y_test), show_metric=True,
              batch_size=32, run_id="maidou")
Example #16
def main():
    load_vectors("./vectors.bin")
    init_seq()
    xlist = []
    ylist = []
    test_X = None
    #for i in range(len(seq)-100):
    for i in range(1000):
        sequence = seq[i:i+20]
        xlist.append(sequence)
        ylist.append(seq[i+20])
        if test_X is None:
            test_X = np.array(sequence)
            (match_word, max_cos) = vector2word(seq[i+20])
            print "right answer=", match_word, max_cos

    X = np.array(xlist)
    Y = np.array(ylist)
    net = tflearn.input_data([None, 20, 200])
    net = tflearn.lstm(net, 200)
    net = tflearn.fully_connected(net, 200, activation='linear')
    net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1,
                                     loss='mean_square')
    model = tflearn.DNN(net)
    model.fit(X, Y, n_epoch=1000, batch_size=1, snapshot_epoch=False, show_metric=True)
    model.save("model")
    predict = model.predict([test_X])
    # print(predict)
    # for v in test_X:
    #     print(vector2word(v))
    (match_word, max_cos) = vector2word(predict[0])
    print("predict=", match_word, max_cos)
Example #17
def train_repet_network(beat_spectrum_array, sdr_array, n_epochs, take):
    """

    :param beat_spectrum_array:
    :param sdr_array:
    :param n_epochs:
    :param take:
    :return:
    """
    beat_spec_len = 432
    with tf.Graph().as_default():
        input_layer = input_data(shape=[None, beat_spec_len, 1])
        conv1 = conv_1d(input_layer, 32, 4, activation="relu", regularizer="L2")
        max_pool1 = max_pool_1d(conv1, 2)
        conv2 = conv_1d(max_pool1, 64, 80, activation="relu", regularizer="L2")
        max_pool2 = max_pool_1d(conv2, 2)
        fully1 = fully_connected(max_pool2, 128, activation="relu")
        dropout1 = dropout(fully1, 0.8)
        fully2 = fully_connected(dropout1, 256, activation="relu")
        dropout2 = dropout(fully2, 0.8)
        linear = fully_connected(dropout2, 1, activation="linear")
        regress = tflearn.regression(linear, optimizer="rmsprop", loss="mean_square", learning_rate=0.001)

        # Training
        model = tflearn.DNN(regress)  # , session=sess)
        model.fit(
            beat_spectrum_array,
            sdr_array,
            n_epoch=n_epochs,
            snapshot_step=1000,
            show_metric=True,
            run_id="repet_choice_{0}_epochs_take_{1}".format(n_epochs, take),
        )

        return model
Example #18
def train_nmf_network(mfcc_array, sdr_array, n_epochs, take):
    """

    :param mfcc_array:
    :param sdr_array:
    :param n_epochs:
    :param take:
    :return:
    """
    with tf.Graph().as_default():
        network = input_data(shape=[None, 13, 100, 1])
        network = conv_2d(network, 32, [5, 5], activation="relu", regularizer="L2")
        network = max_pool_2d(network, 2)
        network = conv_2d(network, 64, [5, 5], activation="relu", regularizer="L2")
        network = max_pool_2d(network, 2)
        network = fully_connected(network, 128, activation="relu")
        network = dropout(network, 0.8)
        network = fully_connected(network, 256, activation="relu")
        network = dropout(network, 0.8)
        network = fully_connected(network, 1, activation="linear")
        regress = tflearn.regression(network, optimizer="rmsprop", loss="mean_square", learning_rate=0.001)

        # Training
        model = tflearn.DNN(regress)  # , session=sess)
        model.fit(
            mfcc_array,
            sdr_array,
            n_epoch=n_epochs,
            snapshot_step=1000,
            show_metric=True,
            run_id="repet_choice_{0}_epochs_take_{1}".format(n_epochs, take),
        )

        return model
Example #19
    def test_conv_layers(self):

        X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.], [1., 1., 1., 0.]]
        Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]

        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4])
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2, activation='relu')
            g = tflearn.max_pool_2d(g, 2)
            g = tflearn.fully_connected(g, 2, activation='softmax')
            g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

            m = tflearn.DNN(g)
            m.fit(X, Y, n_epoch=100, snapshot_epoch=False)
            # TODO: Fix test
            #self.assertGreater(m.predict([[1., 0., 0., 0.]])[0][0], 0.5)

        # Bulk Tests
        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4])
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2)
            g = tflearn.conv_2d(g, 4, 1)
            g = tflearn.conv_2d_transpose(g, 4, 2, [2, 2])
            g = tflearn.max_pool_2d(g, 2)
Example #20
def _model2():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    net = input_data(shape=[None, inputSize, inputSize, dim],
                 name='input',
                 data_preprocessing=img_prep,
                 data_augmentation=img_aug)
    n = 2
    j = 64
    '''
    net = tflearn.conv_2d(net, j, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.residual_block(net, n, j)
    net = tflearn.residual_block(net, 1, j*2, downsample=True)
    net = tflearn.residual_block(net, n-1, j*2)
    net = tflearn.residual_block(net, 1, j*4, downsample=True)
    net = tflearn.residual_block(net, n-1, j*4)
    net = tflearn.residual_block(net, 1, j*8, downsample=True)
    net = tflearn.residual_block(net, n-1, j*8)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    '''
    net = tflearn.conv_2d(net, j, 7, strides=2, regularizer='L2', weight_decay=0.0001)
    net = max_pool_2d(net, 2, strides=2)
    net = tflearn.residual_block(net, n, j)
    net = tflearn.residual_block(net, 1, j*2, downsample=True)
    net = tflearn.residual_block(net, n-1, j*2)
    net = tflearn.residual_block(net, 1, j*4, downsample=True)
    net = tflearn.residual_block(net, n-1, j*4)
    net = tflearn.residual_block(net, 1, j*8, downsample=True)
    net = tflearn.residual_block(net, n-1, j*8)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    net = tflearn.fully_connected(net, len(yTest[0]), activation='softmax')
    mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=mom,
                     loss='categorical_crossentropy')
    model = tflearn.DNN(net, checkpoint_path='model2_resnet',
                max_checkpoints=10, tensorboard_verbose=3, clip_gradients=0.)
    model.load(_path)
    pred = model.predict(xTest)

    df = pd.DataFrame(pred)
    df.to_csv(_path + ".csv")

    newList = pred.copy()
    newList = convert2(newList)
    if _CSV: makeCSV(newList)
    pred = convert2(pred)
    pred = convert3(pred)
    yTest = convert3(yTest)
    print(metrics.confusion_matrix(yTest, pred))
    print(metrics.classification_report(yTest, pred))
    print('Accuracy', accuracy_score(yTest, pred))
    print()
    if _wrFile: writeTest(pred)
Example #21
def yn_net():
    net = tflearn.input_data(shape=[None, img_rows, img_cols, 1])  # D = 256, 256
    net = tflearn.conv_2d(net, nb_filter=8, filter_size=3, activation='relu', name='conv0.1')
    net = tflearn.conv_2d(net, nb_filter=8, filter_size=3, activation='relu', name='conv0.2')
    net = tflearn.max_pool_2d(net, kernel_size=[2, 2], name='maxpool0')  # D = 128, 128
    net = tflearn.dropout(net, 0.75, name='dropout0')
    net = tflearn.conv_2d(net, nb_filter=16, filter_size=3, activation='relu', name='conv1.1')
    net = tflearn.conv_2d(net, nb_filter=16, filter_size=3, activation='relu', name='conv1.2')
    net = tflearn.max_pool_2d(net, kernel_size=[2, 2], name='maxpool1')  # D = 64, 64
    net = tflearn.dropout(net, 0.75, name='dropout1')
    net = tflearn.conv_2d(net, nb_filter=32, filter_size=3, activation='relu', name='conv2.1')
    net = tflearn.conv_2d(net, nb_filter=32, filter_size=3, activation='relu', name='conv2.2')
    net = tflearn.max_pool_2d(net, kernel_size=[2, 2], name='maxpool2')  # D = 32, 32
    net = tflearn.dropout(net, 0.75, name='dropout2')
    net = tflearn.conv_2d(net, nb_filter=32, filter_size=3, activation='relu', name='conv3.1')
    net = tflearn.conv_2d(net, nb_filter=32, filter_size=3, activation='relu', name='conv3.2')
    net = tflearn.max_pool_2d(net, kernel_size=[2, 2], name='maxpool3')  # D = 16, 16
    net = tflearn.dropout(net, 0.75, name='dropout3')
#    net = tflearn.conv_2d(net,nb_filter=64,filter_size=3, activation='relu', name='conv4.1')
#    net = tflearn.conv_2d(net,nb_filter=64,filter_size=3, activation='relu', name='conv4.2')
#    net = tflearn.max_pool_2d(net, kernel_size = [2,2], name='maxpool4') #D = 8 by 8
#    net = tflearn.dropout(net,0.75,name='dropout0')
    net = tflearn.fully_connected(net, n_units=128, activation='relu', name='fc1')
    net = tflearn.fully_connected(net, 2, activation='sigmoid')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001)
    model = tflearn.DNN(net, tensorboard_verbose=1, tensorboard_dir='/tmp/tflearn_logs/')
    return model
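yn_net() reads img_rows and img_cols from module scope; a minimal sketch of the assumed globals (taken from the "D = 256, 256" comment) and usage:

img_rows, img_cols = 256, 256  # assumed from the inline comment
model = yn_net()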
Example #22
def use_tflearn():
    import tflearn

    # Data loading and preprocessing
    import tflearn.datasets.mnist as mnist
    X, Y, testX, testY = mnist.load_data(one_hot=True)

    # Building deep neural network
    input_layer = tflearn.input_data(shape=[None, 784])
    dense1 = tflearn.fully_connected(input_layer, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001)
    dropout1 = tflearn.dropout(dense1, 0.8)
    dense2 = tflearn.fully_connected(dropout1, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001)
    dropout2 = tflearn.dropout(dense2, 0.8)
    softmax = tflearn.fully_connected(dropout2, 10, activation='softmax')

    # Regression using SGD with learning rate decay and Top-3 accuracy
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    top_k = tflearn.metrics.Top_k(3)
    net = tflearn.regression(softmax, optimizer=sgd, metric=top_k,
                             loss='categorical_crossentropy')

    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(X, Y, n_epoch=20, validation_set=(testX, testY),
              show_metric=True, run_id="dense_model")
Example #23
def model_for_type(neural_net_type, tile_size, on_band_count):
    """The neural_net_type can be: one_layer_relu,
                                   one_layer_relu_conv,
                                   two_layer_relu_conv."""
    network = tflearn.input_data(shape=[None, tile_size, tile_size, on_band_count])

    # NN architectures mirror ch. 3 of www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    if neural_net_type == "one_layer_relu":
        network = tflearn.fully_connected(network, 64, activation="relu")
    elif neural_net_type == "one_layer_relu_conv":
        network = conv_2d(network, 64, 12, strides=4, activation="relu")
        network = max_pool_2d(network, 3)
    elif neural_net_type == "two_layer_relu_conv":
        network = conv_2d(network, 64, 12, strides=4, activation="relu")
        network = max_pool_2d(network, 3)
        network = conv_2d(network, 128, 4, activation="relu")
    else:
        print("ERROR: exiting, unknown layer type for neural net")

    # classify as road or not road
    softmax = tflearn.fully_connected(network, 2, activation="softmax")

    # hyperparameters based on www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    momentum = tflearn.optimizers.Momentum(learning_rate=0.005, momentum=0.9, lr_decay=0.0002, name="Momentum")

    net = tflearn.regression(softmax, optimizer=momentum, loss="categorical_crossentropy")

    return tflearn.DNN(net, tensorboard_verbose=0)
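A usage sketch for the factory above; the tile size and band count here are illustrative assumptions:

model = model_for_type("two_layer_relu_conv", tile_size=64, on_band_count=3)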
Example #24
    def test_core_layers(self):

        X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
        Y_nand = [[1.], [1.], [1.], [0.]]
        Y_or = [[0.], [1.], [1.], [1.]]

        # Graph definition
        with tf.Graph().as_default():
            # Building a network with 2 optimizers
            g = tflearn.input_data(shape=[None, 2])

            # Nand operator definition
            g_nand = tflearn.fully_connected(g, 32, activation='linear')
            g_nand = tflearn.fully_connected(g_nand, 32, activation='linear')
            g_nand = tflearn.fully_connected(g_nand, 1, activation='sigmoid')
            g_nand = tflearn.regression(g_nand, optimizer='sgd',
                                        learning_rate=2.,
                                        loss='binary_crossentropy')
            # Or operator definition
            g_or = tflearn.fully_connected(g, 32, activation='linear')
            g_or = tflearn.fully_connected(g_or, 32, activation='linear')
            g_or = tflearn.fully_connected(g_or, 1, activation='sigmoid')
            g_or = tflearn.regression(g_or, optimizer='sgd',
                                      learning_rate=2.,
                                      loss='binary_crossentropy')
            # XOR merging Nand and Or operators
            g_xor = tflearn.merge([g_nand, g_or], mode='elemwise_mul')

            # Training
            m = tflearn.DNN(g_xor)
            m.fit(X, [Y_nand, Y_or], n_epoch=400, snapshot_epoch=False)

            # Testing
            self.assertLess(m.predict([[0., 0.]])[0][0], 0.01)
            self.assertGreater(m.predict([[0., 1.]])[0][0], 0.9)
            self.assertGreater(m.predict([[1., 0.]])[0][0], 0.9)
            self.assertLess(m.predict([[1., 1.]])[0][0], 0.01)

        # Bulk Tests
        with tf.Graph().as_default():
            net = tflearn.input_data(shape=[None, 2])
            net = tflearn.flatten(net)
            net = tflearn.reshape(net, new_shape=[-1])
            net = tflearn.activation(net, 'relu')
            net = tflearn.dropout(net, 0.5)
            net = tflearn.single_unit(net)
Example #25
    def simple_learn(self):
        tflearn.init_graph()
        net = tflearn.input_data(shape=[None, 64, 64, 3])
        net = tflearn.fully_connected(net, 64)
        net = tflearn.dropout(net, .5)
        net = tflearn.fully_connected(net, 10, activation='softmax')
        net = tflearn.regression(net, optimizer='adam', loss='softmax_categorical_crossentropy')
        model = tflearn.DNN(net)
        model.fit(self.trainset, self.trainlabels)
Example #26
def define_dnn_topology(input_num, first_layer, second_layer):
    g = tflearn.input_data(shape=[None, input_num])
    g = tflearn.fully_connected(g, first_layer, activation='linear')
    g = tflearn.fully_connected(g, second_layer, activation='linear')
    g = tflearn.fully_connected(g, 1, activation='sigmoid')
    g = tflearn.regression(g, optimizer='sgd', learning_rate=2., loss='mean_square')
    return g
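A usage sketch with illustrative layer sizes, wrapping the returned graph in a DNN as the other examples on this page do:

g = define_dnn_topology(input_num=2, first_layer=32, second_layer=32)
m = tflearn.DNN(g)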
Example #27
def generate_net(embedding):
    net = tflearn.input_data([None, 200])
    net = tflearn.embedding(net, input_dim=300000, output_dim=128)
    net = tflearn.lstm(net, 128)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy')
    return net
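A minimal usage sketch; the embedding argument is unused by generate_net as shown, so None is passed purely as a placeholder:

net = generate_net(None)
model = tflearn.DNN(net)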
Example #28
    def run(self):

        # Real-time pre-processing of the image data
        img_prep = ImagePreprocessing()
        img_prep.add_featurewise_zero_center()
        img_prep.add_featurewise_stdnorm()

        # Real-time data augmentation
        img_aug = tflearn.ImageAugmentation()
        img_aug.add_random_flip_leftright()

        # Resnet model below:  Adapted from tflearn website
        self.n = 5  # n = 5 gives a 32-layer ResNet

        # Building Residual Network
        net = tflearn.input_data(shape=[None, 48, 48, 1], data_preprocessing=img_prep, data_augmentation=img_aug)
        net = tflearn.conv_2d(net, nb_filter=16, filter_size=3, regularizer='L2', weight_decay=0.0001)
        net = tflearn.residual_block(net, self.n, 16)
        net = tflearn.residual_block(net, 1, 32, downsample=True)
        net = tflearn.residual_block(net, self.n - 1, 32)
        net = tflearn.residual_block(net, 1, 64, downsample=True)
        net = tflearn.residual_block(net, self.n - 1, 64)
        net = tflearn.batch_normalization(net)
        net = tflearn.activation(net, 'relu')
        net = tflearn.global_avg_pool(net)

        # Regression
        net = tflearn.fully_connected(net, 7, activation='softmax')
        mom = tflearn.Momentum(learning_rate=0.1, lr_decay=0.0001, decay_step=32000, staircase=True, momentum=0.9)
        net = tflearn.regression(net, optimizer=mom,
                                 loss='categorical_crossentropy')

        self.model = tflearn.DNN(net, checkpoint_path='models/model_resnet_emotion',
                            max_checkpoints=10, tensorboard_verbose=0,
                            clip_gradients=0.)

        self.model.load('model.tfl')

        face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
        cap = cv2.VideoCapture(0)

        # Main loop: capture the live webcam feed, crop each detected face, and run emotion recognition on the trained model
        while True:
            ret, img = cap.read()
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
                roi_gray = gray[y:y + h, x:x + w]
                roi_color = img[y:y + h, x:x + w]
                self.image_processing(roi_gray, img)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        cap.release()
        cv2.destroyAllWindows()
Example #29
def main():
    pickle_folder = '../pickles_rolloff'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder) if os.path.isdir(join(pickle_folder, f))]
    pickle_folders_to_load = sorted(pickle_folders_to_load)

    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'sim_mat'
    beat_spec_len = 432

    # training params
    n_classes = 16
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00


    # set up training, testing, & validation partitions
    print('Loading sim_mat and sdrs')
    sim_mat_array, sdr_array = get_generated_data(feature, fg_or_bg, sdr_type)
    print('sim_mat and sdrs loaded')

    print('splitting and grooming data')
    train, test, validate = split_into_sets(len(pickle_folders_to_load), training_percent,
                                            testing_percent, validation_percent)

    trainX = np.expand_dims([sim_mat_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([sim_mat_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])

    print('setting up CNN')
    # Building convolutional network
    network = input_data(shape=[None, beat_spec_len, beat_spec_len, 1])
    network = conv_2d(network, 32, 10, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 20, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='sgd', loss='mean_square', learning_rate=0.01)

    print('running CNN')
    # Training
    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=10,
              snapshot_step=1000, show_metric=True, run_id='{} classes'.format(n_classes - 1))

    predicted = np.array(model.predict(testX))[:,0]

    print('plotting')
    plot(testY, predicted)
Example #30
def create_net(in_sx, in_sy, out_sx):
	"""
	Creates a tflearn neural network with the correct
	architecture for learning to hear the keyword
	"""
	net = tflearn.input_data([None, in_sx, in_sy])
	net = tflearn.lstm(net, lstm_size, dropout=lstm_dropout)
	net = tflearn.fully_connected(net, out_sx, activation='softmax')
	net = tflearn.regression(net, learning_rate=learning_rate, optimizer='adam', loss='categorical_crossentropy')
	return net
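create_net depends on module-level hyperparameters that this snippet does not show; a sketch with assumed values:

lstm_size = 128        # assumption
lstm_dropout = 0.8     # assumption
learning_rate = 0.001  # assumption
net = create_net(in_sx=20, in_sy=13, out_sx=2)  # illustrative input/output dims
model = tflearn.DNN(net)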
Example #31
    def model(self,
              mode="train",
              num_layers=1,
              cell_size=32,
              cell_type="BasicLSTMCell",
              embedding_size=20,
              learning_rate=0.0001,
              tensorboard_verbose=0,
              checkpoint_path=None):
        '''
        Build tensor specifying graph of operations for the seq2seq neural network model.
        mode = string, either "train" or "predict"
        cell_type = attribute of rnn_cell specifying which RNN cell type to use
        cell_size = size for the hidden layer in the RNN cell
        num_layers = number of RNN cell layers to use
        Return TFLearn model instance.  Use DNN model for this.
        '''
        assert mode in ["train", "predict"]

        checkpoint_path = checkpoint_path or (
            "%s%ss2s_checkpoint.tfl" %
            (self.data_dir or "", "/" if self.data_dir else ""))
        GO_VALUE = self.out_max_int + 1  # unique integer value used to trigger decoder outputs in the seq2seq RNN

        network = tflearn.input_data(
            shape=[None, self.in_seq_len + self.out_seq_len],
            dtype=tf.int32,
            name="XY")
        encoder_inputs = tf.slice(network, [0, 0], [-1, self.in_seq_len],
                                  name="enc_in")  # get encoder inputs
        encoder_inputs = tf.unstack(
            encoder_inputs, axis=1
        )  # transform into list of self.in_seq_len elements, each [-1]

        decoder_inputs = tf.slice(network, [0, self.in_seq_len],
                                  [-1, self.out_seq_len],
                                  name="dec_in")  # get decoder inputs
        decoder_inputs = tf.unstack(
            decoder_inputs, axis=1
        )  # transform into list of self.out_seq_len elements, each [-1]

        go_input = tf.multiply(
            tf.ones_like(decoder_inputs[0], dtype=tf.int32), GO_VALUE
        )  # insert "GO" symbol as the first decoder input; drop the last decoder input
        decoder_inputs = [
            go_input
        ] + decoder_inputs[:self.out_seq_len -
                           1]  # insert GO as first; drop last decoder input

        feed_previous = not (mode == "train")

        if self.verbose > 3:
            print("feed_previous = %s" % str(feed_previous))
            print("encoder inputs: %s" % str(encoder_inputs))
            print("decoder inputs: %s" % str(decoder_inputs))
            print("len decoder inputs: %s" % len(decoder_inputs))

        self.n_input_symbols = self.in_max_int + 1  # default is integers from 0 to 9
        self.n_output_symbols = self.out_max_int + 2  # extra "GO" symbol for decoder inputs

        single_cell = getattr(rnn_cell, cell_type)(cell_size,
                                                   state_is_tuple=True)
        if num_layers == 1:
            cell = single_cell
        else:
            cell = rnn_cell.MultiRNNCell([single_cell] * num_layers)

        if self.seq2seq_model == "embedding_rnn":
            model_outputs, states = seq2seq.embedding_rnn_seq2seq(
                encoder_inputs,  # encoder_inputs: A list of 2D Tensors [batch_size, input_size].
                decoder_inputs,
                cell,
                num_encoder_symbols=self.n_input_symbols,
                num_decoder_symbols=self.n_output_symbols,
                embedding_size=embedding_size,
                feed_previous=feed_previous)
        elif self.seq2seq_model == "embedding_attention":
            model_outputs, states = seq2seq.embedding_attention_seq2seq(
                encoder_inputs,  # encoder_inputs: A list of 2D Tensors [batch_size, input_size].
                decoder_inputs,
                cell,
                num_encoder_symbols=self.n_input_symbols,
                num_decoder_symbols=self.n_output_symbols,
                embedding_size=embedding_size,
                num_heads=1,
                initial_state_attention=False,
                feed_previous=feed_previous)
        else:
            raise Exception('[TFLearnSeq2Seq] Unknown seq2seq model %s' %
                            self.seq2seq_model)

        tf.add_to_collection(
            tf.GraphKeys.LAYER_VARIABLES + '/' + "seq2seq_model",
            model_outputs)  # for TFLearn to know what to save and restore

        # model_outputs: list of the same length as decoder_inputs of 2D Tensors with shape [batch_size x output_size] containing the generated outputs.
        if self.verbose > 2: print("model outputs: %s" % model_outputs)
        network = tf.stack(
            model_outputs, axis=1
        )  # shape [-1, n_decoder_inputs (= self.out_seq_len), num_decoder_symbols]
        if self.verbose > 2: print("packed model outputs: %s" % network)

        if self.verbose > 3:
            all_vars = tf.get_collection(tf.GraphKeys.VARIABLES)
            print("all_vars = %s" % all_vars)

        with tf.name_scope(
                "TargetsData"
        ):  # placeholder for target variable (i.e. trainY input)
            targetY = tf.placeholder(shape=[None, self.out_seq_len],
                                     dtype=tf.int32,
                                     name="Y")

        network = tflearn.regression(network,
                                     placeholder=targetY,
                                     optimizer='adam',
                                     learning_rate=learning_rate,
                                     loss=self.sequence_loss,
                                     metric=self.accuracy,
                                     name="Y")

        model = tflearn.DNN(network,
                            tensorboard_verbose=tensorboard_verbose,
                            checkpoint_path=checkpoint_path)
        return model
Example #32
inputs_drop = tflearn.dropout(inputs, keeping_rate)
hidden1 = tflearn.fully_connected(inputs_drop,
                                  n_hidden1,
                                  activation='relu',
                                  name='hidden1')
hidden1_drop = tflearn.dropout(hidden1, keeping_rate)

hidden2 = tflearn.fully_connected(hidden1_drop,
                                  n_hidden2,
                                  activation='relu',
                                  name='hidden2')
hidden2_drop = tflearn.dropout(hidden2, keeping_rate)

softmax = tflearn.fully_connected(hidden2_drop,
                                  n_outputs,
                                  activation='softmax',
                                  name='output')
net = tflearn.regression(softmax)

# Create the model object
model = tflearn.DNN(
    net, checkpoint_path="train_model/hidden(8,20,24,9)_Dropout.ckpt")

# Train the model
model.fit(x_train,
          y_train,
          validation_set=None,
          n_epoch=n_epochs,
          batch_size=batch_size)

model.save("드롭아웃을 적용시켜 학습을 하는 코드")
Example #33
File: rtb_tflearn.py  Project: Pirolf/RTB
                                 weights_init=tflearn.initializations.normal(),
                                 regularizer='L2',
                                 weight_decay=0.01)
dense4 = tflearn.fully_connected(dense3,
                                 16,
                                 activation='relu',
                                 weights_init=tflearn.initializations.normal(),
                                 regularizer='L2',
                                 weight_decay=0.01)
bn2 = tflearn.batch_normalization(dense4)
dropout2 = tflearn.dropout(bn2, 0.9)
softmax = tflearn.fully_connected(dropout2, 2, activation='softmax')

# Regression using SGD with learning rate decay
sgd = tflearn.SGD(learning_rate=0.01, lr_decay=0.95, decay_step=20)
net = tflearn.regression(softmax, optimizer=sgd, loss='roc_auc_score')
input_path = '~/data/biddings.csv'
data = pd.read_csv(input_path)
print(data.shape)

train = data[:800000]
test = data[800000:]

sample = train.sample(frac=1)
features = sample.drop('convert', axis=1).values
labels_1d = sample.convert.ravel()
labels = to_categorical(labels_1d)

test_features = test.drop('convert', axis=1).values
test_labels_1d = test.convert.ravel()
test_labels = to_categorical(test.convert.ravel())
Example #34
from __future__ import division, print_function, absolute_import

import tflearn
import h5py


def load_h5(filename):
    h5f = h5py.File(filename, 'r')
    X = h5f['X']
    Y = h5f['Y']
    return X, Y

X, Y = load_h5('train.h5')
testX, testY = load_h5('test.h5')
input_layer = tflearn.input_data(shape=[None, 25, 36, 64, 3])
out = tflearn.fully_connected(input_layer, 5, activation='sigmoid')
net = tflearn.regression(out, optimizer='adam',
                         loss='mean_square', learning_rate=1e-3)

model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(X, Y, n_epoch=50, validation_set=(testX, testY),
          show_metric=False, run_id="dense_model")
Example #35
    output_row[labels.index(docs_y[x])] = 1

    training.append(bag)
    output.append(output_row)

training = numpy.array(training)
output = numpy.array(output)

print(len(output[0]))

net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net, optimizer='adam', metric='accuracy')

model = tflearn.DNN(net,
                    tensorboard_verbose=3,
                    tensorboard_dir='/tmp/tflearn_logs/')
model.load("model/model.tflearn_indo_scrape_fix")
# model.fit(training, output, n_epoch=1500, batch_size=8, show_metric=True)
# model.save("model/model.tflearn_indo_scrape_7")


def bag_of_words(s, words):
    bag = [0 for _ in range(len(words))]

    s_words = nltk.word_tokenize(s)
    s_words = [stemmer.stem(word.lower()) for word in s_words]
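The snippet is cut off here; following the standard bag-of-words pattern this function implements, the remainder presumably marks the matching word positions and returns the bag (a reconstruction, not the original code):

    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1

    return numpy.array(bag)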
Example #36
ctrl = ctrl_net(input_map, input_state)
value_example = value_net(input_map, input_state, input_example_ctrl)
value = value_net(input_map, input_state, ctrl, reuse=True)

# score (the higher, the better)
y_example = tf.placeholder(tf.float32, [None, 1])

value_loss = tf.reduce_mean(tf.square(y_example - value_example))
ctrl_loss = tf.reduce_mean(tf.square(1 - value))

value_vars = tfl.get_layer_variables_by_scope('value')
value_reg = tfl.regression(value_example,
                           placeholder=y_example,
                           optimizer='adam',
                           loss=value_loss,
                           trainable_vars=value_vars,
                           batch_size=512,
                           name='target_value',
                           op_name='value_reg')

ctrl_vars = tfl.get_layer_variables_by_scope('ctrl')
ctrl_reg = tfl.regression(value,
                          placeholder=None,
                          optimizer='adam',
                          loss=ctrl_loss,
                          trainable_vars=ctrl_vars,
                          batch_size=512,
                          name='target_ctrl',
                          op_name='ctrl_reg')

#model_value_example=tfl.DNN(value_example)
Example #37
File: tf.py  Project: PythonOrR/CISDI_
    total=0
    for i in range(len(a)):
        if label_data[argsort(-a[i])[0]]==1:
            sum_1+=1
            total+=1
        else:
            total+=1
    return sum_1/total
num_mat, categories_mat, label_data, combine_mat = np.process_data()
x_train, x_test, y_train, y_test = train_test_split(combine_mat, label_data, test_size=0.2, random_state=42)
tf.app.flags.DEFINE_integer('epochs', 10, 'Training epochs')
FLAGS = tf.app.flags.FLAGS
n_features = combine_mat.shape[1]
input = tflearn.input_data([None, n_features])
network = tflearn.layers.fully_connected(input, 2000, activation='relu')
network = tflearn.dropout(network, 0.5)
network = tflearn.layers.fully_connected(network, 2000, activation='relu')
net = tflearn.dropout(network, 0.5)
y_pred = tflearn.layers.fully_connected(net, 4, activation='softmax')
net = tflearn.regression(y_pred, optimizer='adam', loss='categorical_crossentropy')
model = tflearn.DNN(net)
model.fit(x_train, y_train, validation_set=0.1, n_epoch=FLAGS.epochs)
# metric = model.evaluate(x_test, y_test)
predict_y = model.predict(x_test)
# test
a = array(predict_y)
print(calculate_accuracy())



Example #38
    lr = 0.0001

    net = tflearn.input_data(placeholder=x)
    net = tflearn.embedding(net, input_dim=21, output_dim=32, weights_init='xavier')
    net = tflearn.fully_connected(net, 100, activation='prelu')
    net = tflearn.layers.normalization.batch_normalization(net)
    net = tflearn.dropout(net, 0.1)
    net = tflearn.fully_connected(net, 1, activation='sigmoid')

    loss = tf.reduce_mean(tf.square(net - y_))
    accuracy = tf.contrib.metrics.streaming_root_mean_squared_error(net, y_)
    optimizer = tf.train.RMSPropOptimizer(lr)

    # trainop = tflearn.TrainOp(loss=loss, optimizer=optimizer, metric=accuracy[0], batch_size=batch_size)

    net = tflearn.regression(net, optimizer=optimizer, loss=loss, metric=None)
    model = tflearn.DNN(net, tensorboard_verbose=3)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        tflearn.is_training(True, session=sess)

        for step in range(epochs):
            total_batch = int(xTr.shape[0] / batch_size)

            for i in range(total_batch):
                batch_x, batch_y = mhcPreds.utils.get_batch(xTr, yTr, batch_size)
                # batch_y = numpy.reshape(batch_y, (batch_x.shape[0], 1))

                sess.run(train_op, feed_dict={x: batch_x, y_: batch_y})
Example #39
dropout1 = tflearn.dropout(dense1, 0.8)

dense2 = tflearn.fully_connected(dropout1, 500, activation='relu')
dropout2 = tflearn.dropout(dense2, 0.8)

#dense3 = tflearn.fully_connected(dense2, 100, activation='sigmoid',regularizer='L2', weight_decay=0.001)
#dropout3 = tflearn.dropout(dense3, 0.8)

softmax = tflearn.fully_connected(dropout2, 10, activation='softmax')

# Regression using SGD with learning rate decay and Top-3 accuracy
sgd = tflearn.SGD(learning_rate=0.001, lr_decay=0.96, decay_step=1000)
top_k = tflearn.metrics.Top_k(3)

net = tflearn.regression(softmax,
                         optimizer=sgd,
                         metric=top_k,
                         loss='categorical_crossentropy')

# Training
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(train_x_data,
          train_y_data,
          n_epoch=30,
          validation_set=(test_x_data, test_y_data),
          snapshot_step=500,
          show_metric=True,
          run_id="dense model")
#model.save('speech_recognition_numbers.tflearn')
##print(val_val)
##va=np.argmax(model.predict(val))
##print(va)
Example #40
import tflearn
import tflearn.datasets.mnist as mnist

# Loading data and labels for training and testing and reshaping data
x, y, testx, testy = mnist.load_data(one_hot=True)
x = x.reshape([-1, 28, 28, 1])

# Input Layer
net = tflearn.input_data(shape=[None, 28, 28, 1])

# Deep Layers
net = tflearn.fully_connected(net, 500, activation='relu')
net = tflearn.fully_connected(net, 500, activation='relu')
net = tflearn.fully_connected(net, 500, activation='relu')

# Output layer (one node per class)
net = tflearn.fully_connected(net, 10, activation='softmax')

# Define the regression layer (optimizer, loss, and training op)
net = tflearn.regression(net, n_classes=10, batch_size=100)

# Define and fit model
model = tflearn.DNN(net)
model.fit(x, y, n_epoch=2, show_metric=True)
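The test split loaded at the top of this example is never used; a short evaluation sketch under the same reshape assumption:

testx = testx.reshape([-1, 28, 28, 1])
print(model.evaluate(testx, testy))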
Example #41
def main():

    data, labels = load_csv(argv[1])

    data = np.array(data)
    data = data.astype(float)
    data_test = data
    data -= np.mean(data)
    data /= np.max(data)
    labels = np.array(labels)
    labels = labels.astype(float)
    label_test = labels

    net = tflearn.input_data(shape=[None, 13])
    net = tflearn.fully_connected(net, 32)
    net = tflearn.fully_connected(net, 32)

    net = tflearn.fully_connected(net, 1, activation='relu')
    adam = Adam(learning_rate=0.001, beta1=0.99)
    net = tflearn.regression(net,
                             optimizer=adam,
                             loss='mean_square',
                             metric=R2())

    # Define model
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.load(argv[2] + ".tflearn")

    prediction = model.predict(data_test)

    df = pd.read_csv('realTime_data.csv', usecols=['Resource_List.walltime'])
    userPredicted = np.array(df.values)
    userPredicted = userPredicted.astype(float)

    label_test = np.reshape(label_test, (-1, 1))
    difference_algo = np.subtract(prediction, label_test)
    difference_user = np.subtract(userPredicted, label_test)

    acc = 0

    for x in np.nditer(difference_algo):
        if (x >= 0):
            acc += 1

    print("Algorithm Accuracy is " +
          "{:.9f}".format(float(acc / float(difference_algo.size))))
    print("Algorithm STD is " +
          "{:.9f}".format(float(np.std(difference_algo) / 3600.0)))

    acc = 0
    for x in np.nditer(difference_user):
        if (x >= 0):
            acc += 1

    print("User Accuracy is " +
          "{:.9f}".format(float(acc / float(difference_user.size))))
    print("Standard Deviation is " +
          "{:.9f}".format(float(np.std(difference_user) / 3600.0)))

    difference_algo /= 3600.0
    difference_user /= 3600.0

    for diff_user, diff_algo, actualRun in zip(difference_user,
                                               difference_algo, label_test):
        actualRun = actualRun / 3600.0
        handleRange(actualRun, diff_user, diff_algo)

    myFig = plt.figure()
    ax = myFig.add_subplot(111)

    width = 0.75
    ind = np.arange(0, 24, 4)
    margin = 0.3

    rects1 = ax.bar(ind,
                    np.array([
                        ZeroToTwo['Positive User Prediction'],
                        TwoToFour['Positive User Prediction'],
                        FourToSix['Positive User Prediction'],
                        SixToEight['Positive User Prediction'],
                        EightToTen['Positive User Prediction'],
                        MoreThanTen['Positive User Prediction']
                    ]),
                    width=width,
                    color='r',
                    align='center')
    rects2 = ax.bar(ind + width + margin,
                    np.array([
                        ZeroToTwo['Positive Algorithm Prediction'],
                        TwoToFour['Positive Algorithm Prediction'],
                        FourToSix['Positive Algorithm Prediction'],
                        SixToEight['Positive Algorithm Prediction'],
                        EightToTen['Positive Algorithm Prediction'],
                        MoreThanTen['Positive Algorithm Prediction']
                    ]),
                    width=width,
                    color='b',
                    align='center')

    rects3 = ax.bar(ind + width * 2,
                    np.array([
                        ZeroToTwo['Negative User Prediction'],
                        TwoToFour['Negative User Prediction'],
                        FourToSix['Negative User Prediction'],
                        SixToEight['Negative User Prediction'],
                        EightToTen['Negative User Prediction'],
                        MoreThanTen['Negative User Prediction']
                    ]),
                    width=width,
                    color='g',
                    align='center')
    rects4 = ax.bar(ind + width * 3,
                    np.array([
                        ZeroToTwo['Negative Algorithm Prediction'],
                        TwoToFour['Negative Algorithm Prediction'],
                        FourToSix['Negative Algorithm Prediction'],
                        SixToEight['Negative Algorithm Prediction'],
                        EightToTen['Negative Algorithm Prediction'],
                        MoreThanTen['Negative Algorithm Prediction']
                    ]),
                    width=width,
                    color='y',
                    align='center')

    ax.set_ylabel('Frequency of occurrences')
    ax.set_xlabel('Time range')
    ax.set_xticks(ind + width)
    ax.set_title('Predictions vs Actual Running Time in terms of time range')
    ax.set_xticklabels(('0-2h', '2h-4h', '4h-6h', '6h-8h', '8h-10h', '>10h'))
    ax.legend((rects1[0], rects2[0], rects3[0], rects4[0]),
              ('Positive User Prediction', 'Positive Algorithm Prediction',
               'Negative User Prediction', 'Negative Algorithm Prediction'))

    # To label each column data (if necessary)

    #    def autolabel(rects, message):
    #        shift = 0
    #        if (message == 'right'):
    #            shift = 0.13
    #
    #        for rect in rects:
    #            h = rect.get_height()
    #            ax.text(rect.get_x() + rect.get_width() / 2. + shift, h + 5, '%d' % int(h),
    #                    ha=message, va='bottom', fontsize = 8.5)

    # autolabel(rects1, 'center')
    # autolabel(rects2, 'center')
    # autolabel(rects3, 'center')
    # autolabel(rects4, 'center')

    myFig.savefig('running time predictions.png')
    plt.show()
def build_and_run_model(soundList,
                        maxlen,
                        sound_idx,
                        distinct_sounds,
                        seq_len=50,
                        out_file='model.tflearn',
                        iters=20,
                        durations=False):
    """ Build and create sequences with different temperatures (measure of "innovation")"""
    tf.reset_default_graph()
    with tf.Graph().as_default():
        net = tflearn.input_data([None, maxlen, len(sound_idx)])
        net = tflearn.lstm(net, 512, return_seq=True)
        net = tflearn.dropout(net, 0.5)
        net = tflearn.lstm(net, 512, return_seq=True)
        net = tflearn.dropout(net, 0.5)
        net = tflearn.lstm(net, 512)
        net = tflearn.fully_connected(net,
                                      len(sound_idx),
                                      activation='softmax')
        net = tflearn.regression(net,
                                 optimizer='adam',
                                 loss='categorical_crossentropy',
                                 learning_rate=0.001)
        model = tflearn.SequenceGenerator(net,
                                          dictionary=sound_idx,
                                          seq_maxlen=maxlen,
                                          clip_gradients=5.0,
                                          tensorboard_verbose=3)


#    X = np.zeros((len(primary_sounds), maxlen, len(distinct_sounds)),
#                             dtype=np.bool)
#    y = np.zeros((len(primary_sounds), len(distinct_sounds)), dtype=np.bool)
#    for i, chunk in enumerate(primary_sounds):
#
#        for t, sound in enumerate(chunk):
#            X[i, t, sound_idx[sound]] = 1
#        y[i, sound_idx[next_sounds[i]]] = 1
#    for i in range(0, len(primary_sounds)-batch, batch):
#            X = np.zeros((batch, maxlen, len(distinct_sounds)),

#                             dtype=np.bool)
#            y = np.zeros((batch, len(distinct_sounds)), dtype=np.bool)
#            for i, chunk in enumerate(primary_sounds[i:i+batch]):
#                for t, sound in enumerate(chunk):
#                    X[i, t, sound_idx[sound]] = 1
#                y[i, sound_idx[next_sounds[i]]] = 1
    load = False
    big_batch = 2 * 42870
    for i in range(0, len(soundList), big_batch):
        X, y = preprocess(soundList[i:i + big_batch], distinct_sounds, maxlen,
                          sound_idx)
        if not load:
            model.fit(X, y, validation_set=0.1, batch_size=128, n_epoch=1)
            #model.save(out_file)
        else:
            pass
            #model = model.load(out_file)
    print('Trained')
    temperatures_low = []
    temperatures_high = []
    temperatures_mid = []
    for i in range(iters):
        seed = select_random_seed(soundList, maxlen)
        temperatures_low.append(
            model.generate(seq_len, temperature=0.1, seq_seed=seed))
        temperatures_mid.append(
            model.generate(seq_len, temperature=0.5, seq_seed=seed))
        temperatures_high.append(
            model.generate(seq_len, temperature=1, seq_seed=seed))
        #        temperatures_high.append(model.generate(seq_len, temperature=1,
        #                                                seq_seed=seed))
        print(i)
    print('Sequences generated')
    return temperatures_high, temperatures_low, temperatures_mid
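
# Hypothetical usage sketch: soundList, maxlen, sound_idx and distinct_sounds
# are assumed to come from the caller's (elided) preprocessing step.
highs, lows, mids = build_and_run_model(soundList, maxlen, sound_idx,
                                        distinct_sounds, seq_len=50, iters=5)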
Example #43
disc_real = discriminator(disc_input)
disc_fake = discriminator(gen_sample, reuse=True)

# Define Loss
disc_loss = -tf.reduce_mean(tf.log(disc_real) + tf.log(1. - disc_fake))
gen_loss = -tf.reduce_mean(tf.log(disc_fake))

# Build training ops for both the generator and the discriminator.
# Each optimization step should only update its own network's variables, so we
# retrieve each network's variables (with get_layer_variables_by_scope) and set
# 'placeholder=None' because we do not need to feed any target.
gen_vars = tflearn.get_layer_variables_by_scope('Generator')
gen_model = tflearn.regression(gen_sample,
                               placeholder=None,
                               optimizer='adam',
                               loss=gen_loss,
                               trainable_vars=gen_vars,
                               batch_size=64,
                               name='target_gen',
                               op_name='GEN')
disc_vars = tflearn.get_layer_variables_by_scope('Discriminator')
disc_model = tflearn.regression(disc_real,
                                placeholder=None,
                                optimizer='adam',
                                loss=disc_loss,
                                trainable_vars=disc_vars,
                                batch_size=64,
                                name='target_disc',
                                op_name='DISC')
# Define GAN model, that output the generated images.
gan = tflearn.DNN(gen_model)
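
# Training sketch following the official TFLearn GAN example; gen_input,
# disc_input, z_dim, total_samples and the real-image array X are assumed
# from the elided setup above (numpy imported as np).
z = np.random.uniform(-1., 1., size=[total_samples, z_dim])
gan.fit(X_inputs={gen_input: z, disc_input: X},
        Y_targets=None,
        n_epoch=100)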
Example #44
import tflearn
import pandas as pd
from sklearn.preprocessing import OneHotEncoder

df = pd.read_csv("iris.csv")
X = df.iloc[:, :-1].values
y = df.iloc[:, -1].values

# convert y into one-hot encoded format
y = y.reshape(-1, 1)
enc = OneHotEncoder()
enc.fit(y)
Y = enc.transform(y).toarray()

net = tflearn.input_data(shape=[None, 4])
net = tflearn.fully_connected(net, 64)
#net = tflearn.dropout(net, 0.5)
net = tflearn.fully_connected(net, 128)
net = tflearn.fully_connected(net, 3, activation='softmax')  # softmax for 3-class output (sigmoid does not normalize across classes)
net = tflearn.regression(net,
                         optimizer='sgd',
                         loss='categorical_crossentropy',
                         metric='accuracy')

model = tflearn.DNN(net)
model.fit(X, Y, show_metric=True, n_epoch=500)
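
# A short prediction sketch (not part of the original snippet): compare the
# predicted and true classes for the first few samples.
import numpy as np
pred = np.array(model.predict(X[:5]))
print("Predicted:", np.argmax(pred, axis=1))
print("Actual:   ", np.argmax(Y[:5], axis=1))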
Example #45
def createmodel():
    with open("intentschat.json") as file:
        data = json.load(file)

    try:
        with open("data.pickle", "rb") as f:
            words, labels, training, output = pickle.load(f)
    except:
        words = []
        labels = []
        docs_x = []
        docs_y = []

        for intent in data["intents"]:
            for pattern in intent["patterns"]:
                wrds = nltk.word_tokenize(pattern)
                words.extend(wrds)
                docs_x.append(wrds)
                docs_y.append(intent["tag"])

            if intent["tag"] not in labels:
                labels.append(intent["tag"])

        words = [stemmer.stem(w.lower()) for w in words if w != "?"]
        words = sorted(list(set(words)))

        labels = sorted(labels)

        training = []
        output = []

        out_empty = [0 for _ in range(len(labels))]

        for x, doc in enumerate(docs_x):
            bag = []

            wrds = [stemmer.stem(w.lower()) for w in doc]

            for w in words:
                if w in wrds:
                    bag.append(1)
                else:
                    bag.append(0)

            output_row = out_empty[:]
            output_row[labels.index(docs_y[x])] = 1

            training.append(bag)
            output.append(output_row)


        training = numpy.array(training)
        output = numpy.array(output)

        with open("data.pickle", "wb") as f:
            pickle.dump((words, labels, training, output), f)

    tensorflow.reset_default_graph()

    net = tflearn.input_data(shape=[None, len(training[0])])
    net = tflearn.fully_connected(net, 8)
    net = tflearn.fully_connected(net, 8)
    net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
    net = tflearn.regression(net)

    model = tflearn.DNN(net)

    # try:
    model.load("model.tflearn")
    # except:
    #     model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
    #     model.save("model.tflearn")
    return model, words, labels, data
Example #46
random.shuffle(training)
training = np.array(training)


# create train and test lists
train_x = list(training[:, 0])
train_y = list(training[:, 1])

# reset underlying graph data
tf.reset_default_graph()
# Build neural network
net = tflearn.input_data(shape=[None, len(train_x[0])])
net = tflearn.fully_connected(net, 8)  # first hidden layer
net = tflearn.fully_connected(net, 8)  # second hidden layer
net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')  # softmax output over the classes
net = tflearn.regression(net)  # regression layer with the default optimizer and loss

# Define model and setup tensorboard
model = tflearn.DNN(net, tensorboard_dir='tflearn_logs')
# Start training (apply gradient descent algorithm)
# Tune the hyperparameters as needed
model.fit(train_x, train_y, n_epoch=1000, batch_size=8, show_metric=True)
model.save('model.tflearn')

pickle.dump({
    'words': words,
    'classes': classes,
    'train_x': train_x,
    'train_y': train_y
}, open('training_data', 'wb'))
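
# Hypothetical counterpart sketch for a later session: rebuild the same network
# as above, then restore the saved data and the trained weights.
data = pickle.load(open('training_data', 'rb'))
words, classes = data['words'], data['classes']
train_x, train_y = data['train_x'], data['train_y']
model.load('model.tflearn')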
Example #47
        output.append(output_row)


    training = numpy.array(training)
    output = numpy.array(output)

    with open("data.pickle", "wb") as f:
        pickle.dump((words, labels, training, output), f)

tensorflow.reset_default_graph()

net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)

model = tflearn.DNN(net)

try:
    x  # undefined name: raises NameError so the except branch retrains the model
    # model.load("model.tflearn")  # swap this in (and drop the line above) to load a saved model instead
except:
    model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
    model.save("model.tflearn")


def bag_of_words(s, words):
    bag = [0 for _ in range(len(words))]

    s_words = nltk.word_tokenize(s)
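    # Completion sketch (the original snippet is cut off here), assuming the
    # same stem-and-match construction used for the training bags above:
    s_words = [stemmer.stem(word.lower()) for word in s_words]
    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1
    return numpy.array(bag)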
Example #48
#train_X = min_max_scaler.fit_transform(train_X)
X = np.split(train_X, 2353, 0)
train_Y = dataset[1:2354, :]
#train_Y = train_Y + train_noise
Y = np.reshape(train_Y, [-1, 5])

# """"""""""""""""""""""""Network building""""""""""""""""""""""""
net = tflearn.input_data(shape=[None, 1, 5])
#net = tflearn.embedding(net, input_dim=2, output_dim=3)  for text prediction
net = tflearn.lstm(net, 256, activation='linear')
#net = tflearn.fully_connected(net, 128, activation='linear')
#net = tflearn.fully_connected(net, 256, activation='linear')
net = tflearn.fully_connected(net, 5, activation='linear')  # linear activation: f(x) = x
#net = tflearn.dropout(net, 0.8)
net = tflearn.regression(net,
                         optimizer='adam',
                         learning_rate=0.001,
                         loss='mean_square')

# """""""""""""""""""""""Training"""""""""""""""""""""""
model = tflearn.DNN(net, clip_gradients=0.0, tensorboard_verbose=0)
model.fit(X, Y, show_metric=True, n_epoch=30, validation_set=0.2)
# show the predicted and actual value of training
predictY = np.array(model.predict(X))  # predict() may return a list; convert for column slicing
x_axis = range(0, 2353, 1)
plt.plot(x_axis, predictY[:, 0], 'r-', x_axis, train_Y[:, 0], 'b')
plt.show()
plt.plot(x_axis, predictY[:, 1], 'r-', x_axis, train_Y[:, 1], 'b')
plt.show()
plt.plot(x_axis, predictY[:, 2], 'r-', x_axis, train_Y[:, 2], 'b')
plt.show()
plt.plot(x_axis, predictY[:, 3], 'r-', x_axis, train_Y[:, 3], 'b')
Example #49
#importing libs
import numpy as np
import tflearn

#loading the data set
import tflearn.datasets.mnist as mnist

# dataset loaded with one-hot encoding enabled
X, Y, testX, testY = mnist.load_data(one_hot=True)
# reshape the flat 784-pixel vectors into 28 time steps of 28 features
X = np.reshape(X, (-1, 28, 28))
testX = np.reshape(testX, (-1, 28, 28))

# Define an RNN (LSTM) model

net = tflearn.input_data(shape=[None, 28, 28])
net = tflearn.lstm(net, 128, return_seq=True)
net = tflearn.lstm(net, 128)
net = tflearn.fully_connected(net, 10, activation='softmax')
net = tflearn.regression(net,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         name="output1")
model = tflearn.DNN(net, tensorboard_verbose=2)
model.fit(X,
          Y,
          n_epoch=1,
          validation_set=0.1,
          show_metric=True,
          snapshot_step=100)
Example #50
def mcresponse(request):
    response={'status':"Failure",'respmsg':"",'respvoice':""}
    try:

        body_unicode = request.body.decode('utf-8')
        body_data = json.loads(body_unicode)

        msg=body_data['msg']
        msg=msg.replace('?','')
        if msg.lower()=="hi" or msg.lower()=="hello":
            voice=getTextToSpeech("Hello")
            response={'status':"Success",'respmsg':"Hello",'respvoice':voice}
        elif not os.path.exists(os.getcwd()+os.path.sep+"intents.json"):
            print(os.getcwd()+" does not contain intents.json")
            voice = getTextToSpeech("Unable to understand you. Sorry.")
            response = {'status': "Success", 'respmsg': "Unable to understand you. Sorry.", 'respvoice': voice}
        elif os.path.exists(os.getcwd()+os.path.sep+"intents.json"):
            # Build the pickled training data first if it does not exist yet
            if not os.path.exists(os.getcwd()+os.path.sep+"data.pickle"):
                dataPickle()
            with open("data.pickle", "rb") as f:
                words, labels, training, output = pickle.load(f)
            tensorflow.reset_default_graph()

            net = tflearn.input_data(shape=[None, len(training[0])])
            net = tflearn.fully_connected(net, 8)
            net = tflearn.fully_connected(net, 8)
            net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
            net = tflearn.regression(net)

            model = tflearn.DNN(net)

            model.load("model.tflearn")
            results = model.predict([bag_of_words(msg, words)])
            print(results)

            num = results[0][0] * 10
            num2=results[0][1]*10
            print(num2)
            if num > 1 and num2 > 1:
                voice = getTextToSpeech("Unable to understand you. Sorry.")
                response = {'status': "Success", 'respmsg': "Unable to understand you. Sorry.", 'respvoice': voice}
            else:


                results_index = numpy.argmax(results)
                print(results_index)
                tag = labels[results_index]

                with open(os.getcwd() + os.path.sep + "intents.json") as f:
                    data = json.load(f)

                    for tg in data['intents']:

                        if tg['tag'] == tag:
                            print(tg['tag'])
                            resp = tg['responses']
                            respf = ''
                            if tg['context_set']!=None and tg['context_set'].strip()!='':

                                if tg['context_set']=='college_name':

                                    #EmpDetails.objects.get(id=body_data['id'])
                                    cname = uniqueWords(msg, words).lower()
                                    college = None
                                    try:
                                        college=CollegeDetails.objects.filter(name=cname).first()
                                        if college==None:
                                            college=CollegeDetails.objects.filter(shortForm=cname).first()
                                    except Exception as e:
                                        print(str(e))
                                    print(college)
                                    if college!=None:
                                        c_id=college.id
                                        print("Found----"+cname)
                                        respf = college.name + ' is located in ' + college.address
                                        courses=[]
                                        print(respf)
                                        courses=Courses.objects.filter(cid=c_id)
                                        if len(courses) > 0:
                                            respf = respf + ' and offers courses like '
                                            cnt = 0
                                            for k in courses:
                                                cnt = cnt + 1
                                                if cnt == len(courses):
                                                    respf = respf + ' & ' + str(k.name) + '.'
                                                else:
                                                    respf = respf + str(k.name) + ', '

                                        voice = getTextToSpeech(respf)
                                        response = {'status': "Success", 'respmsg': respf,
                                                    'respvoice': voice}
                                        break
                                    else:
                                        print("yyyy")
                                        voice = getTextToSpeech(
                                            "Requested college details not found in the database. Sorry.")
                                        respf = "Requested college details not found in the database. Sorry."
                                        response = {'status': "Success", 'respmsg': respf,
                                                    'respvoice': voice}
                                    break
                                elif tg['context_set']=='admissionCriteria':
                                    print('Admission Criteria')
                                    cname = uniqueWords(msg, words).lower()
                                    college = None
                                    try:
                                        college=CollegeDetails.objects.filter(name=cname).first()
                                        if college==None:
                                            college=CollegeDetails.objects.filter(shortForm=cname).first()
                                    except Exception as e:
                                        print(str(e))
                                    print(college)
                                    if college!=None:
                                        respf = 'The criteria to secure admission in '+college.name +' is '+' '+str(college.admitCriteria)+'% in 12th grade (HSC).'
                                        print(respf)
                                        voice = getTextToSpeech(respf)
                                        response = {'status': "Success", 'respmsg': respf,
                                                    'respvoice': voice}
                                        break
                                    else:
                                        voice = getTextToSpeech(
                                            "Requested college details not found in the database. Sorry.")
                                        respf = "Requested college details not found in the database. Sorry."
                                        response = {'status': "Success", 'respmsg': respf,
                                                    'respvoice': voice}

                                        break


                                elif tg['context_set']=='fees':
                                    print('FEES')
                                    cList=list(CollegeDetails.objects.values())

                                    feesRequired=None
                                    collegeId=0
                                    collegeName=''
                                    for it in cList:
                                        if is_part(msg.lower(), it['name'].lower()) or is_part(msg.lower(), it['shortForm'].lower()):
                                            collegeName=it['name']
                                            collegeId=it['id']
                                            feesRequired=Fees.objects.filter(cid=collegeId).first()
                                            print(feesRequired)
                                            break
                                    if feesRequired!=None:
                                        if is_part(msg.lower(), 'open'):
                                            respf = 'The fees for ' + collegeName + ' for the Open category is ' + str(feesRequired.openCategory) + ' Rupees.'
                                        elif is_part(msg.lower(), 'obc'):
                                            respf = 'The fees for ' + collegeName + ' for the OBC category is ' + str(feesRequired.obc) + ' Rupees.'
                                        elif is_part(msg.lower(), 'sbc'):
                                            respf = 'The fees for ' + collegeName + ' for the SBC category is ' + str(feesRequired.sbc) + ' Rupees.'
                                        elif is_part(msg.lower(), 'sc'):
                                            respf = 'The fees for ' + collegeName + ' for the SC category is ' + str(feesRequired.sc) + ' Rupees.'
                                        elif is_part(msg.lower(), 'st'):
                                            respf = 'The fees for ' + collegeName + ' for the ST category is ' + str(feesRequired.st) + ' Rupees.'
                                        else:
                                            respf = 'The requested college or category is not available. Sorry.'
                                    else:
                                        respf = 'The requested college, category or fee detail is not available. Sorry.'
                                    voice = getTextToSpeech(respf)
                                    response = {'status': "Success", 'respmsg': respf,
                                                'respvoice': voice}
                                    break



                                elif tg['context_set']=='type':
                                    print('TYPE')
                                    cname = uniqueWords2(msg, words).lower()
                                    list1 = None
                                    print(cname)
                                    try:

                                       list1=CollegeDetails.objects.filter(typeOfClg=cname)
                                    except Exception as e:
                                        print(str(e))
                                    print(list1)



                                    if list1!=None:
                                        respf='The '+cname+' based colleges in Savitribai Phule Pune University are as follows-'
                                        tmpcnt=0
                                        if len(list1) != 0:
                                            for temp in list1:
                                                tmpcnt=tmpcnt+1

                                                respf=respf+' '+str(tmpcnt)+') '+temp.name+'. '
                                        else:
                                            respf='There are no colleges of requested type. Sorry.'
                                        print(respf)
                                        voice = getTextToSpeech(respf)
                                        response = {'status': "Success", 'respmsg': respf,
                                                    'respvoice': voice}
                                        break

                                    else:
                                        voice = getTextToSpeech(
                                            "Requested college details not found in the database. Sorry.")
                                        respf = "Requested college details not found in the database. Sorry."
                                        response = {'status': "Success", 'respmsg': respf,
                                                    'respvoice': voice}
                                        break
                                elif tg['context_set'] == 'seats':
                                    print("Seats")
                                    courseList=list(Courses.objects.values())
                                    courseName=''

                                    if courseList!=None:
                                        for temp1 in courseList:


                                            if is_part(msg,temp1['name'].lower())==True:
                                                print("Matched")
                                                courseName=temp1['name']
                                                print("Course Name is-"+courseName)
                                                break
                                        collegeName=uniqueWords3(msg,words,courseName)
                                        print("College Name is-"+collegeName)
                                        # Match by full name first, then fall back to the short form
                                        collegeId = CollegeDetails.objects.filter(name=collegeName).first()
                                        if collegeId is None:
                                            collegeId = CollegeDetails.objects.filter(shortForm=collegeName).first()
                                        if collegeId!=None:
                                            print("KKLLKK"+courseName)
                                            cobj=Courses.objects.filter(cid=collegeId.id,name=courseName).first()
                                            respf = 'The '+cobj.name+' course at '+collegeId.name+' has '+str(cobj.seats)+' seats available.'
                                        else:
                                            respf = 'There is no college or course of requested type. Sorry.'
                                    else:
                                        respf = 'There is no college or course of requested type. Sorry.'
                                    voice = getTextToSpeech(respf)

                                    response = {'status': "Success", 'respmsg': respf,
                                                'respvoice': voice}
                            else:
                                respf = random.choice(resp)
                            voice = getTextToSpeech(respf)
                            response = {'status': "Success", 'respmsg': respf,
                                        'respvoice': voice}

                            break
        else:
            voice = getTextToSpeech("Unable to understand you. Sorry.")
            response = {'status': "Success", 'respmsg': "Unable to understand you. Sorry.", 'respvoice': voice}
    except Exception as e:
        print(str(e))

    return JsonResponse(response,safe=False)
Example #51
def retrain():
    # Retrain additional confirmation
    if request.method == "POST":
        confirmation_value = request.form['retrain']
        if confirmation_value == "TRAIN/model":
            model_name = 'model.tflearn'
            existing_models_list = [
                model_name + '.data-00000-of-00001', model_name + '.index',
                model_name + '.meta'
            ]
            for item in existing_models_list:
                os.remove(item)

            with open("training_data.json") as file:
                data = json.load(file)

            words = []
            labels = []
            docs_x = []
            docs_y = []

            for intent in data["intents"]:
                for pattern in intent["patterns"]:
                    wrds = nltk.word_tokenize(pattern)
                    words.extend(wrds)
                    docs_x.append(wrds)
                    docs_y.append(intent["tag"])

                if intent["tag"] not in labels:
                    labels.append(intent["tag"])

            words = [stemmer.stem(w.lower()) for w in words if w != "?"]
            words = sorted(list(set(words)))

            labels = sorted(labels)

            training = []
            output = []

            out_empty = [0 for _ in range(len(labels))]

            for x, doc in enumerate(docs_x):
                bag = []

                wrds = [stemmer.stem(w.lower()) for w in doc]

                for w in words:
                    if w in wrds:
                        bag.append(1)
                    else:
                        bag.append(0)

                output_row = out_empty[:]
                output_row[labels.index(docs_y[x])] = 1

                training.append(bag)
                output.append(output_row)

            training = numpy.array(training)
            output = numpy.array(output)

            tensorflow.reset_default_graph()

            net = tflearn.input_data(shape=[None, len(training[0])])
            net = tflearn.fully_connected(net, 8)
            net = tflearn.fully_connected(net, 8)
            net = tflearn.fully_connected(net,
                                          len(output[0]),
                                          activation="softmax")
            net = tflearn.regression(net)

            model = tflearn.DNN(net)

            # ============== Saving model ==============
            model_name = "model.tflearn"

            def train_model():
                model.fit(training,
                          output,
                          n_epoch=1000,
                          batch_size=8,
                          show_metric=True)
                model.save(model_name)

            train_model()

            # ============== Loading model ==============
            model.load(model_name)

            flash("Model has been retrained successfully!", "success")

        else:
            flash("Please ensure that you type 'TRAIN/model' correctly",
                  "danger")

    return redirect(url_for('table'))
Example #52
net = tflearn.activations.leaky_relu(net)
net = tflearn.dropout(net, 0.6)
net = tflearn.fully_connected(net,
                              512,
                              weights_init='xavier',
                              regularizer='L2')
net = tflearn.normalization.batch_normalization(net, gamma=1.2, beta=0.2)
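# gamma and beta above set the initial values of batch norm's learned scale and shift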
net = tflearn.activations.leaky_relu(net)
net = tflearn.dropout(net, 0.7)
net = tflearn.fully_connected(net,
                              128,
                              weights_init='xavier',
                              regularizer='L2')
net = tflearn.normalization.batch_normalization(net, gamma=1.4, beta=0.4)
net = tflearn.activations.leaky_relu(net)
net = tflearn.dropout(net, 0.7)
net = tflearn.fully_connected(net, 3, weights_init='xavier', regularizer='L2')
net = tflearn.normalization.batch_normalization(net, gamma=1.3, beta=0.3)
net = tflearn.activations.softmax(net)
net = tflearn.regression(net,
                         optimizer='adam',
                         learning_rate=0.001,
                         loss='categorical_crossentropy')
model = tflearn.DNN(net,
                    checkpoint_path=os.path.join(root, 'model.tfl.ckpt'),
                    max_checkpoints=3)  #model definition
ckpt = root
# model.load(ckpt)  # load checkpoints
# model.fit(datanp, truenp, batch_size=8, show_metric=True)  # train with batch size of 8
Example #53
# Building Residual Network
net = tflearn.input_data(shape=[None, 28, 28, 1])
net = tflearn.conv_2d(net, 64, 3, activation='relu', bias=False)
net = tflearn.residual_bottleneck(net, 3, 16, 64)
net = tflearn.residual_bottleneck(net, 1, 32, 128, downsample=True)
net = tflearn.residual_bottleneck(net, 2, 32, 128)
net = tflearn.residual_bottleneck(net, 1, 64, 256, downsample=True)
net = tflearn.residual_bottleneck(net, 2, 64, 256)
net = tflearn.batch_normalization(net)
net = tflearn.activation(net, 'relu')
net = tflearn.global_avg_pool(net)
net = tflearn.fully_connected(net, 10, activation='softmax')

net = tflearn.regression(net,
                         optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=0.1)
# Training
model = tflearn.DNN(net,
                    checkpoint_path='model_resnet_mnist',
                    max_checkpoints=10,
                    tensorboard_verbose=0,
                    clip_gradients=0.)
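# Note: clip_gradients=0. disables gradient clipping entirely.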

model.fit(X,
          Y,
          n_epoch=100,
          validation_set=(testX, testY),
          show_metric=True,
          batch_size=256,
          shuffle=True,
          run_id='resnet_mnist')
Example #54
import tflearn
import tflearn.datasets.mnist as mnist
X, Y, testX, testY = mnist.load_data(one_hot=True)

# Building the encoder
encoder = tflearn.input_data(shape=[None, 784])
encoder = tflearn.fully_connected(encoder, 256)
encoder = tflearn.fully_connected(encoder, 64)

# Building the decoder
decoder = tflearn.fully_connected(encoder, 256)
decoder = tflearn.fully_connected(decoder, 784)

# Regression, with mean square error
net = tflearn.regression(decoder,
                         optimizer='adam',
                         learning_rate=0.001,
                         loss='mean_square',
                         metric=None)

# Training the auto encoder
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(X,
          X,
          n_epoch=10,
          validation_set=(testX, testX),
          run_id="auto_encoder",
          batch_size=256)

# Encoding X[0] for test
print("\nTest encoding of X[0]:")
encoding_model = tflearn.DNN(encoder, session=model.session)  # share the trained session so the encoder reuses the learned weights
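# Usage (as in the official TFLearn example): encode the first sample.
print(encoding_model.predict([X[0]]))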
Example #55
training = numpy.array(training)
output = numpy.array(output)

tensorflow.reset_default_graph()  # reset the underlying graph data

# Build the neural network: an input layer feeds the data into the network
net = tflearn.input_data(shape=[None, len(training[0])])
# Two hidden layers, each with 8 nodes
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
# Output layer: one node per class (len(output[0])), with softmax activation
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
# TFLearn's regression layer attaches the training op to the network
net = tflearn.regression(net)

# DNN is a model wrapper that handles training, prediction and saving
model = tflearn.DNN(net)

model.fit(training, output, n_epoch=1000, batch_size=8,
          show_metric=True)  # fit the model on the training data
model.save("model.tflearn")


def bag_of_words(s, words):
    bag = [0 for _ in range(len(words))]

    s_words = nltk.word_tokenize(s)
Example #56
Algorithm code in Python 2.7
from __future__ import division, print_function, absolute_import

import tflearn
from tflearn.data_utils import to_categorical, pad_sequences
from tflearn.datasets import imdb

# IMDB Dataset loading
train, test, _ = imdb.load_data(path='imdb.pkl', n_words=10000, valid_portion=0.1)
trainX, trainY = train
testX, testY = test

# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=100, value=0.)
testX = pad_sequences(testX, maxlen=100, value=0.)

# Converting labels to binary vectors
trainY = to_categorical(trainY, nb_classes=2)
testY = to_categorical(testY, nb_classes=2)

# Network building
net = tflearn.input_data([None, 100])
net = tflearn.embedding(net, input_dim=10000, output_dim=128)
net = tflearn.lstm(net, 128, dropout=0.8)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy')

# Training
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
          batch_size=32)
Example #57
img_aug = tflearn.ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_crop([32, 32], padding=4)

# Building Residual Network
net = tflearn.input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
net = tflearn.residual_block(net, n, 16)
net = tflearn.residual_block(net, 1, 32, downsample=True)
net = tflearn.residual_block(net, n-1, 32)
net = tflearn.residual_block(net, 1, 64, downsample=True)
net = tflearn.residual_block(net, n-1, 64)
# net = tflearn.batch_normalization(net)
net = tflearn.activation(net, 'relu')
net = tflearn.global_avg_pool(net)
# Regression
net = tflearn.fully_connected(net, 10, activation='softmax')
mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
net = tflearn.regression(net, optimizer=mom,
                         loss='categorical_crossentropy')
# Training
model = tflearn.DNN(net, checkpoint_path='model_resnet_cifar10',
                    max_checkpoints=10, tensorboard_verbose=2,
                    clip_gradients=0.)

model.fit(X, Y, n_epoch=200, validation_set=(X_test, Y_test),
          snapshot_epoch=False, snapshot_step=500,
          show_metric=True, batch_size=128, shuffle=True,
          run_id='resnet_cifar10')
Example #58
""" Linear Regression Example """

from __future__ import absolute_import, division, print_function

import tflearn

# Regression data
X = [3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,7.042,10.791,5.313,7.997,5.654,9.27,3.1]
Y = [1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,2.827,3.465,1.65,2.904,2.42,2.94,1.3]

# Linear Regression graph
input_ = tflearn.input_data(shape=[None])
linear = tflearn.single_unit(input_)
regression = tflearn.regression(linear, optimizer='sgd', loss='mean_square',
                                metric='R2', learning_rate=0.01)
m = tflearn.DNN(regression)
m.fit(X, Y, n_epoch=1000, show_metric=True, snapshot_epoch=False)

print("\nRegression result:")
print("Y = " + str(m.get_weights(linear.W)) +
      "*X + " + str(m.get_weights(linear.b)))

print("\nTest prediction for x = 3.2 and y = 4.5:")
print(m.predict([3.2, 4.5]))
Example #59
import tflearn
from tflearn.data_utils import to_categorical, pad_sequences
from tflearn.datasets import imdb

train, test, _ = imdb.load_data(path="imdb.pkl",
                                n_words=10000,
                                valid_portion=0.1)
trainX, trainY = train
testX, testY = test

trainX = pad_sequences(trainX, maxlen=100, value=0.)
testX = pad_sequences(testX, maxlen=100, value=0.)

trainY = to_categorical(trainY, nb_classes=2)
testY = to_categorical(testY, nb_classes=2)

net = tflearn.input_data([None, 100])
net = tflearn.embedding(net, input_dim=10000, output_dim=128)
net = tflearn.lstm(net, 128, dropout=0.8)
net = tflearn.fully_connected(net, 2, activation="softmax")
net = tflearn.regression(net,
                         optimizer="adam",
                         learning_rate=0.0001,
                         loss="categorical_crossentropy")

model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(trainX,
          trainY,
          validation_set=(testX, testY),
          show_metric=True,
          batch_size=32)
Example #60
import nltk

from nltk.stem.lancaster import LancasterStemmer

import tensorflow
import tflearn
import json
from joblib import load

stemmer = LancasterStemmer()

with open('vocabs.json') as file:
    data = json.load(file)

words, labels, training, output = load('data.joblib')  # joblib.load, not dump; assumes the tuple was saved earlier with joblib.dump
network = tflearn.input_data([None, len(training[0])])
network = tflearn.fully_connected(network, 8)
network = tflearn.fully_connected(network, 8)
network = tflearn.fully_connected(network,
                                  len(output[0]),
                                  activation='softmax')
network = tflearn.regression(network)

model = tflearn.DNN(network)

model.load('model.tflearn')