Example #1
def build_nce_model(num_words, num_docs, doc_embedding_size=doc_embedding_size, word_embedding_size=word_embedding_size):
    X1 = tflearn.input_data(shape=[None, 1])
    X2 = tflearn.input_data(shape=[None, 3])
    
    Y = tf.placeholder(tf.float32, [None, 1])

    d1, = tflearn.embedding(X1, input_dim=num_docs, output_dim=doc_embedding_size)
    w1, w2, w3 = tflearn.embedding(X2, input_dim=num_words, output_dim=word_embedding_size)

    embedding_layer = tflearn.merge([d1, w1, w2, w3], mode='concat')

    num_classes = num_words
    dim = doc_embedding_size + 3*word_embedding_size
        
    with tf.variable_scope("NCELoss"):
        weights = tflearn.variables.variable('W', [num_classes, dim])
        biases  = tflearn.variables.variable('b', [num_classes])

        batch_loss = tf.nn.nce_loss(weights, biases, embedding_layer, Y, num_sampled=100, num_classes=num_classes)
        loss = tf.reduce_mean(batch_loss)

    optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
    
    trainop = tflearn.TrainOp(loss=loss, optimizer=optimizer,
                          metric=None, batch_size=32)

    trainer = tflearn.Trainer(train_ops=trainop, tensorboard_verbose=0, checkpoint_path='embedding_model_nce')
    return trainer, X1, X2, Y
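# Hypothetical driver for build_nce_model above (a sketch, not from the
# original project: the vocabulary/document counts and random id arrays are
# assumptions; tflearn.Trainer.fit takes a feed dict keyed by the placeholders):
import numpy as np
trainer, X1, X2, Y = build_nce_model(num_words=10000, num_docs=500)
doc_ids = np.random.randint(0, 500, size=(1024, 1))
word_ids = np.random.randint(0, 10000, size=(1024, 3))
target_ids = np.random.randint(0, 10000, size=(1024, 1))
trainer.fit({X1: doc_ids, X2: word_ids, Y: target_ids}, n_epoch=1)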
Example #2
    def test_dnn(self):

        with tf.Graph().as_default():
            X = [3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,7.042,10.791,5.313,7.997,5.654,9.27,3.1]
            Y = [1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,2.827,3.465,1.65,2.904,2.42,2.94,1.3]
            input = tflearn.input_data(shape=[None])
            linear = tflearn.single_unit(input)
            regression = tflearn.regression(linear, optimizer='sgd', loss='mean_square',
                                            metric='R2', learning_rate=0.01)
            m = tflearn.DNN(regression)
            # Testing fit and predict
            m.fit(X, Y, n_epoch=1000, show_metric=True, snapshot_epoch=False)
            res = m.predict([3.2])[0]
            self.assertGreater(res, 1.3, "DNN test (linear regression) failed! with score: " + str(res) + " expected > 1.3")
            self.assertLess(res, 1.8, "DNN test (linear regression) failed! with score: " + str(res) + " expected < 1.8")

            # Testing save method
            m.save("test_dnn.tflearn")
            self.assertTrue(os.path.exists("test_dnn.tflearn"))

        with tf.Graph().as_default():
            input = tflearn.input_data(shape=[None])
            linear = tflearn.single_unit(input)
            regression = tflearn.regression(linear, optimizer='sgd', loss='mean_square',
                                            metric='R2', learning_rate=0.01)
            m = tflearn.DNN(regression)

            # Testing load method
            m.load("test_dnn.tflearn")
            res = m.predict([3.2])[0]
            self.assertGreater(res, 1.3, "DNN test (linear regression) failed after loading model! score: " + str(res) + " expected > 1.3")
            self.assertLess(res, 1.8, "DNN test (linear regression) failed after loading model! score: " + str(res) + " expected < 1.8")
Example #3
    def test_conv_layers(self):

        X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.], [1., 1., 1., 0.]]
        Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]

        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4])
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2, activation='relu')
            g = tflearn.max_pool_2d(g, 2)
            g = tflearn.fully_connected(g, 2, activation='softmax')
            g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

            m = tflearn.DNN(g)
            m.fit(X, Y, n_epoch=100, snapshot_epoch=False)
            # TODO: Fix test
            #self.assertGreater(m.predict([[1., 0., 0., 0.]])[0][0], 0.5)

        # Bulk Tests
        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4])
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2)
            g = tflearn.conv_2d(g, 4, 1)
            g = tflearn.conv_2d_transpose(g, 4, 2, [2, 2])
            g = tflearn.max_pool_2d(g, 2)
Example #4
def xor_operation():
    # Function to simulate XOR operation using graph combo of NAND and OR
    X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
    Y_nand = [[1.], [1.], [1.], [0.]]
    Y_or = [[0.], [1.], [1.], [1.]]

    with tf.Graph().as_default():
        graph = tflearn.input_data(shape=[None, 2])
        graph_nand = tflearn.fully_connected(graph, 32, activation='linear')
        graph_nand = tflearn.fully_connected(graph_nand, 32, activation='linear')
        graph_nand = tflearn.fully_connected(graph_nand, 1, activation='sigmoid')
        graph_nand = tflearn.regression(graph_nand, optimizer='sgd', learning_rate=2., loss='binary_crossentropy')

        graph_or = tflearn.fully_connected(graph, 32, activation='linear')
        graph_or = tflearn.fully_connected(graph_or, 32, activation='linear')
        graph_or = tflearn.fully_connected(graph_or, 1, activation='sigmoid')
        graph_or = tflearn.regression(graph_or, optimizer='sgd', learning_rate=2., loss='binary_crossentropy')

        graph_xor = tflearn.merge([graph_nand, graph_or], mode='elemwise_mul')

        # Model training
        model = tflearn.DNN(graph_xor)

        model.fit(X, [Y_nand, Y_or], n_epoch=100, snapshot_epoch=False)
        prediction = model.predict([[0., 1.]])
        print("Prediction: ", prediction)
Example #5
 def generate_network(self):
     """ Return tflearn cnn network.
     """
     print(self.image_size, self.n_epoch, self.batch_size)
     if not isinstance(self.image_size, list) \
         or not isinstance(self.n_epoch, int) \
         or not isinstance(self.batch_size, int):
         raise ValueError("Insufficient values to generate network.\n"
                          "Need (n_epoch, int), (batch_size, int),"
                          "(image_size, list)")
     network = tflearn.input_data(
         shape=[None, self.image_size[0], self.image_size[1],
                self.IMAGE_CHANNEL_NUM],
         data_preprocessing=self.generate_image_preprocessing(),
         data_augmentation=self.generate_image_augumentation())
     dnn_network = DnnNetwork()
     if self.network_type == NetworkType.cnn.name:
         network = dnn_network.build_cnn_network(network)
     elif self.network_type == NetworkType.resnet.name:
         network = dnn_network.build_residual_network(network)
     elif self.network_type == NetworkType.alex.name:
         network = dnn_network.build_alex_network(network)
     elif self.network_type == NetworkType.vgg.name:
         network = dnn_network.build_vgg_network(network)
     elif self.network_type == NetworkType.net_in_net.name:
         network = dnn_network.build_network_in_network(network)
     elif self.network_type == NetworkType.lenet.name:
         network = dnn_network.build_le_network(network)
     else:
         raise NameError("invalid network_type: {}".format(self.network_type))
     return network
Example #6
def vgg16(placeholderX=None):

    x = tflearn.input_data(shape=[None, 224, 224, 3], name='input',
                           placeholder=placeholderX)

    x = tflearn.conv_2d(x, 64, 3, activation='relu', name='conv1_1')
    x = tflearn.conv_2d(x, 64, 3, activation='relu', name='conv1_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='pool1')

    x = tflearn.conv_2d(x, 128, 3, activation='relu', name='conv2_1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', name='conv2_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='pool2')

    x = tflearn.conv_2d(x, 256, 3, activation='relu', name='conv3_1')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', name='conv3_2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', name='conv3_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='pool3')

    x = tflearn.conv_2d(x, 512, 3, activation='relu', name='conv4_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', name='conv4_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', name='conv4_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='pool4')

    x = tflearn.conv_2d(x, 512, 3, activation='relu', name='conv5_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', name='conv5_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', name='conv5_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='pool5')

    x = tflearn.conv_2d(x, 4096, 7, activation='relu', name='fc6')
    x = tflearn.dropout(x, 0.5)

    x = tflearn.conv_2d(x, 4096, 1, activation='relu', name='fc7')
    x = tflearn.dropout(x, 0.5)

    return x
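# One hedged way to finish the extractor above for classification (a sketch:
# vgg16 as written stops at the fc7 activations, and num_classes is an
# assumption, not part of the original code):
def vgg16_classifier(num_classes=1000):
    x = vgg16()
    x = tflearn.global_avg_pool(x)
    x = tflearn.fully_connected(x, num_classes, activation='softmax', name='fc8')
    return tflearn.regression(x, optimizer='adam',
                              loss='categorical_crossentropy',
                              learning_rate=0.001)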
Example #7
def do_rnn(x,y):
    global max_document_length
    print "RNN"
    trainX, testX, trainY, testY = train_test_split(x, y, test_size=0.4, random_state=0)
    y_test=testY

    trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
    testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Network building
    net = tflearn.input_data([None, max_document_length])
    net = tflearn.embedding(net, input_dim=10240000, output_dim=128)
    net = tflearn.lstm(net, 128, dropout=0.8)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')

    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(trainX, trainY, validation_set=0.1, show_metric=True,
              batch_size=10,run_id="webshell",n_epoch=5)

    y_predict_list=model.predict(testX)
    y_predict=[]
    for i in y_predict_list:
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)

    do_metrics(y_test, y_predict)
Example #8
def use_tflearn():
    import tflearn

    # Data loading and preprocessing
    import tflearn.datasets.mnist as mnist
    X, Y, testX, testY = mnist.load_data(one_hot=True)

    # Building deep neural network
    input_layer = tflearn.input_data(shape=[None, 784])
    dense1 = tflearn.fully_connected(input_layer, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001)
    dropout1 = tflearn.dropout(dense1, 0.8)
    dense2 = tflearn.fully_connected(dropout1, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001)
    dropout2 = tflearn.dropout(dense2, 0.8)
    softmax = tflearn.fully_connected(dropout2, 10, activation='softmax')

    # Regression using SGD with learning rate decay and Top-3 accuracy
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    top_k = tflearn.metrics.Top_k(3)
    net = tflearn.regression(softmax, optimizer=sgd, metric=top_k,
                             loss='categorical_crossentropy')

    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(X, Y, n_epoch=20, validation_set=(testX, testY),
              show_metric=True, run_id="dense_model")
Example #9
def yn_net():
    net = tflearn.input_data(shape=[None, img_rows, img_cols, 1]) #D = 256, 256
    net = tflearn.conv_2d(net,nb_filter=8,filter_size=3, activation='relu', name='conv0.1')
    net = tflearn.conv_2d(net,nb_filter=8,filter_size=3, activation='relu', name='conv0.2')
    net = tflearn.max_pool_2d(net, kernel_size = [2,2], name='maxpool0') #D = 128, 128
    net = tflearn.dropout(net,0.75,name='dropout0')
    net = tflearn.conv_2d(net,nb_filter=16,filter_size=3, activation='relu', name='conv1.1')
    net = tflearn.conv_2d(net,nb_filter=16,filter_size=3, activation='relu', name='conv1.2')
    net = tflearn.max_pool_2d(net, kernel_size = [2,2], name='maxpool1') #D = 64,  64
    net = tflearn.dropout(net,0.75,name='dropout0')
    net = tflearn.conv_2d(net,nb_filter=32,filter_size=3, activation='relu', name='conv2.1')
    net = tflearn.conv_2d(net,nb_filter=32,filter_size=3, activation='relu', name='conv2.2')
    net = tflearn.max_pool_2d(net, kernel_size = [2,2], name='maxpool2') #D = 32 by 32
    net = tflearn.dropout(net,0.75,name='dropout0')
    net = tflearn.conv_2d(net,nb_filter=32,filter_size=3, activation='relu', name='conv3.1')
    net = tflearn.conv_2d(net,nb_filter=32,filter_size=3, activation='relu', name='conv3.2')
    net = tflearn.max_pool_2d(net, kernel_size = [2,2], name='maxpool3') #D = 16 by 16
    net = tflearn.dropout(net,0.75,name='dropout0')
#    net = tflearn.conv_2d(net,nb_filter=64,filter_size=3, activation='relu', name='conv4.1')
#    net = tflearn.conv_2d(net,nb_filter=64,filter_size=3, activation='relu', name='conv4.2')
#    net = tflearn.max_pool_2d(net, kernel_size = [2,2], name='maxpool4') #D = 8 by 8
#    net = tflearn.dropout(net,0.75,name='dropout0')
    net = tflearn.fully_connected(net, n_units = 128, activation='relu', name='fc1')
    net = tflearn.fully_connected(net, 2, activation='sigmoid')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001)
    model = tflearn.DNN(net, tensorboard_verbose=1,tensorboard_dir='/tmp/tflearn_logs/')
    return model
Example #10
def run_combo_XOR():
    X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
    Y_nand = [[1.], [1.], [1.], [0.]]
    Y_or = [[0.], [1.], [1.], [1.]]

    g = tflearn.input_data(shape=[None, 2])

    # Nand graph
    g_nand = tflearn.fully_connected(g, 32, activation='linear')
    g_nand = tflearn.fully_connected(g_nand, 32, activation='linear')
    g_nand = tflearn.fully_connected(g_nand, 1, activation='sigmoid')
    g_nand = tflearn.regression(
        g_nand, optimizer='sgd', learning_rate=2., loss='binary_crossentropy')

    # Or graph
    g_or = tflearn.fully_connected(g, 32, activation='linear')
    g_or = tflearn.fully_connected(g_or, 32, activation='linear')
    g_or = tflearn.fully_connected(g_or, 1, activation='sigmoid')
    g_or = tflearn.regression(
        g_or, optimizer='sgd', learning_rate=2., loss='binary_crossentropy')

    g_xor = tflearn.merge([g_nand, g_or], mode='elemwise_mul')

    m = train_model(g_xor, X, [Y_nand, Y_or])
    # sess = tf.Session()  # separate from DNN session
    sess = m.session  # separate from DNN session
    print(
        sess.run(tflearn.merge([Y_nand, Y_or], mode='elemwise_mul')))
Example #11
def model_for_type(neural_net_type, tile_size, on_band_count):
    """The neural_net_type can be: one_layer_relu,
                                   one_layer_relu_conv,
                                   two_layer_relu_conv."""
    network = tflearn.input_data(shape=[None, tile_size, tile_size, on_band_count])

    # NN architectures mirror ch. 3 of www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    if neural_net_type == "one_layer_relu":
        network = tflearn.fully_connected(network, 64, activation="relu")
    elif neural_net_type == "one_layer_relu_conv":
        network = conv_2d(network, 64, 12, strides=4, activation="relu")
        network = max_pool_2d(network, 3)
    elif neural_net_type == "two_layer_relu_conv":
        network = conv_2d(network, 64, 12, strides=4, activation="relu")
        network = max_pool_2d(network, 3)
        network = conv_2d(network, 128, 4, activation="relu")
    else:
        print("ERROR: exiting, unknown layer type for neural net")

    # classify as road or not road
    softmax = tflearn.fully_connected(network, 2, activation="softmax")

    # hyperparameters based on www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    momentum = tflearn.optimizers.Momentum(learning_rate=0.005, momentum=0.9, lr_decay=0.0002, name="Momentum")

    net = tflearn.regression(softmax, optimizer=momentum, loss="categorical_crossentropy")

    return tflearn.DNN(net, tensorboard_verbose=0)
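# Hypothetical call into model_for_type (the tile size, band count and the
# training arrays are illustrative assumptions, not from the original project):
model = model_for_type("two_layer_relu_conv", tile_size=64, on_band_count=3)
model.fit(training_images, training_labels, n_epoch=10,
          validation_set=0.1, show_metric=True)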
Example #12
def build_network():
    network = tflearn.input_data(shape=[None, 2])
    network = tflearn.fully_connected(network, 64, activation='relu', regularizer='L2', weight_decay=0.001)
    network = tflearn.fully_connected(network, 1, activation='sigmoid')
    network = tflearn.regression(network, optimizer='sgd', learning_rate=0.3,
                           loss='mean_square')
    return network
Example #13
File: 16-7.py Project: DemonZeros/1book
def do_rnn(x_train,x_test,y_train,y_test):
    global n_words
    # Data preprocessing
    # Sequence padding
    print "GET n_words embedding %d" % n_words


    #x_train = pad_sequences(x_train, maxlen=100, value=0.)
    #x_test = pad_sequences(x_test, maxlen=100, value=0.)
    # Converting labels to binary vectors
    y_train = to_categorical(y_train, nb_classes=2)
    y_test = to_categorical(y_test, nb_classes=2)

    # Network building
    net = tflearn.input_data(shape=[None, 100,n_words])
    net = tflearn.lstm(net, 10,  return_seq=True)
    net = tflearn.lstm(net, 10, )
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.1,name="output",
                             loss='categorical_crossentropy')

    # Training

    model = tflearn.DNN(net, tensorboard_verbose=3)
    model.fit(x_train, y_train, validation_set=(x_test, y_test), show_metric=True,
             batch_size=32,run_id="maidou")
Example #14
    def __init__(self, s_date, n_frame):
        self.n_epoch = 20
        prev_bd = int(s_date[:6])-1
        prev_ed = int(s_date[9:15])-1
        if prev_bd%100 == 0: prev_bd -= 98
        if prev_ed%100 == 0: prev_ed -= 98
        pred_s_date = "%d01_%d01" % (prev_bd, prev_ed)
        prev_model = '../model/tflearn/reg_l3_bn/big/%s' % pred_s_date
        self.model_dir = '../model/tflearn/reg_l3_bn/big/%s' % s_date

        tf.reset_default_graph()
        tflearn.init_graph(gpu_memory_fraction=0.1)
        input_layer = tflearn.input_data(shape=[None, 23*n_frame], name='input')
        dense1 = tflearn.fully_connected(input_layer, 400, name='dense1', activation='relu')
        dense1n = tflearn.batch_normalization(dense1, name='BN1')
        dense2 = tflearn.fully_connected(dense1n, 100, name='dense2', activation='relu')
        dense2n = tflearn.batch_normalization(dense2, name='BN2')
        dense3 = tflearn.fully_connected(dense2n, 1, name='dense3')
        output = tflearn.single_unit(dense3)
        regression = tflearn.regression(output, optimizer='adam', loss='mean_square',
                                metric='R2', learning_rate=0.001)
        self.estimators = tflearn.DNN(regression)
        if os.path.exists('%s/model.tfl' % prev_model):
            self.estimators.load('%s/model.tfl' % prev_model)
            self.n_epoch = 10
        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)
Example #15
def do_rnn(trainX, testX, trainY, testY):
    max_document_length=64
    y_test=testY
    trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
    testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Network building
    net = tflearn.input_data([None, max_document_length])
    net = tflearn.embedding(net, input_dim=10240000, output_dim=64)
    net = tflearn.lstm(net, 64, dropout=0.1)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')

    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0,tensorboard_dir="dga_log")
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
              batch_size=10,run_id="dga",n_epoch=1)

    y_predict_list = model.predict(testX)
    #print y_predict_list

    y_predict = []
    for i in y_predict_list:
        print(i[0])
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)

    print(classification_report(y_test, y_predict))
    print(metrics.confusion_matrix(y_test, y_predict))
Example #16
File: 16-3.py Project: DemonZeros/1book
def do_rnn(trainX, testX, trainY, testY):
    global n_words
    # Data preprocessing
    # Sequence padding
    print "GET n_words embedding %d" % n_words


    trainX = pad_sequences(trainX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    testX = pad_sequences(testX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Network building
    net = tflearn.input_data([None, MAX_DOCUMENT_LENGTH])
    net = tflearn.embedding(net, input_dim=n_words, output_dim=128)
    net = tflearn.lstm(net, 128, dropout=0.8)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')

    # Training



    model = tflearn.DNN(net, tensorboard_verbose=3)
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
             batch_size=32,run_id="maidou")
def main():
    load_vectors("./vectors.bin")
    init_seq()
    xlist = []
    ylist = []
    test_X = None
    #for i in range(len(seq)-100):
    for i in range(1000):
        sequence = seq[i:i+20]
        xlist.append(sequence)
        ylist.append(seq[i+20])
        if test_X is None:
            test_X = np.array(sequence)
            (match_word, max_cos) = vector2word(seq[i+20])
            print "right answer=", match_word, max_cos

    X = np.array(xlist)
    Y = np.array(ylist)
    net = tflearn.input_data([None, 20, 200])
    net = tflearn.lstm(net, 200)
    net = tflearn.fully_connected(net, 200, activation='linear')
    net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1,
                                     loss='mean_square')
    model = tflearn.DNN(net)
    model.fit(X, Y, n_epoch=1000, batch_size=1,snapshot_epoch=False,show_metric=True)
    model.save("model")
    predict = model.predict([test_X])
    #print predict
    #for v in test_X:
    #    print vector2word(v)
    (match_word, max_cos) = vector2word(predict[0])
    print "predict=", match_word, max_cos
Example #18
    def test_sequencegenerator(self):

        with tf.Graph().as_default():
            text = "123456789101234567891012345678910123456789101234567891012345678910"
            maxlen = 5

            X, Y, char_idx = \
                tflearn.data_utils.string_to_semi_redundant_sequences(text, seq_maxlen=maxlen, redun_step=3)

            g = tflearn.input_data(shape=[None, maxlen, len(char_idx)])
            g = tflearn.lstm(g, 32)
            g = tflearn.dropout(g, 0.5)
            g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
            g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                                   learning_rate=0.1)

            m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                          seq_maxlen=maxlen,
                                          clip_gradients=5.0)
            m.fit(X, Y, validation_set=0.1, n_epoch=100, snapshot_epoch=False)
            res = m.generate(10, temperature=1., seq_seed="12345")
            self.assertEqual(res, "123456789101234", "SequenceGenerator test failed! Generated sequence: " + res + " expected '123456789101234'")

            # Testing save method
            m.save("test_seqgen.tflearn")
            self.assertTrue(os.path.exists("test_seqgen.tflearn"))

            # Testing load method
            m.load("test_seqgen.tflearn")
            res = m.generate(10, temperature=1., seq_seed="12345")
            self.assertEqual(res, "123456789101234", "SequenceGenerator test failed after loading model! Generated sequence: " + res + " expected '123456789101234'")
    def deep_model(self, wide_inputs, n_inputs, n_nodes=[100, 50], use_dropout=False):
        '''
        Model - deep, i.e. two-layer fully connected network model
        '''
        cc_input_var = {}
        cc_embed_var = {}
        flat_vars = []
        if self.verbose:
            print ("--> deep model: %s categories, %d continuous" % (len(self.categorical_columns), n_inputs))
        for cc, cc_size in self.categorical_columns.items():
            cc_input_var[cc] = tflearn.input_data(shape=[None, 1], name="%s_in" % cc,  dtype=tf.int32)
            # embedding layers only work on CPU!  No GPU implementation in tensorflow, yet!
            cc_embed_var[cc] = tflearn.layers.embedding_ops.embedding(cc_input_var[cc],    cc_size,  8, name="deep_%s_embed" % cc)
            if self.verbose:
                print ("    %s_embed = %s" % (cc, cc_embed_var[cc]))
            flat_vars.append(tf.squeeze(cc_embed_var[cc], squeeze_dims=[1], name="%s_squeeze" % cc))

        network = tf.concat(1, [wide_inputs] + flat_vars, name="deep_concat")
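        # Note (TF-version assumption): tf.concat(1, ...) above uses the
        # pre-1.0 argument order; on TF >= 1.0 the same line would read
        # tf.concat([wide_inputs] + flat_vars, axis=1, name="deep_concat").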
        for k in range(len(n_nodes)):
            network = tflearn.fully_connected(network, n_nodes[k], activation="relu", name="deep_fc%d" % (k+1))
            if use_dropout:
                network = tflearn.dropout(network, 0.5, name="deep_dropout%d" % (k+1))
        if self.verbose:
            print ("Deep model network before output %s" % network)
        network = tflearn.fully_connected(network, 1, activation="linear", name="deep_fc_output", bias=False)
        network = tf.reshape(network, [-1, 1])	# so that accuracy is binary_accuracy
        if self.verbose:
            print ("Deep model network %s" % network)
        return network
Example #20
def generate_nnet(feats):
    """Generate a neural network.

    Parameters
    ----------
    feats : list with at least one feature vector

    Returns
    -------
    Neural network object
    """
    # Load it here to prevent crash of --help when it's not present
    import tflearn

    tflearn.init_graph(num_cores=2, gpu_memory_fraction=0.6)

    input_shape = (None,
                   feats[0].shape[0],
                   feats[0].shape[1],
                   feats[0].shape[2])
    logging.info("input shape: %s", input_shape)
    net = tflearn.input_data(shape=input_shape)
    net = tflearn.conv_2d(net, 10, 3, activation='relu', regularizer="L2")
    net = tflearn.conv_2d(net, 10, 3, activation='relu', regularizer="L2")
    net = tflearn.fully_connected(net, 2, activation='sigmoid')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.01,
                             loss='categorical_crossentropy', name='target')
    return tflearn.DNN(net)
Example #21
def run():
    # like a CNN input: the third dimension plays the role of the channel axis
    g = tflearn.input_data(shape=[None, maxlen, len(char_idx)])
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam',
                           loss='categorical_crossentropy',
                           learning_rate=0.001)

    m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                  seq_maxlen=maxlen,
                                  clip_gradients=5.0,
                                  checkpoint_path='models/model_us_cities')

    for i in range(40):
        seed = random_sequence_from_textfile(path, maxlen)
        m.fit(X, Y, validation_set=0.1, batch_size=128,
              n_epoch=1, run_id='us_cities')
        print("-- TESTING...")
        print("-- Test with temperature of 1.2 --")
        print(m.generate(30, temperature=1.2, seq_seed=seed))
        print("-- Test with temperature of 1.0 --")
        print(m.generate(30, temperature=1.0, seq_seed=seed))
        print("-- Test with temperature of 0.5 --")
        print(m.generate(30, temperature=0.5, seq_seed=seed))
Example #22
    def test_regression_placeholder(self):
        '''
        Check that regression does not duplicate placeholders
        '''

        with tf.Graph().as_default():

            g = tflearn.input_data(shape=[None, 2])
            g_nand = tflearn.fully_connected(g, 1, activation='linear')
            with tf.name_scope("Y"):
                Y_in = tf.placeholder(shape=[None, 1], dtype=tf.float32, name="Y")
            tflearn.regression(g_nand, optimizer='sgd',
                               placeholder=Y_in,
                               learning_rate=2.,
                               loss='binary_crossentropy', 
                               op_name="regression1",
                               name="Y")
            # for this test, just use the same default trainable_vars
            # in practice, this should be different for the two regressions
            tflearn.regression(g_nand, optimizer='adam',
                               placeholder=Y_in,
                               learning_rate=2.,
                               loss='binary_crossentropy', 
                               op_name="regression2",
                               name="Y")

            self.assertEqual(len(tf.get_collection(tf.GraphKeys.TARGETS)), 1)
Example #23
    def test_core_layers(self):

        X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
        Y_nand = [[1.], [1.], [1.], [0.]]
        Y_or = [[0.], [1.], [1.], [1.]]

        # Graph definition
        with tf.Graph().as_default():
            # Building a network with 2 optimizers
            g = tflearn.input_data(shape=[None, 2])

            # Nand operator definition
            g_nand = tflearn.fully_connected(g, 32, activation='linear')
            g_nand = tflearn.fully_connected(g_nand, 32, activation='linear')
            g_nand = tflearn.fully_connected(g_nand, 1, activation='sigmoid')
            g_nand = tflearn.regression(g_nand, optimizer='sgd',
                                        learning_rate=2.,
                                        loss='binary_crossentropy')
            # Or operator definition
            g_or = tflearn.fully_connected(g, 32, activation='linear')
            g_or = tflearn.fully_connected(g_or, 32, activation='linear')
            g_or = tflearn.fully_connected(g_or, 1, activation='sigmoid')
            g_or = tflearn.regression(g_or, optimizer='sgd',
                                      learning_rate=2.,
                                      loss='binary_crossentropy')
            # XOR merging Nand and Or operators
            g_xor = tflearn.merge([g_nand, g_or], mode='elemwise_mul')

            # Training
            m = tflearn.DNN(g_xor)
            m.fit(X, [Y_nand, Y_or], n_epoch=400, snapshot_epoch=False)

            # Testing
            self.assertLess(m.predict([[0., 0.]])[0][0], 0.01)
            self.assertGreater(m.predict([[0., 1.]])[0][0], 0.9)
            self.assertGreater(m.predict([[1., 0.]])[0][0], 0.9)
            self.assertLess(m.predict([[1., 1.]])[0][0], 0.01)

        # Bulk Tests
        with tf.Graph().as_default():
            net = tflearn.input_data(shape=[None, 2])
            net = tflearn.flatten(net)
            net = tflearn.reshape(net, new_shape=[-1])
            net = tflearn.activation(net, 'relu')
            net = tflearn.dropout(net, 0.5)
            net = tflearn.single_unit(net)
Example #24
File: ddpg.py Project: ataitler/DQN
    def create_critic_network(self):
        inputs = tflearn.input_data(shape=[None, self.s_dim])
        action = tflearn.input_data(shape=[None, self.a_dim])
        net = tflearn.fully_connected(inputs, 400, activation='relu')

        # Add the action tensor in the 2nd hidden layer
        # Use two temp layers to get the corresponding weights and biases
        t1 = tflearn.fully_connected(net, 300)
        t2 = tflearn.fully_connected(action, 300)

        net = tflearn.activation(tf.matmul(net,t1.W) + tf.matmul(action, t2.W) + t2.b, activation='relu')

        # linear layer connected to 1 output representing Q(s,a) 
        # Weights are init to Uniform[-3e-3, 3e-3]
        w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        out = tflearn.fully_connected(net, 1, weights_init=w_init)
        return inputs, action, out
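    # A hedged sketch of the training ops that usually accompany such a DDPG
    # critic (predicted_q_value and self.learning_rate are assumptions drawn
    # from common DDPG implementations, not from this excerpt):
    def create_critic_train_ops(self, out):
        predicted_q_value = tf.placeholder(tf.float32, [None, 1])
        loss = tflearn.mean_square(predicted_q_value, out)
        optimize = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)
        return predicted_q_value, loss, optimize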
Example #25
 def __init__(self):
     network = tflearn.input_data(shape=[None, 784], name="input")
     network = self.make_core_network(network)
     network = regression(network, optimizer='adam', learning_rate=0.01,
                          loss='categorical_crossentropy', name='target')
     
     model = tflearn.DNN(network, tensorboard_verbose=0)
     self.model = model
Example #26
File: ddpg.py Project: ataitler/DQN
 def create_actor_network(self): 
     inputs = tflearn.input_data(shape=[None, self.s_dim])
     net = tflearn.fully_connected(inputs, 400, activation='relu')
     net = tflearn.fully_connected(net, 300, activation='relu')
     # Final layer weights are init to Uniform[-3e-3, 3e-3]
     w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
     out = tflearn.fully_connected(net, self.a_dim, activation='tanh', weights_init=w_init)
     scaled_out = tf.mul(out, self.action_bound) # Scale output to -action_bound to action_bound
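     # Note (TF-version assumption): tf.mul was removed in TF 1.0; on newer
     # 1.x releases this line would use tf.multiply(out, self.action_bound).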
     return inputs, out, scaled_out 
Example #27
 def build_simple_model(self):
     """Build a simple model for test
     Returns:
         DNN, [ (input layer name, input placeholder, input data) ], Target data
     """
     inputPlaceholder1, inputPlaceholder2 = \
         tf.placeholder(tf.float32, (1, 1), name = "input1"), tf.placeholder(tf.float32, (1, 1), name = "input2")
     input1 = tflearn.input_data(placeholder = inputPlaceholder1)
     input2 = tflearn.input_data(placeholder = inputPlaceholder2)
     network = tflearn.merge([ input1, input2 ], "sum")
     network = tflearn.reshape(network, (1, 1))
     network = tflearn.fully_connected(network, 1)
     network = tflearn.regression(network)
     return (
         tflearn.DNN(network),
         [ ("input1:0", inputPlaceholder1, self.INPUT_DATA_1), ("input2:0", inputPlaceholder2, self.INPUT_DATA_2) ],
         self.TARGET,
     )
Example #28
 def simple_learn(self):
     tflearn.init_graph()
     net=tflearn.input_data(shape=[None,64,64,3])
     net=tflearn.fully_connected(net,64)
     net=tflearn.dropout(net,.5)
     net=tflearn.fully_connected(net,10,activation='softmax')
     net=tflearn.regression(net,optimizer='adam',loss='softmax_categorical_crossentropy')
     model = tflearn.DNN(net)
     model.fit(self.trainset,self.trainlabels)
Example #29
def define_dnn_topology(input_num, first_layer, second_layer):
    tf.Graph().as_default()
    g = tflearn.input_data(shape=[None, input_num])
    g = tflearn.fully_connected(g, first_layer, activation='linear')
    g = tflearn.fully_connected(g, second_layer, activation='linear')
    g = tflearn.fully_connected(g, 1, activation='sigmoid')
    g = tflearn.regression(g, optimizer='sgd', learning_rate=2., loss='mean_square')
    tf.Graph().finalize() 
    return g 
Example #30
def generate_net(embedding):
    net = tflearn.input_data([None, 200])
    net = tflearn.embedding(net, input_dim=300000, output_dim=128)
    net = tflearn.lstm(net, 128)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy')
    return net
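# Minimal training sketch for generate_net (note that the embedding argument
# is unused by the function as written; trainX/trainY are assumptions):
model = tflearn.DNN(generate_net(None), tensorboard_verbose=0)
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=32)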
Example #31
y_train = enc.fit_transform(y_train.reshape(-1, 1)).toarray()
y_test = enc.fit_transform(y_test.reshape(-1, 1)).toarray()

n_inputs = 6  # [gender code, age-group code, height, weight, waist circumference, smoking status]
n_hidden1 = 18
n_hidden2 = 20
n_hidden3 = 24
n_hidden4 = 9
n_outputs = 2  # the final output is diabetic / not diabetic, hence 2 classes

n_epochs = 50
batch_size = 128

# Network

inputs = tflearn.input_data(shape=[None, n_inputs])
hidden1 = tflearn.fully_connected(inputs,
                                  n_hidden1,
                                  activation='relu',
                                  name='hidden1')
hidden2 = tflearn.fully_connected(hidden1,
                                  n_hidden2,
                                  activation='relu',
                                  name='hidden2')
hidden3 = tflearn.fully_connected(hidden2,
                                  n_hidden3,
                                  activation='relu',
                                  name='hidden3')
hidden4 = tflearn.fully_connected(hidden3,
                                  n_hidden4,
                                  activation='relu',
Example #32
def train_nn_tflearn(data_handler, num_epochs=50):

    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
    #tflearn.init_graph(gpu_memory_fraction=0.5)

    batch_size = data_handler.mini_batch_size
    classes = data_handler.num_labels

    img_prep = tflearn.ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    img_aug = tflearn.ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25)
    #img_aug.add_random_crop([32,32], padding=4)

    x = tflearn.input_data(shape=[None, 128, 128, 1],
                           dtype='float',
                           data_preprocessing=img_prep,
                           data_augmentation=img_aug)
    # x = tf.placeholder('float', [None, 32, 32, 3])
    #y = tf.placeholder('float', [None, 10])

    # test_data, test_labels = data_handler.get_test_data()
    # test_data = test_data.reshape([-1,32,32,3])

    ntrain = data_handler.train_size
    ntest = data_handler.meta['num_cases_per_batch']

    # from tflearn.datasets import cifar10
    # (X, Y), (X_test, Y_test) = cifar10.load_data(dirname="/home/hamza/meh/bk_fedora24/Documents/tflearn_example/cifar-10-batches-py")
    # X, Y = tflearn.data_utils.shuffle(X, Y)
    # Y = tflearn.data_utils.to_categorical(Y, 10)
    # Y_test = tflearn.data_utils.to_categorical(Y_test, 10)

    X, Y = data_handler.get_all_train_data()

    X, Y = tflearn.data_utils.shuffle(X, Y)

    #X = np.dstack((X[:, :128*128], X[:, 128*128:]))
    X = X[:, :128 * 128]

    #X = X/255.0

    #X = X.reshape([-1,128,128,2])
    X = X.reshape([-1, 128, 128, 1])

    Y = tflearn.data_utils.to_categorical(Y, classes)

    X_test, Y_test = data_handler.get_test_data()

    #X_test = np.dstack((X_test[:, :128*128], X_test[:, 128*128:]))
    X_test = X_test[:, :128 * 128]
    #X_test = X_test/255.0

    #X_test = X_test.reshape([-1,128,128,2])
    X_test = X_test.reshape([-1, 128, 128, 1])
    #network = tflearn.regression(net3(x),optimizer='adam',loss='categorical_crossentropy',learning_rate=0.001)
    #mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    #network = tflearn.regression(resnet1(x),optimizer='sgd',loss='categorical_crossentropy')
    network = tflearn.regression(resnet1(x, classes),
                                 optimizer='adam',
                                 loss='categorical_crossentropy')
    print(np.shape(X))
    print(np.shape(Y))
    print(network)

    if not os.path.exists('/tmp/tflearn/checkpoints'):
        os.makedirs('/tmp/tflearn/checkpoints')

    model = tflearn.DNN(network,
                        tensorboard_verbose=3,
                        checkpoint_path='/tmp/tflearn/checkpoints/',
                        best_checkpoint_path='best/',
                        best_val_accuracy=0.90)
    model.fit(X,
              Y,
              n_epoch=num_epochs,
              shuffle=True,
              validation_set=(X_test, Y_test),
              show_metric=True,
              batch_size=data_handler.mini_batch_size,
              run_id='mstar_cnn')
Example #33
X, Y = next(batch)

# train, test, _ = ,X
trainX, trainY = X, Y
testX, testY = X, Y #overfit for now

# Data preprocessing
# Sequence padding
# trainX = pad_sequences(trainX, maxlen=100, value=0.)
# testX = pad_sequences(testX, maxlen=100, value=0.)
# # Converting labels to binary vectors
# trainY = to_categorical(trainY, nb_classes=2)
# testY = to_categorical(testY, nb_classes=2)

# Network building
net = tflearn.input_data([None, width, height])
# net = tflearn.embedding(net, input_dim=10000, output_dim=128)
net = tflearn.lstm(net, 128, dropout=0.8)
net = tflearn.fully_connected(net, classes, activation='softmax')
net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate, loss='categorical_crossentropy')
# Training
model = tflearn.DNN(net, tensorboard_verbose=0)
model.load("tflearn.lstm.model")
while 1: #training_iters
  model.fit(trainX, trainY, n_epoch=100, validation_set=(testX, testY), show_metric=True,
          batch_size=batch_size)
  _y=model.predict(X)
model.save("tflearn.lstm.model")
print (_y)
print (y)
Example #34
allNames = [
    'Ground', 'Stair', 'Treetop', 'Block', 'Bar', 'Koopa', 'Koopa 2',
    'PipeBody', 'Pipe', 'Question', 'Coin', 'Goomba', 'CannonBody', 'Cannon',
    'Lakitu', 'Bridge', 'Hard Shell', 'SmallCannon', 'Plant', 'Waves', 'Hill',
    'Castle', 'Snow Tree 2', 'Cloud 2', 'Cloud', 'Bush', 'Tree 2', 'Bush 2',
    'Tree', 'Snow Tree', 'Fence', 'Bark', 'Flag', 'Mario'
]
actions = [
    'Ground', 'Stair', 'Treetop', 'Block', 'Bar', 'Koopa', 'Koopa 2',
    'PipeBody', 'Pipe', 'Question', 'Coin', 'Goomba', 'CannonBody', 'Cannon',
    'Lakitu', 'Bridge', 'Hard Shell', 'SmallCannon', 'Plant', 'Waves', 'Hill',
    'Castle', 'Snow Tree 2', 'Cloud 2', 'Cloud', 'Bush', 'Tree 2', 'Bush 2',
    'Tree', 'Snow Tree', 'Fence', 'Bark', 'Nothing'
]

networkInput = tflearn.input_data(shape=[None, 40, 15, len(allNames)])
conv = conv_2d(networkInput, 8, 4, activation='leaky_relu')
conv2 = conv_2d(conv, 16, 3, activation='leaky_relu')
conv3 = conv_2d(conv2, 32, 3, activation='leaky_relu')
fc = tflearn.fully_connected(conv3,
                             40 * 15 * len(actions),
                             activation='leaky_relu')
mapShape = tf.reshape(fc, [-1, 40, 15, len(actions)])
network = tflearn.regression(mapShape,
                             optimizer='adam',
                             metric='accuracy',
                             loss='mean_square',
                             learning_rate=0.004)
model = tflearn.DNN(network)
#model.load("testFull.tflearn")#smbANDtestFull
print("MODEL LOADED")
Example #35
import numpy as np
import tflearn
import csv

# Load CSV file, indicate that the first column represents labels
from tflearn.data_utils import load_csv
data, labelsA = load_csv('clus_tj_training_3.2_2.5.csv', target_column=0,
                       categorical_labels=True, n_classes=2)
input, labelsB = load_csv('clus_tj_test_3.2_2.5.csv', target_column=0,
                        categorical_labels=True, n_classes=2)

# Build neural network
#Data has 5 features
net = tflearn.input_data(shape=[None, 5]) 
net = tflearn.fully_connected(net, 32)
dropout1 = tflearn.dropout(net, 0.5)
net = tflearn.fully_connected(dropout1, 2, activation='softmax', bias=False, weights_init='truncated_normal')

net = tflearn.regression(net)

# Define model
model = tflearn.DNN(net)
# Start training (apply gradient descent algorithm)
model.fit(data, labelsA, n_epoch=10, batch_size=1, show_metric=True, validation_set=0.1)

# Test Data
predict = model.predict(input)

with open('/Users/Anne-Katherine/Desktop/ATLAS_ML/clus_tj_out_25_32.csv','wb') as f:
	csv_writer = csv.writer(f, delimiter=',')
	for x in predict:
Example #36
# reading the crime data for getting the info about a crime
with open('crimes_info.json') as file:
    crime_data = json.load(file)

# reading the help file
with open('help_data.txt','r') as f:
    help_data = f.read()

# importing the processed data
with open("saved_data.pickle", "rb") as f:
    words, labels, training_questions, training_tag = pickle.load(f)

# creating a neural Network
tensorflow.reset_default_graph()

network = tflearn.input_data(shape=[None, len(words)])
network = tflearn.fully_connected(network, len(labels) + 6)
network = tflearn.fully_connected(network, len(labels) + 6)
network = tflearn.fully_connected(network, len(labels), activation="softmax")
network = tflearn.regression(network)

nn_model = tflearn.DNN(network)

# loading the pre_trained model
nn_model.load("nn_model.tflearn")

# creating stem object
stemmer = LancasterStemmer()

# converting the query into a word vector
def query_to_word_vector(query, words):
Example #37
    with tf.Graph().as_default():
        y = Y[d].astype(np.float32)
        y = y.reshape(-1, 1)
        y = to_categorical(y, nb_classes=2)  # Convert label to categorical to train with tflearn

        # Train and test data
        X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.1, random_state=0)

        # Standardize the data
        sc = StandardScaler()
        sc.fit(X_train)
        X_train_sd = sc.transform(X_train)
        X_test_sd = sc.transform(X_test)

        # Model
        input_layer = tflearn.input_data(shape=[None, 100], name='input')
        dense1 = tflearn.fully_connected(input_layer, 128, activation='linear', name='dense1')
        dropout1 = tflearn.dropout(dense1, 0.8)
        dense2 = tflearn.fully_connected(dropout1, 128, activation='linear', name='dense2')
        dropout2 = tflearn.dropout(dense2, 0.8)
        output = tflearn.fully_connected(dropout2, 2, activation='softmax', name='output')
        regression = tflearn.regression(output, optimizer='adam', loss='categorical_crossentropy', learning_rate=.001)

        # Define model with checkpoint (autosave)
        model = tflearn.DNN(regression, tensorboard_verbose=3)

        # load the previously trained model
        model.load('Saved_Models/Fully_Connected/dense_fully_connected_dropout_5645_{}.tfl'.format(d))

        ''''# Train model with checkpoint every epoch and every 500 steps
        model.fit(X_train_sd, Y_train, n_epoch=n_epoch, show_metric=True, snapshot_epoch=True, snapshot_step=500,
Example #38
import tensorflow as tf
import librosa
import tflearn
import os
import numpy as np

width = 120  # mfcc features
height = 20

script_patch = os.path.dirname(os.path.abspath(__file__))
net = tflearn.input_data([None, height, width])
net = tflearn.lstm(net, 128, dropout=0.8)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net,
                         optimizer='adam',
                         learning_rate=500,
                         loss='categorical_crossentropy')

model = tflearn.DNN(net)
model.load("tflearn.lstm.model")


def mfcc_generator(wave_path, PAD_WIDTH=width):
    wave, sr = librosa.load(wave_path, mono=True)
    mfccs = librosa.feature.mfcc(y=wave, sr=sr, n_mfcc=height)
    mfccs = np.pad(mfccs, ((0, 0), (0, PAD_WIDTH - len(mfccs[0]))),
                   mode='constant')
    return mfccs
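# Hypothetical inference call wiring the pieces above together (the wav path
# is an assumption; the reshape matches the [None, height, width] input layer):
demo_mfcc = mfcc_generator("sample.wav")  # -> (height, width) array
print(model.predict(demo_mfcc.reshape(1, height, width)))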


sounds = []
Example #39
def chat(inp):
    f = open("language.txt", "r")
    training_model = False
    language=f.read()
    f.close()
    if(language == "Arabic"):
       arabic_mode = True
    else:
        arabic_mode = False    
    if(arabic_mode):
        with open("intents-arabic.json",  encoding='utf8') as file:
            data = json.load(file)
    else:
        with open("intents.json",  encoding='utf8') as file:
            data = json.load(file)        
        

    if(not training_model): 
        #make error here if you want to update
        if(arabic_mode):
            with open("data-arabic.pickle","rb") as f:
                words, labels, training,output = pickle.load(f)
        else:
            with open("data.pickle","rb") as f:
                words, labels, training,output = pickle.load(f)        
            

    else:
        words = []
        labels = []
        docs_x = []
        docs_y = []

        for intent in data["intents"]:
            for pattern in intent["patterns"]:
                wrds = nltk.word_tokenize(pattern)
                words.extend(wrds)
                docs_x.append(wrds)
                docs_y.append(intent["tag"])
            if intent["tag"] not in labels:
                labels.append(intent["tag"])

        # Part 2        

        words = [stemmer.stem(w.lower()) for w in words if w != "?"] #stemmer and lowercase
        words = sorted(list(set(words))) #remove duplicates and list to return it as list and sort words

        labels = sorted(labels)

        training = []
        output = []

        out_empty = [0 for _ in range(len(labels))]

        for x, doc in enumerate(docs_x):
            bag = []
            wrds = [stemmer.stem(w.lower()) for w in doc] #stemmer and lowercase
            for w in words:
                if w in wrds:
                    bag.append(1)
                else:
                    bag.append(0)

            output_row = out_empty[:]
            output_row[labels.index(docs_y[x])] = 1

            training.append(bag)
            output.append(output_row)

        training =  np.array(training)
        output = np.array(output)

        # Part 3


        if(arabic_mode):
            with open("data-arabic.pickle","wb") as f:
                pickle.dump((words, labels, training,output),f)
        else:
            with open("data.pickle","wb") as f:
                pickle.dump((words, labels, training,output),f)        
        

    tf.reset_default_graph()

    net = tflearn.input_data(shape=[None, len(training[0])])

    net = tflearn.fully_connected(net, 320) #320 neurons
    net = tflearn.fully_connected(net, 160) #160 neurons
    net = tflearn.fully_connected(net, 80) #80 neurons
    net = tflearn.fully_connected(net, len(output[0]), activation="softmax") 
    net = tflearn.regression(net)

    model = tflearn.DNN(net)
    
    if(not training_model):
        if(arabic_mode):
            model.load("model-arabic.tflearn")
        else:    
            model.load("model.tflearn")
    else:    
        model.fit(training, output, n_epoch=2000, batch_size=32, show_metric=True)
        if(arabic_mode):
            model.save("model-arabic.tflearn")
        else:    
            model.save("model.tflearn")
    
    while True:
        if inp.lower() == "quit":
            break

        results = model.predict([bag_of_words(inp,words)])[0]
        results_index = np.argmax(results) #Get Index of Max Prediction
        tag = labels[results_index]
        if results[results_index] > 0.7:  
            for tg in data["intents"]:
                if tg["tag"] == tag:
                    responses = tg["responses"]
            response = random.choice(responses)
            return 0, response
        else:  
            if(arabic_mode):  
                response = "انا لا افهم"
            else:
                response = "Sorry, I don't understand ! "    
            return 0, response
Example #40
    def __init__(self, observation_space, action_space, scope=None,
                 learning_rate=1e-04,
                 hidden_sizes=[],
                 act_fn=tf.nn.relu,
                 filter_obs=False,
                 seed=None,
                 entropy_coeff=None,
                 weight_decay=0.0):
        """
        observation_space:
        n_logits:
        scope:
        learning_rate:
        hidden_sizes:
        train_type:
        act_fn:
        filter_obs:
        seed:
        entropy_coeff:
        """
        obs_dim = _get_space_size(observation_space)
        n_logits = _get_space_size(action_space)
        self.graph = tf.Graph()
        scope = '' if scope is None else scope
        self.entropy_coeff = entropy_coeff
        self.learning_rate = learning_rate
        if seed is not None:
            np.random.seed(seed)

        with self.graph.as_default():

            with tf.variable_scope(scope):

                self.params = []
                self.obs_input = tflearn.input_data(shape=[None, obs_dim],
                                                    name='obs_input')
                self.obs_dim = tuple([-1, obs_dim])

                if filter_obs:
                    with tf.variable_scope('obfilter'):

                        self.rms_sum = tf.get_variable(
                            dtype=tf.float64,
                            shape=obs_dim,
                            initializer=tf.constant_initializer(1.0),
                            name='runningsum', trainable=False)
                        self.rms_sumsq = tf.get_variable(
                            dtype=tf.float64,
                            shape=obs_dim,
                            initializer=tf.constant_initializer(1.0),
                            name='runningsumsq', trainable=False)
                        self.rms_count = tf.get_variable(
                            dtype=tf.float64,
                            shape=(),
                            initializer=tf.constant_initializer(1.0),
                            name='count', trainable=False)
                        mean = tf.to_float(self.rms_sum / self.rms_count)
                        var = tf.to_float(self.rms_sumsq / self.rms_count)
                        var = var - tf.square(mean)
                        var = tf.maximum(var, 1e-2)
                        std = tf.sqrt(var)
                        self.params.extend([self.rms_sum, self.rms_sumsq,
                                            self.rms_count])

                    prev = tf.clip_by_value((self.obs_input - mean) / std,
                                            -5.0, 5.0)
                else:
                    prev = self.obs_input

                init = tflearn.initializations.truncated_normal(seed=seed)
                for idx, size in enumerate(hidden_sizes):
                    prev = tflearn.fully_connected(prev, size,
                                                   name='hidden_layer%d' % idx,
                                                   activation=act_fn,
                                                   weights_init=init,
                                                   weight_decay=weight_decay)
                    self.params.extend([prev.W, prev.b])

                self.logits = tflearn.fully_connected(prev, n_logits,
                                                      name='logits',
                                                      weights_init=init,
                                                      weight_decay=weight_decay)
                self.params.extend([self.logits.W, self.logits.b])
Example #41
    86(11):2278-2324, November 1998.
Links:
    [MNIST Dataset] http://yann.lecun.com/exdb/mnist/
"""
from __future__ import division, print_function, absolute_import

import numpy as np
import matplotlib.pyplot as plt
import tflearn

# Data loading and preprocessing
import tflearn.datasets.mnist as mnist
X, Y, testX, testY = mnist.load_data(one_hot=True)

# Building the encoder
encoder = tflearn.input_data(shape=[None, 784])
encoder = tflearn.fully_connected(encoder, 256)
encoder = tflearn.fully_connected(encoder, 64)

# Building the decoder
decoder = tflearn.fully_connected(encoder, 256)
decoder = tflearn.fully_connected(decoder, 784, activation='sigmoid')

# Regression, with mean square error
net = tflearn.regression(decoder, optimizer='adam', learning_rate=0.001,
                         loss='mean_square', metric=None)

# Training the auto encoder
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(X, X, n_epoch=20, validation_set=(testX, testX),
          run_id="auto_encoder", batch_size=1)
Example #42
validation_portion = 0.1  # will use later

# Split our data into proportional chunks
trainX, trainY = data[:training_size], labels[:training_size]
testX, testY = data[training_size:], labels[training_size:]

# convert to one-hot?
trainY = to_categorical(trainY, nb_classes=11)
testY = to_categorical(testY, nb_classes=11)

# The Network

# as many inputs as there are columns... I don't know how to pick this..
number_of_inputs = len(trainX[0])

net = tflearn.input_data([None, number_of_inputs])
net = tflearn.embedding(net, input_dim=number_of_inputs, output_dim=121)
net = tflearn.lstm(net, 121, dropout=0.8)
net = tflearn.fully_connected(net, 11, activation='softmax')
net = tflearn.regression(net,
                         optimizer='adam',
                         learning_rate=0.01,
                         loss='categorical_crossentropy')

model = tflearn.DNN(net)
model.fit(trainX,
          trainY,
          validation_set=(testX, testY),
          show_metric=True,
          batch_size=32)
Example #43
        Aux.append([])
        Aux[i] = X[i][0]

    X = Aux
    Aux = []
    for i in range(len(Xtest)):
        Aux.append([])
        Aux[i] = Xtest[i][0]

    Xtest = Aux

except:
    print("Base corrompida ou inexistente, verifique")
    exit()

encoder = tflearn.input_data(shape=[None, 13, 216])
encoder = tflearn.dropout(encoder, 0.6)
encoder = tflearn.layers.recurrent.simple_rnn(
    encoder, 128, return_seq=True, activation='relu')  #,dynamic=True
encoder = tflearn.layers.recurrent.simple_rnn(
    encoder, 128, return_seq=False,
    activation='relu')  #,dynamic=True #,dropout=0.5
encoder = tflearn.dropout(encoder, 0.6)
encoder = tflearn.fully_connected(encoder, 200, activation='elu')

net = tflearn.dropout(encoder, 0.6)
# number_classes is the number of speakers for this layer; 'softmax' is the
# name (or function) of its activation -- the default is 'linear'.
net = tflearn.fully_connected(net, number_classes, activation='softmax')
# A regression layer (following the output) is required as part of the
# framework's training operations.
net = tflearn.regression(
示例#44
0
#if not os.path.isfile(path):
#    urllib.request.urlretrieve("https://raw.githubusercontent.com/tflearn/tflearn.github.io/master/resources/shakespeare_input.txt", path)

maxlen = 25

char_idx = None
if os.path.isfile(char_idx_file):
    print('Loading previous char_idx')
    char_idx = pickle.load(open(char_idx_file, 'rb'))

# Reuse the cached char_idx (if any) so character indices stay stable
X, Y, char_idx = \
    textfile_to_semi_redundant_sequences(path, seq_maxlen=maxlen, redun_step=10,
                                         pre_defined_char_idx=char_idx)
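# X holds one-hot windows of maxlen characters, Y the one-hot character that
# follows each window, and char_idx the char -> integer index dictionary.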

pickle.dump(char_idx, open(char_idx_file, 'wb'))

g = tflearn.input_data([None, maxlen, len(char_idx)])
g = tflearn.lstm(g, 128, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 128, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 128)
g = tflearn.dropout(g, 0.5)
g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
g = tflearn.regression(g,
                       optimizer='adam',
                       loss='categorical_crossentropy',
                       learning_rate=0.001)

m = tflearn.SequenceGenerator(g,
                              dictionary=char_idx,
                              seq_maxlen=maxlen,
示例#45
0
# data1.shape
# #(301, 10)
# data2.shape
# #(301, 29)

# Build the Neural Network

num_stresses = 10
num_kinase = 29
num_transcription_factors = 200
num_genes = 6692

# Build neural network
# Input variables (10)
# Which Node to dropout (32)
stress = tflearn.input_data(shape=[None, num_stresses])
kinase_deletion = tflearn.input_data(shape=[None, num_kinase])

# This is the layer I want to perform selective dropout on: a 1x32 vector of
# ones and zeros should specify which of the nodes output zero.
kinase = tflearn.fully_connected(stress, num_kinase, activation='relu')
kinase_dropout = tf.multiply(kinase, kinase_deletion)  # tf.mul was renamed tf.multiply in TF 1.0

transcription_factor = tflearn.fully_connected(kinase_dropout,
                                               num_transcription_factors,
                                               activation='relu')

gene = tflearn.fully_connected(transcription_factor,
                               num_genes,
                               activation='linear')
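
# A minimal sketch (not part of the original snippet) of how the deletion
# mask would be fed; `stress_batch` and `gene_targets` are illustrative names.
import numpy as np

net = tflearn.regression(gene, optimizer='adam', loss='mean_square')
model = tflearn.DNN(net)

stress_batch = np.random.rand(8, num_stresses)  # 8 hypothetical samples
gene_targets = np.random.rand(8, num_genes)
deletion_mask = np.ones((8, num_kinase))
deletion_mask[:, 3] = 0.0                       # silence kinase node 3

# With two input_data layers, TFLearn takes the feeds as a list,
# in the order the inputs were declared:
model.fit([stress_batch, deletion_mask], gene_targets, n_epoch=1)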
示例#46
0
from tflearn.data_utils import VocabularyProcessor
import sys

# get all arguments
games_to_predict = sys.argv[1:]

if len(games_to_predict) == 0:
    print("Type games to predict as arguments when you run the script")
    exit()

# create and load vocal vector model
word_processor = VocabularyProcessor(15)
word_processor.restore("wordprocessor")

# create and load ML model
net = tflearn.input_data([None, 15])
net = tflearn.embedding(net, input_dim=10000, output_dim=128)
net = tflearn.lstm(net, 128, dropout=.8)
net = tflearn.fully_connected(net, 11, activation='softmax')
net = tflearn.regression(net) # adam, 0.001

model = tflearn.DNN(net, tensorboard_verbose=0)
model.load("model.tfl")

# use labels for output
ratings = [
    'Unbearable',  # 72
    'Disaster', # 4
    'Awful', # 664
    'Painful', # 340
    'Bad', # 1269
示例#47
0
import tflearn
import os
from tflearn import fully_connected, regression, input_data

FOLDER = '/home/jer/Workspace/cs5600/project1/trained_nets/'
NAME = 'ANN_Bee2_1S_3Layer'
SAVE_POINT = f'{FOLDER}{NAME}.tfl'
CHECK_POINT = SAVE_POINT + '.meta'

beeX, beeY = tflearn.data_utils.image_preloader('BEE2_1S/class_labels.txt',
                                                image_shape=(90, 90),
                                                mode='file',
                                                categorical_labels=True,
                                                normalize=True)

input_layer = input_data(shape=[None, 90, 90, 3])
fc_layer_1 = fully_connected(input_layer,
                             8100,
                             activation='relu',
                             name='fc_layer_1')
fc_layer_2 = fully_connected(fc_layer_1,
                             500,
                             activation='softmax',
                             name='fc_layer_2')
fc_layer_3 = fully_connected(fc_layer_2,
                             2,
                             activation='softmax',
                             name='fc_layer_3')
network = regression(fc_layer_3,
                     optimizer='sgd',
                     loss='categorical_crossentropy',
示例#48
0
    a_x = np.load(data_folder + "0_all_x.npy")
    a_y = np.load(data_folder + "0_all_y.npy")
    b_x = np.load(data_folder + "1_all_x.npy")
    b_y = np.load(data_folder + "1_all_y.npy")
    c_x = np.load(data_folder + "2_all_x.npy")
    c_y = np.load(data_folder + "2_all_y.npy")
    d_x = np.load(data_folder + "3_all_x.npy")
    d_y = np.load(data_folder + "3_all_y.npy")
    all_x = np.concatenate((a_x, b_x, c_x, d_x))
    all_y = np.concatenate((a_y, b_y, c_y, d_y))
    train_x, train_y = unison_shuffled_copies(all_x, all_y)
valid_x = np.load(data_folder + "random_examples/valid_x.npy")
valid_y = np.load(data_folder + "random_examples/valid_y.npy")
test_x = np.load(data_folder + "random_examples/test_x.npy")
test_y = np.load(data_folder + "random_examples/test_y.npy")
features_cols = test_x[0].shape[0]
features_rows = test_x[0].shape[1]
parameter_size = test_y[0].shape[0]
net = tflearn.input_data([None, features_cols, features_rows])
net = tflearn.lstm(net, number_hidden, dropout=0.8)
net = tflearn.fully_connected(net, parameter_size, activation='relu')
net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
                         loss='mean_square')
model = tflearn.DNN(net, tensorboard_verbose=0,
                    checkpoint_path=(data_folder + 'model.tfl.ckpt'))
if training:
    model.fit(train_x, train_y, validation_set=(valid_x, valid_y),
              show_metric=True, batch_size=batch_size, n_epoch=training_iters,
              snapshot_epoch=True, snapshot_step=1000, run_id='granulator_lstm')
else:
    model.load(data_folder + checkpoint)
feature_distances = []
parameter_distances = []
predicted_patches = []
示例#49
0
            tf.nn.softmax_cross_entropy_with_logits(logits=y_pred_ac,
                                                    labels=y_true_ac))
        #loss_at = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_pred_at, labels=y_true_at))
        #loss_at = tf.scalar_mul(3,loss_at)
        loss = tf.add_n([loss_tt, loss_ac])
        return loss


relu_weights_init = tflearn.initializations.xavier(seed=20171011)
relu_bias_init = tf.contrib.keras.initializers.Constant(value=0.001)
relu_regularizer = 'L2'
softmax_regularizer = 'L2'
softmax_weights_init = tflearn.initializations.xavier(seed=20171013)
softmax_bias_init = 'zeros'

input = tflearn.input_data(shape=[None, 121], name='input')

shared_hl_1 = tflearn.fully_connected(input,
                                      128,
                                      activation='prelu',
                                      bias=True,
                                      weights_init=relu_weights_init,
                                      regularizer=None,
                                      bias_init=relu_bias_init,
                                      name='shared_hl_1')
shared_hl_2 = tflearn.fully_connected(shared_hl_1,
                                      128,
                                      activation='prelu',
                                      bias=True,
                                      weights_init=relu_weights_init,
                                      regularizer=None,
示例#50
0
                  random_state=None,
                  shrinking=True,
                  tol=0.001,
                  verbose=False)
    elif classifier == "linearsvc":
        clf = LinearSVC()
    elif classifier == "knn":
        clf = KNeighborsClassifier(5)
    elif classifier == 'decisiontree':
        clf = DecisionTreeClassifier()
    elif classifier == 'randomforest':
        clf = RandomForestClassifier()
    elif classifier == 'mlp':
        labels_train = hot_enconding(labels_train)
        # Building deep neural network
        net = tflearn.input_data(shape=[None, X_train.shape[1]])
        net = tflearn.fully_connected(net,
                                      X_train.shape[1] // 2,
                                      activation='relu')
        net = tflearn.fully_connected(net,
                                      X_train.shape[1] // 3,
                                      activation='relu')
        net = tflearn.fully_connected(net, 2, activation='softmax')
        net = tflearn.regression(net)
        # Training
        clf = tflearn.DNN(net, tensorboard_verbose=0)

    clf.fit(X_train, labels_train)

    fprs = []
    fnrs = []
示例#51
0
def train():
    global stemmer, data, words, labels, training, output
    stemmer = LancasterStemmer()

    with open("intents.json") as file:
        data = json.load(file)

    try:
        file = open('reporter.bin', 'rb')
        changes = pickle.load(file)
        if changes:
            # intents changed: raise so the bare except rebuilds the data
            raise ValueError("intents changed")
        else:
            with open('data.pickle', 'rb') as f:
                words, labels, training, output = pickle.load(f)

    except:

        words = []
        labels = []
        docs_x = []
        docs_y = []

        for intent in data['intents']:
            for pattern in intent["patterns"]:
                wrds = nltk.word_tokenize(pattern)
                words.extend(wrds)
                docs_x.append(wrds)
                docs_y.append(intent['tag'])

            if intent['tag'] not in labels:
                labels.append(intent['tag'])

        words = [stemmer.stem(w.lower()) for w in words]
        words = sorted(list(set(words)))

        training = []
        output = []

        out_empty = [0 for _ in range(len(labels))]

        for x, doc in enumerate(docs_x):
            bag = []
            wrds = [stemmer.stem(w) for w in doc if w != '?']

            for w in words:
                if w in wrds:
                    bag.append(1)
                else:
                    bag.append(0)

            output_row = out_empty[:]
            output_row[labels.index(docs_y[x])] = 1

            training.append(bag)
            output.append(output_row)

        training = numpy.array(training)
        output = numpy.array(output)

        with open('data.pickle', 'wb') as f:
            pickle.dump((words, labels, training, output), f)

    tensorflow.reset_default_graph()

    net = tflearn.input_data(shape=[None, len(training[0])])
    net = tflearn.fully_connected(net, 8)
    net = tflearn.fully_connected(net, 8)
    net = tflearn.fully_connected(net, len(output[0]), activation='softmax')
    net = tflearn.regression(net)

    global model
    model = tflearn.DNN(net)

    try:
        if not changes:
            model.load('model.tflearn')
        else:
            # intents changed: raise so the bare except retrains the model
            raise ValueError("intents changed")
    except:
        model.fit(training,
                  output,
                  n_epoch=1000,
                  batch_size=8,
                  show_metric=False)
        model.save('model.tflearn')
示例#52
0
"""
Created on Fri Sep 20 22:33:05 2019

@author: rebecca
"""

import tflearn
import speech_data

lr = 0.001
epochs = 30000

batch = word_batch = speech_data.mfcc_batch_generator(64)
x, y = next(batch)
trainX, trainY = x, y
testX, testY = x, y  # the demo reuses the same batch for train and test
net = tflearn.input_data([None, 20, 80])
net = tflearn.lstm(net, 128, dropout=0.8)
net = tflearn.fully_connected(net, 10, activation='softmax')
net = tflearn.regression(net,
                         optimizer='adam',
                         learning_rate=lr,
                         loss='categorical_crossentropy')
model = tflearn.DNN(net, tensorboard_verbose=0)

while True:  # keep refining the model, ten epochs per round
    model.fit(trainX,
              trainY,
              n_epoch=10,
              validation_set=(testX, testY),
              show_metric=True,
              batch_size=64)
示例#53
0
    with tf.variable_scope('Generator', reuse=reuse):
        x = tflearn.fully_connected(x, 256, activation='relu')
        x = tflearn.fully_connected(x, image_dim, activation='sigmoid')
        return x


# Discriminator
def discriminator(x, reuse=False):
    with tf.variable_scope('Discriminator', reuse=reuse):
        x = tflearn.fully_connected(x, 256, activation='relu')
        x = tflearn.fully_connected(x, 1, activation='sigmoid')
        return x


# Build Networks
gen_input = tflearn.input_data(shape=[None, z_dim], name='input_noise')
disc_input = tflearn.input_data(shape=[None, 784], name='disc_input')

gen_sample = generator(gen_input)
disc_real = discriminator(disc_input)
disc_fake = discriminator(gen_sample, reuse=True)

# Define Loss
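# (vanilla GAN minimax objective: the discriminator maximizes
#  log D(x) + log(1 - D(G(z))); the generator maximizes log D(G(z)))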
disc_loss = -tf.reduce_mean(tf.log(disc_real) + tf.log(1. - disc_fake))
gen_loss = -tf.reduce_mean(tf.log(disc_fake))

# Build Training Ops for both Generator and Discriminator.
# Each network optimization should only update its own variable, thus we need
# to retrieve each network variables (with get_layer_variables_by_scope) and set
# 'placeholder=None' because we do not need to feed any target.
gen_vars = tflearn.get_layer_variables_by_scope('Generator')
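disc_vars = tflearn.get_layer_variables_by_scope('Discriminator')

# A sketch of how the training ops are typically completed, following the
# pattern of TFLearn's GAN example (batch size and op names assumed): each
# regression op trains only its own scope's variables and feeds no target.
gen_model = tflearn.regression(gen_sample, placeholder=None, optimizer='adam',
                               loss=gen_loss, trainable_vars=gen_vars,
                               batch_size=64, name='target_gen', op_name='GEN')
disc_model = tflearn.regression(disc_real, placeholder=None, optimizer='adam',
                                loss=disc_loss, trainable_vars=disc_vars,
                                batch_size=64, name='target_disc', op_name='DISC')
# DNN gathers every registered train op, so both networks update together.
gan = tflearn.DNN(gen_model)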
示例#54
0
    def __init__(self, sess, num_test_updates=20, inner_lr=0.001):
        self.num_updates = 10
        self.num_test_updates = num_test_updates
        self.meta_lr_val = 0.0001 / 2

        self.meta_lr = tf.placeholder(tf.float32, shape=[])

        self.inner_lr = 0.001
        self.inner_lr_test = 0.001

        self.sess = sess

        # self.inputA = tflearn.input_data(shape = [None, image_size, image_size, 3])
        # self.outputA = tflearn.input_data(shape = [None, image_size, image_size, 1])
        # self.inputB = tflearn.input_data(shape = [None, image_size, image_size, 3])
        # self.outputB = tflearn.input_data(shape = [None, image_size, image_size, 1])

        self.inputA = tflearn.input_data(shape=[None, None, None, 3])
        self.outputA = tflearn.input_data(shape=[None, None, None, 1])
        self.inputB = tflearn.input_data(shape=[None, None, None, 3])
        self.outputB = tflearn.input_data(shape=[None, None, None, 1])

        # with tf.variable_scope("foo", reuse=False):
        # 	self.task_losses, self.task_outputs, _, self.groupA_loss = CNNmodel.buildMetaBlockV1_old(self.inputA, self.outputA, self.inputB, self.outputB, inner_step = self.num_updates, inner_lr = self.inner_lr)
        # with tf.variable_scope("foo", reuse=True):
        # 	self.task_losses_test, self.task_test_outputs, self.debug_inner_output, self.groupA_loss_test = CNNmodel.buildMetaBlockV1_old(self.inputA, self.outputA, self.inputB, self.outputB, inner_step = self.num_test_updates, inner_lr = self.inner_lr_test, layer_st = 10, layer_ed = 13)

        with tf.variable_scope("foo", reuse=False):
            self.baseline_output_, _ = CNNmodel.build_simple_net_512_4_V1(
                self.inputA, prefix="first_", deconv=True)

        self.baseline_output = tf.nn.softmax(self.baseline_output_)
        self.baseline_loss = CrossEntropy(self.baseline_output_, self.outputA)
        self.baseline_train_op = tf.train.AdamOptimizer(
            learning_rate=self.meta_lr).minimize(self.baseline_loss)

        # optimizer = tf.train.AdamOptimizer(self.meta_lr)
        # self.gvs = gvs = optimizer.compute_gradients(self.task_losses[self.num_updates-1])
        # self.metatrain_op = optimizer.apply_gradients(gvs)

        ### self.metatrain_op = tf.train.AdamOptimizer(learning_rate=self.meta_lr).minimize(self.task_losses[self.num_updates-1])

        #self.metatrain_groupA_op = tf.train.AdamOptimizer(learning_rate=self.meta_lr).minimize(self.groupA_loss)

        #self.metatrain_op = tf.train.AdamOptimizer(learning_rate=self.meta_lr).minimize(self.task_losses[self.num_updates-1] + (self.task_losses[self.num_updates-1] - self.task_losses[self.num_updates-2]))

        self.sess.run(tf.global_variables_initializer())
        self.saver = tf.train.Saver(max_to_keep=100)

        self.summary_loss = []
        self.test_loss = tf.placeholder(tf.float32)
        self.train_loss = tf.placeholder(tf.float32)
        self.lr = tf.placeholder(tf.float32)

        self.summary_loss.append(tf.summary.scalar('test_loss',
                                                   self.test_loss))
        self.summary_loss.append(
            tf.summary.scalar('train_loss', self.train_loss))
        self.summary_loss.append(tf.summary.scalar('lr', self.lr))

        self.merged_summary = tf.summary.merge_all()
示例#55
0
import os
import pickle
from six.moves import urllib
import past

import tflearn
from tflearn.data_utils import *

path = "../../Data/Parsed/new.txt"

maxlen = 30

X, Y, char_dict = \
    textfile_to_semi_redundant_sequences(path, seq_maxlen=maxlen, redun_step=3)

g = tflearn.input_data([None, maxlen, len(char_dict)])
g = tflearn.lstm(g, 512, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 512, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 512)
g = tflearn.dropout(g, 0.5)
g = tflearn.fully_connected(g, len(char_dict), activation='softmax')
g = tflearn.regression(g,
                       optimizer='adam',
                       loss='categorical_crossentropy',
                       learning_rate=0.001)

m = tflearn.SequenceGenerator(g,
                              dictionary=char_dict,
                              seq_maxlen=maxlen,
示例#56
0
from __future__ import division, print_function, absolute_import

import tflearn
import tflearn.data_utils as du

# Data loading and preprocessing
import tflearn.datasets.mnist as mnist

X, Y, testX, testY = mnist.load_data(one_hot=True)
X = X.reshape([-1, 28, 28, 1])
testX = testX.reshape([-1, 28, 28, 1])
X, mean = du.featurewise_zero_center(X)
testX = du.featurewise_zero_center(testX, mean)

# Building Residual Network
net = tflearn.input_data(shape=[None, 28, 28, 1])
net = tflearn.conv_2d(net, 64, 3, activation='relu', bias=False)
net = tflearn.batch_normalization(net)
# Residual blocks
net = tflearn.deep_residual_block(net, 3, 64)
net = tflearn.deep_residual_block(net, 1, 128, downsample=True)
net = tflearn.deep_residual_block(net, 3, 128)
net = tflearn.deep_residual_block(net, 1, 256, downsample=True)
net = tflearn.deep_residual_block(net, 3, 256)
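# Global average pooling: a kernel spanning the full spatial extent collapses
# each feature map to a single value before the classifier.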
net_shape = net.get_shape().as_list()
k_size = [1, net_shape[1], net_shape[2], 1]
net = tflearn.avg_pool_2d(net, k_size, padding='valid', strides=1)
# Regression
net = tflearn.fully_connected(net, 10, activation='softmax')
sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=300)
net = tflearn.regression(net,
示例#57
0
        output_row = out_empty[:]
        output_row[labels.index(docs_y[x])] = 1

        training.append(bag)
        output.append(output_row)

    training = numpy.array(training)
    output = numpy.array(output)

    with open("data.pickle", "wb") as f:
        pickle.dump((words, labels, training, output), f)

tensorflow.reset_default_graph()

net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)

model = tflearn.DNN(net)

try:
    model.load("model.tflearn")
except:
    model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
    model.save("model.tflearn")


def bag_of_words(s, words):
示例#58
0
    def __init__(self, observation_space, action_space, scope=None,
                 learning_rate=1e-04,
                 hidden_sizes=[],
                 train_type='supervised',
                 act_fn=tf.nn.relu,
                 filter_obs=False,
                 seed=None,
                 learn_std=True,
                 entropy_coeff=None,
                 weight_decay=0.0):
        self.action_dim = _get_space_size(action_space)
        scope = '' if scope is None else scope
        super(GaussianPolicy, self).__init__(observation_space, action_space,
                                             scope=scope,
                                             learning_rate=learning_rate,
                                             hidden_sizes=hidden_sizes,
                                             train_type=train_type,
                                             act_fn=act_fn,
                                             filter_obs=filter_obs,
                                             seed=seed,
                                             entropy_coeff=entropy_coeff,
                                             weight_decay=weight_decay)

        with self.graph.as_default():

            with tf.variable_scope(scope):

                self.mean = self.logits

                self.log_std = tf.get_variable(
                    'logstd', initializer=tf.zeros(self.action_dim),
                    trainable=learn_std)
                self.params.append(self.log_std)
                self.act_in = tflearn.input_data(shape=[None, self.action_dim],
                                                 name='Actions')

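                # Diagonal-Gaussian log-density:
                #   log p(a) = -sum(log_std) - 0.5*sum(((a - mean)/std)^2)
                #              - 0.5 * D * log(2*pi)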
                zs = (self.act_in - self.mean) / tf.exp(self.log_std)
                self.log_likelihood = - tf.reduce_sum(self.log_std, axis=-1) - \
                    0.5 * tf.reduce_sum(tf.square(zs), axis=-1) - \
                    0.5 * self.action_dim * np.log(2 * np.pi)
                self.grad_log_prob = tf.gradients(self.log_likelihood,
                                                  self.params)
                ent = tf.log(np.sqrt(2 * np.pi * np.e, dtype=np.float32))
                entropy = self.log_std + ent
                self.avg_entropy = tf.reduce_mean(entropy, axis=-1)
                if self.train_type == 'reinforce':
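                    # REINFORCE: weight each action's log-likelihood by its
                    # advantage; minimizing the negated sum ascends this score.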
                    self.adv_var = tflearn.input_data(shape=[None, 1])
                    self.loss = tf.reduce_sum(tf.multiply(self.adv_var,
                                                          self.log_likelihood))
                    optimizer = tf.train.GradientDescentOptimizer(
                        self.learning_rate)
                    self.train_step = optimizer.minimize(
                        tf.negative(self.loss))
                elif self.train_type == 'supervised':
                    # Loss is sum of neg log likelihoods with optional entropy
                    # term. We also compute an avg_loss without the average
                    # entropy
                    self.loss = tf.reduce_sum(
                        tf.negative(self.log_likelihood))
                    # negloglikelihood = tf.negative(self.log_likelihood)
                    # self.loss = tf.reduce_sum(negloglikelihood)
                    if self.entropy_coeff not in [0.0, None]:
                        print('Coeff %f' % self.entropy_coeff)
                        self.loss -= self.entropy_coeff * tf.reduce_sum(entropy)
                    else:
                        print('No Entropy Regularization')
                    optimizer = tf.train.AdamOptimizer(self.learning_rate)
                    self.train_step = optimizer.minimize(self.loss)

                self.init_op = tf.global_variables_initializer()

        self.session = tf.Session(graph=self.graph)
        self.session.run(self.init_op)
示例#59
0
    learning applied to document recognition." Proceedings of the IEEE,
    86(11):2278-2324, November 1998.
Links:
    [MNIST Dataset] http://yann.lecun.com/exdb/mnist/
"""
from __future__ import division, print_function, absolute_import

import tflearn
import numpy as np

# Data loading and preprocessing
import tflearn.datasets.mnist as mnist
X, Y, testX, testY = mnist.load_data(one_hot=True)

# Building deep neural network
input_layer = tflearn.input_data(shape=[None, 784])
dense1 = tflearn.fully_connected(input_layer, 64, activation='tanh',
                                 regularizer='L2', weight_decay=0.001)
dropout1 = tflearn.dropout(dense1, 0.8)
dense2 = tflearn.fully_connected(dropout1, 64, activation='tanh',
                                 regularizer='L2', weight_decay=0.001)
dropout2 = tflearn.dropout(dense2, 0.8)
softmax = tflearn.fully_connected(dropout2, 10, activation='softmax')

# Regression using SGD with learning rate decay and Top-3 accuracy
sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
top_k = tflearn.metrics.Top_k(3)
net = tflearn.regression(softmax, optimizer=sgd, metric=top_k,
                         loss='categorical_crossentropy')

# Training
示例#60
0
    def model(self, mode="train", num_layers=1, cell_size=32, cell_type="BasicLSTMCell", embedding_size=20, learning_rate=0.0001,
              tensorboard_verbose=0, checkpoint_path=None):
        '''
        Build tensor specifying graph of operations for the seq2seq neural network model.

        mode = string, either "train" or "predict"
        cell_type = attribute of rnn_cell specifying which RNN cell type to use
        cell_size = size for the hidden layer in the RNN cell
        num_layers = number of RNN cell layers to use

        Return TFLearn model instance.  Use DNN model for this.
        '''
        assert mode in ["train", "predict"]

        checkpoint_path = checkpoint_path or ("%s%ss2s_checkpoint.tfl" % (self.data_dir or "", "/" if self.data_dir else ""))
        GO_VALUE = self.out_max_int + 1		# unique integer value used to trigger decoder outputs in the seq2seq RNN

        network = tflearn.input_data(shape=[None, self.in_seq_len + self.out_seq_len], dtype=tf.int32, name="XY")
        encoder_inputs = tf.slice(network, [0, 0], [-1, self.in_seq_len], name="enc_in")	# get encoder inputs
        encoder_inputs = tf.unstack(encoder_inputs, axis=1)					# transform into list of self.in_seq_len elements, each [-1]

        decoder_inputs = tf.slice(network, [0, self.in_seq_len], [-1, self.out_seq_len], name="dec_in")	# get decoder inputs
        decoder_inputs = tf.unstack(decoder_inputs, axis=1)					# transform into list of self.out_seq_len elements, each [-1]

        go_input = tf.multiply( tf.ones_like(decoder_inputs[0], dtype=tf.int32), GO_VALUE )
        decoder_inputs = [go_input] + decoder_inputs[: self.out_seq_len-1]	# insert "GO" as the first decoder input; drop the last

        feed_previous = not (mode=="train")

        if self.verbose > 3:
            print ("feed_previous = %s" % str(feed_previous))
            print ("encoder inputs: %s" % str(encoder_inputs))
            print ("decoder inputs: %s" % str(decoder_inputs))
            print ("len decoder inputs: %s" % len(decoder_inputs))

        self.n_input_symbols = self.in_max_int + 1		# default is integers from 0 to 9 
        self.n_output_symbols = self.out_max_int + 2		# extra "GO" symbol for decoder inputs

        if num_layers == 1:
            cell = getattr(rnn_cell, cell_type)(cell_size, state_is_tuple=True)
        else:
            # Build a fresh cell per layer: reusing one cell object across
            # layers would make them share weights (and errors on newer TF).
            cell = rnn_cell.MultiRNNCell(
                [getattr(rnn_cell, cell_type)(cell_size, state_is_tuple=True)
                 for _ in range(num_layers)])

        if self.seq2seq_model=="embedding_rnn":
            model_outputs, states = seq2seq.embedding_rnn_seq2seq(encoder_inputs,	# encoder_inputs: A list of 2D Tensors [batch_size, input_size].
                                                                  decoder_inputs,
                                                                  cell,
                                                                  num_encoder_symbols=self.n_input_symbols,
                                                                  num_decoder_symbols=self.n_output_symbols,
                                                                  embedding_size=embedding_size,
                                                                  feed_previous=feed_previous)
        elif self.seq2seq_model=="embedding_attention":
            model_outputs, states = seq2seq.embedding_attention_seq2seq(encoder_inputs,	# encoder_inputs: A list of 2D Tensors [batch_size, input_size].
                                                                        decoder_inputs,
                                                                        cell,
                                                                        num_encoder_symbols=self.n_input_symbols,
                                                                        num_decoder_symbols=self.n_output_symbols,
                                                                        embedding_size=embedding_size,
                                                                        num_heads=1,
                                                                        initial_state_attention=False,
                                                                        feed_previous=feed_previous)
        else:
            raise Exception('[TFLearnSeq2Seq] Unknown seq2seq model %s' % self.seq2seq_model)
            
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + "seq2seq_model", model_outputs)	# for TFLearn to know what to save and restore

        # model_outputs: list of the same length as decoder_inputs of 2D Tensors with shape [batch_size x output_size] containing the generated outputs.
        if self.verbose > 2: print ("model outputs: %s" % model_outputs)
        network = tf.stack(model_outputs, axis=1)		# shape [-1, n_decoder_inputs (= self.out_seq_len), num_decoder_symbols]
        if self.verbose > 2: print ("packed model outputs: %s" % network)
        
        if self.verbose > 3:
            all_vars = tf.get_collection(tf.GraphKeys.VARIABLES)
            print ("all_vars = %s" % all_vars)

        with tf.name_scope("TargetsData"):			# placeholder for target variable (i.e. trainY input)
            targetY = tf.placeholder(shape=[None, self.out_seq_len], dtype=tf.int32, name="Y")

        network = tflearn.regression(network, 
                                     placeholder=targetY,
                                     optimizer='adam',
                                     learning_rate=learning_rate,
                                     loss=self.sequence_loss, 
                                     metric=self.accuracy,
                                     name="Y")

        model = tflearn.DNN(network, tensorboard_verbose=tensorboard_verbose, checkpoint_path=checkpoint_path)
        return model
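
# A hypothetical driver for the model() method above, assuming a wrapper
# object `ts2s` exposing in_seq_len/out_seq_len and the loss/metric callbacks
# (every name beyond model() itself is an assumption):
model = ts2s.model(mode="train", num_layers=2, cell_size=64,
                   cell_type="BasicLSTMCell")
# XY packs each encoder sequence and its decoder sequence side by side,
# shape [batch, in_seq_len + out_seq_len]; Y holds the target sequences.
model.fit(XY, Y, n_epoch=10, batch_size=32)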