Code Example #1
        joblib.dump(sc, 'Saved_Models/Fully_Connected_n_epochs_{}/standard.pkl'.format(n_epoch))

        X_train_sd = sc.transform(X_train)
        X_test_sd = sc.transform(X_test)

        # Model
        input_layer = tflearn.input_data(shape=[None, 1391], name='input')
        dense1 = tflearn.fully_connected(input_layer, 128, activation='linear', name='dense1')
        dropout1 = tflearn.dropout(dense1, 0.8)
        dense2 = tflearn.fully_connected(dropout1, 128, activation='linear', name='dense2')
        dropout2 = tflearn.dropout(dense2, 0.8)
        output = tflearn.fully_connected(dropout2, 2, activation='softmax', name='output')
        regression = tflearn.regression(output, optimizer='adam', loss='categorical_crossentropy', learning_rate=.001)

        # Define model with checkpoint (autosave)
        model = tflearn.DNN(regression, tensorboard_verbose=3,
                            tensorboard_dir='Saved_Models/Fully_Connected_n_epochs_{}/'.format(d))

        # Train model with checkpoint every epoch and every 500 steps
        model.fit(X_train_sd, Y_train, n_epoch=n_epoch, show_metric=True, snapshot_epoch=True, snapshot_step=500,
                  run_id='model_and_weights_{}'.format(c + 1),
                  validation_set=(X_test_sd, Y_test), batch_size=batch_size)

        # Find the probability of outputs
        y_pred_prob = np.array(model.predict(X_test_sd))[:, 1]
        # Find the predicted class
        y_pred = np.where(y_pred_prob > 0.5, 1., 0.)
        # Predicted class is the 2nd column in Y_test
        Y_test_dia = Y_test[:, 1]

        acc = accuracy_score(Y_test_dia, y_pred) * 100
        errors = (y_pred != Y_test_dia).sum()
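        # Optional sketch (an assumption; accuracy_score above already implies
        # scikit-learn is available): the same probability vector also gives a
        # threshold-free metric.
        # from sklearn.metrics import roc_auc_score
        # auc = roc_auc_score(Y_test_dia, y_pred_prob)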
Code Example #2
File: untitled2.py Project: regouga/LN-MEIC-1819
# train_x contains the bag of words and train_y contains the label/category
train_x = list(training[:, 0])
train_y = list(training[:, 1])

# reset underlying graph data
tf.reset_default_graph()
# Build neural network
net = tflearn.input_data(shape=[None, len(train_x[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')
net = tflearn.regression(net)

# Define model and setup tensorboard
model = tflearn.DNN(net, tensorboard_dir='tflearn_logs')
# Start training (apply gradient descent algorithm)
model.fit(train_x, train_y, n_epoch=1000, batch_size=8, show_metric=True)
model.save('model.tflearn')

# let's test the model for a few sentences:
# the first two sentences are used for training, and the last two sentences are not present in the training data.
sent_1 = "what time is it?"
sent_2 = "I gotta go now"
sent_3 = "do you know the time now?"
sent_4 = "you must be a couple of years older then her!"

# a method that takes in a sentence and list of all words
# and returns the data in a form that can be fed to tensorflow

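# A minimal sketch of that helper (an assumption modeled on the similar chatbot
# snippets in Code Examples #14 and #19; it presumes nltk, numpy as np, and a
# stemmer named `stemmer` are already set up):
def bow(sentence, words):
    # tokenize and stem the sentence, then flag every known word that appears
    sentence_words = [stemmer.stem(w.lower()) for w in nltk.word_tokenize(sentence)]
    bag = [0] * len(words)
    for s in sentence_words:
        for i, w in enumerate(words):
            if w == s:
                bag[i] = 1
    return np.array(bag)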
Code Example #3
convnet = max_pool_2d(convnet, 2)

convnet = conv_2d(convnet, 64, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.8)

convnet = fully_connected(convnet, 2, activation='softmax')
convnet = regression(convnet,
                     optimizer='adam',
                     learning_rate=LR,
                     loss='categorical_crossentropy',
                     name='targets')

model = tflearn.DNN(convnet, tensorboard_dir='log')

if os.path.exists('{}.meta'.format(MODEL_NAME)):
    model.load(MODEL_NAME)
    print("model loaded!")

train = train_data[:-1000]
test = train_data[-1000:]

X = np.array([i[0] for i in train]).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
Y = [i[1] for i in train]

test_x = np.array([i[0] for i in test]).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
test_y = [i[1] for i in test]

model.fit({'input': X}, {'targets': Y},
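# The fit() call above is cut off in this snippet; a plausible continuation,
# mirroring the complete call in Code Example #13 (the epoch count here is an
# assumption):
#           n_epoch=10,
#           validation_set=({'input': test_x}, {'targets': test_y}),
#           snapshot_step=500, show_metric=True, run_id=MODEL_NAME)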
Code Example #4
File: linear_main.py Project: BellMus/tagmachine
g = tflearn.input_data([None, 120])
g = tflearn.embedding(g, input_dim=10000, output_dim=hidden_dim)

g = tflearn.fully_connected(g, hidden_dim, activation='tanh')
g = tflearn.dropout(g, 0.3)

# g = tflearn.lstm(g, 128, dynamic=True)
# g = tflearn.dropout(g, 0.3)

g = tflearn.fully_connected(g, 120, activation='softmax')
g = tflearn.regression(g,
                       optimizer='adam',
                       loss='categorical_crossentropy',
                       learning_rate=0.005)

m = tflearn.DNN(g, clip_gradients=5.0)

print("starting training.")

for i in range(30):
    m.fit(trainX,
          trainY,
          validation_set=(validX, validY),
          show_metric=True,
          batch_size=32,
          n_epoch=2,
          run_id=str(i))
    print("-- TESTING...")
    q = m.predict(np.reshape(trainX[0], (1, 120)))[0]
    q = np.argmax(q, axis=0)
    print("prediction = ", q)
Code Example #5
File: modelsNN.py Project: benhur98/GazeUI_RH3
def inceptionv3(width,
                height,
                frame_count,
                lr,
                output=9,
                model_name='sentnet_color.model'):
    network = input_data(shape=[None, width, height, 3], name='input')
    conv1_7_7 = conv_2d(network,
                        64,
                        7,
                        strides=2,
                        activation='relu',
                        name='conv1_7_7_s2')
    pool1_3_3 = max_pool_2d(conv1_7_7, 3, strides=2)
    pool1_3_3 = local_response_normalization(pool1_3_3)
    conv2_3_3_reduce = conv_2d(pool1_3_3,
                               64,
                               1,
                               activation='relu',
                               name='conv2_3_3_reduce')
    conv2_3_3 = conv_2d(conv2_3_3_reduce,
                        192,
                        3,
                        activation='relu',
                        name='conv2_3_3')
    conv2_3_3 = local_response_normalization(conv2_3_3)
    pool2_3_3 = max_pool_2d(conv2_3_3,
                            kernel_size=3,
                            strides=2,
                            name='pool2_3_3_s2')
    inception_3a_1_1 = conv_2d(pool2_3_3,
                               64,
                               1,
                               activation='relu',
                               name='inception_3a_1_1')
    inception_3a_3_3_reduce = conv_2d(pool2_3_3,
                                      96,
                                      1,
                                      activation='relu',
                                      name='inception_3a_3_3_reduce')
    inception_3a_3_3 = conv_2d(inception_3a_3_3_reduce,
                               128,
                               filter_size=3,
                               activation='relu',
                               name='inception_3a_3_3')
    inception_3a_5_5_reduce = conv_2d(pool2_3_3,
                                      16,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_3a_5_5_reduce')
    inception_3a_5_5 = conv_2d(inception_3a_5_5_reduce,
                               32,
                               filter_size=5,
                               activation='relu',
                               name='inception_3a_5_5')
    inception_3a_pool = max_pool_2d(
        pool2_3_3,
        kernel_size=3,
        strides=1,
    )
    inception_3a_pool_1_1 = conv_2d(inception_3a_pool,
                                    32,
                                    filter_size=1,
                                    activation='relu',
                                    name='inception_3a_pool_1_1')

    # merge the inception_3a_* branches
    inception_3a_output = merge([
        inception_3a_1_1, inception_3a_3_3, inception_3a_5_5,
        inception_3a_pool_1_1
    ],
                                mode='concat',
                                axis=3)

    inception_3b_1_1 = conv_2d(inception_3a_output,
                               128,
                               filter_size=1,
                               activation='relu',
                               name='inception_3b_1_1')
    inception_3b_3_3_reduce = conv_2d(inception_3a_output,
                                      128,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_3b_3_3_reduce')
    inception_3b_3_3 = conv_2d(inception_3b_3_3_reduce,
                               192,
                               filter_size=3,
                               activation='relu',
                               name='inception_3b_3_3')
    inception_3b_5_5_reduce = conv_2d(inception_3a_output,
                                      32,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_3b_5_5_reduce')
    inception_3b_5_5 = conv_2d(inception_3b_5_5_reduce,
                               96,
                               filter_size=5,
                               name='inception_3b_5_5')
    inception_3b_pool = max_pool_2d(inception_3a_output,
                                    kernel_size=3,
                                    strides=1,
                                    name='inception_3b_pool')
    inception_3b_pool_1_1 = conv_2d(inception_3b_pool,
                                    64,
                                    filter_size=1,
                                    activation='relu',
                                    name='inception_3b_pool_1_1')

    # merge the inception_3b_* branches
    inception_3b_output = merge([
        inception_3b_1_1, inception_3b_3_3, inception_3b_5_5,
        inception_3b_pool_1_1
    ],
                                mode='concat',
                                axis=3,
                                name='inception_3b_output')

    pool3_3_3 = max_pool_2d(inception_3b_output,
                            kernel_size=3,
                            strides=2,
                            name='pool3_3_3')
    inception_4a_1_1 = conv_2d(pool3_3_3,
                               192,
                               filter_size=1,
                               activation='relu',
                               name='inception_4a_1_1')
    inception_4a_3_3_reduce = conv_2d(pool3_3_3,
                                      96,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_4a_3_3_reduce')
    inception_4a_3_3 = conv_2d(inception_4a_3_3_reduce,
                               208,
                               filter_size=3,
                               activation='relu',
                               name='inception_4a_3_3')
    inception_4a_5_5_reduce = conv_2d(pool3_3_3,
                                      16,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_4a_5_5_reduce')
    inception_4a_5_5 = conv_2d(inception_4a_5_5_reduce,
                               48,
                               filter_size=5,
                               activation='relu',
                               name='inception_4a_5_5')
    inception_4a_pool = max_pool_2d(pool3_3_3,
                                    kernel_size=3,
                                    strides=1,
                                    name='inception_4a_pool')
    inception_4a_pool_1_1 = conv_2d(inception_4a_pool,
                                    64,
                                    filter_size=1,
                                    activation='relu',
                                    name='inception_4a_pool_1_1')

    inception_4a_output = merge([
        inception_4a_1_1, inception_4a_3_3, inception_4a_5_5,
        inception_4a_pool_1_1
    ],
                                mode='concat',
                                axis=3,
                                name='inception_4a_output')

    inception_4b_1_1 = conv_2d(inception_4a_output,
                               160,
                               filter_size=1,
                               activation='relu',
                               name='inception_4b_1_1')
    inception_4b_3_3_reduce = conv_2d(inception_4a_output,
                                      112,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_4b_3_3_reduce')
    inception_4b_3_3 = conv_2d(inception_4b_3_3_reduce,
                               224,
                               filter_size=3,
                               activation='relu',
                               name='inception_4b_3_3')
    inception_4b_5_5_reduce = conv_2d(inception_4a_output,
                                      24,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_4b_5_5_reduce')
    inception_4b_5_5 = conv_2d(inception_4b_5_5_reduce,
                               64,
                               filter_size=5,
                               activation='relu',
                               name='inception_4b_5_5')

    inception_4b_pool = max_pool_2d(inception_4a_output,
                                    kernel_size=3,
                                    strides=1,
                                    name='inception_4b_pool')
    inception_4b_pool_1_1 = conv_2d(inception_4b_pool,
                                    64,
                                    filter_size=1,
                                    activation='relu',
                                    name='inception_4b_pool_1_1')

    inception_4b_output = merge([
        inception_4b_1_1, inception_4b_3_3, inception_4b_5_5,
        inception_4b_pool_1_1
    ],
                                mode='concat',
                                axis=3,
                                name='inception_4b_output')

    inception_4c_1_1 = conv_2d(inception_4b_output,
                               128,
                               filter_size=1,
                               activation='relu',
                               name='inception_4c_1_1')
    inception_4c_3_3_reduce = conv_2d(inception_4b_output,
                                      128,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_4c_3_3_reduce')
    inception_4c_3_3 = conv_2d(inception_4c_3_3_reduce,
                               256,
                               filter_size=3,
                               activation='relu',
                               name='inception_4c_3_3')
    inception_4c_5_5_reduce = conv_2d(inception_4b_output,
                                      24,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_4c_5_5_reduce')
    inception_4c_5_5 = conv_2d(inception_4c_5_5_reduce,
                               64,
                               filter_size=5,
                               activation='relu',
                               name='inception_4c_5_5')

    inception_4c_pool = max_pool_2d(inception_4b_output,
                                    kernel_size=3,
                                    strides=1)
    inception_4c_pool_1_1 = conv_2d(inception_4c_pool,
                                    64,
                                    filter_size=1,
                                    activation='relu',
                                    name='inception_4c_pool_1_1')

    inception_4c_output = merge([
        inception_4c_1_1, inception_4c_3_3, inception_4c_5_5,
        inception_4c_pool_1_1
    ],
                                mode='concat',
                                axis=3,
                                name='inception_4c_output')

    inception_4d_1_1 = conv_2d(inception_4c_output,
                               112,
                               filter_size=1,
                               activation='relu',
                               name='inception_4d_1_1')
    inception_4d_3_3_reduce = conv_2d(inception_4c_output,
                                      144,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_4d_3_3_reduce')
    inception_4d_3_3 = conv_2d(inception_4d_3_3_reduce,
                               288,
                               filter_size=3,
                               activation='relu',
                               name='inception_4d_3_3')
    inception_4d_5_5_reduce = conv_2d(inception_4c_output,
                                      32,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_4d_5_5_reduce')
    inception_4d_5_5 = conv_2d(inception_4d_5_5_reduce,
                               64,
                               filter_size=5,
                               activation='relu',
                               name='inception_4d_5_5')
    inception_4d_pool = max_pool_2d(inception_4c_output,
                                    kernel_size=3,
                                    strides=1,
                                    name='inception_4d_pool')
    inception_4d_pool_1_1 = conv_2d(inception_4d_pool,
                                    64,
                                    filter_size=1,
                                    activation='relu',
                                    name='inception_4d_pool_1_1')

    inception_4d_output = merge([
        inception_4d_1_1, inception_4d_3_3, inception_4d_5_5,
        inception_4d_pool_1_1
    ],
                                mode='concat',
                                axis=3,
                                name='inception_4d_output')

    inception_4e_1_1 = conv_2d(inception_4d_output,
                               256,
                               filter_size=1,
                               activation='relu',
                               name='inception_4e_1_1')
    inception_4e_3_3_reduce = conv_2d(inception_4d_output,
                                      160,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_4e_3_3_reduce')
    inception_4e_3_3 = conv_2d(inception_4e_3_3_reduce,
                               320,
                               filter_size=3,
                               activation='relu',
                               name='inception_4e_3_3')
    inception_4e_5_5_reduce = conv_2d(inception_4d_output,
                                      32,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_4e_5_5_reduce')
    inception_4e_5_5 = conv_2d(inception_4e_5_5_reduce,
                               128,
                               filter_size=5,
                               activation='relu',
                               name='inception_4e_5_5')
    inception_4e_pool = max_pool_2d(inception_4d_output,
                                    kernel_size=3,
                                    strides=1,
                                    name='inception_4e_pool')
    inception_4e_pool_1_1 = conv_2d(inception_4e_pool,
                                    128,
                                    filter_size=1,
                                    activation='relu',
                                    name='inception_4e_pool_1_1')

    inception_4e_output = merge([
        inception_4e_1_1, inception_4e_3_3, inception_4e_5_5,
        inception_4e_pool_1_1
    ],
                                axis=3,
                                mode='concat')

    pool4_3_3 = max_pool_2d(inception_4e_output,
                            kernel_size=3,
                            strides=2,
                            name='pool_3_3')

    inception_5a_1_1 = conv_2d(pool4_3_3,
                               256,
                               filter_size=1,
                               activation='relu',
                               name='inception_5a_1_1')
    inception_5a_3_3_reduce = conv_2d(pool4_3_3,
                                      160,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_5a_3_3_reduce')
    inception_5a_3_3 = conv_2d(inception_5a_3_3_reduce,
                               320,
                               filter_size=3,
                               activation='relu',
                               name='inception_5a_3_3')
    inception_5a_5_5_reduce = conv_2d(pool4_3_3,
                                      32,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_5a_5_5_reduce')
    inception_5a_5_5 = conv_2d(inception_5a_5_5_reduce,
                               128,
                               filter_size=5,
                               activation='relu',
                               name='inception_5a_5_5')
    inception_5a_pool = max_pool_2d(pool4_3_3,
                                    kernel_size=3,
                                    strides=1,
                                    name='inception_5a_pool')
    inception_5a_pool_1_1 = conv_2d(inception_5a_pool,
                                    128,
                                    filter_size=1,
                                    activation='relu',
                                    name='inception_5a_pool_1_1')

    inception_5a_output = merge([
        inception_5a_1_1, inception_5a_3_3, inception_5a_5_5,
        inception_5a_pool_1_1
    ],
                                axis=3,
                                mode='concat')

    inception_5b_1_1 = conv_2d(inception_5a_output,
                               384,
                               filter_size=1,
                               activation='relu',
                               name='inception_5b_1_1')
    inception_5b_3_3_reduce = conv_2d(inception_5a_output,
                                      192,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_5b_3_3_reduce')
    inception_5b_3_3 = conv_2d(inception_5b_3_3_reduce,
                               384,
                               filter_size=3,
                               activation='relu',
                               name='inception_5b_3_3')
    inception_5b_5_5_reduce = conv_2d(inception_5a_output,
                                      48,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_5b_5_5_reduce')
    inception_5b_5_5 = conv_2d(inception_5b_5_5_reduce,
                               128,
                               filter_size=5,
                               activation='relu',
                               name='inception_5b_5_5')
    inception_5b_pool = max_pool_2d(inception_5a_output,
                                    kernel_size=3,
                                    strides=1,
                                    name='inception_5b_pool')
    inception_5b_pool_1_1 = conv_2d(inception_5b_pool,
                                    128,
                                    filter_size=1,
                                    activation='relu',
                                    name='inception_5b_pool_1_1')
    inception_5b_output = merge([
        inception_5b_1_1, inception_5b_3_3, inception_5b_5_5,
        inception_5b_pool_1_1
    ],
                                axis=3,
                                mode='concat')

    pool5_7_7 = avg_pool_2d(inception_5b_output, kernel_size=7, strides=1)
    pool5_7_7 = dropout(pool5_7_7, 0.4)

    loss = fully_connected(pool5_7_7, output, activation='softmax')

    network = regression(loss,
                         optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr,
                         name='targets')

    model = tflearn.DNN(network,
                        max_checkpoints=0,
                        tensorboard_verbose=0,
                        tensorboard_dir='log')

    return model
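# Usage sketch (argument values are placeholders, not from the original project).
# Despite its name, the graph above follows the GoogLeNet / Inception-v1 layout,
# and the frame_count and model_name arguments are never used inside the function.
# model = inceptionv3(width=224, height=224, frame_count=1, lr=1e-3, output=9)
# model.fit(X, Y, n_epoch=10, show_metric=True, run_id='sentnet_color')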
Code Example #6
                            loss=ctrl_loss,
                            trainable_vars=ctrl_vars,
                            batch_size=64,
                            name='target_ctrl',
                            op_name='ctrl_model')

value_model = tfl.regression(value,
                             placeholder=y,
                             optimizer='adam',
                             loss=value_loss,
                             trainable_vars=value_vars,
                             batch_size=64,
                             name='target_value',
                             op_name='value_model')

model = tfl.DNN(tf.concat([ctrl, value], 1))

fc.game_init("FCMAP0.PNG")
fc.add_car("0", (430, 240))
fc.set_car("0", (430, 240), np.pi / 2, 0)
fc.show_car_sight("0")
wight = len(fc.get_car_sight("0")[0])

train_sight = []
train_degree = []
train_velocity = []
train_y = []

degree = 0
velocity = 100
Code Example #7
net = tflearn.residual_block(net, n, 16)
net = tflearn.residual_block(net, 1, 32, downsample=True)
net = tflearn.residual_block(net, n - 1, 32)
net = tflearn.residual_block(net, 1, 64, downsample=True)
net = tflearn.residual_block(net, n - 1, 64)

net = tflearn.batch_normalization(net)
net = tflearn.activation(net, 'relu')
net = tflearn.global_avg_pool(net)

# Regression
net = tflearn.fully_connected(net, 10, activation='softmax')
mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
net = tflearn.regression(net, optimizer=mom, loss='categorical_crossentropy')

# Training
model = tflearn.DNN(net,
                    checkpoint_path='model_resnet_cifar10',
                    max_checkpoints=10,
                    tensorboard_verbose=0,
                    clip_gradients=0.)
model.fit(X,
          Y,
          n_epoch=20,
          validation_set=(testX, testY),
          snapshot_epoch=False,
          snapshot_step=500,
          show_metric=True,
          batch_size=128,
          shuffle=True,
          run_id='resnet_cifar10')
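# Optionally report held-out accuracy after training (a small addition using
# tflearn's DNN.evaluate, as used in Code Example #22):
# print(model.evaluate(testX, testY))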
Code Example #8
network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])
network = conv_2d(network, 64, 5, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = conv_2d(network, 64, 5, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = conv_2d(network, 128, 4, activation='relu')
network = dropout(network, 0.3)
network = fully_connected(network, 3072, activation='relu')
network = fully_connected(network, len(EMOTIONS), activation='softmax')
network = regression(network,
                     optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)
model = tflearn.DNN(network,
                    checkpoint_path='./temp/checkpoint.ckpt',
                    max_checkpoints=1,
                    tensorboard_verbose=2)

if os.path.exists('{}.meta'.format(SAVE_MODEL_FILENAME)):
    model.load(SAVE_MODEL_FILENAME)
    print('[+] Model loaded from ' + SAVE_MODEL_FILENAME)
else:
    print("model load failed!")

cascade_classifier = cv2.CascadeClassifier(CASC_PATH)

video_capture = cv2.VideoCapture(1)
font = cv2.FONT_HERSHEY_SIMPLEX

while True:
Code Example #9
    index = index + 1
trainX.shape = (index, steps_of_history, 1)
trainY.shape = (index, 1)

# Network building
net = tflearn.input_data(shape=[None, steps_of_history, 1])
net = tflearn.simple_rnn(net, n_units=512, return_seq=False)
net = tflearn.dropout(net, 0.5)
net = tflearn.fully_connected(net, 1, activation='linear')
net = tflearn.regression(net,
                         optimizer='sgd',
                         loss='mean_square',
                         learning_rate=0.001)

# Training
model = tflearn.DNN(net, clip_gradients=0.0, tensorboard_verbose=0)
model.fit(trainX,
          trainY,
          n_epoch=15,
          validation_set=0.1,
          show_metric=True,
          batch_size=128)

# Prepare the testing data set
# testX = window to use for prediction
# testY = actual value
# predictY = predicted value
index = 0
while (index + steps_of_history + steps_in_future < len(y)):
    window = y[index:index + steps_of_history]
    target = y[index + steps_of_history + steps_in_future]
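    # A plausible continuation of this loop (a sketch, not the original code):
    # testX = np.array(window).reshape(1, steps_of_history, 1)  # window to predict from
    # predictY = model.predict(testX)[0]                        # predicted value
    # index += 1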
Code Example #10
def cycleTesting3Layer(stock,
                       length,
                       lrs,
                       activations,
                       nodes,
                       epoch,
                       batches,
                       saveName,
                       saveBase,
                       reshape=False):

    # Obtain the stock data
    (train_d, train_t, test_d,
     test_t) = sf.getInputData(stock, length, reshape)

    result = 0
    bestResult = 0
    count = 1
    totalRuns = len(lrs) * len(activations) * len(nodes) * len(batches) * len(
        nodes) * len(nodes)
    startTime = time.time()
    for lr in lrs:
        for func in activations:
            for depth in nodes:
                for depth1 in nodes:
                    for depth2 in nodes:
                        for mbs in batches:
                            func1 = func
                            func2 = func
                            sf.printProgress3(startTime, count, totalRuns, lr,
                                              func, func1, func2, depth,
                                              depth1, depth2, mbs, length,
                                              result, bestResult)
                            count += 1
                            model = tflearn.DNN(
                                ann_three_level(length,
                                                (depth, depth1, depth2), lr,
                                                (func, func1, func2)))
                            model.fit(train_d,
                                      train_t,
                                      n_epoch=epoch,
                                      shuffle=False,
                                      validation_set=(test_d, test_t),
                                      show_metric=False,
                                      batch_size=mbs)
                            result = test_model2(model, length, test_d, test_t)
                            if result > bestResult:
                                bestResult = result
                                bestLr = lr
                                bestFunc = func
                                bestFunc1 = func1
                                bestFunc2 = func2
                                bestDepth = depth
                                bestDepth1 = depth1
                                bestDepth2 = depth2
                                bestMBS = mbs
                                sf.writeFile3Level(length, epoch, bestResult,
                                                   bestLr, bestFunc, bestFunc1,
                                                   bestFunc2, bestDepth,
                                                   bestDepth1, bestDepth2,
                                                   bestMBS, saveName, saveBase)
                                model.save(saveBase + 'nets/ann/threeLayer/' +
                                           saveName + '.pck')
                            tf.reset_default_graph()
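# The helper ann_three_level() called above is not shown in this snippet; a
# hypothetical sketch consistent with how it is invoked (three hidden layers
# with the given sizes and activations; the 2-unit softmax output is assumed):
# def ann_three_level(length, depths, lr, funcs):
#     net = tflearn.input_data(shape=[None, length])
#     net = tflearn.fully_connected(net, depths[0], activation=funcs[0])
#     net = tflearn.fully_connected(net, depths[1], activation=funcs[1])
#     net = tflearn.fully_connected(net, depths[2], activation=funcs[2])
#     net = tflearn.fully_connected(net, 2, activation='softmax')
#     return tflearn.regression(net, optimizer='adam', learning_rate=lr,
#                               loss='categorical_crossentropy')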
Code Example #11
X, Y, labls = pathsToData(dataPaths)

num_classes = 10

l1 = tfl.input_data(shape=[None, 1020])
l2 = tfl.fully_connected(l1, 16, activation='relu')
l2 = tfl.fully_connected(l2, 10, activation='relu')
sm = tfl.fully_connected(l2, num_classes, activation='softmax')

reg = tfl.regression(sm,
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.00001)

net = tfl.DNN(reg, tensorboard_verbose=3)

net.load('net7.tflearn')

net.fit(X,
        Y,
        n_epoch=100,
        shuffle=True,
        batch_size=5,
        show_metric=True,
        run_id='ligma2')

net.save('net7.tflearn')

boop = lambda a: a[0]
Code Example #12
def load_net(build, path):
    network = build
    model = tflearn.DNN(network)
    model.load(path)
    return model
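# Hypothetical usage (the build function and path below are placeholders):
# model = load_net(build_my_network(), 'checkpoints/my_model.tflearn')
# predictions = model.predict(X_test)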
Code Example #13
def training(nb_epoch=5):

    # Loading training and test data  :
    if os.path.exists(TRAIN_DB):
        train_data = np.load(TRAIN_DB)
        print("Load Train data")
    else:
        train_data = create_train_data()

    if os.path.exists(TEST_DB):
        test_data = np.load(TEST_DB)
        print("Load Test data")
    else:
        test_data = process_test_data()

    train = train_data
    test = test_data

    X = np.array([i[0] for i in train]).reshape(-1, IMG_SIZE_WIDTH,
                                                IMG_SIZE_HEIGHT, 1)
    Y = [i[1] for i in train]

    test_x = np.array([i[0] for i in test]).reshape(-1, IMG_SIZE_WIDTH,
                                                    IMG_SIZE_HEIGHT, 1)
    test_y = [i[1] for i in test]

    # Define and use the neural network :

    convnet = input_data(shape=[None, IMG_SIZE_WIDTH, IMG_SIZE_HEIGHT, 1],
                         name='input')

    convnet = conv_2d(convnet, 32, 5, activation='relu')
    convnet = max_pool_2d(convnet, 5)

    convnet = conv_2d(convnet, 64, 5, activation='relu')
    convnet = max_pool_2d(convnet, 5)

    convnet = conv_2d(convnet, 128, 5, activation='relu')
    convnet = max_pool_2d(convnet, 5)

    convnet = conv_2d(convnet, 64, 5, activation='relu')
    convnet = max_pool_2d(convnet, 5)

    convnet = conv_2d(convnet, 32, 5, activation='relu')
    convnet = max_pool_2d(convnet, 5)

    convnet = fully_connected(convnet, 1024, activation='relu')
    convnet = dropout(convnet, 0.8)

    convnet = fully_connected(convnet, 2, activation='softmax')
    convnet = regression(convnet,
                         optimizer='adam',
                         learning_rate=LR,
                         loss='categorical_crossentropy',
                         name='targets')

    model = tflearn.DNN(convnet, tensorboard_dir='log')

    model.fit({'input': X}, {'targets': Y},
              n_epoch=nb_epoch,
              validation_set=({
                  'input': test_x
              }, {
                  'targets': test_y
              }),
              snapshot_step=500,
              show_metric=True,
              run_id=MODEL_NAME)

    model.save(MODEL_LOCATION)
Code Example #14
File: eval.py Project: Aarti626/contextual
app = Flask(__name__)
# restore all of our data structures
import pickle
data = pickle.load(open("training_data", "rb"))
words = data['words']
classes = data['classes']
train_x = data['train_x']
train_y = data['train_y']

net = tflearn.input_data(shape=[None, len(train_x[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')
net = tflearn.regression(net)

model = tflearn.DNN(net, tensorboard_dir="./tflearn_logs")
# import our chat-bot intents file
import json
with open('intents.json') as json_data:
    intents = json.load(json_data)
# load our saved model
model.load('model.tflearn')


def clean_up_sentence(sentence):
    # tokenize the pattern
    sentence_words = nltk.word_tokenize(sentence)
    # stem each word
    sentence_words = [stemmer.stem(word.lower()) for word in sentence_words]
    return sentence_words
Code Example #15
                      name="fc7",
                      trainable=False)
fc7_dropout = dropout(fc7, 0.5)

fc8 = fully_connected(fc7_dropout, 30, activation='softmax', name="fc8")

mm = Momentum(learning_rate=0.01, momentum=0.9, lr_decay=0.1, decay_step=1000)

network = regression(fc8,
                     optimizer=mm,
                     loss='categorical_crossentropy',
                     restore=False)

print("Network defined.")
model = tflearn.DNN(network,
                    checkpoint_path='../../checkpoints/vgg16_AID',
                    max_checkpoints=1,
                    tensorboard_verbose=3)

print("Model defined.")
"""
print(model.get_weights(conv1_1.W).shape)
print(model.get_weights(conv1_2.W).shape)
print(model.get_weights(conv2_1.W).shape)
print(model.get_weights(conv2_2.W).shape)
print(model.get_weights(conv3_1.W).shape)
print(model.get_weights(conv3_2.W).shape)
print(model.get_weights(conv3_3.W).shape)
print(model.get_weights(conv4_1.W).shape)
print(model.get_weights(conv4_2.W).shape)
print(model.get_weights(conv4_3.W).shape)
print(model.get_weights(conv5_1.W).shape)
Code Example #16
File: cnn8.1.py Project: okawo80085/codeDump
net = tfl.conv_1d(net, 25, 5, activation='relu')
net = tfl.max_pool_1d(net, 2)
net = tfl.dropout(net, 0.8)
net = tfl.conv_1d(net, 35, 3, activation='relu')
net = tfl.max_pool_1d(net, 2)
net = tfl.dropout(net, 0.75)
net = tfl.fully_connected(net, 128, activation='relu')
net = tfl.fully_connected(net, 50, activation='relu')
net = tfl.fully_connected(net, n_classes, activation='softmax')

reg = tfl.regression(net,
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.00003)

mod = tfl.DNN(reg, tensorboard_verbose=0)

mod.load('conv_nn8.1')

mod.fit(X,
        Y,
        n_epoch=100,
        shuffle=True,
        show_metric=True,
        batch_size=11,
        run_id='ligma4')

mod.save('conv_nn8.1')

names = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'z']
Code Example #17
File: linear_regression.py Project: zt706/tflearn
import tflearn

# Regression data
X = [
    3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167, 7.042, 10.791,
    5.313, 7.997, 5.654, 9.27, 3.1
]
Y = [
    1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221, 2.827,
    3.465, 1.65, 2.904, 2.42, 2.94, 1.3
]

# Linear Regression graph
input_ = tflearn.input_data(shape=[None])
linear = tflearn.single_unit(input_)
regression = tflearn.regression(linear,
                                optimizer='sgd',
                                loss='mean_square',
                                metric='R2',
                                learning_rate=0.01)
m = tflearn.DNN(regression)
m.fit(X, Y, n_epoch=1000, show_metric=True, snapshot_epoch=False)

print("\nRegression result:")
print("Y = " + str(m.get_weights(linear.W)) + ".X + " +
      str(m.get_weights(linear.b)))

print("\nTest prediction for y = 3.2 and y = 4.5:")
print(m.predict([3.2, 4.5]))
Code Example #18
convnet = input_data(shape=[None, 28, 28, 1], name='input')
convnet = conv_2d(convnet, 32, 2, activation='relu')

convnet = max_pool_2d(convnet, 2)
convnet = conv_2d(convnet, 64, 2, activation='relu')

convnet = max_pool_2d(convnet, 2)
convnet = fully_connected(convnet, 10, activation='relu')

convnet = regression(convnet,
                     optimizer='adam',
                     learning_rate=0.01,
                     loss='categorical_crossentropy',
                     name='targets')
model = tflearn.DNN(convnet)

model.fit({'input': X}, {'targets': Y},
          n_epoch=10,
          validation_set=({
              'input': test_x
          }, {
              'targets': test_y
          }),
          snapshot_step=500,
          show_metric=True,
          run_id='mnist')

model.save('quickest.model')
model.load('quickest.model')
model.predict(data)
Code Example #19
File: train.py Project: SeoyeonLee8772/chatbot_web
    training.append([bag, output_row])

random.shuffle(training)
training = np.array(training)

train_x = list(training[:, 0])
train_y = list(training[:, 1])

tf.reset_default_graph()
net = tflearn.input_data(shape=[None, len(train_x[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')
net = tflearn.regression(net)

model = tflearn.DNN(net, tensorboard_dir=path.getPath('train_logs'))
model.fit(train_x, train_y, n_epoch=20000, batch_size=500, show_metric=True)
model.save(path.getPath('model.tflearn'))


def clean_up_sentence(sentence):
    sentence_words = nltk.word_tokenize(sentence)
    sentence_words = [stemmer.stem(word.lower()) for word in sentence_words]
    return sentence_words


def bow(sentence, words, show_details=False):
    sentence_words = clean_up_sentence(sentence)
    bag = [0] * len(words)
    for s in sentence_words:
        for i, w in enumerate(words):
Code Example #20
def main():
    height = 50
    width = 50
    workpath = '../testing_patient/'
    file_name = '../testing_patient/lungmask_0003_0125'
    image = cv2.imread(file_name + '.png')
    lung_mask = np.load(workpath + file_name + '.npy')
    image_files = np.load(workpath + 'images_' + file_name[28:] + '.npy')

    SeletiveSearch(image)
    images_path = '../temptation/'
    filelist = glob(images_path + '*.png')

    filelist.sort(key=lambda x: int(x[18:-4]))
    print(filelist)

    image_num = len(filelist)
    data = np.zeros((image_num, height, width, 1))
    for idx in range(image_num):
        img = imread(filelist[idx]).astype('float32')
        img = img.reshape(-1, img.shape[0], img.shape[1], 1)
        data[idx, :, :, :] = img
    print(data.shape)

    # Read images and corresponding labels
    csvfile = open('../testing_patient/predicted_nodules.csv', 'r')
    csvReader = csv.reader(csvfile)
    images_labels = list(csvReader)
    csvfile_1 = open('../testing_patient/file_classes.csv', 'r')
    csvReader_1 = csv.reader(csvfile_1)
    images_nodules = list(csvReader_1)
    real_candidates = []
    candidates = []
    del images_labels[0]
    del images_nodules[0]
    for i_l in images_labels:
        i_l[1] = eval(i_l[1])
        i_l[2] = eval(i_l[2])
        candidates.append(i_l[2])
    print(images_labels)
    print(candidates)

    # Get the lung nodule coordinates
    for j_l in images_nodules:
        if j_l[0] == file_name[19:]:
            j_l[1] = eval(j_l[1])
            j_l[2] = eval(j_l[2])
            real_candidates.append((j_l[1], j_l[2]))

    # Mapping the real regions that contain lung nodules
    real_nodules = []
    for candidate in candidates:
        if (candidate[0], candidate[1]) in real_candidates:
            real_nodules.append(candidate)
    print(real_nodules)

    # Feed the data into trained model.
    cnnnet = CNNModel()
    network = cnnnet.define_network(data, 'testtrain')
    model = tflearn.DNN(network,
                        tensorboard_verbose=0,
                        checkpoint_path='nodule-classifier.tfl.ckpt')
    model.load('nodule-classifier.tfl')
    predictions = np.vstack(model.predict(data[:, :, :, :]))
    label_predictions = np.zeros_like(predictions)
    label_predictions[np.arange(len(predictions)), predictions.argmax(1)] = 1
    print(len(label_predictions))
    index_list = []
    for ind, val in enumerate(label_predictions):
        if val[1] == 1:
            index_list.append(ind)
    print(len(index_list))
    print(index_list)

    nodule_candidate = []
    for i in index_list:
        nodule_candidate.append(candidates[i])
    print(nodule_candidate)
    fig, ax = plt.subplots(2, 2, figsize=[8, 8])
    ax[0, 0].imshow(image_files[0], cmap='gray')
    ax[0, 1].imshow(image_files[0] * lung_mask[0], cmap='gray')
    ax[1, 0].imshow(image)
    ax[1, 1].imshow(image)
    for x, y, w, h in candidates:
        rect = mpatches.Rectangle((x, y),
                                  w,
                                  h,
                                  fill=False,
                                  edgecolor='red',
                                  linewidth=1)
        ax[1, 0].add_patch(rect)
    for x, y, w, h in real_nodules:
        rect = mpatches.Rectangle((x, y),
                                  w,
                                  h,
                                  fill=False,
                                  edgecolor='yellow',
                                  linewidth=1)
        ax[1, 1].add_patch(rect)
    for x, y, w, h in nodule_candidate:
        rect = mpatches.Rectangle((x - 3, y - 3),
                                  w + 5,
                                  h + 5,
                                  fill=False,
                                  edgecolor='red',
                                  linewidth=1)
        ax[1, 1].add_patch(rect)
    plt.show()

    shutil.rmtree('../temptation')
    os.mkdir('../temptation')
Code Example #21
    def create_network(self):
        # randomly choose configuration of the network
        self.layers_count = random.randrange(2, 4)
        self.layers_filter_count = [
            random.choice([16, 32, 64, 96]) for i in range(self.layers_count)
        ]
        self.layers_filter_size = [
            random.choice([
                8, 16, 32, 64,
                len(self.string_to_number), 2 * len(self.string_to_number)
            ]) for i in range(self.layers_count)
        ]
        self.layers_maxpool_kernel_size = [
            random.choice([2, 3, 4]) for i in range(self.layers_count)
        ]
        self.dropout = random.choice([True, True, False])
        if self.dropout:
            self.fully_connected_nodes = random.choice([32, 64, 96])
            self.dropout_thresh = random.choice([0.25, 0.4, 0.5])
        self.epochs = random.choice([3, 4])

        # Print Configuration to console
        print("Initializing Model with layers_count=%d, each with:" %
              self.layers_count)
        for layer in zip(self.layers_filter_count, self.layers_filter_size,
                         self.layers_maxpool_kernel_size):
            print("filter_count=%d, filter_size=%d, maxpool_kernel_size=%d" %
                  (layer[0], layer[1], layer[2]))
        if not self.dropout:
            print("No dropout Layer")
        else:
            print(
                "Extra fully connected Layer with %d nodes and dropout threshold %1.2f"
                % (self.fully_connected_nodes, self.dropout_thresh))
        print("Number of Epochs: %d" % self.epochs)

        # define input_data layer: shape describes what the input data looks like; the first element is the batch size and should be "None"
        self.net = tflearn.input_data(
            shape=[None, context_length * 2 * len(self.string_to_number), 1])
        # add Convolutional layers
        for i in range(self.layers_count):
            self.net = tflearn.layers.conv.conv_1d(
                self.net,
                self.layers_filter_count[i],
                self.layers_filter_size[i],
                activation='relu'
            )  # layers_filter_count[i] filters of size layers_filter_size[i]
            self.net = tflearn.layers.conv.max_pool_1d(
                self.net, self.layers_maxpool_kernel_size[i])

        if self.dropout:
            self.net = tflearn.fully_connected(self.net,
                                               self.fully_connected_nodes,
                                               activation='relu')
            self.net = tflearn.layers.core.dropout(self.net,
                                                   self.dropout_thresh)

        self.net = tflearn.fully_connected(self.net,
                                           len(self.string_to_number),
                                           activation='softmax')
        # output with a regression layer
        self.net = tflearn.regression(self.net)
        self.model = tflearn.DNN(self.net)
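        # Training presumably follows with the stored epoch count, e.g. (a sketch,
        # assuming trainX/trainY are prepared elsewhere in the class):
        # self.model.fit(trainX, trainY, n_epoch=self.epochs, show_metric=True)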
Code Example #22
def main():
    train_dataset = pd.read_csv(DATASET_FILENAME)
    test_dataset = pd.read_csv(TEST_FILENAME)

    for month in range(1, 13):
        avg_prod = train_dataset.loc[train_dataset["month"] == month, "production"].mean()
        train_dataset.loc[train_dataset["month"] == month, "mean_prod_mon"] = avg_prod
        test_dataset.loc[test_dataset["month"] == month, "mean_prod_mon"] = avg_prod
    
    for field in range(28):
        avg_prod = train_dataset.loc[train_dataset["field"] == field, "production"].mean()
        train_dataset.loc[train_dataset["field"] == field, "mean_prod_field"] = avg_prod
        test_dataset.loc[test_dataset["field"] == field, "mean_prod_field"] = avg_prod
    
    
    full_dataset = pd.concat([train_dataset, test_dataset])
    for attribute in ATTRIBUTES:
        max_value = full_dataset[attribute].max()
        min_value = full_dataset[attribute].min()
        train_dataset[attribute] = (train_dataset[attribute] - min_value) / (max_value - min_value)
        test_dataset[attribute] = (test_dataset[attribute] - min_value) / (max_value - min_value)
    
        
    # Model 1: types 0, 5 and 6
    
    """
    train_model_1 = train_dataset[train_dataset["type"].isin([0, 5, 6])]
    test_model_1 = test_dataset[test_dataset["type"].isin([0, 5, 6])]
    urf = EllipticEnvelope(contamination=0.07)
    urf.fit(train_model_1[ATTRIBUTES].values.reshape(-1, len(ATTRIBUTES)))
    train_model_1["outlier"] = urf.predict(train_model_1[ATTRIBUTES].values.reshape(-1, len(ATTRIBUTES)))
    train_model_1 = train_model_1[train_model_1["outlier"] == 1]
    train_model_1.drop("outlier", axis=1)
    
    x_train_1 = train_model_1[ATTRIBUTES]
    y_train_1 = train_model_1["production"]
    
    x_test_1 = test_model_1[ATTRIBUTES]
    id_test_1 = test_model_1["Id"]
    
    model_1 = AdaBoostRegressor(base_estimator=XGBRegressor(max_depth=7, learning_rate=0.05, n_estimators=100, n_jobs=8, base_score=0.05), n_estimators=75, learning_rate=1, loss="exponential")
    #model_1 = RandomForestRegressor()
    scores = cross_val_score(model_1, x_train_1, y_train_1, cv=10, scoring="neg_mean_absolute_error")
    print("Kaggle score (modelo 1):")
    print(scores)
    print()
    
    model_1.fit(x_train_1, y_train_1)
    results_1 = model_1.predict(x_test_1)
    
    
    # Model 2: types 1, 2 and 4
    
    train_model_2 = train_dataset[train_dataset["type"].isin([1, 2, 4])]
    test_model_2 = test_dataset[test_dataset["type"].isin([1, 2, 4])]
    urf = EllipticEnvelope(contamination=0.16)
    urf.fit(train_model_2[ATTRIBUTES].values.reshape(-1, len(ATTRIBUTES)))
    train_model_2["outlier"] = urf.predict(train_model_2[ATTRIBUTES].values.reshape(-1, len(ATTRIBUTES)))
    train_model_2 = train_model_2[train_model_2["outlier"] == 1]
    train_model_2.drop("outlier", axis=1)
    
    x_train_2 = train_model_2[ATTRIBUTES]
    y_train_2 = train_model_2["production"]
    
    x_test_2 = test_model_2[ATTRIBUTES]
    id_test_2 = test_model_2["Id"]
    
    model_2 = AdaBoostRegressor(base_estimator=XGBRegressor(max_depth=7, learning_rate=0.05, n_estimators=100, n_jobs=8, base_score=0.05), n_estimators=75, learning_rate=1, loss="exponential")
    #model_2 = RandomForestRegressor()
    scores = cross_val_score(model_2, x_train_2, y_train_2, cv=10, scoring="neg_mean_absolute_error")
    print("Kaggle score (modelo 2):")
    print(scores)
    print()
    
    model_2.fit(x_train_2, y_train_2)
    results_2 = model_2.predict(x_test_2)
    
    
    # Model 3: everything
    
    train_model_3 = train_dataset
    test_model_3 = test_dataset[test_dataset["type"].isin([-1, 3, 7])]
    urf = EllipticEnvelope(contamination=0.06)
    urf.fit(train_model_3[ATTRIBUTES].values.reshape(-1, len(ATTRIBUTES)))
    train_model_3["outlier"] = urf.predict(train_model_3[ATTRIBUTES].values.reshape(-1, len(ATTRIBUTES)))
    train_model_3 = train_model_3[train_model_3["outlier"] == 1]
    train_model_3.drop("outlier", axis=1)
    
    x_train_3 = train_model_3[ATTRIBUTES]
    y_train_3 = train_model_3["production"]
    
    x_test_3 = test_model_3[ATTRIBUTES]
    id_test_3 = test_model_3["Id"]
    
    model_3 = AdaBoostRegressor(base_estimator=XGBRegressor(max_depth=7, learning_rate=0.05, n_estimators=100, n_jobs=8, base_score=0.05), n_estimators=75, learning_rate=1, loss="exponential")
    #model_3 = RandomForestRegressor()
    scores = cross_val_score(model_3, x_train_3, y_train_3, cv=10, scoring="neg_mean_absolute_error")
    print("Kaggle score (modelo 3):")
    print(scores)
    print()
    
    model_3.fit(x_train_3, y_train_3)
    results_3 = model_3.predict(x_test_3)
    
    # Now generate the output
    with open("output.csv", "w") as weeb:
        weeb.write("Id,production\n")
        for id, result in zip(id_test_1, results_1):
            weeb.write(str(id) + "," + str(result) + "\n")
        for id, result in zip(id_test_2, results_2):
            weeb.write(str(id) + "," + str(result) + "\n")
        for id, result in zip(id_test_3, results_3):
            weeb.write(str(id) + "," + str(result) + "\n")
    """
    
    """
    train_filtered = []
    for field in range(28):
        noob = train_dataset[train_dataset["field"] == field]
        #train_filtered.append(noob[noob["production"] < noob["production"].quantile(0.90)])
        urf = EllipticEnvelope(contamination=0.06)
        urf.fit(noob["production"].values.reshape(-1, 1))
        noob["outlier"] = urf.predict(noob["production"].values.reshape(-1, 1))
        print(noob["outlier"].value_counts())
        noob = noob[noob["outlier"] == 1]  # keep only the inliers
        noob.drop("outlier", axis=1)
        train_filtered.append(noob)
    train_dataset = pd.concat(train_filtered)
    """
    
    urf = EllipticEnvelope(contamination=0.08)
    urf.fit(train_dataset["production"].values.reshape(-1, 1))
    train_dataset["outlier"] = urf.predict(train_dataset["production"].values.reshape(-1, 1))
    #print(train_dataset["outlier"].value_counts())
    train_dataset = train_dataset[train_dataset["outlier"] == 1]
    train_dataset.drop("outlier", axis=1)
    
    
    x_data_tr = train_dataset[ATTRIBUTES]
    y_data_tr = train_dataset["production"]
    
    x_data_te = test_dataset[ATTRIBUTES]
    id_data_te = test_dataset["Id"]

    # SPLIT DATASET
    #x_train, x_test, y_train, y_test = train_test_split(
    #        x_data_tr, y_data_tr, test_size=0.2)
    
    # FULL DATASET
    x_train = x_data_tr
    x_test = x_data_tr
    y_train = y_data_tr
    y_test = y_data_tr
    
    
    
    network = input_data(shape=[None, len(ATTRIBUTES)], name="Input_layer")
    #network = fully_connected(network, 24, activation="relu", name="Hidden_layer_1")
    network = fully_connected(network, 20, activation="relu", name="Hidden_layer_2")
    network = fully_connected(network, 16, activation="relu", name="Hidden_layer_3")
    network = fully_connected(network, 12, activation="relu", name="Hidden_layer_4")
    network = fully_connected(network, 1, activation="linear", name="Output_layer")
    network = regression(network, batch_size=64, optimizer='adam', learning_rate=0.001, loss="mean_square", metric="R2")
    
    model = tflearn.DNN(network)
    
    
    
    x_sarue = x_train.values
    y_sarue = y_train.values.reshape(-1, 1)
    x_weeb = x_test.values
    y_weeb = y_test.values.reshape(-1, 1)
    
    
    
    #model.fit(x_sarue, y_sarue, show_metric=True, run_id="sarue", validation_set=(x_weeb, y_weeb), n_epoch=200)
    model.fit(x_sarue, y_sarue, show_metric=True, run_id="weeb", validation_set=0.2, n_epoch=500)
    
    score = model.evaluate(x_weeb, y_weeb)
    print("Result: {}".format(score[0]))
    
    
    """
    #model = AdaBoostRegressor(n_estimators=75, learning_rate=1.0, loss="square")
    model = RandomForestRegressor()
    model.fit(x_train, y_train)
    
    #results = model.predict(x_weeb)
    #score = mean_absolute_error(y_weeb, results)
    results = model.predict(x_test)
    score = mean_absolute_error(y_test, results)
    print("Kaggle score: {}".format(score))
    """
    
    
    # Now generate the output
    results = model.predict(x_data_te.values)
    with open("output.csv", "w") as weeb:
        weeb.write("Id,production\n")
        for id, result in zip(id_data_te, results):
            weeb.write(str(id) + "," + str(result[0]) + "\n")
Code Example #23
    softmax = tflearn.fully_connected(dropout2, nClass, activation='softmax')

    # Regression using SGD with learning rate decay and Top-3 accuracy
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    top_k = tflearn.metrics.Top_k(3)
    net = tflearn.regression(softmax,
                             optimizer=sgd,
                             metric=top_k,
                             loss='categorical_crossentropy')

    print(len(X))
    print(len(Y))

    # Training
    print(exp, "experiment, number of classes:", nClass)
    model = tflearn.DNN(net, tensorboard_verbose=0)

    model.fit(X,
              Y,
              n_epoch=n_epoch1,
              validation_set=0.1,
              run_id='Fullnet_KF_' + str(fold))
    # Save model
    model.save(os.path.join(model_path,
                            'Fullnet_KF_' + str(fold) + '.tflearn'))

    prob_vector = model.predict(testX)
    k_list = []
    confs = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    if b_label:  # Binary classification
        k_list = [1]
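    # The original snippet is truncated here. A rough sketch (assuming a one-hot
    # test label array `testY`, which is not shown above) of how the thresholds
    # in `confs` might be used to score only the most confident predictions:
    #
    #     import numpy as np
    #     probs = np.array(prob_vector)
    #     preds = np.argmax(probs, axis=1)
    #     labels = np.argmax(np.array(testY), axis=1)
    #     for conf in confs:
    #         keep = probs.max(axis=1) >= conf
    #         if keep.any():
    #             acc = (preds[keep] == labels[keep]).mean()
    #             print("conf >= %.1f: kept %d, acc %.3f" % (conf, keep.sum(), acc))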
Code example #24
0
    def build_model(self, learning_rate=[0.001, 0.01]):
        '''
        Model - wide and deep - built using tflearn
        '''
        n_cc = len(self.continuous_columns)
        n_categories = 1  # binary target (is_idv vs. is_not_idv) encoded as a single output
        input_shape = [None, n_cc]
        if self.verbose:
            print("=" * 77 + " Model %s (type=%s)" %
                  (self.name, self.model_type))
            print("  Input placeholder shape=%s" % str(input_shape))
        wide_inputs = tflearn.input_data(shape=input_shape, name="wide_X")
        if not isinstance(learning_rate, list):
            learning_rate = [learning_rate, learning_rate]  # wide, deep
        if self.verbose:
            print("  Learning rates (wide, deep)=%s" % learning_rate)

        with tf.name_scope(
                "Y"):  # placeholder for target variable (i.e. trainY input)
            Y_in = tf.placeholder(shape=[None, 1], dtype=tf.float32, name="Y")

        with tf.variable_op_scope([wide_inputs], None, "cb_unit",
                                  reuse=False) as scope:
            central_bias = tflearn.variables.variable(
                'central_bias',
                shape=[1],
                initializer=tf.constant_initializer(np.random.randn()),
                trainable=True,
                restore=True)
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/cb_unit',
                                 central_bias)

        if 'wide' in self.model_type:
            wide_network = self.wide_model(wide_inputs, n_cc)
            network = wide_network
            wide_network_with_bias = tf.add(wide_network,
                                            central_bias,
                                            name="wide_with_bias")

        if 'deep' in self.model_type:
            deep_network = self.deep_model(wide_inputs, n_cc)
            deep_network_with_bias = tf.add(deep_network,
                                            central_bias,
                                            name="deep_with_bias")
            if 'wide' in self.model_type:
                network = tf.add(wide_network, deep_network)
                if self.verbose:
                    print("Wide + deep model network %s" % network)
            else:
                network = deep_network

        network = tf.add(network, central_bias, name="add_central_bias")

        # add validation monitor summaries giving confusion matrix entries
        with tf.name_scope('Monitors'):
            predictions = tf.cast(tf.greater(network, 0), tf.int64)
            print("predictions=%s" % predictions)
            Ybool = tf.cast(Y_in, tf.bool)
            print("Ybool=%s" % Ybool)
            pos = tf.boolean_mask(predictions, Ybool)
            neg = tf.boolean_mask(predictions, ~Ybool)
            psize = tf.cast(tf.shape(pos)[0], tf.int64)
            nsize = tf.cast(tf.shape(neg)[0], tf.int64)
            true_positive = tf.reduce_sum(pos, name="true_positive")
            false_negative = tf.subtract(psize,
                                         true_positive,
                                         name="false_negative")
            false_positive = tf.reduce_sum(neg, name="false_positive")
            true_negative = tf.subtract(nsize,
                                        false_positive,
                                        name="true_negative")
            overall_accuracy = tf.truediv(tf.add(true_positive, true_negative),
                                          tf.add(nsize, psize),
                                          name="overall_accuracy")
        vmset = [
            true_positive, true_negative, false_positive, false_negative,
            overall_accuracy
        ]

        trainable_vars = tf.trainable_variables()
        tv_deep = [v for v in trainable_vars if v.name.startswith('deep_')]
        tv_wide = [v for v in trainable_vars if v.name.startswith('wide_')]

        if self.verbose:
            print("DEEP trainable_vars")
            for v in tv_deep:
                print("  Variable %s: %s" % (v.name, v))
            print("WIDE trainable_vars")
            for v in tv_wide:
                print("  Variable %s: %s" % (v.name, v))

        if 'wide' in self.model_type:
            if not 'deep' in self.model_type:
                tv_wide.append(central_bias)
            tflearn.regression(
                wide_network_with_bias,
                placeholder=Y_in,
                optimizer='sgd',
                loss='roc_auc_score',
                # loss='binary_crossentropy',
                metric="accuracy",
                learning_rate=learning_rate[0],
                validation_monitors=vmset,
                trainable_vars=tv_wide,
                op_name="wide_regression",
                name="Y")

        if 'deep' in self.model_type:
            if 'wide' not in self.model_type:
                # Deep-only model: train the central bias together with the
                # deep variables (tv_wide is unused in this branch).
                tv_deep.append(central_bias)
            tflearn.regression(
                deep_network_with_bias,
                placeholder=Y_in,
                optimizer='adam',
                loss='roc_auc_score',
                # loss='binary_crossentropy',
                metric="accuracy",
                learning_rate=learning_rate[1],
                validation_monitors=vmset
                if not 'wide' in self.model_type else None,
                trainable_vars=tv_deep,
                op_name="deep_regression",
                name="Y")

        if self.model_type == 'wide+deep':  # learn central bias separately for wide+deep
            tflearn.regression(
                network,
                placeholder=Y_in,
                optimizer='adam',
                loss='roc_auc_score',
                # loss='binary_crossentropy',
                metric="accuracy",
                learning_rate=learning_rate[0],  # use wide learning rate
                trainable_vars=[central_bias],
                op_name="central_bias_regression",
                name="Y")

        self.model = tflearn.DNN(
            network,
            tensorboard_verbose=self.tensorboard_verbose,
            max_checkpoints=5,
            checkpoint_path="%s/%s.tfl" % (self.checkpoints_dir, self.name),
        )

        if self.verbose:
            print("Target variables:")
            for v in tf.get_collection(tf.GraphKeys.TARGETS):
                print("  variable %s: %s" % (v.name, v))

            print("=" * 77)
Code example #25
0
File: network_test.py  Project: wgcgxp/econ-fed
    # 	tays.append(taylor_rule_rate)

    # 	monthly_deviation = abs(rate-Y[index])
    # 	monthly_deviations.append(monthly_deviation)

    # # print(rates)
    # print(tays)
    # print(monthly_deviations)


# Network building
net = tflearn.input_data([None, 3])
net = tflearn.fully_connected(net,
                              10,
                              activation='linear',
                              regularizer='L2',
                              weight_decay=0.0005)
net = tflearn.fully_connected(net, 1, activation='linear')
net = tflearn.regression(net,
                         optimizer=tflearn.optimizers.AdaGrad(
                             learning_rate=0.01,
                             initial_accumulator_value=0.01),
                         loss='mean_square',
                         learning_rate=0.05)

# Training
model = tflearn.DNN(net, checkpoint_path='tmp/')

model.load('/Users/rodrigo.castellon/Desktop/econ-fed/analysis/tmp-7000')

run()
Code example #26
0
                             name='l5')
network_left_right = max_pool_2d(network_left_right, 2)
network_left_right = conv_2d(network_left_right,
                             256,
                             3,
                             activation='relu',
                             name='l6')
network_left_right = max_pool_2d(network_left_right, 2)
network_left_right = fully_connected(network_left_right,
                                     2,
                                     activation='softmax')
network_left_right = regression(network_left_right,
                                optimizer='adam',
                                loss='categorical_crossentropy',
                                learning_rate=0.0001)
model_left_right = tflearn.DNN(network_left_right, tensorboard_verbose=0)
#load a model either for more training or just for controlling the game
#model_left_right.load("best_model_left_right/model_left_right_17400.tfl")

#UP DOWN network
tf.reset_default_graph()
network_up_down = input_data(shape=[None, 128, 128, 6], name='input')
network_up_down = conv_2d(network_up_down,
                          8,
                          3,
                          activation='relu',
                          name='l1_2')
network_up_down = max_pool_2d(network_up_down, 2)
network_up_down = conv_2d(network_up_down,
                          16,
                          3,
Code example #27
0
        output.append(output_row)
    with open('data.pickle', 'wb') as f:
        pickle.dump((words, labels, training, output), f)
training = np.array(training)
output = np.array(output)

# print(training,output)

tf.reset_default_graph()

net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation='softmax')
net = tflearn.regression(net)
model = tflearn.DNN(net)

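# Load a previously trained model if one has been saved; otherwise train it on
# the bag-of-words data and save it for the next run.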
try:
    model.load('model.tflearn')
except:
    model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
    model.save('model.tflearn')


def bag_of_words(s, words):
    bag = [0 for _ in range(len(words))]
    s_words = nltk.word_tokenize(s)
    s_words = [stemmer.stem(w.lower()) for w in s_words]

    for se in s_words:
        for i, w in enumerate(words):
Code example #28
0
File: code_5.py  Project: surajsomani/Teaching
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
from tflearn.data_utils import load_csv
import tensorflow as tfs

data, target = load_csv('dataset.csv',
                        target_column=-1,
                        columns_to_ignore=[1],
                        has_header=True,
                        categorical_labels=True,
                        n_classes=2)
sess = tfs.Session()
outp = tfs.gather(data, 2)
print(sess.run(outp))
outp1 = tfs.gather(target, 2)
print(sess.run(outp1))

network = input_data(shape=[None, 23], name='input')
network = fully_connected(network, 10, activation='relu', name='nn_layer_1')
network = fully_connected(network, 5, activation='relu', name='nn_layer_2')
network = fully_connected(network,
                          2,
                          activation='softmax',
                          name='output_layer')
network = regression(network,
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)
model = tflearn.DNN(network, tensorboard_verbose=3)
#model.fit(data,target,n_epoch=50,validation_set=0.3,show_metric=True,run_id='model1')
Code example #29
0
def predict(network, modelfile, images):
    model = tflearn.DNN(network)
    model.load(modelfile)
    return model.predict(images)
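
# Hypothetical usage sketch (not part of the original snippet): `predict`
# expects an already-built tflearn graph that matches the saved weights, plus a
# batch of inputs; the layer sizes and the file name below are assumptions.
#
#     import numpy as np
#     import tflearn
#     net = tflearn.input_data(shape=[None, 784])
#     net = tflearn.fully_connected(net, 128, activation='relu')
#     net = tflearn.fully_connected(net, 10, activation='softmax')
#     net = tflearn.regression(net)
#     probs = predict(net, 'saved_model.tflearn', np.zeros((1, 784)))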
Code example #30
0
encoder = tflearn.fully_connected(encoder, 256)
encoder = tflearn.fully_connected(encoder, 64)

# Building the decoder
decoder = tflearn.fully_connected(encoder, 256)
decoder = tflearn.fully_connected(decoder, 784, activation='sigmoid')

# Regression, with mean square error
net = tflearn.regression(decoder,
                         optimizer='adam',
                         learning_rate=0.001,
                         loss='mean_square',
                         metric=None)

# Training the auto encoder
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(X,
          X,
          n_epoch=20,
          validation_set=(testX, testX),
          run_id="auto_encoder",
          batch_size=256)

# Encoding X[0] for test
print("\nTest encoding of X[0]:")
# New model, re-using the same session, for weights sharing
encoding_model = tflearn.DNN(encoder, session=model.session)
print(encoding_model.predict([X[0]]))

# Testing the image reconstruction on new data (test set)
print("\nVisualizing results after being encoded and decoded:")