def do_DNN(X, Y, testX, testY):
    """Build a two-hidden-layer DNN for 784-dim inputs (e.g. MNIST) and
    train it on (X, Y), validating against (testX, testY)."""
    # Input placeholder: one feature dimension of width 784.
    input_layer = tflearn.input_data(shape=[None, 784])
    hidden1 = tflearn.fully_connected(input_layer, 64, activation='tanh',
                                      regularizer='L2', weight_decay=0.001)
    drop1 = tflearn.dropout(hidden1, 0.8)  # dropout to reduce overfitting
    hidden2 = tflearn.fully_connected(drop1, 64, activation='tanh',
                                      regularizer='L2', weight_decay=0.001)
    drop2 = tflearn.dropout(hidden2, 0.8)
    # 10-way softmax output layer.
    softmax = tflearn.fully_connected(drop2, 10, activation='softmax')

    # SGD with exponential learning-rate decay; Top-3 accuracy metric
    # (a prediction counts as correct if the truth is in its top 3).
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    top_k = tflearn.metrics.Top_k(3)
    net = tflearn.regression(softmax, optimizer=sgd, metric=top_k,
                             loss='categorical_crossentropy')

    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(X, Y, n_epoch=20, validation_set=(testX, testY),
              show_metric=True, run_id="dense_model")
def predict(data):
    """Predict the class (0 or 1) for a single 20-feature sample.

    `data` is a JSON-encoded mapping of feature name -> value; the saved
    model is restored from disk and run on the single row.
    Returns the index of the highest-scoring label.
    """
    input_layer = tflearn.input_data(shape=[None, 20])
    dense1 = tflearn.fully_connected(input_layer, 128, activation='relu')
    dropout1 = tflearn.dropout(dense1, 0.8)
    dense2 = tflearn.fully_connected(dropout1, 32, activation='relu')
    dropout2 = tflearn.dropout(dense2, 0.8)
    softmax = tflearn.fully_connected(dropout2, 2, activation='softmax')
    # BUG FIX: removed dead code — an SGD optimizer and a Top_k metric were
    # constructed but never used (the regression layer uses 'adam').
    net = tflearn.regression(softmax, optimizer='adam',
                             loss='categorical_crossentropy')
    model = tflearn.DNN(net)
    # The architecture above must match what this checkpoint was trained with.
    model.load("/home/vasu/HINT/backend/src/job_hell/utlis/tflearn_nn.model")

    x = json.loads(data)
    # Build the feature row in the dict's iteration order
    # (NOTE(review): assumes the JSON keys arrive in feature order — confirm
    # against the caller).
    features = np.array([float(x[key]) for key in x])
    features = features.reshape(1, 20)
    y = model.predict_label(features)
    return np.argmax(y)
def get_DNN():
    """Train a 2x64 tanh MLP on MNIST and return the fitted model."""
    X, Y, testX, testY = mnist.load_data(one_hot=True)
    # Two tanh hidden layers with L2 weight decay and dropout.
    input_layer = tflearn.input_data(shape=[None, 784])
    dense1 = tflearn.fully_connected(input_layer, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001)
    dropout1 = tflearn.dropout(dense1, 0.8)
    dense2 = tflearn.fully_connected(dropout1, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001)
    dropout2 = tflearn.dropout(dense2, 0.8)
    softmax = tflearn.fully_connected(dropout2, 10, activation='softmax')
    # SGD with lr decay; Top-3 accuracy as the reported metric.
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    top_k = tflearn.metrics.Top_k(3)
    net = tflearn.regression(softmax, optimizer=sgd, metric=top_k,
                             loss='categorical_crossentropy')
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(X, Y, n_epoch=20, validation_set=(testX, testY),
              show_metric=True, run_id="dense_model")
    # BUG FIX: despite the name get_DNN, the trained model was never
    # returned; return it so callers can actually use it.
    return model
def DNN_load(args):
    """Rebuild the softplus feed-forward architecture and restore trained
    weights from args.model.

    Returns the tflearn.DNN wrapper with weights loaded.
    """
    feat_dim = 6166  # 3918 for the alternative feature set
    n_layers = 1
    input_layer = tflearn.input_data(shape=[None, feat_dim])
    dense1 = tflearn.fully_connected(input_layer, 64, activation='softplus',
                                     regularizer='L2', weight_decay=0.001)
    # BUG FIX: the original called tflearn.dropout(dense1, 1, 0), which
    # passed noise_shape=0; keep_prob 1.0 (i.e. no dropout) was intended.
    dropout1 = tflearn.dropout(dense1, 1.0)
    # Optionally stack extra 16-unit layers (inactive while n_layers == 1).
    for _ in range(n_layers - 1):
        dropout1 = tflearn.dropout(dense1, 1.0)
        dense1 = tflearn.fully_connected(dropout1, 16, activation='softplus',
                                         regularizer='L2', weight_decay=0.001)
    dropout1 = tflearn.dropout(dense1, 0.8)
    softmax = tflearn.fully_connected(dropout1, 6, activation='softmax')
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    net = tflearn.regression(softmax, optimizer=sgd,
                             loss='categorical_crossentropy')
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.load(args.model)
    return model
def simple_dnn():
    """Build an (untrained) DNN over [5, 19] inputs with 11 sigmoid outputs.

    Returns a tflearn.DNN wrapping the compiled regression graph.
    """
    input_layer = tflearn.input_data(shape=[None, 5, 19])
    # 1st hidden layer
    dense1 = tflearn.fully_connected(input_layer, 128, activation='tanh',
                                     regularizer='L2', weight_decay=0.001)
    dropout1 = tflearn.dropout(dense1, 0.5)
    # 2nd hidden layer
    dense2 = tflearn.fully_connected(dropout1, 256, activation='tanh',
                                     regularizer='L2', weight_decay=0.001)
    dropout2 = tflearn.dropout(dense2, 0.5)
    # Output layer. NOTE(review): sigmoid with categorical_crossentropy is
    # an unusual pairing (softmax is conventional) — kept as-is.
    output = tflearn.fully_connected(dropout2, 11, activation='sigmoid')
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    # BUG FIX: removed dead code — a Top_k(1) metric was created but never
    # attached to the regression layer.
    net = tflearn.regression(output, optimizer=sgd,
                             loss='categorical_crossentropy')
    dnn_model = tflearn.DNN(net, tensorboard_verbose=3)
    return dnn_model
def get_model(X, Y, input_shape, kernal_size, drop_out, activation):
    """Build and fit a small MLP classifier with 6 softmax outputs.

    input_shape: (n_samples, n_features) — only index 1 is used.
    kernal_size: width of every hidden layer.
    drop_out: keep probability applied to input and first hidden layer.
    Returns the fitted tflearn.DNN.
    """
    n_layers = 2
    input_layer = tflearn.input_data(shape=[None, input_shape[1]])
    input_layer = tflearn.dropout(input_layer, drop_out)
    dense1 = tflearn.fully_connected(input_layer, kernal_size,
                                     activation=activation)
    dense1 = tflearn.dropout(dense1, drop_out)
    # Additional hidden layers beyond the first.
    for _ in range(n_layers - 1):
        dense1 = tflearn.fully_connected(dense1, kernal_size,
                                         activation=activation)
    # BUG FIX: removed dead code — a dropout(dense1, 0.8) node was created
    # but the softmax layer was wired directly to dense1, so it had no effect.
    softmax = tflearn.fully_connected(dense1, 6, activation='softmax')
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    # NOTE: learning_rate=0.01 here is superseded by the SGD object above.
    net = tflearn.regression(softmax, optimizer=sgd, learning_rate=0.01,
                             loss='categorical_crossentropy')
    model = tflearn.DNN(net, tensorboard_verbose=3,
                        tensorboard_dir='./tmp/tflearn_logs/')
    model.fit(X, Y)
    return model
def build_network(p, input_num):
    """Assemble a fully-connected softmax classifier from hyperparams `p`.

    p.layers: iterable of layer specs where index 1 is the unit count,
    index 2 the activation name, and index 3 the dropout keep-prob.
    Returns the compiled regression graph.
    """
    net = tflearn.input_data(shape=[None, input_num])
    for layer in p.layers:
        net = tflearn.fully_connected(net, layer[1], activation=layer[2],
                                      weights_init=p.weights_init)
        if p.batch_norm:
            net = tflearn.batch_normalization(net)
        # keep-prob of 1.0 means "no dropout layer at all".
        if layer[3] < 1.0:
            net = tflearn.dropout(net, layer[3])
    net = tflearn.fully_connected(net, p.class_num, activation="softmax")

    # Optimizer selection; anything unrecognized falls back to Adam.
    if p.optimizer == "sgd":
        opt = tflearn.SGD(learning_rate=p.learning_rate,
                          lr_decay=p.lr_decay, decay_step=p.decay_step)
    elif p.optimizer == "momentum":
        opt = tflearn.Momentum(learning_rate=p.learning_rate,
                               lr_decay=p.lr_decay, decay_step=p.decay_step)
    elif p.optimizer == "adagrad":
        opt = tflearn.AdaGrad(learning_rate=p.learning_rate)
    else:
        opt = tflearn.Adam(learning_rate=p.learning_rate)
    return tflearn.regression(net, optimizer=opt,
                              loss="categorical_crossentropy")
def build_model():
    """Build an (untrained) 3-hidden-layer MLP for 26-class classification
    of 784-dim inputs."""
    net = tflearn.input_data(shape=[None, 784])
    # Three shrinking ReLU layers, each followed by light (keep=0.9) dropout.
    for width in (1024, 512, 128):
        net = tflearn.fully_connected(net, width, activation='relu',
                                      regularizer='L2', weight_decay=0.001)
        net = tflearn.dropout(net, 0.9)
    softmax = tflearn.fully_connected(net, 26, activation='softmax')
    # SGD with learning-rate decay; plain accuracy as the reported metric.
    sgd = tflearn.SGD(learning_rate=0.04, lr_decay=0.98, decay_step=1000)
    net = tflearn.regression(softmax, optimizer=sgd,
                             metric=tflearn.metrics.Accuracy(),
                             loss='categorical_crossentropy')
    return tflearn.DNN(net, tensorboard_verbose=0)
def predict_new_velocity(x):
    """Restore the trained 8-class model from 'model.tflearn' and return
    uint8 class indices for input batch x of shape [N, 360, 3]."""
    net = tflearn.input_data(shape=[None, 360, 3])
    # Two identical ReLU blocks with 50% dropout.
    for _ in range(2):
        net = tflearn.fully_connected(net, 64, activation='relu',
                                      regularizer='L2', weight_decay=0.001)
        net = tflearn.dropout(net, 0.5)
    softmax = tflearn.fully_connected(net, 8, activation='softmax')
    # The regression config must mirror training for the checkpoint to load.
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    net = tflearn.regression(softmax, optimizer=sgd,
                             metric=tflearn.metrics.Top_k(3),
                             loss='categorical_crossentropy')
    model = tflearn.DNN(net, tensorboard_verbose=3)
    model.load("model.tflearn")
    scores = model.predict(x)
    return np.argmax(scores, axis=1).astype(np.uint8)
def nn_boilerplate(input_size, output_size, hidden_sizes=[], weight_decay=0.001,
                   dropout=0.8, lr=0.1, lr_decay=0.96, decay_step=1000, k=3):
    """Build an (untrained) tanh MLP with SGD + Top-k accuracy metric.

    hidden_sizes lists the width of each hidden layer (never mutated);
    the output layer has `output_size` softmax units.
    Returns the tflearn.DNN wrapper.
    """
    net = tflearn.input_data(shape=[None, input_size])
    for size in hidden_sizes:
        net = tflearn.fully_connected(net, size, activation='tanh',
                                      regularizer='L2',
                                      weight_decay=weight_decay)
        net = tflearn.dropout(net, dropout)
    # BUG FIX: activation was the garbled string 'softmaxsgd decay', which
    # is not a valid tflearn activation; 'softmax' was clearly intended.
    net = tflearn.fully_connected(net, output_size, activation='softmax')
    sgd = tflearn.SGD(learning_rate=lr, lr_decay=lr_decay,
                      decay_step=decay_step)
    top_k = tflearn.metrics.Top_k(k)
    net = tflearn.regression(net, optimizer=sgd, metric=top_k,
                             loss='categorical_crossentropy')
    return tflearn.DNN(net, tensorboard_verbose=0)
def simple_blstm():
    """Bidirectional LSTM (91 units each direction) over [5, 19] inputs
    with an 11-unit sigmoid output; returns an untrained DNN.

    NOTE: `tf` here is this file's alias for the tflearn-style API.
    """
    net = tf.input_data(shape=[None, 5, 19])
    net = tf.bidirectional_rnn(net, tf.BasicLSTMCell(91), tf.BasicLSTMCell(91))
    net = tf.dropout(net, 0.5)
    net = tf.fully_connected(net, 11, activation='sigmoid')
    opt = tf.SGD(learning_rate=0.01, lr_decay=0.96, decay_step=1000)
    net = tf.regression(net, optimizer=opt, loss='categorical_crossentropy')
    # clip_gradients=0. disables gradient clipping.
    return tf.DNN(net, clip_gradients=0., tensorboard_verbose=0)
def nn():
    """Small fully-connected binary classifier over 128x128x3 inputs;
    returns the compiled regression graph (not wrapped in a DNN)."""
    net = tflearn.input_data(shape=[None, 128, 128, 3])
    net = tflearn.fully_connected(net, 64, activation='tanh')
    net = tflearn.fully_connected(net, 2, activation='softmax')
    # SGD with learning-rate decay.
    opt = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    return tflearn.regression(net, optimizer=opt, metric='accuracy',
                              loss='categorical_crossentropy')
def run_classifier(dataset):
    """Train the 200-feature binary classifier and report overall and
    per-class accuracy on the held-out test split.

    dataset keys: 'train', 'train_labels', 'test', 'test_labels';
    labels are one-hot where (0, 1) encodes the "zero" class.
    """
    input_layer = tflearn.input_data(shape=[None, 200])
    dense_1 = tflearn.fully_connected(input_layer, 128, activation='relu')
    dense_2 = tflearn.fully_connected(dense_1, 16, activation='linear')
    softmax = tflearn.fully_connected(dense_2, 2, activation='softmax')

    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.9, decay_step=100)
    top_k = tflearn.metrics.Top_k(2)
    net = tflearn.regression(softmax, optimizer=sgd, metric=top_k,
                             loss='categorical_crossentropy')

    model = tflearn.DNN(net, tensorboard_verbose=0, session=None)
    model.fit(dataset["train"], dataset["train_labels"], n_epoch=30,
              show_metric=True, run_id="fold_training")

    # Tally overall and per-class accuracy.
    match_count = 0.0
    zero_count = 0.0
    zero_match = 0.0
    one_count = 0.0
    one_match = 0.0
    predictions = model.predict(dataset["test"])
    for index, prediction in enumerate(predictions):
        # One-hot label (0, 1) encodes the "zero" class.
        is_zero = np.all(np.equal(dataset["test_labels"][index],
                                  np.array((0, 1))))
        if is_zero:
            zero_count += 1
        else:
            one_count += 1
        if compare(prediction, dataset["test_labels"][index]):
            match_count += 1
            if is_zero:
                zero_match += 1
            else:
                one_match += 1
    print("Accuracy on Fresh Data is : {} %".format(
        str(100.0 * match_count / len(predictions))))
    # BUG FIX: guard against a class being absent from the test split,
    # which previously raised ZeroDivisionError.
    if zero_count:
        print("Accuracy on Zeros is : {} %".format(
            str(100 * zero_match / zero_count)))
    if one_count:
        print("Accuracy on Ones is : {} %".format(
            str(100 * one_match / one_count)))
def __init__(self):
    """Assemble the 784-input network via make_core_network and store the
    resulting tflearn.DNN on self.model."""
    net = tflearn.input_data(shape=[None, 784], name="input")
    # The core hidden layers are delegated to the class's builder.
    net = self.make_core_network(net)
    # SGD with lr decay; Top-3 accuracy as the reported metric.
    opt = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    net = tflearn.regression(net, optimizer=opt,
                             metric=tflearn.metrics.Top_k(3),
                             loss='categorical_crossentropy', name="target")
    self.model = tflearn.DNN(net, tensorboard_verbose=0)
def do_mlp():
    """Build a 2x64 tanh MLP over 100x60x1 captcha images (252 classes),
    then either train it (flag == 0, optionally resuming from a saved
    checkpoint) or evaluate a saved model (flag == 1).
    """
    input_layer = tflearn.input_data(shape=[None, 100, 60, 1])
    dense1 = tflearn.fully_connected(input_layer, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001)
    dropout1 = tflearn.dropout(dense1, 0.8)
    dense2 = tflearn.fully_connected(dropout1, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001)
    dropout2 = tflearn.dropout(dense2, 0.8)
    softmax = tflearn.fully_connected(dropout2, 252, activation='softmax')
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    top_k = tflearn.metrics.Top_k(3)
    net = tflearn.regression(softmax, optimizer=sgd, metric=top_k,
                             loss='categorical_crossentropy')

    model = tflearn.DNN(net, tensorboard_verbose=0)
    model_path = './model/mlp/mlp'
    # BUG FIX: the flag == 1 branch previously referenced x_test/y_test
    # that were only defined inside the flag == 0 branch; features are now
    # prepared before branching so both paths have data.
    x, y = preprocess.get_feature()
    x = x.reshape([-1, 100, 60, 1])
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.05)
    flag = 0
    if flag == 0:
        if os.path.exists('./model/mlp'):
            print("loading model")
            model.load(model_path)
        model.fit(x_train, y_train, n_epoch=100000,
                  validation_set=(x_test, y_test), show_metric=True,
                  run_id="captchalib")
        model.save(model_path)
    elif flag == 1:
        model.load(model_path)
        print(model.evaluate(x_test, y_test))
def dnn_model(input_length, output_length, activation='relu'):
    """Build an (untrained) 2-hidden-layer DNN with a softmax output.

    Returns a tflearn.DNN wrapping the compiled regression graph.
    """
    input_layer = tflearn.input_data(shape=[None, input_length])
    net = tflearn.fully_connected(input_layer, 64, activation=activation)
    net = tflearn.dropout(net, 0.8)
    # BUG FIX: the second dense layer was connected to input_layer,
    # bypassing the first layer and its dropout; chain it properly.
    net = tflearn.fully_connected(net, 64, activation=activation)
    net = tflearn.dropout(net, 0.8)
    softmax = tflearn.fully_connected(net, output_length,
                                      activation='softmax')
    sgd = tflearn.SGD(learning_rate=0.1, decay_step=1000)
    graph = tflearn.regression(softmax, optimizer=sgd,
                               loss='categorical_crossentropy')
    return tflearn.DNN(graph, tensorboard_verbose=0)
def lstm_model(input_length, output_length, activation='relu'):
    """Embedding + LSTM softmax classifier.

    NOTE: `activation` is accepted but unused — kept for signature parity
    with dnn_model.
    """
    inp = tflearn.input_data(shape=[None, input_length])
    # Vocabulary size comes from the shared tokenizer state.
    emb = tflearn.embedding(inp,
                            input_dim=data_provider.STATE['tokenizer'].index,
                            output_dim=64)
    rnn = tflearn.lstm(emb, 64, dropout=0.8)
    out = tflearn.fully_connected(rnn, output_length, activation='softmax')
    opt = tflearn.SGD(learning_rate=0.1, decay_step=1000)
    net = tflearn.regression(out, optimizer=opt,
                             loss='categorical_crossentropy')
    return tflearn.DNN(net, tensorboard_verbose=0)
def make_model(self):
    """Build an (untrained) binary classifier over 126-dim inputs.

    Returns a tflearn.DNN wrapping the compiled regression graph.
    """
    input_layer = tflearn.input_data(shape=[None, 126])
    dense_1 = tflearn.fully_connected(input_layer, 64, activation='relu',
                                      regularizer='L2', weight_decay=0.0001)
    softmax = tflearn.fully_connected(dense_1, 2, activation='softmax')
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    # BUG FIX: Top_k(5) on a 2-class output always reports 100% and is
    # meaningless; report plain accuracy instead.
    metric = tflearn.metrics.Accuracy()
    net = tflearn.regression(softmax, optimizer=sgd, metric=metric,
                             loss='categorical_crossentropy')
    return tflearn.DNN(net, tensorboard_verbose=0)
def fit(self, X, y, name: str = "p1-fp(C)") -> "P1FPClassifierC":
    """Fit the convolution neural network with the given data."""
    X = check_array(X, accept_sparse=False)
    y = check_array(y, accept_sparse=False, ensure_2d=False)
    n_features = X.shape[1]
    n_classes = np.unique(y).size

    # Samples become NHWC tensors with a single row and channel.
    X = X.reshape([-1, 1, n_features, 1])
    encoder = OneHotEncoder(sparse=False)
    y = encoder.fit_transform(y.reshape(-1, 1))

    tf.compat.v1.reset_default_graph()
    net = input_data(shape=[None, 1, n_features, 1], name='input')
    # Two identical conv / max-pool / LRN stages.
    for _ in range(2):
        net = conv_2d(net, 128, 12, activation='relu', regularizer="L2")
        net = max_pool_2d(net, 10)
        net = local_response_normalization(net)
    net = fully_connected(net, 256, activation='tanh')
    net = dropout(net, 0.8)
    softmax = fully_connected(net, n_classes, activation='softmax')

    sgd = tflearn.SGD(learning_rate=0.05, lr_decay=0.96, decay_step=1000)
    net = tflearn.regression(softmax, optimizer=sgd,
                             loss='categorical_crossentropy', name='target',
                             metric=tflearn.metrics.Top_k(3))

    # pylint: disable=attribute-defined-outside-init
    self.n_features_ = n_features
    self.classes_ = encoder.categories_[0]
    self.model_ = tflearn.DNN(net, tensorboard_verbose=0)
    self.model_.fit({'input': X}, {'target': y}, validation_set=0.1,
                    n_epoch=self.n_epoch, snapshot_step=self.snapshot_step,
                    run_id=name)
    return self
def run():
    """Train a 2x256 PReLU MLP on 784-dim inputs using TFLearn's low-level
    Trainer/TrainOp API directly over raw TF variables.

    NOTE(review): trainX/trainY/testX/testY are not defined here —
    presumably module-level globals; confirm they exist before calling.
    """
    # model variables (placeholders for data, weights and biases for 3 layers)
    X = tf.placeholder('float', [None, 784])
    Y = tf.placeholder('float', [None, 10])
    W1 = tf.Variable(tf.random_normal([784, 256]))
    W2 = tf.Variable(tf.random_normal([256, 256]))
    W3 = tf.Variable(tf.random_normal([256, 10]))
    b1 = tf.Variable(tf.random_normal([256]))
    b2 = tf.Variable(tf.random_normal([256]))
    b3 = tf.Variable(tf.random_normal([10]))

    def dnn(x):
        # Forward pass: two PReLU hidden layers, softmax output,
        # using tflearn PReLU activation ops.
        x = tflearn.prelu(tf.add(tf.matmul(x, W1), b1))
        tflearn.summaries.monitor_activation(x)  # Monitor activation
        x = tflearn.prelu(tf.add(tf.matmul(x, W2), b2))
        tflearn.summaries.monitor_activation(x)  # Monitor activation
        x = tf.nn.softmax(tf.add(tf.matmul(x, W3), b3))
        return x

    net = dnn(X)
    # use objective ops from TFLearn
    loss = tflearn.categorical_crossentropy(net, Y)
    # use metric ops from TFLearn
    acc = tflearn.metrics.accuracy_op(net, Y)
    # use SGD Optimizer class from TFLearn
    optimizer = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=200)
    # Because of lr decay, it is required to first build the Optimizer with
    # the step tensor that will monitor training step.
    # (Note: When using TFLearn estimators wrapper, build is self managed,
    # so only using above `Optimizer` class as `DNN` optimizer arg is enough).
    step = tflearn.variable('step', initializer='zeros', shape=[])
    optimizer.build(step_tensor=step)
    optim_tensor = optimizer.get_tensor()

    # Use TFLearn Trainer: define the training op for backprop, then fit.
    trainop = tflearn.TrainOp(loss=loss, optimizer=optim_tensor,
                              metric=acc, batch_size=128, step_tensor=step)
    trainer = tflearn.Trainer(train_ops=trainop, tensorboard_verbose=3)
    trainer.fit({X: trainX, Y: trainY},
                val_feed_dicts={X: testX, Y: testY},
                n_epoch=2, show_metric=True)
def simple_cnn():
    """1-D conv + max-pool classifier over [5, 19] inputs with 11 sigmoid
    outputs; returns an untrained DNN.

    NOTE: `tf` here is this file's alias for the tflearn-style API.
    """
    net = tf.input_data(shape=[None, 5, 19])
    net = tf.conv_1d(net, 256, 4, padding='valid', activation='sigmoid',
                     regularizer='L2')
    net = tf.max_pool_1d(net, kernel_size=4)
    net = tf.dropout(net, 0.7)
    net = tf.fully_connected(net, 11, activation='sigmoid')
    opt = tf.SGD(learning_rate=0.01, lr_decay=0.96, decay_step=32000)
    net = tf.regression(net, optimizer=opt, loss='categorical_crossentropy')
    return tf.DNN(net)
def Convolution_NN(input_size, arg):
    """Three-stage CNN for 10-class classification of square images.

    input_size: image side length; arg: channel count.
    Filter counts come from the module-level filter_size_1 /
    filter_Size_2 / filter_size_3 and fullyconnected_size constants.
    """
    net = input_data(shape=[None, input_size, input_size, arg],
                     name='input_layer')
    # Three conv stages; strides 1, 2, 2 perform the spatial downsampling.
    net = conv_2d(net, nb_filter=filter_size_1, filter_size=6, strides=1,
                  activation='relu', regularizer='L2')
    net = conv_2d(net, nb_filter=filter_Size_2, filter_size=5, strides=2,
                  activation='relu', regularizer='L2')
    net = conv_2d(net, nb_filter=filter_size_3, filter_size=4, strides=2,
                  activation='relu', regularizer='L2')
    net = fully_connected(flatten(net), fullyconnected_size,
                          activation='relu', regularizer='L2')
    net = dropout(net, 0.75)
    out_layer = fully_connected(net, 10, activation='softmax')
    # NOTE(review): lr_decay=0.096 is unusually aggressive — confirm it is
    # not a typo for 0.96.
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.096, decay_step=100)
    top_k = tflearn.metrics.top_k(3)
    net = regression(out_layer, optimizer=sgd, metric=top_k,
                     loss='categorical_crossentropy')
    return tflearn.DNN(net, tensorboard_dir='tf_CNN_board',
                       tensorboard_verbose=3)
def non_linear_classifier(position_array, class_array, n_classes):
    """Train a 3-layer (input, hidden, softmax output) network that
    classifies 2-D points, then plot the predicted class regions.

    :param position_array: np array [n_examples, 2] of x, y positions
    :param class_array: np array [n_examples] of integer class labels;
        the ith label classifies the ith position
    :param n_classes: number of distinct classes in the data
    """
    with tf.Graph().as_default():
        print("I'm doing Non-linear now")
        net = tflearn.input_data(shape=[None, 2])
        net = tflearn.fully_connected(net, 64, activation='relu')  # hidden: 64 nodes
        net = tflearn.fully_connected(net, 4, activation='softmax')  # output: 4 nodes
        sgd = tflearn.SGD(learning_rate=2.0, decay_step=500)
        net = tflearn.regression(net, optimizer=sgd,
                                 loss='categorical_crossentropy')
        model = tflearn.DNN(net)
        one_hot_labels = to_categorical(class_array, n_classes)
        # Train with gradient descent.
        model.fit(position_array, one_hot_labels, n_epoch=2000,
                  batch_size=100, show_metric=True, snapshot_epoch=False)
        # Plot the class area.
        plot_spiral_and_predicted_class(position_array, class_array, model,
                                        "Non-LinearPlot",
                                        "Plot of Non-Linear Classifier")
def mlp_1d(x_train, y_train, x_test, y_test):
    """Train (flag == 0) or evaluate (flag == 1) a 2x64 tanh MLP on
    784-dim inputs with 10 softmax outputs, persisting to 'bc.tfl'."""
    print("MLP + 1d")
    input_layer = tflearn.input_data(shape=[None, 784])
    dense1 = tflearn.fully_connected(input_layer, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001)
    dropout1 = tflearn.dropout(dense1, 0.8)
    dense2 = tflearn.fully_connected(dropout1, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001)
    dropout2 = tflearn.dropout(dense2, 0.8)
    softmax = tflearn.fully_connected(dropout2, 10, activation='softmax')
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    top_k = tflearn.metrics.Top_k(3)
    net = tflearn.regression(softmax, optimizer=sgd, metric=top_k,
                             loss='categorical_crossentropy')

    model = tflearn.DNN(net, tensorboard_verbose=0)
    model_path = 'bc.tfl'
    flag = 0
    if flag == 0:
        # BUG FIX: fit previously referenced undefined globals X, Y,
        # testX, testY instead of this function's parameters.
        model.fit(x_train, y_train, n_epoch=10,
                  validation_set=(x_test, y_test), show_metric=True,
                  run_id="mnist")
        model.save(model_path)
    elif flag == 1:
        model.load(model_path)
        print(model.evaluate(x_test, y_test))
def __init__(self, learning_rate):
    """Build a 5-hidden-layer ReLU MLP over 32x32x3 inputs (10 softmax
    outputs) and store the compiled regression graph on self.network."""
    net = tflearn.input_data(shape=[None, 32, 32, 3])
    # Widening ReLU stack with per-layer dropout keep-probs.
    for width, keep in ((32, 0.5), (64, 0.25), (128, 0.5), (256, 0.5)):
        net = tflearn.fully_connected(net, width, activation='relu')
        net = tflearn.dropout(net, keep)
    net = tflearn.fully_connected(net, 512, activation='relu')
    softmax = tflearn.fully_connected(net, 10, activation='softmax')
    sgd = tflearn.SGD(learning_rate=learning_rate, lr_decay=0.95,
                      decay_step=1000)
    self.network = tflearn.regression(softmax, optimizer=sgd,
                                      learning_rate=learning_rate,
                                      loss='categorical_crossentropy')
def mlp():
    """Return the compiled regression graph for a 2x64 tanh MLP over
    784-dim inputs with 10 softmax outputs (not wrapped in a DNN)."""
    net = tflearn.input_data(shape=[None, 784])
    # Two identical tanh blocks with 80% keep-prob dropout.
    for _ in range(2):
        net = tflearn.fully_connected(net, 64, activation='tanh',
                                      regularizer='L2', weight_decay=0.001)
        net = tflearn.dropout(net, 0.8)
    softmax = tflearn.fully_connected(net, 10, activation='softmax')
    # SGD with lr decay and Top-3 accuracy reporting.
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    return tflearn.regression(softmax, optimizer=sgd,
                              metric=tflearn.metrics.Top_k(3),
                              loss='categorical_crossentropy')
def mlp2(X_train, y_train, X_test, y_test):
    """Train a 128/64 tanh MLP on X_train and return predictions for X_test."""
    # Network: two shrinking tanh layers with 70% keep-prob dropout.
    net = tflearn.input_data(shape=[None, 784])
    for width in (128, 64):
        net = tflearn.fully_connected(net, width, activation='tanh',
                                      regularizer='L2', weight_decay=0.001)
        net = tflearn.dropout(net, 0.7)
    net = tflearn.fully_connected(net, 10, activation='softmax')
    # Optimization: SGD with lr decay, Top-3 accuracy reporting.
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    net = tflearn.regression(net, optimizer=sgd,
                             metric=tflearn.metrics.Top_k(3),
                             loss='categorical_crossentropy')
    # Train, then predict on the held-out split.
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(X_train, y_train, n_epoch=20, validation_set=(X_test, y_test),
              show_metric=True, run_id="dense_model")
    return model.predict(X_test)
def dnn():
    """Return the compiled graph for a 2x64 tanh MLP binary classifier
    over 128x128x3 inputs (not wrapped in a DNN)."""
    net = tflearn.input_data(shape=[None, 128, 128, 3])
    # Two identical tanh blocks with 80% keep-prob dropout.
    for _ in range(2):
        net = tflearn.fully_connected(net, 64, activation='tanh',
                                      regularizer='L2', weight_decay=0.001)
        net = tflearn.dropout(net, 0.8)
    softmax = tflearn.fully_connected(net, 2, activation='softmax')
    # SGD with learning-rate decay.
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    return tflearn.regression(softmax, optimizer=sgd,
                              loss='categorical_crossentropy')
def ModelMaker(inputShape, structreArray, tensorBoardAdress, lr=0.01,
               optimizer="adam"):
    """Build a mean-square-loss regression DNN whose layers come from
    LayerMaker(structreArray).

    inputShape: iterable of per-sample dimensions (any rank).
    optimizer: "adam" (default) or anything else for plain SGD at `lr`.
    Returns a tflearn.DNN logging to tensorBoardAdress.
    """
    tflearn.config.init_graph(gpu_memory_fraction=0.95, soft_placement=True)
    # Generalized: one expression handles any input rank, replacing the
    # original 1-to-4-dimension if/elif chain (same shapes for ranks 1-4).
    network = tflearn.input_data(shape=[None] + list(inputShape),
                                 name='input')
    network = LayerMaker(network, structreArray)
    loss = 'mean_square'
    if optimizer != "adam":
        optimizer = tflearn.SGD(learning_rate=lr)
    network = tflearn.regression(network, optimizer=optimizer,
                                 learning_rate=lr, loss=loss, name="target")
    model = tflearn.DNN(network, tensorboard_dir=tensorBoardAdress)
    return model
def three_branch_cnn():
    """Three parallel conv_1d branches over [5, 19] inputs, concatenated,
    then dropout and an 11-unit sigmoid output; returns an untrained DNN.

    NOTE: `tf` here is this file's alias for the tflearn-style API.
    """
    input_layer = tf.input_data(shape=[None, 5, 19])
    # Three identical branches reading the same input.
    branch1 = tf.conv_1d(input_layer, 256, 5, padding='valid',
                         activation='sigmoid', regularizer='L2')
    branch2 = tf.conv_1d(input_layer, 256, 5, padding='valid',
                         activation='sigmoid', regularizer='L2')
    branch3 = tf.conv_1d(input_layer, 256, 5, padding='valid',
                         activation='sigmoid', regularizer='L2')
    # Merge branch outputs along axis 1.
    model = tf.merge([branch1, branch2, branch3], mode='concat', axis=1)
    model = tf.dropout(model, 0.7)
    model = tf.fully_connected(model, 11, activation='sigmoid')
    sgd = tf.SGD(learning_rate=0.01, lr_decay=0.96, decay_step=1000)
    # BUG FIX: the configured SGD object was built but the string 'sgd'
    # (default hyperparameters) was passed to regression instead; pass the
    # object so the lr/decay settings actually apply.
    model = tf.regression(model, optimizer=sgd,
                          loss='categorical_crossentropy')
    return tf.DNN(model, tensorboard_verbose=0)