def heart_disease(data_file, iterations=3000, learning_rate=0.1, reg_param=0.1, plot_learning_curves=False):
    dataset = dproc.read_dataset(data_file)
    X, y = dproc.preprocess(dataset)
    X_train, X_cv, y_train, y_cv = train_test_split(X, y, test_size=0.3)
    
    # Standardize data
    X_train_std, X_cv_std = dproc.standardize(X_train, X_cv)

    activation_layers = (X.shape[1], 25, 1)
    parameters = model.init_params(activation_layers)

    model.train_model(X_train_std.T, y_train.T, parameters, iterations, learning_rate, reg_param)

    if plot_learning_curves:
        costs_train, costs_cv, m_examples = model.train_various_sizes(
            X_train_std.T, X_cv_std.T, y_train.T, y_cv.T,
            parameters, activation_layers, 3000, 0.01, reg_param)
        dataplot.plot_learning_curves(costs_train, costs_cv, m_examples)

    train_accuracy = model.compute_accuracy(X_train_std.T, y_train.T, parameters)
    cv_accuracy = model.compute_accuracy(X_cv_std.T, y_cv.T, parameters)
    print(f"Train accuracy: {train_accuracy}")
    print(f"CV accuracy: {cv_accuracy}")
Example #2
def objective(hyper_params):
    activation_layers = tuple(int(a) for a in hyper_params["hidden_layers"]["network"])
    
    learning_rate = hyper_params["learning_rate"]
    epochs = int(hyper_params["epochs"])
    reg_param = hyper_params["reg_param"]
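    # V and S are presumably the first- and second-moment accumulators of an
    # Adam-style optimizer used by train_mini_batch_model (an assumption from
    # the names; the model module is not shown here).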
    parameters, V, S = model.init_params_V_and_S(activation_layers)

    model.train_mini_batch_model(X_batches, y_batches, parameters, V, S, epochs, learning_rate, reg_param)

    train_accuracy = model.compute_accuracy(X_train_std.T, y_train.T, parameters)
    cv_accuracy = model.compute_accuracy(X_cv_std.T, y_cv.T, parameters)

    loss = 1 - cv_accuracy
    
    print(f"CV accuracy: {cv_accuracy}")

    return {"loss": loss, "train_accuracy": train_accuracy, "cv_accuracy": cv_accuracy, "hyper_params": hyper_params, "status": STATUS_OK, "parameters": parameters, "batch_size": y_batches[0].shape[1]}
Example #3
def classification(X, y, learning_rate, num_epochs, verbose):

    # split data into training and test
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.2,
                                                        random_state=17,
                                                        shuffle=True)

    # create neural network by specifying the number of neurons in each layer
    network = Classifier(X_train.shape[1], 15, 1)

    # fit the neural network and return the loss and accuracy histories
    ce_history, acc_history = network.classify(np.array(X_train),
                                               np.array(y_train),
                                               learning_rate, num_epochs,
                                               verbose)

    # predict training accuracy and error
    train_predictions = np.array(
        [network.predict(np.array(row)) for _, row in X_train.iterrows()])
    train_accuracy = compute_accuracy(
        np.array([0 if p < 0.5 else 1 for p in train_predictions]),
        np.array(y_train))

    # predict test accuracy and error
    test_predictions = np.array(
        [network.predict(np.array(row)) for _, row in X_test.iterrows()])
    test_accuracy = compute_accuracy(
        np.array([0 if p < 0.5 else 1 for p in test_predictions]),
        np.array(y_test))

    if verbose:
        print('Training accuracy:', train_accuracy)
        print('Test accuracy:', test_accuracy)
        print('Training error:', (100 - train_accuracy) / 100)
        print('Test error:', (100 - test_accuracy) / 100)

        # plot cross-entropy curve
        plot_learning_curve(ce_history, 'cross_entropy')

        # plot accuracy curve
        plot_learning_curve(acc_history, 'accuracy')
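
Because the function iterates with X_train.iterrows(), X is expected to be a pandas DataFrame. A hedged usage sketch; the file and column names are hypothetical:

# Hypothetical call; 'data.csv' and the 'target' column are assumed names.
import pandas as pd

df = pd.read_csv('data.csv')
X = df.drop(columns=['target'])
y = df['target']
classification(X, y, learning_rate=0.01, num_epochs=100, verbose=True)

Example #4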
def test(X_test, Y_test):
    """
    restore the model, and test the model by test dataset

    :param X_test: mnist test dataset
    :param Y_test: mnist test dataset
    :return:
    """

    keep_prob = tf.constant(1.)
    prediction = model.convolution(X_test, keep_prob)
    accuracy = model.compute_accuracy(Y_test, prediction)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        # no explicit initialization needed: all variables are restored below
        ckpt = tf.train.get_checkpoint_state('model')
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            tf.logging.fatal('No model to restore!!!')
        test_acc = sess.run(accuracy) * 100.
        print("test accuracy: %.2f%%" % test_acc)
Example #5
def evaluate_model():
    """Evaluate model with calculating test accuracy
	"""
    sess = setup_tensorflow()

    # Set up the input pipeline for queued inputs
    with tf.name_scope('evaluate_input'):
        evaluate_features, evaluate_labels = input_pipeline.get_files(
            evaluate_dir)

    # Build the model graph
    output, var_list, is_training1 = model.create_model(
        sess, evaluate_features, evaluate_labels)

    # Create the model loss and optimizer (the optimizer is unused here)
    with tf.name_scope("loss"):
        total_loss, softmax_loss = model.compute_loss(output, evaluate_labels)

    (global_step, learning_rate,
     minimize) = model.create_optimizer(total_loss, var_list)

    # Accuracy setup
    out_eval, eval_input, eval_label, accuracy, is_training2 = model.compute_accuracy(
        sess)

    sess.run(tf.global_variables_initializer())

    # Basic stuff for input pipeline
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # Calculate the number of batches to run (integer division for range())
    num_batches = EVALUATE_DATASET_SIZE // FLAGS.BATCH_SIZE

    # Add ops to restore all the variables.
    saver = tf.train.Saver()

    # Path of the checkpoint whose weights should be loaded
    saver.restore(sess, "./model/model100.ckpt")

    # Calculate accuracy over the whole evaluation set
    total_accuracy = 0

    for batch in range(1, num_batches + 1):

        # Load input from the pipeline in batches , batch by batch
        input_batch, label_batch = sess.run(
            [evaluate_features, evaluate_labels])

        feed_dict = {
            eval_input: input_batch,
            eval_label: label_batch,
            is_training2: False
        }
        ops = [out_eval, accuracy]

        # Get the accuracy on evaluate batch run
        _, acc = sess.run(ops, feed_dict=feed_dict)

        print(" batch /" + str(batch) + " /" + str(num_batches) + " acc: " +
              str(acc))
        total_accuracy += acc

    # The loop runs num_batches times, so average over num_batches
    total_accuracy /= num_batches

    # Total accuracy for the evaluation dataset
    print("ACCURACY: " + str(total_accuracy))
Example #6
def train_model():
	"""Training model with calculating training and dev accuracy
	"""
	sess = setup_tensorflow()

	# SetUp Input PipeLine for queue inputs
	with tf.name_scope('train_input'):
		train_features, train_labels = input_pipeline.get_files(train_dir)
	with tf.name_scope('dev_input'):
		dev_features, dev_labels = input_pipeline.get_files(dev_dir)

	# Build the model graph
	output, var_list, is_training1 = model.create_model(sess, train_features, train_labels)

	# Create the model loss and optimizer
	with tf.name_scope("loss"):
		total_loss, softmax_loss = model.compute_loss(output, train_labels)

	(global_step, learning_rate, minimize) = model.create_optimizer(total_loss, var_list)	

	# Add the loss summary for TensorBoard
	tf.summary.scalar("loss", total_loss)

	# Accuracy setup
	out_eval, eval_input, eval_label, accuracy, is_training2 = model.compute_accuracy(sess)

	sess.run(tf.global_variables_initializer())
	
	# Basic stuff for input pipeline
	coord = tf.train.Coordinator()
	threads = tf.train.start_queue_runners(sess=sess, coord=coord)

	# Add ops to save and restore all the variables.
	saver = tf.train.Saver()


	num_batches = TRAINING_DATASET_SIZE // FLAGS.BATCH_SIZE
	num_batches_dev = DEV_DATASET_SIZE // FLAGS.BATCH_SIZE

	# Add the computation graph to the summary writer
	writer = tf.summary.FileWriter(summary_dir)
	writer.add_graph(sess.graph)

	merged_summaries = tf.summary.merge_all()

	for epoch in range(1, EPOCHS + 1):

		# Train Model feeding data in batches calculating total loss
		Tsloss = 0
		Tloss = 0

		for batch in range(1, num_batches + 1):
			feed_dict = {learning_rate: LEARNING_RATE, is_training1: True}
			ops = [minimize, softmax_loss, total_loss, merged_summaries]
			_, sloss, loss, summaries = sess.run(ops, feed_dict=feed_dict)
			#print ("Epoch /" + str (epoch) + " /" + str(EPOCHS)+" batch /" + str (batch) + " /" + str(num_batches)   + " ; Loss " + str(loss)+ " softmax Loss " + str(sloss))
			Tsloss += sloss
			Tloss  += loss

		# The loop runs num_batches times, so average over num_batches
		Tsloss /= num_batches
		Tloss /= num_batches

		print ("Epoch /" + str (epoch) + " /" + str(EPOCHS)  + " ; Loss " + str(Tloss)+ " softmax Loss " + str(Tsloss))

		# Calculate training accuracy over the whole training set
		total_accuracy = 0
		
		for batch in range(1, num_batches + 1):

			input_batch, label_batch = sess.run([train_features, train_labels])

			feed_dict = {eval_input: input_batch, eval_label: label_batch, is_training2: False}
			ops = [out_eval, accuracy]
			_, acc = sess.run(ops, feed_dict=feed_dict)

			#print("Epoch /" + str (epoch) + " /" + str(EPOCHS)+" batch /" + str (batch) + " /" + str(num_batches) + " acc: " + str( acc ) )
			total_accuracy += acc
		
		total_accuracy /= num_batches

		print(" TRAINING ACCURACY : " + str( total_accuracy ) )


		# Calculate dev accuracy
		total_accuracy = 0

		for batch in range(1, num_batches_dev + 1):

			input_batch, label_batch = sess.run([dev_features, dev_labels])

			feed_dict = {eval_input: input_batch, eval_label: label_batch, is_training2: False}
			ops = [out_eval, accuracy]
			_, acc = sess.run(ops, feed_dict=feed_dict)

			#print("Epoch /" + str (epoch) + " /" + str(EPOCHS)+" batch /" + str (batch) + " /" + str(num_batches_dev) + " acc: " + str( acc ) )
			total_accuracy += acc
		
		total_accuracy /= num_batches_dev

		print(" DEV ACCURACY : " + str( total_accuracy ) )
		

		# Write summary to logdir, tagged with the epoch number
		writer.add_summary(summaries, epoch)
		print("Summary written to logdir")

		# Save the model after each epoch
		make_dir_if_not_exists(model_dir)
		save_path = saver.save(sess, model_dir + "model" + str(epoch) +".ckpt")
		print("Model saved in path: %s" % save_path)
Example #7
def main():
    """

    :return:
    """

    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
    minibatch_size = 128
    minibatch_num = mnist.train.num_examples // minibatch_size
    # learning-rate decay state: global_ is fed the current step each run
    train_step = 0
    init_lr = 1e-3
    global_ = tf.Variable(tf.constant(0))

    keep_prob = tf.placeholder(tf.float32)
    X = tf.placeholder(tf.float32, [None, 784])
    Y = tf.placeholder(tf.float32, [None, 10])
    prediction = model.convolution(X, keep_prob)

    cross_entropy = model.compute_cost(Y, prediction)
    learning_rate = tf.train.exponential_decay(init_lr,
                                               global_,
                                               10,
                                               0.96,
                                               staircase=True)
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
    accuracy = model.compute_accuracy(Y, prediction)

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state('model')
        initial_epoch = 1
        if ckpt and ckpt.model_checkpoint_path:
            # recover the model from checkpoint
            saver.restore(sess, ckpt.model_checkpoint_path)
            initial_epoch = int(ckpt.model_checkpoint_path.rsplit('-', 1)[1])
        print('starting from epoch: {}'.format(initial_epoch))
        for epoch in range(initial_epoch, 100):
            # train the model
            print('training epoch: {}'.format(epoch))
            for minibatch in range(minibatch_num):
                minibatch_xs, minibatch_ys = mnist.train.next_batch(
                    minibatch_size)
                train_step += 1
                _, train_acc, lr = sess.run(
                    [optimizer, accuracy, learning_rate],
                    feed_dict={
                        X: minibatch_xs,
                        Y: minibatch_ys,
                        keep_prob: 0.5,
                        global_: train_step
                    })

                if minibatch % 100 == 0:
                    # display the train accuracy
                    print(
                        "iter %3d:\tlearning rate=%f,\ttraining accuracy=%.2f%%"
                        % (minibatch, lr, train_acc * 100))

            # run validation after every epoch
            validation_acc = sess.run(accuracy,
                                      feed_dict={
                                          X: mnist.validation.images,
                                          Y: mnist.validation.labels,
                                          keep_prob: 1.
                                      })
            print('---------------------------------------------------------')
            print("epoch: %3d, validation accuracy: %.2f%%" %
                  (epoch, validation_acc * 100))
            print('---------------------------------------------------------')
            # save the model
            saver.save(sess, './model/my-model', global_step=epoch)
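
A minimal entry point for this script; nothing here is assumed beyond the main() defined above:

if __name__ == '__main__':
    main()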