Example #1
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
             cross_entropy_loss, input_image, correct_label, keep_prob,
             learning_rate):
    """
    Train neural network and print out the loss during training.
    :param sess: TF Session
    :param epochs: Number of epochs
    :param batch_size: Batch size
    :param get_batches_fn: Function to get batches of training data.  Call using get_batches_fn(batch_size)
    :param train_op: TF Operation to train the neural network
    :param cross_entropy_loss: TF Tensor for the amount of loss
    :param input_image: TF Placeholder for input images
    :param correct_label: TF Placeholder for label images
    :param keep_prob: TF Placeholder for dropout keep probability
    :param learning_rate: TF Placeholder for learning rate
    """
    losses = []
    for epoch in range(epochs):
        loss = None
        s_time = time.time()
        for image, labels in get_batches_fn(batch_size):
            _, loss = sess.run(
                [train_op, cross_entropy_loss],
                feed_dict={
                    input_image: image,
                    correct_label: labels,
                    keep_prob: KEEP_PROB,
                    learning_rate: LEARNING_RATE
                })
            losses.append(loss)
        print("[Epoch: {0}/{1} Loss: {2:4f} Time: {3}]".format(
            epoch + 1, epochs, loss,
            str(timedelta(seconds=(time.time() - s_time)))))
    helper.plot_loss(RUNS_DIR, losses, "loss_graph")
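This snippet relies on module-level names that are not shown. A minimal sketch of the assumed context (values here are placeholders, not the original settings):

import time
from datetime import timedelta

import helper  # project module assumed to provide plot_loss(run_dir, losses, name)

KEEP_PROB = 0.5        # assumed dropout keep probability
LEARNING_RATE = 1e-4   # assumed learning rate
RUNS_DIR = './runs'    # assumed output directory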
Example #2
def exp(args):
    ExpModel = get_network(args.model,
                           dataset=args.dataset,
                           nb_U1=args.unlabeled1,
                           nb_U2=args.unlabeled2,
                           theta1=args.theta1,
                           theta2=args.theta2,
                           mode=args.mode,
                           loss_type=args.loss,
                           weight_decay=args.weightdecay)

    ExpModel.optimizer = get_optimizer(args.model, lr=args.learningrate, decay=args.lr_decay)

    print('Loading {} ...'.format(args.dataset))
    U_data1, U_data2, prior_true, x_test, y_test, prior = ExpModel.get_data()
    print('Done!')

    input_shape = U_data1.shape[1:]
    ExpModel.build_model(prior, input_shape)
    history, loss_test = ExpModel.fit_model(U_data1=U_data1,
                                            U_data2=U_data2,
                                            batch_size=args.batchsize,
                                            epochs=args.epoch,
                                            x_test=x_test,
                                            y_test=y_test)

    np_loss_test = np.array(loss_test)
    loss_test_file = build_file_name(args.dataset, args.mode, args.unlabeled1,
                                     args.unlabeled2, args.theta1, args.theta2,
                                     args.loss, phase='test', figure=False)
    np.savetxt(loss_test_file, np_loss_test, newline="\r\n")

    plot_loss(np_loss_test, args.epoch)
    figure_file = build_file_name(args.dataset, args.mode, args.unlabeled1,
                                  args.unlabeled2, args.theta1, args.theta2,
                                  args.loss, phase='test', figure=True)
    plt.savefig(figure_file)
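exp() consumes an argparse namespace; a hypothetical parser covering exactly the attributes read above might look like this (flag names and types are inferred, not confirmed by the source):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--model')
parser.add_argument('--dataset')
parser.add_argument('--unlabeled1', type=int)
parser.add_argument('--unlabeled2', type=int)
parser.add_argument('--theta1', type=float)
parser.add_argument('--theta2', type=float)
parser.add_argument('--mode')
parser.add_argument('--loss')
parser.add_argument('--weightdecay', type=float)
parser.add_argument('--learningrate', type=float)
parser.add_argument('--lr_decay', type=float)
parser.add_argument('--batchsize', type=int)
parser.add_argument('--epoch', type=int)

args = parser.parse_args()
exp(args)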
Example #3
def train_nn(sess,
             epochs,
             batch_size,
             get_batches_fn,
             train_op,
             cross_entropy_loss,
             input_image,
             correct_label,
             keep_prob,
             learning_rate,
             logits=None,
             image_shape=None,
             data_dir=None):
    """
    Train neural network and print out the loss during training.
    :param sess: TF Session
    :param epochs: Number of epochs
    :param batch_size: Batch size
    :param get_batches_fn: Function to get batches of training data.  Call using get_batches_fn(batch_size)
    :param train_op: TF Operation to train the neural network
    :param cross_entropy_loss: TF Tensor for the amount of loss
    :param input_image: TF Placeholder for input images
    :param correct_label: TF Placeholder for label images
    :param keep_prob: TF Placeholder for dropout keep probability
    :param learning_rate: TF Placeholder for learning rate
    :param logits: (Optional) TF Tensor of logits, needed to save inference samples
    :param image_shape: (Optional) Image shape, needed to save inference samples
    :param data_dir: (Optional) Data directory, needed to save inference samples
    """

    losses = []
    # TODO: Implement function

    sess.run(tf.global_variables_initializer())

    print("Starting Training")

    for i in range(epochs):
        epoch = i + 1
        print("=============================")
        print("EPOCH NR {} ...".format(epoch))
        print("=============================")
        for image, label in get_batches_fn(batch_size):
            # Training
            _, loss = sess.run(
                [train_op, cross_entropy_loss],
                feed_dict={
                    input_image: image,
                    correct_label: label,
                    keep_prob: 0.5,
                    learning_rate: 0.0001
                })
            print("loss = {:.3f}".format(loss))
            losses.append(loss)

        if epoch % 10 == 0 and logits is not None:
            print("Inference samples after epoch", epoch)
            helper.save_inference_samples(runs_dir, data_dir, sess,
                                          image_shape, logits, keep_prob,
                                          input_image, epoch)

    helper.plot_loss(plot_dir, losses, "loss_vs_epoch")
Example #4
def run():

    num_classes = 2
    image_shape = (160, 576)

    data_dir = './data'
    runs_dir = './runs'

    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/

    correct_label = tf.placeholder(tf.int32, [None, None, None, num_classes],
                                   name='correct_label')
    learning_rate = tf.placeholder(tf.float32, name='learning_rate')

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        # Build NN using load_vgg, layers, and optimize function
        input_image, keep_prob, layer3, layer4, layer7 = load_vgg(
            sess, vgg_path)
        nn_last_layer = layers(layer3, layer4, layer7, num_classes)
        logits, train_op, cross_entropy_loss = optimize(
            nn_last_layer, correct_label, learning_rate, num_classes)

        # Train NN using the train_nn function
        loss_history = train_nn(sess, EPOCHS, BATCH_SIZE, get_batches_fn,
                                train_op, cross_entropy_loss, input_image,
                                correct_label, keep_prob, learning_rate)

        saver = tf.train.Saver()
        saver.save(sess, 'model')
        tf.train.write_graph(sess.graph_def, '', 'model.pb', False)

        # Save inference data using helper.save_inference_samples
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                      logits, keep_prob, input_image)

        # Plot loss
        helper.plot_loss(runs_dir, loss_history)
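This run() assumes module-level EPOCHS and BATCH_SIZE constants, and a train_nn that returns the recorded losses (unlike Example #1, whose train_nn only plots them). A minimal sketch of the assumed constants, with placeholder values:

EPOCHS = 30      # placeholder; the original value is not shown
BATCH_SIZE = 8   # placeholder; the original value is not shown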
Example #5
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
             cross_entropy_loss, input_image, correct_label, keep_prob,
             learning_rate):
    """
    Train neural network and print out the loss during training.
    :param sess: TF Session
    :param epochs: Number of epochs
    :param batch_size: Batch size
    :param get_batches_fn: Function to get batches of training data.  Call using get_batches_fn(batch_size)
    :param train_op: TF Operation to train the neural network
    :param cross_entropy_loss: TF Tensor for the amount of loss
    :param input_image: TF Placeholder for input images
    :param correct_label: TF Placeholder for label images
    :param keep_prob: TF Placeholder for dropout keep probability
    :param learning_rate: TF Placeholder for learning rate
    """
    # TODO: Implement function
    # Training loop
    losses = []
    print(
        "***************************************************************************"
    )
    print("Start Training for {} epochs with {} batch size ".format(
        epochs, batch_size))
    for epoch in range(epochs):
        loss = None
        start_time = time.time()
        for image, labels in get_batches_fn(batch_size):
            _, loss = sess.run(
                [train_op, cross_entropy_loss],
                feed_dict={
                    input_image: image,
                    correct_label: labels,
                    keep_prob: KEEP_PROB,
                    learning_rate: LR
                })
            losses.append(loss)
        print("Epoch: {} of {}, Loss: {} in {} Time".format(
            epoch + 1, epochs, round(loss, 4),
            str(timedelta(seconds=time.time() - start_time))))
    helper.plot_loss('./runs', losses, "Training Loss")
    print("========Training has ended========")
    print(
        "***************************************************************************"
    )
Example #6
def main():
    '''
    Main block

    Steps:
    1. Pull data (MNIST)
    2. Initialise network
    3. Train network
    4. Save weights
    '''

    DATA = download_mnist.load_mnist()
    validation = []
    for key in ['fold-{f}'.format(f=f) for f in range(4)]:
        validation += DATA[key]
    validation = np.array(validation)

    epochs = 8
    initial_lr = 8e-3
    final_lr = 8e-6

    if args.question in ["1", "2", "5"]:
        model = network.MLP([784, 1000, 500, 250, 10])

        (train_losses, val_losses, test_losses,
         train_accuracies, val_accuracies, test_accuracies) = model.fit(
             np.array(DATA['train']), validation, np.array(DATA['fold-4']),
             epochs=epochs,
             initial_lr=initial_lr,
             final_lr=final_lr)
        print(val_losses, test_losses)

        helper.plot_loss([train_losses, val_losses, test_losses],
                         epochs=epochs,
                         name="sigmoid_loss")
        helper.plot_accuracy(
            [train_accuracies, val_accuracies, test_accuracies],
            epochs=epochs,
            name="sigmoid_accuracy")
        run_stats(model, DATA, tag="sigmoid")

    elif args.question == "3":
        epochs = 4
        initial_lr = 8e-1
        final_lr = 8e-6
        variance = 0.00001

        model = network.MLP([784, 1000, 500, 250, 10], activation="relu",
                            variance=variance)

        (train_losses, val_losses, test_losses,
         train_accuracies, val_accuracies, test_accuracies) = model.fit(
             np.array(DATA['train']), validation, np.array(DATA['fold-4']),
             epochs=epochs,
             initial_lr=initial_lr,
             final_lr=final_lr)
        print(val_losses, test_losses)

        helper.plot_loss([train_losses, val_losses, test_losses],
                         epochs=epochs,
                         name="relu_loss")
        helper.plot_accuracy(
            [train_accuracies, val_accuracies, test_accuracies],
            epochs=epochs,
            name="relu_accuracy")
        run_stats(model, DATA, tag="relu")

    elif args.question == "4":
        train_data = noise_addition(DATA['train'], sigma=1e-3)

        model = network.MLP([784, 1000, 500, 250, 10])

        (train_losses, val_losses, test_losses,
         train_accuracies, val_accuracies, test_accuracies) = model.fit(
             np.array(train_data), validation, np.array(DATA['fold-4']),
             l2=0.1,
             l1=0.01,
             epochs=epochs,
             initial_lr=initial_lr,
             final_lr=final_lr)

        helper.plot_loss([train_losses, val_losses, test_losses],
                         epochs=epochs,
                         name="sigmoid_regularised_loss")
        helper.plot_accuracy(
            [train_accuracies, val_accuracies, test_accuracies],
            epochs=epochs,
            name="sigmoid_regularised_accuracy")
        run_stats(model, DATA, tag="sigmoid_regularised")

    elif args.question == "6":
        epochs = 10
        initial_lr = 8e-4
        final_lr = 8e-6
        variance = 0.001

        model = network.MLP([64, 32, 10])
        train_data = preprocess(DATA['train'])
        val_data = np.array(preprocess(validation))
        test_data = np.array(preprocess(DATA['fold-4']))
        print(val_data.shape)

        (train_losses, val_losses, test_losses,
         train_accuracies, val_accuracies, test_accuracies) = model.fit(
             train_data, val_data, test_data,
             epochs=epochs,
             l2=0.1,
             l1=0.01,
             initial_lr=initial_lr,
             final_lr=final_lr)
        print(val_losses, test_losses)

        DATA_HOG_fold = {
            'fold-{f}'.format(f=f): preprocess(DATA['fold-{f}'.format(f=f)])
            for f in range(4)
        }

        helper.plot_loss([train_losses, val_losses, test_losses],
                         epochs=epochs,
                         name="sigmoid_HOG_loss")
        helper.plot_accuracy(
            [train_accuracies, val_accuracies, test_accuracies],
            epochs=epochs,
            name="sigmoid_HOG_accuracy")
        run_stats(model, DATA_HOG_fold, tag="sigmoid")

    elif args.question == "7":
        train_data = np.array(preprocess(DATA['train']))
        val_data = np.array(preprocess(validation))
        test_data = np.array(preprocess(DATA['fold-4']))

        svc = svm.SVC(kernel='linear')
        labels = np.array([
            np.where(train_data[:, 1][x] == 1)[0][0]
            for x in range(len(train_data[:, 1]))
        ])
        # Stack feature vectors into an (n_samples, n_features) matrix,
        # matching how the test set is built below.
        train_data = np.vstack(train_data[:, 0])
        svc.fit(train_data, labels)

        y_true = np.array([
            np.where(test_data[:, 1][x] == 1)[0][0]
            for x in range(len(test_data[:, 1]))
        ])
        test_data = np.vstack(test_data[:, 0])
        y_pred = svc.predict(test_data)

        print(sklearn.metrics.accuracy_score(y_true, y_pred))
        print(sklearn.metrics.classification_report(y_true, y_pred))

    else:
        print("Invalid question {}".format(args.question))
Example #7
                    print('Current best acc: ', best_val)
                    print("VALUES: ", values)
                print('Final training loss: ', stats['loss_history'][-1])
                print('Final validation loss: ', stats['loss_val_history'][-1])
                print('Used values: hiddensize ', hs, 'learning rate: ', lr,
                      'reg: ', r)
                print('Final validation accuracy: ',
                      stats['val_acc_history'][-1])

    print('Best Accuracy: ', best_val)
    print('Best values: \nHidden_size: ', values[0], '\nLearning Rate: ',
          values[1], '\nReg: ', values[2])
    net = TwoLayerNeuralNetwork(input_size, 300, num_classes)
    # Train the best net found
    # 55.1% accuracy with hidden_size: 300, learning rate: 0.001, reg: 0.5
    stats = net.train(X_train,
                      y_train,
                      X_val,
                      y_val,
                      num_iters=num_iters,
                      batch_size=batch_size,
                      learning_rate=values[1],
                      learning_rate_decay=learning_rate_decay,
                      reg=values[2],
                      verbose=True)
    final_acc = (net.predict(X_val) == y_val).mean()
    print('Final Accuracy reached: ', final_acc)
    helper.plot_net_weights(net)
    helper.plot_accuracy(stats)
    helper.plot_loss(stats)
Example #8
            scheduler.step()

            # Show, plot, save, test
            if iters % par.show_interval == 0:
                log.write("Epoch:" + str(ep) + " ITER:" + str(iters) +
                          " Loss:" + str(loss.data.item()) + "\n")
                print("Epoch:", str(ep), " ITER:", str(iters), " Loss:",
                      str(loss.data.item()))
                log.flush()

            if iters > 0:
                loss_list.append(loss.data.item())
            if iters % par.plot_interval == 0 and iters > 0:
                hl.plot_loss(loss=loss_list,
                             epoch=ep,
                             iteration=iters,
                             step=1,
                             loss_name="NLL",
                             loss_dir=par.model_dir)

            if iters % par.save_interval == 0:
                hl.save_model(model=mapNet_model,
                              model_dir=par.model_dir,
                              model_name="MapNet",
                              train_iter=iters)
            '''
            if iters % par.test_interval == 0:
            
                mp3d_test = Habitat_MP3D(par, seq_len=par.seq_len, config_file=par.test_config, action_list=par.action_list, 
                    with_shortest_path=par.with_shortest_path)
                evaluate_MapNet(par, test_iter=iters, test_data=mp3d_test)
            '''
Example #9
def run():
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    model_runs_dir = './runs/normal'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/
    epochs = 15
    batch_size = 5

    # Create a TensorFlow configuration object. This will be
    # passed as an argument to the session.
    config = tf.ConfigProto()
    config.gpu_options.allocator_type = 'BFC'

    # JIT level, this can be set to ON_1 or ON_2
    jit_level = tf.OptimizerOptions.ON_1
    config.graph_options.optimizer_options.global_jit_level = jit_level

    with tf.Session(config=config) as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network
        correct_label = tf.placeholder(tf.int32)
        learning_rate = tf.placeholder(tf.float32)

        # DONE: Build NN using load_vgg, layers, and optimize function
        input_image, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(
            sess, vgg_path)
        layer_output = layers(layer3_out, layer4_out, layer7_out, num_classes)
        logits, train_op, cross_entropy_loss = optimize(
            layer_output, correct_label, learning_rate, num_classes)

        # DONE: Train NN using the train_nn function
        sess.run(tf.global_variables_initializer())
        loss_log = train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
                            cross_entropy_loss, input_image, correct_label,
                            keep_prob, learning_rate)

        # for i in tf.get_default_graph().get_operations():
        #     print(i.name)

        # exit()

        # DONE: Save inference data using helper.save_inference_samples
        folder_name = helper.save_inference_samples(model_runs_dir, data_dir,
                                                    sess, image_shape, logits,
                                                    keep_prob, input_image)

        # Plot loss
        helper.plot_loss(model_runs_dir, loss_log, folder_name)

        # OPTIONAL: Apply the trained model to a video
        save_path = os.path.join(model_runs_dir, 'model')
        save_path_pb = os.path.join(model_runs_dir, 'model.pb')

        saver = tf.train.Saver(tf.trainable_variables())
        saver_def = saver.as_saver_def()
        print(saver_def.filename_tensor_name)
        print(saver_def.restore_op_name)

        saver.save(sess, save_path)
        tf.train.write_graph(sess.graph_def, '.', save_path_pb, as_text=False)
        print('Saved normal at : {}'.format(save_path))
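The checkpoint written above can later be restored with the standard TF 1.x pattern; a minimal sketch, assuming the paths used in this example:

import tensorflow as tf

with tf.Session() as sess:
    saver = tf.train.import_meta_graph('./runs/normal/model.meta')
    saver.restore(sess, './runs/normal/model')
    graph = tf.get_default_graph()
    # Tensors are then fetched by name, e.g. (the name is an assumption):
    # keep_prob = graph.get_tensor_by_name('keep_prob:0')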
Example #10
    data_folder = 'pancreas/'
    dataset_file_list = ['muraro_seurat.csv', 'baron_seurat.csv']
    cluster_similarity_file = data_folder + 'pancreas_metaneighbor.csv'
    code_save_file = data_folder + 'code_list.pkl'
    dataset_file_list = [data_folder + f for f in dataset_file_list]

    # read data
    dataset_list = pre_processing(dataset_file_list, pre_process_paras)
    cluster_pairs = read_cluster_similarity(cluster_similarity_file,
                                            similarity_thr)
    nn_paras['num_inputs'] = len(dataset_list[0]['gene_sym'])

    # training
    model, loss_total_list, loss_reconstruct_list, loss_transfer_list = training(
        dataset_list, cluster_pairs, nn_paras)
    plot_loss(loss_total_list, loss_reconstruct_list, loss_transfer_list,
              data_folder + 'loss.png')
    # extract codes
    code_list = testing(model, dataset_list, nn_paras)
    with open(code_save_file, 'wb') as f:
        pickle.dump(code_list, f)

    # combine datasets in dataset_list
    pre_process_paras = {
        'take_log': True,
        'standardization': False,
        'scaling': False
    }  # log TPM for uncorrected data
    dataset_list = pre_processing(dataset_file_list, pre_process_paras)
    cell_list = []
    data_list = []
    cluster_list = []
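The fragment references configuration dicts defined earlier in the script. A hypothetical sketch of their assumed shape (keys inferred from the usage above; values are placeholders):

similarity_thr = 0.9  # placeholder threshold passed to read_cluster_similarity
pre_process_paras = {'take_log': True, 'standardization': True, 'scaling': True}
nn_paras = {'num_inputs': None}  # filled in from the gene list, as above;
                                 # training() presumably reads more keys (e.g. epochs, lr)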
Example #11
def train_nn(sess, epochs, batch_size, get_train_batches_fn, get_valid_batches_fn, train_op, cross_entropy_loss,
             input_image, correct_label, keep_prob, learning_rate, iou, iou_op, saver, n_train, n_valid, lr):
    """
    Train neural network and print out the loss during training.
    :param sess: TF Session
    :param epochs: Number of epochs
    :param batch_size: Batch size
    :param get_train_batches_fn: Function to get batches of training data.  Call using get_train_batches_fn(batch_size)
    :param get_valid_batches_fn: Function to get batches of validation data.  Call using get_valid_batches_fn(batch_size)
    :param train_op: TF Operation to train the neural network
    :param cross_entropy_loss: TF Tensor for the amount of loss
    :param input_image: TF Placeholder for input images
    :param correct_label: TF Placeholder for label images
    :param keep_prob: TF Placeholder for dropout keep probability
    :param learning_rate: TF Placeholder for learning rate
    :param iou: TF Tensor for the mean IOU metric
    :param iou_op: TF Operation to update the IOU metric
    :param saver: TF Saver used to checkpoint the best model
    :param n_train: Number of training batches (for the progress bar)
    :param n_valid: Number of validation batches (for the progress bar)
    :param lr: Initial learning rate value
    """
    print("Start training with lr {} ...".format(lr))
    best_iou = 0
    for epoch in range(epochs):
        generator = get_train_batches_fn(batch_size)
        description = 'Train Epoch {:>2}/{}'.format(epoch + 1, epochs)
        start = timer()
        losses = []
        ious = []
        for image, label in tqdm(generator, total=n_train, desc=description, unit='batches'):
            _, loss, _ = sess.run([train_op, cross_entropy_loss, iou_op],
                                  feed_dict={input_image: image, correct_label: label,
                                             keep_prob: KEEP_PROB, learning_rate: lr})
            # print(loss)
            losses.append(loss)
            ious.append(sess.run(iou))
        end = timer()
        helper.plot_loss(RUNS_DIR, losses, "loss_graph_training")
        print("EPOCH {} with lr {} ...".format(epoch + 1, lr))
        print("  time {} ...".format(end - start))
        print("  Train Xentloss = {:.4f}".format(sum(losses) / len(losses)))
        print("  Train IOU = {:.4f}".format(sum(ious) / len(ious)))

        generator = get_valid_batches_fn(batch_size)
        description = 'Valid Epoch {:>2}/{}'.format(epoch + 1, epochs)
        losses = []
        ious = []
        for image, label in tqdm(generator, total=n_valid, desc=description, unit='batches'):
            loss, _ = sess.run([cross_entropy_loss, iou_op],
                               feed_dict={input_image: image, correct_label: label, keep_prob: 1})
            losses.append(loss)
            ious.append(sess.run(iou))
        helper.plot_loss(RUNS_DIR, losses, "loss_graph_validation")
        print("  Valid Xentloss = {:.4f}".format(sum(losses) / len(losses)))
        valid_iou = sum(ious) / len(ious)
        print("  Valid IOU = {:.4f}".format(valid_iou))

        if valid_iou > best_iou:
            saver.save(sess, os.path.join(MODELS, 'fcn8s'))
            saver.save(sess, os.path.join(MODELS, 'fcn8s.ckpt'))
            with open(os.path.join(MODELS, 'training.txt'), "w") as text_file:
                text_file.write("models/fcn8s: epoch {}, lr {}, valid_iou {}".format(epoch + 1, lr, valid_iou))
            print("  model saved")
            best_iou = valid_iou
        else:
            lr *= 0.5  # lr scheduling: halving on failure
            print("  no improvement => lr downscaled to {} ...".format(lr))
Example #12
print(np.shape(X))
print(np.shape(y))

X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.2,
                                                    random_state=0)

deep_model = Sequential()
deep_model.add(Dense(32, input_shape=(X.shape[1], ), activation='relu'))
deep_model.add(Dense(16, activation='relu'))
deep_model.add(Dense(8, activation='relu'))
deep_model.add(Dense(1))
deep_model.compile('adam', 'mean_squared_error')
deep_history = deep_model.fit(X_train,
                              y_train,
                              epochs=30,
                              verbose=0,
                              validation_split=0.2)

helper.plot_loss(deep_history)
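helper.plot_loss here receives a Keras History object; since fit() was called with validation_split=0.2, its history dict carries both 'loss' and 'val_loss'. A hypothetical sketch of such a helper:

import matplotlib.pyplot as plt

def plot_loss(history):
    """Plot training and validation loss curves from a Keras History."""
    plt.plot(history.history['loss'], label='train')
    plt.plot(history.history['val_loss'], label='validation')
    plt.xlabel('epoch')
    plt.ylabel('MSE loss')
    plt.legend()
    plt.show()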
