Example #1
    def load_dataset(self,
                     dataset_name="CT",
                     domain_A_folder="output8",
                     domain_B_folder="output5_x_128"):
        self.dataset_name = dataset_name

        if self.dataset_name == "MNIST":
            # Configure MNIST and MNIST-M data loader
            self.data_loader = DataLoader(img_res=(self.img_rows,
                                                   self.img_cols))
        elif self.dataset_name == "CT":
            # Paired CT body volumes and liver masks for domain A
            bodys_filepath_A = "/home/lulin/na4/src/output/{}/train/bodys.npy".format(
                domain_A_folder)
            masks_filepath_A = "/home/lulin/na4/src/output/{}/train/liver_masks.npy".format(
                domain_A_folder)
            self.Dataset_A = MyDataset(
                paths=[bodys_filepath_A, masks_filepath_A],
                batch_size=self.batch_size,
                augment=False,
                seed=17,
                domain="A")

            # Paired CT body volumes and liver masks for domain B
            bodys_filepath_B = "/home/lulin/na4/src/output/{}/train/bodys.npy".format(
                domain_B_folder)
            masks_filepath_B = "/home/lulin/na4/src/output/{}/train/liver_masks.npy".format(
                domain_B_folder)
            self.Dataset_B = MyDataset(
                paths=[bodys_filepath_B, masks_filepath_B],
                batch_size=self.batch_size,
                augment=False,
                seed=17,
                domain="B")
        else:
            raise ValueError(
                "Unknown dataset_name: {}".format(self.dataset_name))
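The CT branch depends on the project's MyDataset class, which is not shown on this page. As a rough stand-in, here is a minimal sketch assuming each .npy file holds an aligned (N, H, W) array; MyDatasetSketch and next_batch are illustrative names, not the real API:

import numpy as np

class MyDatasetSketch:
    """Illustrative stand-in for MyDataset; not the project's real class."""

    def __init__(self, paths, batch_size, augment=False, seed=17, domain="A"):
        # paths = [bodys_path, masks_path]; arrays are assumed index-aligned.
        self.bodys, self.masks = (np.load(p) for p in paths)
        self.batch_size = batch_size
        self.domain = domain
        self.rng = np.random.RandomState(seed)

    def next_batch(self):
        # Sample a random aligned mini-batch of bodies and liver masks.
        idx = self.rng.randint(0, len(self.bodys), size=self.batch_size)
        return self.bodys[idx], self.masks[idx]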
Example #2

# Snippet context (assumed): json, pathlib, and numpy as np are imported at
# module level; nlp is a loaded spaCy pipeline; DataLoader and Model are
# project classes.
def main():
    with open('config.json', 'r') as f:
        configs = json.load(f)
    model_dir = configs['data']['model_dir']
    train_dir = configs['data']['train_dir']

    if model_dir is not None:
        model_dir = pathlib.Path(model_dir)
    if train_dir is None:
        print('Please provide training directory!')
        return
    train_dir = pathlib.Path(train_dir)

    data = DataLoader(nlp, configs)
    train_texts, train_labels, val_texts, val_labels = data.read_data(
        configs, train_dir)

    print("Parsing texts...")

    train_docs = list(nlp.pipe(train_texts))
    val_docs = list(nlp.pipe(val_texts))
    if configs['training']['by_sentence']:
        train_docs, train_labels = data.get_labelled_sentences(
            train_docs, train_labels)
        val_docs, val_labels = data.get_labelled_sentences(
            val_docs, val_labels)

    train_vec = data.get_vectors(train_docs)
    val_vec = data.get_vectors(val_docs)
    predictions = []
    model = Model(nlp, configs, predictions, val_vec)

    model.train_model(train_vec, train_labels, val_vec, val_labels)

    predictions = np.array(predictions)

    ensemble_prediction = model.model_evaluation(val_labels)

    val_labels = np.argmax(val_labels, axis=1)

    print('We got ', np.sum(ensemble_prediction != val_labels), 'out of ',
          val_labels.shape[0], 'misclassified texts')
    print('Here is the list of misclassified texts:\n')

    val_texts = np.array(val_texts).reshape(-1)

    print(val_texts[ensemble_prediction != val_labels])
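The boolean-mask idiom at the end is easiest to see on toy data; a self-contained sketch:

import numpy as np

preds = np.array([0, 2, 1, 1])
labels = np.array([0, 1, 1, 2])
texts = np.array(["a", "b", "c", "d"])

mask = preds != labels  # boolean mask of misclassified items
print(np.sum(mask), "out of", labels.shape[0], "misclassified texts")
print(texts[mask])      # -> ['b' 'd']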
Example #3

# Snippet context (assumed): keras, os, NUM_CLASSES, BATCH_SIZE, NN_model,
# ModelCheckpoint, and DataLoader are defined at module level.
def main(epochs=15,
         save_weights_path="./Weights/mnist_weights.hdf5",
         mode="train",
         num_classes=NUM_CLASSES,
         useCNN=False):
    # Make sure the checkpoint directory exists.
    dirname = os.path.dirname(save_weights_path)
    if dirname and not os.path.exists(dirname):
        os.makedirs(dirname)

    img_rows = 32
    img_cols = 32
    data_loader = DataLoader(img_res=(img_rows, img_cols))
    input_shape = (32, 32, 3)

    if mode == "train":
        model = NN_model(input_shape, num_classes, useCNN=useCNN)
        checkpointer = ModelCheckpoint(filepath=save_weights_path,
                                       verbose=1,
                                       save_best_only=True,
                                       save_weights_only=True,
                                       monitor='val_acc')
        model.fit(data_loader.mnist_X,
                  keras.utils.to_categorical(data_loader.mnist_y, 10),
                  epochs=epochs,
                  shuffle=True,
                  validation_split=0.05,
                  batch_size=BATCH_SIZE,
                  callbacks=[checkpointer])
        # Best weights are already saved by the ModelCheckpoint callback.
        print("All done.")
    elif mode == "test":
        model = NN_model(input_shape, num_classes, useCNN=useCNN)
        model.load_weights(save_weights_path, by_name=True)
        score = model.evaluate(
            data_loader.mnistm_X,
            keras.utils.to_categorical(data_loader.mnistm_y, 10))
        print("Accuracy on test set: {}".format(score[1] * 100))
        print("All done.")
    else:
        raise ValueError("'mode' should be 'train' or 'test'.")
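A plausible entry point for this script, using the defaults above (a sketch; it assumes NUM_CLASSES and BATCH_SIZE exist at module level, as the signature implies):

if __name__ == "__main__":
    # Train first, then score the checkpointed weights on the MNIST-M test set.
    main(mode="train")
    main(mode="test")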
Example #4

    def load_dataset(self):
        # Configure MNIST and MNIST-M data loader
        self.data_loader = DataLoader(img_res=(self.img_rows, self.img_cols))
Example #5

from __future__ import print_function, division

import matplotlib.pyplot as plt

from data_processing import DataLoader

# Configure MNIST and MNIST-M data loader
data_loader = DataLoader(img_res=(32, 32))

mnist, _ = data_loader.load_data(domain="A", batch_size=25)
mnistm, _ = data_loader.load_data(domain="B", batch_size=25)

r, c = 5, 5

# Save a 5x5 grid of samples from each domain as 0.png and 1.png.
for img_i, imgs in enumerate([mnist, mnistm]):
    fig, axs = plt.subplots(r, c)
    cnt = 0
    for i in range(r):
        for j in range(c):
            axs[i, j].imshow(imgs[cnt])
            axs[i, j].axis('off')
            cnt += 1
    fig.savefig("%d.png" % img_i)
    plt.close()
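A small variant of the same loop, in case both domains should land in a single figure rather than two files (a sketch reusing the variables above):

# Stack both 5x5 grids into one 10x5 figure instead of two separate files.
fig, axs = plt.subplots(2 * r, c)
for row, imgs in enumerate([mnist, mnistm]):
    for i in range(r):
        for j in range(c):
            axs[row * r + i, j].imshow(imgs[i * c + j])
            axs[row * r + i, j].axis('off')
fig.savefig("combined.png")
plt.close()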
Example #6

    # Snippet context (assumed): pandas as pd and numpy as np are imported,
    # configs holds the parsed config.json, and add_time / run_lstm are
    # project helpers.
    get_res = np.array(pd.read_csv('data/toPredict_noLabel.csv'))
    cur_road_index = 0
    cur_road = get_res[cur_road_index, 1]
    row_test_data = np.array(pd.read_csv('data/toPredict_train_TTI.csv'))
    row_train_data = np.array(pd.read_csv('data/train_TTI.csv'))
    totalPrediction = []  # collected per-road predictions

    for i in range(12):
        print("Predicting {0}th road, Num: {1} ".format(i+1, cur_road))
        test = row_test_data[row_test_data[:, 0] == cur_road]
        test = add_time(test)

        train = row_train_data[row_train_data[:, 0] == cur_road]
        train = add_time(train)

        data = DataLoader(train, test)
        x, y = data.get_train_data(
            seq_len=configs['data']['sequence_length'],
            normalise=configs['data']['normalise']
        )
        x_test = data.get_test_data(
            seq_len=configs['data']['sequence_length'],
            normalise=configs['data']['normalise']
        )
        cur_road_index += 21
        cur_road = get_res[cur_road_index, 1]
        totalPrediction.append(run_lstm(data, x_test))
    totalPrediction = np.array(totalPrediction)
    print(totalPrediction.shape)
    l, r = np.hsplit(totalPrediction, [21])
    res = l.reshape(1, -1).squeeze().tolist()
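The final split-and-flatten step can be sanity-checked in isolation; here totalPrediction is mocked as a (12, 42) array (the real width depends on run_lstm, so 42 is an assumed value; only the first 21 columns per road are kept):

import numpy as np

totalPrediction = np.arange(12 * 42).reshape(12, 42)  # mocked predictions
l, r = np.hsplit(totalPrediction, [21])    # l has shape (12, 21)
res = l.reshape(1, -1).squeeze().tolist()  # 12 roads * 21 steps = 252 values
print(len(res))  # -> 252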