Example #1
def main(_):
    data_object = dataset('KSC')
    cg = CGAN(data_ob = data_object, sample_dir = FLAGS.sample_dir, output_size=FLAGS.output_size,
              learn_rate=FLAGS.learn_rate, batch_size=FLAGS.batch_size, z_dim=FLAGS.z_dim,
              y_dim=FLAGS.y_dim, log_dir=FLAGS.log_dir, model_path=FLAGS.model_path)
    if FLAGS.op == 0:
        cg.train()
    else:
        cg.test()
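This snippet reads everything from a FLAGS object defined elsewhere in the project. Below is a minimal sketch of how the missing flag declarations and entry point could look with the TF1 tf.app.flags API; every flag name comes from the attributes read in main() above, but all defaults and the checkpoint path are assumptions, not taken from the original code.

import tensorflow as tf

# Hypothetical flag declarations matching the attributes read in main()
flags = tf.app.flags
flags.DEFINE_integer("op", 0, "0 = train, anything else = test")
flags.DEFINE_string("sample_dir", "samples", "directory for generated samples")
flags.DEFINE_integer("output_size", 28, "spatial size of the generated output")
flags.DEFINE_float("learn_rate", 0.0002, "learning rate")
flags.DEFINE_integer("batch_size", 64, "mini-batch size")
flags.DEFINE_integer("z_dim", 100, "dimension of the noise vector")
flags.DEFINE_integer("y_dim", 10, "dimension of the condition (label) vector")
flags.DEFINE_string("log_dir", "logs", "TensorBoard log directory")
flags.DEFINE_string("model_path", "model/cgan.ckpt", "checkpoint path")
FLAGS = flags.FLAGS

if __name__ == "__main__":
    tf.app.run()  # parses the flags and calls main(_)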
Example #2
import pickle
import nltk
import preprocess
unknown_token = "UNKNOWN_TOKEN"
python_string = "worked"
tokenized_sentence = nltk.word_tokenize(python_string.lower())

data = preprocess.dataset()
# Represent the sentence with vocabulary words; out-of-vocabulary words become UNKNOWN_TOKEN
tokenized_sentence = [w if w in data.word_to_index else unknown_token for w in tokenized_sentence]
x = [data.word_to_index[word] for word in tokenized_sentence]
print(x)


with open('model.pkl', 'rb') as model_data:
    print("Loading the trained model...")

    rnn = pickle.load(model_data)
    predict = rnn.predict(x)

    print("predict shape = " + str(predict.shape))
    print(predict)
    # Map the predicted indices back to words (use a fresh loop variable so x is not shadowed)
    array_of_words = " ".join([data.index_to_word[i] for i in predict])

    print(array_of_words)
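The example only works if preprocess.dataset() exposes word_to_index and index_to_word mappings. A purely hypothetical stand-in for that assumed interface, useful for reading the snippet in isolation; the real project builds these mappings from its training corpus.

# Hypothetical stand-in for preprocess.dataset (not the project's real class)
class dataset:
    def __init__(self, vocab=("UNKNOWN_TOKEN", "worked", "it", "fine")):
        self.index_to_word = list(vocab)
        self.word_to_index = {w: i for i, w in enumerate(self.index_to_word)}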
Example #3
    transform_train = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    train_file_list = [
        'cifar10/data_batch_1', 'cifar10/data_batch_2', 'cifar10/data_batch_3',
        'cifar10/data_batch_4', 'cifar10/data_batch_5'
    ]
    val_file_list = ['cifar10/test_batch']
    length = len(preprocess.dataset(train_file_list))
    dataloader = torch.utils.data.DataLoader(preprocess.dataset(
        train_file_list, transform=transform_train),
                                             shuffle=True,
                                             batch_size=batch_size,
                                             num_workers=num_workers)

    val_dataloader = torch.utils.data.DataLoader(preprocess.dataset(
        val_file_list, transform=transform_test),
                                                 shuffle=True,
                                                 batch_size=batch_size,
                                                 num_workers=num_workers)

    model.train()
    best_pre = 0
    if arguments['pretrained_weight']:
Example #4
import torch.nn.functional as F
import numpy as np
import os
from config import config
from preprocess import Vocab, Preprocess, dataset
from model import Encoder, Attention, Decoder
from torch.utils.data import DataLoader, Dataset

if __name__ == "__main__":
    print('==> Loading config......')
    cfg = config()
    print('==> Preprocessing data......')
    voc = Vocab(cfg)
    voc.gen_counter_dict()
    voc.gen_vocab()
    cfg.vocab_len = voc.vocab_len
    print('The length of vocab is: {}'.format(cfg.vocab_len))

    prep = Preprocess(cfg, voc.vocab)
    pairs = prep.gen_pair_sen()
    print('pairs sentences generated.')
    pairs = prep.tokenize(pairs)
    print('sentences tokenized.')

    traindataset = dataset(pairs, voc.vocab)
    traindataloader = DataLoader(traindataset, batch_size=5, shuffle=False)
    one_iter = next(iter(traindataloader))  # .next() is Python 2 only; use the built-in next()

    encoder = Encoder(cfg)
    encoder_outputs, _ = encoder(one_iter['inputs'], one_iter['length'])
Example #5
import wrapper
import preprocess
import option
import torch
from torchvision import transforms
import os
import NN

if not option.loadModel:
    # Build a fresh model from the expression string in option.model (e.g. NN.AlexNet())
    model = eval(option.model)
else:
    # Otherwise resume from a previously saved wrapper checkpoint
    model = wrapper.load(option.loadModel)

train_dataset = preprocess.dataset(option.train_data,
                                   transform=option.transform_train)

val_dataset = preprocess.dataset(option.val_data,
                                 transform=option.transform_test)

ShengZhiyao = wrapper.wraper(model, train_dataset, val_dataset,
                             option.optimizer)

max_epoches = option.max_epoches
while ShengZhiyao.epoch < max_epoches:
    ShengZhiyao.Train()
    ShengZhiyao.Val()
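All run configuration in this example lives in the option module. Below is a sketch of the fields the script reads from it; the concrete values are assumptions chosen only to make the shape of the module clear, not the project's actual settings.

# option.py (illustrative sketch; every value here is an assumption)
from torchvision import transforms

model = "NN.AlexNet()"           # expression evaluated when no checkpoint is loaded
loadModel = ""                   # path to a saved wrapper checkpoint; empty trains from scratch
train_data = ["cifar10/data_batch_%d" % i for i in range(1, 6)]
val_data = ["cifar10/test_batch"]
transform_train = transforms.Compose([transforms.ToTensor()])
transform_test = transforms.Compose([transforms.ToTensor()])
optimizer = "SGD"                # consumed by wrapper.wraper
max_epoches = 50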
Example #6
    def train(self):
        if F.data2:
            data = pre2.dataset(extraction_step=self.extraction_step,
                                number_images_training=F.number_train_images,
                                batch_size=F.batch_size,
                                patch_shape=self.patch_shape)
        else:
            data = pre.dataset(num_classes=F.num_classes,
                               extraction_step=self.extraction_step,
                               number_images_training=F.number_train_images,
                               batch_size=F.batch_size,
                               patch_shape=self.patch_shape)

        global_step = tf.placeholder(tf.int32, [], name="global_step_epochs")

        # Learning rate
        learning_rate_ = tf.train.exponential_decay(F.learning_rate_,
                                                    global_step,
                                                    decay_steps=F.decay_step,
                                                    decay_rate=F.decay_rate,
                                                    staircase=True)

        # Optimizer operation
        _optim = tf.train.AdamOptimizer(
            learning_rate_, beta1=F.beta1).minimize(self.u_loss,
                                                    var_list=self.u_vars)

        tf.global_variables_initializer().run()

        # Load checkpoints if required
        if F.load_chkpt:
            try:
                load_model(F.checkpoint_dir, self.sess, self.saver)
                print("\n [*] Checkpoint loaded successfully!")
            except Exception:
                print("\n [!] Checkpoint loading failed!")
        else:
            print("\n [*] Checkpoint load not required.")

        if F.data2:
            patches_val, labels_val_patch, labels_val = pre2.preprocess_dynamic_lab(
                F.preprocesses_data2_directory,
                self.extraction_step,
                self.patch_shape,
                F.number_train_images,
                validating=F.training,
                testing=F.testing,
                num_images_testing=F.number_test_images)
        else:
            patches_val, labels_val_patch, labels_val = pre.preprocess_dynamic_lab(
                F.preprocesses_data_directory,
                F.num_classes,
                self.extraction_step,
                self.patch_shape,
                F.number_train_images,
                validating=F.training,
                testing=F.testing,
                num_images_testing=F.number_test_images)

        predictions_val = np.zeros((patches_val.shape[0], self.patch_shape[0],
                                    self.patch_shape[1], self.patch_shape[2]),
                                   dtype='uint8')
        max_par = 0.0
        max_loss = 100
        for epoch in range(int(F.epoch)):
            idx = 0
            batch_iter_train = data.batch_train()
            total_val_loss = 0
            total_train_loss = 0

            for patches_lab, labels in batch_iter_train:
                # Network update
                feed_dict = {
                    self.patches_labeled: patches_lab,
                    self.labels: labels,
                    self.phase: True,
                    global_step: epoch
                }
                _optim.run(feed_dict)

                # Evaluate loss for plotting/printing purposes
                feed_dict = {
                    self.patches_labeled: patches_lab,
                    self.labels: labels,
                    self.phase: True,
                    global_step: epoch
                }
                u_loss = self.u_loss.eval(feed_dict)
                total_train_loss = total_train_loss + u_loss

                # Update learning rate
                lrate = learning_rate_.eval({global_step: epoch})

                idx += 1
                print(("Epoch:[%2d] [%4d/%4d] Loss:%.2e \n") %
                      (epoch, idx, data.num_batches, u_loss))

            # Save model
            save_model(F.checkpoint_dir, self.sess, self.saver)

            if epoch % 3 == 0:
                avg_train_loss = total_train_loss / (idx * 1.0)
                print('\n\n')

                total_batches = int(patches_val.shape[0] / F.batch_size)
                print("Total number of Patches: ", patches_val.shape[0])
                print("Total number of Batches: ", total_batches)

                for batch in range(total_batches):
                    patches_feed = patches_val[batch *
                                               F.batch_size:(batch + 1) *
                                               F.batch_size, :, :, :, :]
                    labels_feed = labels_val_patch[batch *
                                                   F.batch_size:(batch + 1) *
                                                   F.batch_size, :, :, :]
                    feed_dict = {
                        self.patches_labeled: patches_feed,
                        self.labels: labels_feed,
                        self.phase: False
                    }
                    preds = self.Val_output.eval(feed_dict)
                    val_loss = self.u_loss.eval(feed_dict)

                    predictions_val[batch * F.batch_size:(batch + 1) *
                                    F.batch_size, :, :, :] = preds
                    print(
                        ("Validated Patch:[%8d/%8d]") % (batch, total_batches))
                    total_val_loss = total_val_loss + val_loss

                avg_val_loss = total_val_loss / (total_batches * 1.0)

                print("All validation patches Predicted")

                if (max_loss > avg_val_loss):
                    max_loss = avg_val_loss
                    save_model(F.best_checkpoint_dir_val, self.sess,
                               self.saver)
                    print("Best checkpoint updated from validation loss.")

                print("Shape of predictions_val, min and max:",
                      predictions_val.shape, np.min(predictions_val),
                      np.max(predictions_val))

                if F.data2:
                    val_image_pred = recompose3D_overlap(
                        predictions_val, 160, 127, 145, 8, 9, 9)
                else:
                    val_image_pred = recompose3D_overlap(
                        predictions_val, 144, 192, 256, 8, 8, 8)
                val_image_pred = val_image_pred.astype('uint8')

                print("Shape of Predicted Output Groundtruth Images:",
                      val_image_pred.shape, np.unique(val_image_pred),
                      np.unique(labels_val), np.mean(val_image_pred),
                      np.mean(labels_val))

                if F.data2:
                    pred2d = np.reshape(
                        val_image_pred,
                        (val_image_pred.shape[0] * 160 * 127 * 145))
                    lab2d = np.reshape(labels_val,
                                       (labels_val.shape[0] * 160 * 127 * 145))
                    F1_score = f1_score(
                        lab2d,
                        pred2d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
                        average=None)
                    print("Validation Dice Coefficient.... ")
                    print("Background:", F1_score[0])

                    print("Left-Thalamus:", F1_score[1])
                    print("Left-Caudate:", F1_score[2])
                    print("Left-Putamen:", F1_score[3])
                    print("Left-Pallidum :", F1_score[4])
                    print("Left-Hippocampus:", F1_score[6])
                    print("Left-Amygdala:", F1_score[7])

                    print("Right-Thalamus:", F1_score[8])
                    print("Right-Caudate:", F1_score[9])
                    print("Right-Putamen:", F1_score[10])
                    print("Right-Pallidum:", F1_score[11])
                    print("Right-Hippocampus:", F1_score[12])
                    print("Right-Amygdala:", F1_score[13])

                    print("Brain-Stem:", F1_score[5])
                else:
                    pred2d = np.reshape(
                        val_image_pred,
                        (val_image_pred.shape[0] * 144 * 192 * 256))
                    lab2d = np.reshape(labels_val,
                                       (labels_val.shape[0] * 144 * 192 * 256))
                    F1_score = f1_score(lab2d,
                                        pred2d, [0, 1, 2, 3],
                                        average=None)
                    print("Validation Dice Coefficient.... ")
                    print("Background:", F1_score[0])
                    print("CSF:", F1_score[1])
                    print("GM:", F1_score[2])
                    print("WM:", F1_score[3])

                    if (max_par < (F1_score[2] + F1_score[3])):
                        max_par = (F1_score[2] + F1_score[3])
                        save_model(F.best_checkpoint_dir, self.sess,
                                   self.saver)
                        print(
                            "Best checkpoint got updated from validation results."
                        )

                print("Average Validation Loss:", avg_val_loss)
                print("Average Training Loss", avg_train_loss)
                with open('Val_loss.txt', 'a') as f:
                    f.write('%.2e \n' % avg_val_loss)
                with open('Train_loss.txt', 'a') as f:
                    f.write('%.2e \n' % avg_train_loss)

        return
Example #7
def detect(model='resnet18', weight=None, num=1):
    # Look up the architecture by name, load its trained weights,
    # and display `num` sample predictions on the CIFAR-10 test batch.
    model = getattr(AlexNet, model)()
    model.load_weights(weight)
    dataset = preprocess.dataset(['cifar10/test_batch'])
    AlexNet.show_sample(model, dataset, num)
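A minimal usage sketch; the checkpoint path below is hypothetical and only illustrates how the function is called.

# Show four sample predictions from a pretrained ResNet-18 (weight path is an assumption)
detect(model='resnet18', weight='checkpoints/resnet18.pth', num=4)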