Example #1
def train(batch, epochs, size):
    """Train the model.

    Arguments:
        batch: Integer, number of training samples per batch.
        epochs: Integer, number of training epochs.
        size: Integer, input image size.
    """
    if not os.path.exists('model'):
        os.makedirs('model')

    model = MSCNN((size, size, 3))

    opt = SGD(lr=1e-5, momentum=0.9, decay=0.0005)
    model.compile(optimizer=opt, loss='mse')

    lr = ReduceLROnPlateau(monitor='loss', min_lr=1e-7)

    indices = list(range(1500))
    train_ids, test_ids = train_test_split(indices, test_size=0.25)

    hist = model.fit_generator(
        generator(train_ids, batch, size),
        validation_data=generator(test_ids, batch, size),
        steps_per_epoch=len(train_ids) // batch,
        validation_steps=len(test_ids) // batch,
        epochs=epochs,
        callbacks=[lr])

    model.save_weights(os.path.join('model', 'final_weights.h5'))

    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv(os.path.join('model', 'history.csv'), index=False, encoding='utf-8')
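
Every example on this page calls a project-specific generator imported from a local data/helper module, not a library function. As a rough sketch only (not taken from any of the projects shown here, and assuming the inputs are NumPy arrays), such a batch generator usually looks like this:

import numpy as np

def generator(images, labels, batch_size=32, shuffle=True):
    """Yield (image_batch, label_batch) tuples forever, as Keras'
    fit_generator / predict_generator expect."""
    n = len(images)
    indices = np.arange(n)
    while True:
        if shuffle:
            np.random.shuffle(indices)
        # drop the last partial batch so every batch has the same shape
        for start in range(0, n - batch_size + 1, batch_size):
            batch_idx = indices[start:start + batch_size]
            yield images[batch_idx], labels[batch_idx]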
Example #2
	def test(self, sess, sents, lens, labels, epoch, step, raw_data):
		batches = generator(zip(sents, lens, labels), self.batch_size)
		res, seq_lens = [], []
		for x, seq_len, y in batches:
			logits, transition_params = sess.run([self.logits, self.transition_params], 
						feed_dict={self.input_x:x, self.input_y:y, self.seq_len:seq_len})
			seq_lens.extend(seq_len)
			for logit, seq_l in zip(logits, seq_len):
				viterbi_seq, _ = viterbi_decode(logit[:seq_l], transition_params)
				res.append(viterbi_seq)
		self.evaluate(res, seq_lens, zip(sents, labels), epoch, step, raw_data)
Example #3
	def train(self, sess, train_sent, train_len, train_label, test_sent, test_len, test_label, raw_data):
		epoch = 20
		for e in range(epoch):
			batches = generator(zip(train_sent, train_len, train_label), self.batch_size)
			for step, (x, seq_len, y) in enumerate(batches):
				_, loss = sess.run([self.train_op, self.loss], 
						feed_dict={self.input_x:x,self.input_y:y, self.seq_len:seq_len})
				print('epoch '+str(e)+', step '+str(step)+', loss '+str(loss))	
				
				if step % 100 == 0:
					self.test(sess, test_sent, test_len, test_label, e, step, raw_data)
Example #4
def val():
    datas = load_pickles()
    X_test, y_test = datas['X_test'], datas['y_test']

    test_gen = generator(X_test,
                         y_test,
                         batch_size=30,
                         aug=False,
                         shuffle=False)

    m = _get_m()

    res = m.predict_generator(test_gen, steps=(X_test.shape[0] // 30))
    res = np.argmax(res, axis=1)
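    # res now holds predicted class indices; comparing with y_test gives
    # top-1 accuracy (assumes y_test contains integer labels)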
    print(np.mean(res == y_test))
Example #5
def main(argv):

    print('Finding data ...')
    with Timer():
        image_filenames = glob(os.path.join(FLAGS.data_dir, 'disc*/OAS1_*_MR1/PROCESSED/MPRAGE/T88_111/OAS1_*_MR1_mpr_n4_anon_111_t88_gfc.hdr'))
        label_filenames = glob(os.path.join(FLAGS.data_dir, 'disc*/OAS1_*_MR1/FSL_SEG/OAS1_*_MR1_mpr_n4_anon_111_t88_masked_gfc_fseg.hdr'))
        assert(len(image_filenames) == len(label_filenames))
        print('Found %i images.' % len(image_filenames))

    print('Loading images ...')
    with Timer():
        images = [ReadImage(image_filename) for image_filename in image_filenames]
        labels = [ReadImage(label_filename) for label_filename in label_filenames]
        images_train, images_test, labels_train, labels_test = train_test_split(images, labels, train_size=0.66)

    tensor_board = TensorBoard(log_dir='./TensorBoard')
    early_stopping = EarlyStopping(monitor='acc', patience=2, verbose=1)
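    # note: fit_generator below is called without validation data, so early
    # stopping monitors the training accuracy ('acc') rather than 'val_acc'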

    model = create_inception_resnet_v2(nb_classes=nb_classes)
    model.compile(optimizer=RMSprop(lr=0.045, rho=0.94, epsilon=1., decay=0.9), loss='categorical_crossentropy', metrics=['acc'])
    model.fit_generator(generator(images_train, labels_train, input_shape, nb_classes, FLAGS.patch_size, FLAGS.batch_size),
                        samples_per_epoch=FLAGS.samples_per_epoch, nb_epoch=FLAGS.nb_epochs, callbacks=[tensor_board, early_stopping],
                        verbose=1)
Example #6
import sklearn
from sklearn import preprocessing as skproc

from data import (
    load_pickles,
    generator,
    pregen,
)

BATCH = 128

datas = load_pickles()
X_train, y_train = datas['X_train'], datas['y_train']
X_valid, y_valid = datas['X_valid'], datas['y_valid']

train_gen = generator(X_train, y_train, batch_size=BATCH)
valid_gen = generator(X_valid, y_valid, batch_size=BATCH)

n_classes = datas['n_classes']
image_shape = X_train.shape[1:4]

import os
import keras
from model import get_model

m = get_model(image_shape, n_classes)

epochs = 360

monitor = 'val_acc'
callbacks = [
Example #7
def main():
    """main function

    Main function... (what do you expect me to say...)

    Args:
        - none

    Returns:
        - none
    """

    # Main function for evaluate
    parser = argparse.ArgumentParser(
        description="A testing framework for semantic segmentation.")
    parser.add_argument(
        "--net",
        required=True,
        default="unet",
        type=str,
        help=
        "(str) The type of network: either unet, deeplab or custom.")
    parser.add_argument("--epochs", required=False, default=500, type=int)
    parser.add_argument("--batch_size", required=False, default=16, type=int)
    parser.add_argument("--gpu_id",
                        required=False,
                        default="0",
                        type=str,
                        help="(str) The id of the gpu used when training.")
    parser.add_argument("--img_size",
                        required=False,
                        default=192,
                        type=int,
                        help="(int) The size of input image")
    parser.add_argument(
        "--load_weights",
        required=False,
        action="store_true",
        help="(flag) Reuse previously saved weights (named net_imgSize.h5).")

    # Parse argument
    args = parser.parse_args()
    net_type = args.net
    epochs = args.epochs
    batch_size = args.batch_size
    gpu_number = args.gpu_id
    img_size = args.img_size

    import os
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_number
    # Argument check
    if not (net_type in {"unet", "deeplab", "custom"}):
        raise ValueError("--net must be one of: unet, deeplab, custom.")

    # Get config
    Config = cfg.Config()

    # COCO instance
    print("Reading COCO ground truth...")
    cocoGt = COCO(Config.COCO_training_ann_path)
    cocoValGt = COCO(Config.COCO_validation_ann_path)
    print("Finished")

    # Get all classes
    classes = len(cocoGt.getCatIds())

    id_to_index = dict()
    # There is a weird class 0 in the feature maps (COCO uses 0 for unlabeled pixels)
    index_to_id = dict()

    # Because COCO category ids start at 92, project each id to a zero-based index so
    # that the Keras utils can convert the segmentation map into a one-hot categorical
    # encoding.
    for index, id in enumerate(cocoGt.getCatIds()):
        id_to_index[id] = index
        index_to_id[index] = id

    if net_type == "unet":
        model = basic_model.unet(input_size=(img_size, img_size, 3),
                                 classes=len(id_to_index))
    elif net_type == "deeplab":
        deeplab_model = basic_model.Deeplabv3(input_shape=(img_size, img_size,
                                                           3),
                                              classes=len(id_to_index),
                                              backbone="xception")
        output = KL.Activation("softmax")(deeplab_model.output)
        model = KM.Model(deeplab_model.input, output)
    elif net_type == "custom":
        model = model.custom_model(input_shape=(img_size, img_size, 3),
                                   classes=len(id_to_index))

    file_list = glob(Config.COCO_training_path + '*')
    val_list = glob(Config.COCO_validation_path + '*')

    if args.load_weights:
        try:
            model.load_weights(net_type + "_" + str(img_size) + ".h5")
            print("weights loaded!")
        except (IOError, OSError):
            print("weights not found!")

    checkpointer = KC.ModelCheckpoint(filepath=net_type + "_" + str(img_size) +
                                      ".h5",
                                      verbose=1,
                                      save_best_only=True)

    model.compile(optimizer=KO.Adam(),
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    model.fit_generator(data.generator(batch_size, file_list,
                                       (img_size, img_size), cocoGt,
                                       id_to_index, True),
                        validation_data=data.generator(batch_size, val_list,
                                                       (img_size, img_size),
                                                       cocoValGt, id_to_index,
                                                       False),
                        validation_steps=10,
                        steps_per_epoch=100,
                        epochs=epochs,
                        use_multiprocessing=True,
                        workers=8,
                        callbacks=[checkpointer])
    print("Prediction start...")

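    # vectorized lookup that maps predicted class indices back to COCO category ids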
    vfunc = np.vectorize(lambda index: index_to_id[index])

    anns = []

    # Convert into COCO annotation
    for i in trange(len(val_list)):
        image = val_list[i]
        image_id = int(image.replace(".jpg", '')[-12:])

        cropping_image, padding_dims, original_size = utils.padding_and_cropping(
            image, (img_size, img_size))
        cropping_image = preprocess_input(cropping_image, mode="torch")

        result = model.predict(cropping_image)
        result = np.argmax(result, axis=3)

        seg_result = utils.reverse_padding_and_cropping(
            result, padding_dims, original_size)
        seg_result = vfunc(seg_result)
        COCO_ann = cocostuffhelper.segmentationToCocoResult(seg_result,
                                                            imgId=image_id)
        for ann in COCO_ann:
            ann["segmentation"]["counts"] = ann["segmentation"][
                "counts"].decode("ascii")  # json can't dump byte string
        anns += COCO_ann

    with open("result.json", "w") as file:
        json.dump(anns, file)

    # Read result file
    # Test for fake result
    #resFile = Config.fake_result

    # Evaluate result
    resFile = "result.json"
    cocoDt = cocoValGt.loadRes(resFile)
    cocoEval = COCOStuffeval(cocoValGt, cocoDt)
    cocoEval.evaluate()
    cocoEval.summarize()
Example #8
import data
import nvidia
from keras.optimizers import Adam

if __name__ == '__main__':
    db = data.database()
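    # split the recorded database into training and validation subsets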
    train_db, validation_db = data.validation_split(db)
    model = nvidia.model_x()
    model.summary()
    model.compile(loss='mse', optimizer=Adam(), metrics=['mse', 'accuracy'])
    # uncomment if you want to update trained network
    # model.load_weights('model.h5')
    model.fit_generator(data.generator(train_db, augment=True),
                        validation_data=data.load_data(validation_db),
                        steps_per_epoch=50,
                        epochs=10)
    model.save('model.h5')
Example #9
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Lambda, Activation, Dropout
from keras.layers.convolutional import Convolution2D, Cropping2D
from keras.layers.pooling import MaxPooling2D
import data as DT

batch_size = 32
train_generator = DT.generator(DT.train_samples, batch_size)
validation_generator = DT.generator(DT.validation_samples, batch_size)
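# the factor of 2 below presumably accounts for the generator also yielding a
# flipped copy of each sample (assumption; not shown in this snippet)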
nb_train = len(DT.train_samples) * 2
nb_valid = len(DT.validation_samples) * 2

# try to visualize the cropping images/reverse image
model = Sequential()
model.add(Cropping2D(cropping=((50, 20), (0, 0)), input_shape=(160, 320, 3)))
model.add(Lambda(lambda x: x / 127.5 - 1.))
model.add(Convolution2D(6, 5, 5))
# model.add(Convolution2D(6, 5, 5, input_shape=(160, 320, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))  # default (2, 2)

model.add(Convolution2D(6, 5, 5))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))  # default (2, 2)

model.add(Convolution2D(
    6,
Example #10
def train():
    #
    #  SaveModel is a Callback subclass that saves the model after each epoch.
    #  This lets us easily test each epoch on the simulator, which seems like a
    #  better validation than the validation data set alone.
    class SaveModel(Callback):
        def on_epoch_end(self, epoch, logs={}):
            epoch += 1
            if (epoch > 0):
                #with open ('model-' + str(epoch) + '.json', 'w') as file:
                #    file.write (model.to_json ())
                #    file.close ()
                #model.save_weights ('model-' + str(epoch) + '.h5')
                model.save('model-' + str(epoch) + '.h5')

    #
    #  load the model
    #
    model = get_model()

    #  Keras has a nice tool to create an image of our network
    from keras.utils.visualize_util import plot
    plot(model, to_file='car_model.png', show_shapes=True)

    print("Loaded model")

    # load the data
    xs, ys = data.loadTraining()

    # split the dataset into training and validation  80% / 20%
    train_xs = xs[:int(len(xs) * 0.8)]
    train_ys = ys[:int(len(xs) * 0.8)]

    val_xs = xs[-int(len(xs) * 0.2):]
    val_ys = ys[-int(len(xs) * 0.2):]

    # load the validation dataset, it is better not generate an image each time - process them once
    # Use the validation process function, it doesn't augment the image, just resizes it
    X, y = data.getValidationDataset(val_xs, val_ys,
                                     data.processImageValidation)

    print(model.summary())
    print("Loaded validation datasetset")
    print("Total of", len(train_ys))
    print("Training..")

    checkpoint_path = "weights.{epoch:02d}-{val_loss:.2f}.hdf5"
    checkpoint = ModelCheckpoint(checkpoint_path,
                                 verbose=1,
                                 save_best_only=False,
                                 save_weights_only=False,
                                 mode='auto')

    # I tried using the earlystopping callback, but now I run it for a fixed number of epochs and test to see which is best
    earlystopping = EarlyStopping(monitor='val_loss',
                                  min_delta=0,
                                  patience=2,
                                  verbose=1,
                                  mode='auto')
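    # note: checkpoint and earlystopping are defined above but only SaveModel()
    # is actually passed to fit_generator below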

    res = model.fit_generator(data.generator(train_xs, train_ys, 256),
                              validation_data=(X, y),
                              samples_per_epoch=100 * 256,
                              nb_epoch=epochs,
                              verbose=1,
                              callbacks=[SaveModel()])

    # pickle and dump the history so we can graph it in a notebook
    history = res.history
    with open('history.p', 'wb') as f:
        pickle.dump(history, f)
Example #11
    #predictions = tf.squeeze(predictions, axis=[1,2])
    correct_prediction = tf.equal(
        tf.argmax(predictions, axis=1),
        tf.argmax(targets, axis=1))  #shape of correct_prediction is [N]
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))

    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=targets,
                                                logits=predictions))
    tf.summary.scalar('loss', loss)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    global_steps = tf.Variable(0, trainable=False)
    train_step = optimizer.minimize(loss, global_step=global_steps)

    filepath = 'train.txt'
    gen = generator(filepath, height, width, num_classes, batch_size)

    saver = tf.train.Saver()
    init = tf.global_variables_initializer()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        sess.run(init)
        for step in range(total_steps):
            images_, labels_ = next(gen)
            sess.run(train_step, feed_dict={inputs: images_, targets: labels_})
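            # second forward pass just to report metrics; loss and accuracy could
            # instead be fetched together with train_step above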
            loss_, accuracy_ = sess.run([loss, accuracy],
                                        feed_dict={
                                            inputs: images_,
                                            targets: labels_
                                        })
Example #12
import pandas as pd
from keras import optimizers
from keras.models import Sequential
from keras.layers import Dense, Flatten, \
                         Lambda, Conv2D, MaxPooling2D
from keras.layers.noise import AlphaDropout
from sklearn.model_selection import train_test_split
from data import SHAPE, generator

samples = pd.read_csv('driving_log.csv').values  # .as_matrix() is deprecated
train_samples, valid_samples = train_test_split(samples, test_size=0.2)

# Hyperparameters
EPOCHS = 6
BATCH_SIZE = 32
LEARNING_RATE = 1.0e-3

# Data
train_generator = generator(train_samples, batch_size=BATCH_SIZE)
valid_generator = generator(valid_samples, batch_size=BATCH_SIZE)

# Define the model
#model = load_model("./model.h5")
model = Sequential()
model.add(Lambda(lambda x: x / 127.5 - 1., input_shape=SHAPE))
model.add(
    Conv2D(2, (3, 3), activation='selu', kernel_initializer='lecun_normal'))
model.add(MaxPooling2D((4, 4)))
model.add(AlphaDropout(0.25))
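# AlphaDropout preserves the self-normalizing property of the SELU activation above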
model.add(Flatten())
model.add(Dense(1))
model.compile(loss='mse', optimizer=optimizers.Adam(lr=LEARNING_RATE))

# Train the model
Example #13
import data as d
import model as m
from sklearn.model_selection import train_test_split

path_list_train = d.getImagesPath('train')
path_list_test = d.getImagesPath('test')
X_train, Y_train = d.PreprocessData(path_list_train)
X_test, sizes_test = d.PreprocessData(path_list_test, False)
d.savePreparedData(X_test, X_train, Y_train, sizes_test)


xtr, xval, ytr, yval = train_test_split(X_train, Y_train, test_size=0.1, random_state=7)
#X_gen,Y_gen=d.genImagesAndMasks(X_train,Y_train)
#X_train=np.concatenate(X_train,X_gen)
#Y_train=np.concatenate(Y_train,Y_gen)
train_generator, val_generator = d.generator(xtr, xval, ytr, yval, 16)
model = m.get_unet(256, 256, 3)  # get_unet presumably lives in the local "model" module imported as m
model.fit_generator(train_generator, steps_per_epoch=len(xtr) // 6, epochs=250,
                        validation_data=val_generator, validation_steps=len(xval) // 16)
preds_test = model.predict(X_test, verbose=1)


preds_test_t = (preds_test > 0.5).astype(np.uint8)
preds_test_upsampled = d.resizeTest(path_list_test, preds_test, sizes_test)

new_test_ids = []
rles = []
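# run-length encode each upsampled mask and repeat the image id once per encoding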
for n, path in enumerate(path_list_test):
    rle = list(d.prob_to_rles(preds_test_upsampled[n]))
    rles.extend(rle)
    new_test_ids.extend([os.path.splitext(os.path.basename(os.path.normpath(str(path))))[0]] * len(rle))
Example #14
from keras.optimizers import Adam
from data import generator, train_samples, validation_samples, test_samples
import math

def samples_count(total_count, batch_size):
    """
      Compute the number of samples per epoch: total_count rounded up to a full batch.
    """
    return math.ceil(total_count/batch_size) * batch_size

learning_rate = 0.0001
batch_size    = 32
epochs        = 3

# Create generators
train_generator      = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size, training=False)
test_generetaor      = generator(test_samples, batch_size=test_samples.shape[0], training=False)

def build_model(learning_rate=0.0001):
    model = Sequential()
    model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(64, 64, 3)))
    model.add(Convolution2D(16, 3, 3, subsample=(1, 1), activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(32, 3, 3, subsample=(1, 1), activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(64, 3, 3, subsample=(1, 1), activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(512, activation="relu"))
    model.add(Dropout(0.5))