Example no. 1
def main():
    nb_classes = 40
    train_file = './ModelNet40/ply_data_train.h5'
    test_file = './ModelNet40/ply_data_test.h5'

    epochs = 100
    batch_size = 32

    train = DataGenerator(train_file, batch_size, nb_classes, train=True)
    val = DataGenerator(test_file, batch_size, nb_classes, train=False)

    model = PointNet(nb_classes)
    model.summary()
    lr = 0.0001
    adam = Adam(lr=lr)
    model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    if not os.path.exists('./results/'):
        os.mkdir('./results/')
    checkpoint = ModelCheckpoint('./results/pointnet.h5', monitor='val_acc',
                                 save_weights_only=True, save_best_only=True,
                                 verbose=1)
    history = model.fit_generator(train.generator(),
                                  steps_per_epoch=9840 // batch_size,
                                  epochs=epochs,
                                  validation_data=val.generator(),
                                  validation_steps=2468 // batch_size,
                                  callbacks=[checkpoint, onetenth_50_75(lr)],
                                  verbose=1)

    plot_history(history, './results/')
    save_history(history, './results/')
    model.save_weights('./results/pointnet_weights.h5')
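None of the ModelNet40 examples show the DataGenerator class itself. A minimal sketch of an HDF5-backed generator that would fit the train.generator() / fit_generator usage in Example no. 1 might look like the following; the 'data'/'label' dataset names and the shuffling logic are assumptions, not the original implementation:

import h5py
import numpy as np
from keras.utils import to_categorical

class DataGenerator:
    """Minimal sketch: yields (points, one-hot labels) batches from an HDF5 file."""

    def __init__(self, file_path, batch_size, nb_classes, train=True):
        self.file = h5py.File(file_path, mode='r')
        self.batch_size = batch_size
        self.nb_classes = nb_classes
        self.train = train

    def generator(self):
        data, labels = self.file['data'], self.file['label']
        n = data.shape[0]
        while True:
            # shuffle every epoch during training, keep order for validation
            order = np.random.permutation(n) if self.train else np.arange(n)
            for start in range(0, n - self.batch_size + 1, self.batch_size):
                idx = np.sort(order[start:start + self.batch_size])  # h5py needs sorted indices
                x = data[idx]
                y = to_categorical(labels[idx].ravel(), self.nb_classes)
                yield x, y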
Example no. 2
    def fit_model(self, train_df, valid_df):

        # callbacks
        checkpointer = K.callbacks.ModelCheckpoint(filepath=self.model_filename, verbose=1, save_best_only=True)
        scheduler = K.callbacks.LearningRateScheduler(lambda epoch: self.learning_rate * pow(self.decay_rate, floor(epoch / self.decay_steps)))

        self.model.fit_generator(
            DataGenerator(
                train_df.index,
                train_df,
                self.batch_size,
                self.input_dims,
                self.train_images_dir
            ),
            epochs=self.num_epochs,
            verbose=self.verbose,
            validation_data=DataGenerator(
                valid_df.index,
                valid_df,
                self.batch_size,
                self.input_dims,
                self.train_images_dir
            ),
            use_multiprocessing=True,
            workers=4,
            callbacks=[PredictionCheckpoint(), scheduler, checkpointer]
        )
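In Example no. 2 the DataGenerator instances are passed straight to fit_generator and used with use_multiprocessing=True, which implies they subclass keras.utils.Sequence. A minimal sketch under that assumption; the image-loading helper and label layout are placeholders, not the original code:

import numpy as np
import keras

class DataGenerator(keras.utils.Sequence):
    """Minimal Sequence sketch: one batch of images and labels per __getitem__."""

    def __init__(self, list_IDs, df, batch_size, input_dims, img_dir):
        self.list_IDs = list(list_IDs)
        self.df = df
        self.batch_size = batch_size
        self.input_dims = input_dims
        self.img_dir = img_dir

    def __len__(self):
        return int(np.ceil(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        ids = self.list_IDs[index * self.batch_size:(index + 1) * self.batch_size]
        x = np.stack([self._load_image(image_id) for image_id in ids])
        y = self.df.loc[ids].values.astype(np.float32)
        return x, y

    def _load_image(self, image_id):
        # placeholder: the real project reads image files from img_dir
        return np.zeros(self.input_dims, dtype=np.float32)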
Example no. 3
def main():
    nb_classes = 40
    train_file = './ModelNet40/ply_data_train.h5'
    test_file = './ModelNet40/ply_data_test.h5'
    # train_num_points = 9840
    # test_num_points = 2468

    # the variables below are for dev purposes
    train_num_points = 1648
    test_num_points = 420

    epochs = 100
    batch_size = 32

    train = DataGenerator(train_file, batch_size, nb_classes, train=True)
    val = DataGenerator(test_file, batch_size, nb_classes, train=False)

    model = pointnet2(nb_classes)
    model.summary()

    lr = 0.0001
    adam = Adam(lr=lr)
    model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    if not os.path.exists('./results/'):
        os.mkdir('./results/')

    last_epoch, last_meta = get_last_status(model)

    checkpoint = MetaCheckpoint('./results/pointnet.h5',
                                monitor='val_acc',
                                save_weights_only=True,
                                save_best_only=True,
                                verbose=1,
                                meta=last_meta)

    history = model.fit_generator(
        train.generator(),
        steps_per_epoch=train_num_points // batch_size,
        epochs=epochs,
        validation_data=val.generator(),
        validation_steps=test_num_points // batch_size,
        callbacks=[checkpoint, onetenth_50_75(lr)],
        verbose=1,
        initial_epoch=last_epoch + 1)

    plot_history(history, './results/')
    save_history(history, './results/')
    model.save_weights('./results/pointnet_weights.h5')
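The onetenth_50_75(lr) callback used in Examples no. 1 and no. 3 is not shown. Its name suggests a step schedule that divides the learning rate by ten at epochs 50 and 75; a hedged sketch with keras.callbacks.LearningRateScheduler:

from keras.callbacks import LearningRateScheduler

def onetenth_50_75(lr):
    # assumed behaviour: lr until epoch 50, lr/10 until epoch 75, lr/100 afterwards
    def schedule(epoch):
        if epoch < 50:
            return lr
        if epoch < 75:
            return lr / 10.0
        return lr / 100.0
    return LearningRateScheduler(schedule, verbose=1)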
Example no. 4
def train():

    # create dataset
    dataGenerator = DataGenerator("../../data/train2dae.csv", batch_size)
    features_input = dataGenerator.getNFeatures()
    steps_per_epoch = dataGenerator.getSteps()
    #generator = dataGenerator.generate()

    m = model.get_model2(features_input, nhidden)
    decay_rate = learning_rate / NEPOCHS
    optimizer = optimizers.Adam(lr=learning_rate, decay=decay_rate)

    callbacks = [
        ModelCheckpoint(filepath="./best_m",
                        monitor='val_loss',
                        save_best_only=True),
        EarlyStopping(monitor='val_loss', patience=2)
    ]

    m.compile(loss="mean_squared_error", optimizer=optimizer, metrics=["mse"])

    m.fit_generator(generator=dataGenerator.generate(),
                    steps_per_epoch=steps_per_epoch,
                    epochs=NEPOCHS,
                    callbacks=callbacks,
                    validation_data=dataGenerator.generate(),
                    validation_steps=steps_per_epoch)
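For reference, Keras 2.x optimizers interpret the legacy decay argument as a per-update rescaling, lr_t = lr / (1 + decay * iterations). A small sketch (the numbers are illustrative, not taken from this project) shows how decay_rate = learning_rate / NEPOCHS translates into an effective schedule:

import numpy as np

learning_rate, NEPOCHS, steps_per_epoch = 0.001, 50, 200   # illustrative values
decay_rate = learning_rate / NEPOCHS

iterations = np.arange(NEPOCHS * steps_per_epoch)
lr_schedule = learning_rate / (1.0 + decay_rate * iterations)
print(lr_schedule[0], lr_schedule[-1])   # initial vs. final effective learning rate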
Example no. 5
    def _load_pretrained_model(self):
        base_dir = "/media/scatter/scatterdisk/reply_matching_model/runs/delstm_1024_nsrandom4_lr1e-3/"
        config_dir = base_dir + "config.json"
        best_model_dir = base_dir + "best_loss/best_loss.ckpt"
        model_config = load_config(config_dir)
        model_config.add_echo = False
        preprocessor = DynamicPreprocessor(model_config)
        preprocessor.build_preprocessor()

        infer_config = load_config(config_dir)
        setattr(infer_config, "tokenizer", "SentencePieceTokenizer")
        setattr(
            infer_config, "soynlp_scores",
            "/media/scatter/scatterdisk/tokenizer/soynlp_scores.sol.100M.txt")
        infer_preprocessor = DynamicPreprocessor(infer_config)
        infer_preprocessor.build_preprocessor()
        graph = tf.Graph()
        tf_config = tf.ConfigProto()
        tf_config.gpu_options.allow_growth = True

        with graph.as_default():
            Model = get_model(model_config.model)
            data = DataGenerator(preprocessor, model_config)
            infer_model = Model(data, model_config)
            infer_sess = tf.Session(config=tf_config, graph=graph)
            infer_sess.run(tf.global_variables_initializer())
            infer_sess.run(tf.local_variables_initializer())

        infer_model.load(infer_sess, model_dir=best_model_dir)
        self.infer_preprocessor = infer_preprocessor
        return infer_model, infer_sess
Example no. 6
def main():
    # create the experiments dirs
    create_dirs(config)

    # create tensorflow session
    sess = tf.Session()

    # build preprocessor
    preprocessor = Preprocessor(config)

    # load data, preprocess and generate data
    data = DataGenerator(preprocessor, config)

    # create an instance of the model you want
    model = TextCNN.TextCNN(preprocessor, config)

    # create tensorboard logger
    logger = Logger(sess, config)

    # create trainer and pass all the previous components to it
    trainer = Trainer(sess, model, data, config, logger)

    # load model if exists
    model.load(sess)

    # here you train your model
    trainer.train()
Example no. 7
def main():
    config = args.parse_args()
    # Load pre-defined config if possible
    if config.config:
        config = load_config(config.config)

    config_str = " | ".join([
        "{}={}".format(attr.upper(), value)
        for attr, value in vars(config).items()
    ])
    print(config_str)

    # create the experiments dirs
    config = create_dirs(config)

    # create tensorflow session
    device_config = tf.ConfigProto()
    device_config.gpu_options.allow_growth = True
    sess = tf.Session(config=device_config)

    # build preprocessor
    preprocessor = DynamicPreprocessor(config)

    # load data, preprocess and generate data
    data = DataGenerator(preprocessor, config)

    # create tensorboard summary writer
    summary_writer = SummaryWriter(sess, config)

    # create trainer and pass all the previous components to it
    trainer = Seq2SeqTrainer(sess, preprocessor, data, config, summary_writer)

    # here you train your model
    trainer.train()
Example no. 8
def train():
    from data_trans import process
    process()
    from data_loader import train_data, SpoSearcher, dev_data, DataGenerator

    init_keras_config()
    train_model, subject_model, object_model = model()

    EMAer = ExponentialMovingAverage(train_model)
    EMAer.inject()

    spoer = SpoSearcher(train_data)
    train_D = DataGenerator(train_data)

    evaluator = Evaluate(train_model,
                         EMAer=EMAer,
                         dev_data=dev_data,
                         spoer=spoer,
                         subject_model=subject_model,
                         object_model=object_model)

    train_model.fit_generator(train_D.__iter__(),
                              steps_per_epoch=len(train_D),
                              epochs=120,
                              callbacks=[evaluator])
Example no. 9
    def test_dataloader(self):
        test_set = DataGenerator(self.test_data_path, data_col=self.data_col)

        loader = torch.utils.data.DataLoader(test_set,
                                             batch_size=self.batch_size,
                                             shuffle=False,
                                             num_workers=6)
        return loader
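Example no. 9 wires the same DataGenerator name into torch.utils.data.DataLoader, so in that project it is a PyTorch Dataset rather than a Keras generator. A minimal sketch, assuming the path points to a CSV and data_col selects the feature column(s); the 'label' column name is a placeholder:

import pandas as pd
from torch.utils.data import Dataset

class DataGenerator(Dataset):
    """Minimal Dataset sketch: one CSV row per sample."""

    def __init__(self, data_path, data_col):
        df = pd.read_csv(data_path)
        self.features = df[data_col].values.astype('float32')
        self.labels = df['label'].values.astype('int64')  # placeholder column name

    def __len__(self):
        return len(self.features)

    def __getitem__(self, idx):
        # the default DataLoader collate converts numpy values to tensors
        return self.features[idx], self.labels[idx]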
Example no. 10
def main():
    test_df = read_testset(testset_filename)
    test_generator = DataGenerator(list_IDs=test_df.index,
                                   batch_size=batch_size,
                                   img_size=img_size,
                                   img_dir=test_images_dir)
    best_model = keras.models.load_model(MODEL_NAME, compile=False)
    create_submission(best_model, test_generator, test_df)
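create_submission is not shown in Example no. 10; a plausible minimal version, assuming the generator yields image batches in the same order as test_df.index and one prediction column per class is wanted:

import pandas as pd

def create_submission(model, test_generator, test_df, out_path='submission.csv'):
    # predictions are assumed to come back in the same order as test_df.index
    preds = model.predict_generator(test_generator, verbose=1)
    n = min(len(preds), len(test_df))
    submission = pd.DataFrame(preds[:n], index=test_df.index[:n])
    submission.to_csv(out_path)
    return submission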
Example no. 11
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("missing or invalid arguments")
        exit(0)

    # create the experiments dirs
    create_dirs([
        config['result_dir'], config['checkpoint_dir'],
        config['checkpoint_dir_lstm']
    ])
    # save the config in a txt file
    save_config(config)
    sess_centralized = tf.Session(config=tf.ConfigProto())
    data = DataGenerator(config)
    model_vae = VAEmodel(config, "Centralized")
    model_vae.load(sess_centralized)
    trainer_vae = vaeTrainer(sess_centralized, model_vae, data, config)
    # here you train your model
    if config['TRAIN_VAE']:
        if config['vae_epochs_per_comm_round'] > 0:
            trainer_vae.train()

    if config['TRAIN_LSTM']:
        # create a lstm model class instance
        lstm_model = lstmKerasModel("Centralized", config)

        # produce the embedding of all sequences for training of lstm model
        # process the windows in sequence to get their VAE embeddings
        lstm_model.produce_embeddings(model_vae, data, sess_centralized)

        # Create a basic model instance
        lstm_nn_model = lstm_model.lstm_nn_model
        lstm_nn_model.summary()  # Display the model's architecture
        # checkpoint path
        checkpoint_path = lstm_model.config['checkpoint_dir_lstm']\
                                        + "cp_{}.ckpt".format(lstm_model.name)
        # Create a callback that saves the model's weights
        cp_callback = tf.keras.callbacks.ModelCheckpoint(
            filepath=checkpoint_path, save_weights_only=True, verbose=1)
        # load weights if possible
        # lstm_model.load_model(lstm_nn_model, config, checkpoint_path)

        # start training
        if config['lstm_epochs_per_comm_round'] > 0:
            lstm_model.train(lstm_nn_model, cp_callback)

    sess_centralized.close()
Example no. 12
def train_on_train_test_split():
    train_config = get_config()
    bert_config = get_bert_config(train_config)
    cased = train_config.BERT_DIR.split('/')[-1].startswith('cased')
    tokenizer = FullTokenizer(bert_config.vocab, do_lower_case=not cased)

    with tf.device('/cpu:0'):
        model = get_bert_base_model(bert_config)

    text, label = load_data(os.path.join(train_config.DATA_DIR, 'train.csv'))
    train_text, val_text, train_label, val_label = train_test_split(
        text, label, test_size=0.055, random_state=59)
    train_gen = DataGenerator(train_text,
                              train_label,
                              tokenizer,
                              batch_size=32)

    val_text = tokenize_examples(val_text, tokenizer, max_len=512)
    val_text = seq_padding(val_text)

    logger = Logger(model=model,
                    val_text=val_text,
                    val_label=(val_label > 0.5).astype(np.float32))

    # OPTIMIZER PARAMs
    lr = 2e-5
    weight_decay = 0.01
    bsz = 32
    decay_steps = 1 * len(train_gen)
    warmup_steps = int(0.1 * decay_steps)

    optimizer = AdamWarmup(
        decay_steps=decay_steps,
        warmup_steps=warmup_steps,
        lr=lr,
        weight_decay=weight_decay,
    )

    parallel_model = multi_gpu_model(model, gpus=4)
    parallel_model.compile(loss='binary_crossentropy', optimizer=optimizer)
    parallel_model.fit_generator(train_gen.__iter__(),
                                 steps_per_epoch=len(train_gen),
                                 epochs=1,
                                 callbacks=[logger],
                                 max_queue_size=100)
Example no. 13
def predict(model_points, predict_points):

    print(
        "Predicting with {} points on a model trained with {} points.".format(
            predict_points, model_points))

    test_file = "./ModelNet40/ply_data_test.h5"
    test_file = h5py.File(test_file, mode="r")

    nb_classes = 40

    val = DataGenerator(test_file, 32, predict_points, nb_classes, train=False)

    model = PointNet_cls(nb_classes, predict_points)
    model.load_weights("./results/pointnet-" + str(model_points) + ".h5")
    pred = np.argmax(model.predict(val), axis=1)

    labels = np.squeeze(
        [test_file["label"][x] for x in range(test_file["label"].shape[0])])
    labels = np.array([int(x) for x in labels])

    print("Accuracy: {:.5}%\n".format(
        100 * sklearn.metrics.accuracy_score(labels[:pred.shape[0]], pred)))
Example no. 14
def train():

    # create dataset
    dataGenerator = DataGenerator("../../data/train_woe.csv",
                                  "../../data/labels_train.csv",
                                  batch_size)
    features_input = dataGenerator.getNFeatures()
    steps_per_epoch = dataGenerator.getSteps()
    # generator = dataGenerator.generate()

    m = model.get_model(features_input, nhidden)
    decay_rate = learning_rate / NEPOCHS
    optimizer = optimizers.Adam(lr=learning_rate, decay=decay_rate)

    callbacks = [
        EarlyStopping(monitor='val_loss', patience=5),
        ModelCheckpoint(filepath="./best_m",
                        monitor='val_loss',
                        save_best_only=True)
    ]

    m.compile(loss=binary_crossentropy,
              optimizer=optimizer,
              metrics=[binary_crossentropy, auc_roc])

    m.fit_generator(generator=dataGenerator.generate(),
                    steps_per_epoch=steps_per_epoch,
                    epochs=NEPOCHS,
                    callbacks=callbacks,
                    validation_data=dataGenerator.generate(),
                    validation_steps=steps_per_epoch)

    y_train = m.predict(dataGenerator.getData())
    print(y_train)
Example no. 15
def get_available_gpus():
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU']


print(get_available_gpus())

# load VAE model
config = process_config('PX4_config.json')
# create the experiments dirs
create_dirs([config['result_dir'], config['checkpoint_dir']])
# create tensorflow session
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# create your data generator
data = DataGenerator(config)
# create a CNN model
model_vae = VAEmodel(config)
# create a trainer for the VAE model
trainer_vae = vaeTrainer(sess, model_vae, data, config)
model_vae.load(sess)

# here you train your model
if config['TRAIN_VAE']:
    if config['num_epochs_vae'] > 0:
        trainer_vae.train()

# load LSTM model
lstm_model = lstmKerasModel(data)
lstm_model.produce_embeddings(config, model_vae, data, sess)
lstm_nn_model = lstm_model.create_lstm_model(config)
Example no. 16
prediction = Flatten()(c)
'''
model = Model(inputs=input_points, outputs=[out_0, prediction])
xx = np.random.rand(32,2048, 3) - 0.5
y = model.predict_on_batch(xx)
'''

model = Model(inputs=input_points, outputs=[prediction])
nb_classes = 40
train_file = '/home/changetest/datasets/Modelnet40/ply_data_train.h5'
test_file = '/home/changetest/datasets/Modelnet40/ply_data_test.h5'

epochs = 100
batch_size = 32

train = DataGenerator(train_file, batch_size, nb_classes, train=True)
val = DataGenerator(test_file, batch_size, nb_classes, train=False)

model.summary()
lr = 0.0001
adam = Adam(lr=lr)
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

if not os.path.exists('./results/'):
    os.mkdir('./results/')
checkpoint = ModelCheckpoint('./results/pointnet.h5',
                             monitor='val_acc',
                             save_weights_only=True,
                             save_best_only=True,
                             verbose=1)
Example no. 17
    infer_config = load_config(config_dir)
    setattr(infer_config, "tokenizer", TOKENIZER)
    setattr(infer_config, "soynlp_scores",
            "/media/scatter/scatterdisk/tokenizer/soynlp_scores.sol.100M.txt")
    infer_preprocessor = DynamicPreprocessor(infer_config)
    infer_preprocessor.build_preprocessor()

    model_config.add_echo = False

    graph = tf.Graph()
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True

    with graph.as_default():
        Model = get_model(model_config.model)
        data = DataGenerator(preprocessor, model_config)
        infer_model = Model(data, model_config)
        infer_sess = tf.Session(config=tf_config, graph=graph)
        infer_sess.run(tf.global_variables_initializer())
        infer_sess.run(tf.local_variables_initializer())

        infer_model.load(infer_sess, model_dir=best_model_dir)

    with open("../reply_matching_model/data/reply_set_new.txt", "r") as f:
        reply_set = [line.strip() for line in f if line]
    indexed_reply_set, reply_set_lengths = zip(
        *[infer_preprocessor.preprocess(r) for r in reply_set])

    def get_result(query, reply):
        preprocessed_query, query_length = infer_preprocessor.preprocess(query)
        preprocessed_reply, reply_length = infer_preprocessor.preprocess(reply)
Example no. 18
        'batch_size': config.batch_size,
        'n_classes': 2,
        'n_channels': 3,
        'shuffle': False,
        'aug': False
    }

    datalist = pd.read_csv('dataset/train/train.txt',
                           header=None,
                           delimiter=' ')
    X_train, X_test, y_train, y_test = train_test_split(datalist[0].values,
                                                        datalist[1].values,
                                                        stratify=datalist[1],
                                                        test_size=0.2)

    train_generator = DataGenerator(X_train, y_train, **train_params)
    val_generator = DataGenerator(X_test, y_test, **val_params)

    print("Training size =", len(train_generator))
    print("Validation size =", len(val_generator))

    # models
    # add a global spatial average pooling layer
    base_model = get_base_model()
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # let's add a fully-connected layer
    x = Dense(1024, activation='relu')(x)
    # and a logistic layer -- let's say we have 2 classes
    predictions = Dense(2, activation='softmax')(x)
Example no. 19
Tensorflow Code for a color segmentation network
'''

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import matplotlib
import matplotlib.pyplot as plt
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Import Dataset
from data_loader import DataGenerator
data = DataGenerator()

# Import Models
from .model import unet

# Training Parameters
learning_rate = 0.0001
num_steps = 1000
batch_size = 32
display_step = 100

# Network Parameters
WIDTH = 256
HEIGHT = 256
CHANNELS = 2
NUM_INPUTS = WIDTH * HEIGHT * CHANNELS
Example no. 20
        index = config.cls_classes - i * 50
        s = s[0:30 * index, :]
        l = l[0:30 * index, :]
    x_valid.extend(s)
    y_valid.extend(l)
    del s
x_valid = np.array(x_valid)
y_valid = np.array(y_valid)
print("x_valid: {} | {:.2f} ~ {:.2f}".format(x_valid.shape, np.min(x_valid),
                                             np.max(x_valid)))
print("y_valid: {} | {:.2f} ~ {:.2f}".format(y_valid.shape, np.min(y_valid),
                                             np.max(y_valid)))

training_generator = DataGenerator(x_train,
                                   y_train,
                                   batch_size=config.batch_size,
                                   dim=(config.input_rows, config.input_cols,
                                        config.input_deps),
                                   nb_classes=config.cls_classes)
validation_generator = DataGenerator(x_valid,
                                     y_valid,
                                     batch_size=config.batch_size,
                                     dim=(config.input_rows, config.input_cols,
                                          config.input_deps),
                                     nb_classes=config.cls_classes)

if os.path.exists(os.path.join(config.model_path, config.exp_name + ".txt")):
    os.remove(os.path.join(config.model_path, config.exp_name + ".txt"))
with open(os.path.join(config.model_path, config.exp_name + ".txt"),
          'w') as fh:
    model.summary(positions=[.3, .55, .67, 1.],
                  print_fn=lambda x: fh.write(x + '\n'))
Example no. 21
def trainer(train_dir_name, eval_dir_name, out_dir_name):
    '''
    Train the model
    :param train_dir_name: path to training set directory
    :param eval_dir_name: path to evaluation set directory
    :param out_dir_name: output path to save model files
    :return: None
    '''
    if not os.path.exists(out_dir_name):
        os.makedirs(out_dir_name)

    train_features, train_texts = load_data(train_dir_name)
    eval_features, eval_texts = load_data(eval_dir_name)
    steps_per_epoch = len(train_texts) // BATCH_SIZE
    print('Image file format is %s' % IMAGE_FILE_FORMAT)
    print('Keras backend file format is %s' % K.image_data_format())
    print('Training images input shape: {}'.format(train_features.shape))
    print('Evaluation images input shape: {}'.format(eval_features.shape))
    print('Training texts shape: {}'.format(len(train_texts)))
    print('Evaluation texts input shape: {}'.format(len(eval_texts)))
    print('Epoch size: {}'.format(EPOCHS))
    print('Batch size: {}'.format(BATCH_SIZE))
    print('Steps per epoch: {}'.format(int(steps_per_epoch)))
    print('Kernel Initializer: {}'.format(KERNEL_INIT))

    with open(os.path.join(out_dir_name, 'config.txt'), 'w') as fh:
        with redirect_stdout(fh):
            print('Image file format is %s' % IMAGE_FILE_FORMAT)
            print('Keras backend file format is %s' % K.image_data_format())
            print('Training images input shape: {}'.format(
                train_features.shape))
            print('Evaluation images input shape: {}'.format(
                eval_features.shape))
            print('Training texts shape: {}'.format(len(train_texts)))
            print('Evaluation texts input shape: {}'.format(len(eval_texts)))
            print('Epoch size: {}'.format(EPOCHS))
            print('Batch size: {}'.format(BATCH_SIZE))
            print('Steps per epoch: {}'.format(int(steps_per_epoch)))
            print('Kernel Initializer: {}'.format(KERNEL_INIT))

    # Prepare tokenizer to create the vocabulary
    tokenizer = Tokenizer(filters='', split=" ", lower=False)
    # Create the vocabulary
    tokenizer.fit_on_texts([load_doc('../data/code.vocab')])

    # Initialize data generators for training and validation
    train_generator = DataGenerator(train_texts,
                                    train_features,
                                    batch_size=BATCH_SIZE,
                                    tokenizer=tokenizer,
                                    shuffle=True,
                                    image_data_format=IMAGE_FILE_FORMAT)
    validation_generator = DataGenerator(eval_texts,
                                         eval_features,
                                         batch_size=BATCH_SIZE,
                                         tokenizer=tokenizer,
                                         shuffle=True,
                                         image_data_format=IMAGE_FILE_FORMAT)

    # Initialize model
    model = CodeGeneratorModel(IMAGE_SIZE,
                               out_dir_name,
                               image_file_format=IMAGE_FILE_FORMAT,
                               kernel_initializer=KERNEL_INIT)
    model.save_model()
    model.summarize()
    model.summarize_image_model()
    model.plot_model()

    if VALIDATE:
        model.fit_generator(generator=train_generator,
                            steps_per_epoch=steps_per_epoch,
                            callbacks=generate_callbacks(out_dir_name),
                            validation_data=validation_generator)
    else:
        model.fit_generator(generator=train_generator,
                            steps_per_epoch=steps_per_epoch,
                            callbacks=generate_callbacks(out_dir_name))
Example no. 22
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("missing or invalid arguments")
        exit(0)

    # create the experiments dirs
    create_dirs([
        config['result_dir'], config['checkpoint_dir'],
        config['checkpoint_dir_lstm']
    ])
    # save the config in a txt file
    save_config(config)
    # create tensorflow session
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
    # create your data generator
    data = DataGenerator(config)
    # create a CNN model
    model_vae = VAEmodel(config)
    # create a trainer for VAE model
    trainer_vae = vaeTrainer(sess, model_vae, data, config)
    model_vae.load(sess)
    # here you train your model
    if config['TRAIN_VAE']:
        if config['num_epochs_vae'] > 0:
            trainer_vae.train()

    if config['TRAIN_LSTM']:
        # create a lstm model class instance
        lstm_model = lstmKerasModel(data)

        # produce the embedding of all sequences for training of lstm model
        # process the windows in sequence to get their VAE embeddings
        lstm_model.produce_embeddings(config, model_vae, data, sess)

        # Create a basic model instance
        lstm_nn_model = lstm_model.create_lstm_model(config)
        lstm_nn_model.summary()  # Display the model's architecture
        # checkpoint path
        checkpoint_path = config['checkpoint_dir_lstm']\
                          + "cp.ckpt"
        # Create a callback that saves the model's weights
        cp_callback = tf.keras.callbacks.ModelCheckpoint(
            filepath=checkpoint_path, save_weights_only=True, verbose=1)
        # load weights if possible
        lstm_model.load_model(lstm_nn_model, config, checkpoint_path)

        # start training
        if config['num_epochs_lstm'] > 0:
            lstm_model.train(config, lstm_nn_model, cp_callback)

        # make a prediction on the test set using the trained model
        lstm_embedding = lstm_nn_model.predict(
            lstm_model.x_test, batch_size=config['batch_size_lstm'])
        print(lstm_embedding.shape)

        # visualise the first 10 test sequences
        for i in range(10):
            lstm_model.plot_lstm_embedding_prediction(i, config, model_vae,
                                                      sess, data,
                                                      lstm_embedding)
Example no. 23
def main():

    # Check command line arguments.
    #if len(sys.argv) != 2 or sys.argv[1] not in model_names:
    #    print("Must provide name of model.")
    #    print("Options: " + " ".join(model_names))
    #    exit(0)
    #model_name = sys.argv[1]

    # Data preparation.
    nb_classes = 40
    train_file = './ModelNet40/ply_data_train.h5'
    test_file = './ModelNet40/ply_data_test.h5'

    # Hyperparameters.
    number_of_points = 1024
    epochs = 100
    batch_size = 32

    # Data generators for training and validation.
    train = DataGenerator(train_file,
                          batch_size,
                          number_of_points,
                          nb_classes,
                          train=True)
    val = DataGenerator(test_file,
                        batch_size,
                        number_of_points,
                        nb_classes,
                        train=False)

    # Create the model.
    if model_name == "pointnet":
        model = create_pointnet(number_of_points, nb_classes)
    elif model_name == "gapnet":
        model = GAPNet()
    model.summary()

    # Ensure output paths.
    output_path = "logs"
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    output_path = os.path.join(output_path, model_name)
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    output_path = os.path.join(output_path, training_name)
    if os.path.exists(output_path):
        shutil.rmtree(output_path)
    os.mkdir(output_path)

    # Compile the model.
    lr = 0.0001
    adam = Adam(lr=lr)
    model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Checkpoint callback.
    checkpoint = ModelCheckpoint(os.path.join(output_path, "model.h5"),
                                 monitor="val_acc",
                                 save_weights_only=True,
                                 save_best_only=True,
                                 verbose=1)

    # Logging training progress with tensorboard.
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir=output_path,
        histogram_freq=0,
        batch_size=32,
        write_graph=True,
        write_grads=False,
        write_images=True,
        embeddings_freq=0,
        embeddings_layer_names=None,
        embeddings_metadata=None,
        embeddings_data=None,
        update_freq="epoch")

    callbacks = []
    #callbacks.append(checkpoint)
    callbacks.append(onetenth_50_75(lr))
    callbacks.append(tensorboard_callback)

    # Train the model.
    history = model.fit_generator(train.generator(),
                                  steps_per_epoch=9840 // batch_size,
                                  epochs=epochs,
                                  validation_data=val.generator(),
                                  validation_steps=2468 // batch_size,
                                  callbacks=callbacks,
                                  verbose=1)

    # Save history and model.
    plot_history(history, output_path)
    save_history(history, output_path)
    model.save_weights(os.path.join(output_path, "model_weights.h5"))
Example no. 24
                    learning_rate=5e-4,
                    num_epochs=num_epochs,
                    decay_rate=0.8,
                    decay_steps=1,
                    weights="imagenet",
                    verbose=1,
                    train_image_dir=train_images_dir,
                    model_filename=model_filename)

model.load("epoch2.hdf5")

#model.load(model_filename)  # Use previous checkpoint

if (TRAINING == True):

    df = read_trainset(trainset_filename)
    ss = ShuffleSplit(n_splits=10, test_size=0.1,
                      random_state=816).split(df.index)
    # let's use the first fold only
    train_idx, valid_idx = next(ss)

    # Train the model
    model.fit_model(df.iloc[train_idx], df.iloc[valid_idx])

test_df = read_testset(testset_filename)
test_generator = DataGenerator(test_df.index, None, 1, img_shape,
                               test_images_dir)
best_model = K.models.load_model(model.model_filename, compile=False)

prediction_df = create_submission(best_model, test_generator, test_df)