Example #1
    def fit_model(self, train_df, valid_df):

        # callbacks
        checkpointer = K.callbacks.ModelCheckpoint(filepath=self.model_filename, verbose=1, save_best_only=True)
        scheduler = K.callbacks.LearningRateScheduler(lambda epoch: self.learning_rate * pow(self.decay_rate, floor(epoch / self.decay_steps)))

        self.model.fit_generator(
            DataGenerator(
                train_df.index,
                train_df,
                self.batch_size,
                self.input_dims,
                self.train_images_dir
            ),
            epochs=self.num_epochs,
            verbose=self.verbose,
            validation_data=DataGenerator(
                valid_df.index,
                valid_df,
                self.batch_size,
                self.input_dims,
                self.train_images_dir
            ),
            use_multiprocessing=True,
            workers=4,
            callbacks=[PredictionCheckpoint(), scheduler, checkpointer]
        )
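Most of the Keras examples on this page hand a DataGenerator instance (or its generator output) straight to fit_generator. The exact class differs from project to project, but the keras.utils.Sequence flavor these snippets assume looks roughly like the sketch below; the in-memory arrays, batching logic, and parameter names here are illustrative assumptions, not code from any of the examples.

import numpy as np
from tensorflow import keras

class DataGenerator(keras.utils.Sequence):
    """Minimal Sequence-style generator: one (X_batch, y_batch) tuple per index."""

    def __init__(self, X, y, batch_size=32, shuffle=True):
        self.X, self.y = X, y
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.indices = np.arange(len(X))

    def __len__(self):
        # batches per epoch; fit_generator uses this when no steps_per_epoch is given
        return int(np.ceil(len(self.X) / self.batch_size))

    def __getitem__(self, idx):
        sel = self.indices[idx * self.batch_size:(idx + 1) * self.batch_size]
        return self.X[sel], self.y[sel]

    def on_epoch_end(self):
        # reshuffle between epochs so batches differ
        if self.shuffle:
            np.random.shuffle(self.indices)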
Example #2
def train():
    from data_trans import process
    process()
    from data_loader import train_data, SpoSearcher, dev_data, DataGenerator

    init_keras_config()
    train_model, subject_model, object_model = model()

    EMAer = ExponentialMovingAverage(train_model)
    EMAer.inject()

    spoer = SpoSearcher(train_data)
    train_D = DataGenerator(train_data)

    evaluator = Evaluate(train_model,
                         EMAer=EMAer,
                         dev_data=dev_data,
                         spoer=spoer,
                         subject_model=subject_model,
                         object_model=object_model)

    train_model.fit_generator(train_D.__iter__(),
                              steps_per_epoch=len(train_D),
                              epochs=120,
                              callbacks=[evaluator])
Example #3
def get_stats(data_dir):
    reward_vals = []

    files = DataGenerator.list_np_files(data_dir)

    for file in files:
        reward_vals.append(DataGenerator.extract_reward(file))

    max_reward = np.max(reward_vals)
    mean_reward = np.average(reward_vals)

    return max_reward, mean_reward
Example #4
def main():
    config = args.parse_args()
    # Load pre-defined config if possible
    if config.config:
        config = load_config(config.config)

    config_str = " | ".join([
        "{}={}".format(attr.upper(), value)
        for attr, value in vars(config).items()
    ])
    print(config_str)

    # create the experiments dirs
    config = create_dirs(config)

    # create tensorflow session
    device_config = tf.ConfigProto()
    device_config.gpu_options.allow_growth = True
    sess = tf.Session(config=device_config)

    # build preprocessor
    preprocessor = DynamicPreprocessor(config)

    # load data, preprocess and generate data
    data = DataGenerator(preprocessor, config)

    # create tensorboard summary writer
    summary_writer = SummaryWriter(sess, config)

    # create trainer and pass all the previous components to it
    trainer = Seq2SeqTrainer(sess, preprocessor, data, config, summary_writer)

    # here you train your model
    trainer.train()
Example #5
def main():
    # create the experiments dirs
    create_dirs(config)

    # create tensorflow session
    sess = tf.Session()

    # build preprocessor
    preprocessor = Preprocessor(config)

    # load data, preprocess and generate data
    data = DataGenerator(preprocessor, config)

    # create an instance of the model you want
    model = TextCNN.TextCNN(preprocessor, config)

    # create tensorboard logger
    logger = Logger(sess, config)

    # create trainer and pass all the previous components to it
    trainer = Trainer(sess, model, data, config, logger)

    # load model if exists
    model.load(sess)

    # here you train your model
    trainer.train()
Example #6
    def _load_pretrained_model(self):
        base_dir = "/media/scatter/scatterdisk/reply_matching_model/runs/delstm_1024_nsrandom4_lr1e-3/"
        config_dir = base_dir + "config.json"
        best_model_dir = base_dir + "best_loss/best_loss.ckpt"
        model_config = load_config(config_dir)
        model_config.add_echo = False
        preprocessor = DynamicPreprocessor(model_config)
        preprocessor.build_preprocessor()

        infer_config = load_config(config_dir)
        setattr(infer_config, "tokenizer", "SentencePieceTokenizer")
        setattr(
            infer_config, "soynlp_scores",
            "/media/scatter/scatterdisk/tokenizer/soynlp_scores.sol.100M.txt")
        infer_preprocessor = DynamicPreprocessor(infer_config)
        infer_preprocessor.build_preprocessor()
        graph = tf.Graph()
        tf_config = tf.ConfigProto()
        tf_config.gpu_options.allow_growth = True

        with graph.as_default():
            Model = get_model(model_config.model)
            data = DataGenerator(preprocessor, model_config)
            infer_model = Model(data, model_config)
            infer_sess = tf.Session(config=tf_config, graph=graph)
            infer_sess.run(tf.global_variables_initializer())
            infer_sess.run(tf.local_variables_initializer())

        infer_model.load(infer_sess, model_dir=best_model_dir)
        self.infer_preprocessor = infer_preprocessor
        return infer_model, infer_sess
Example #7
    def test_dataloader(self):
        test_set = DataGenerator(self.test_data_path, data_col=self.data_col)

        loader = torch.utils.data.DataLoader(test_set,
                                             batch_size=self.batch_size,
                                             shuffle=False,
                                             num_workers=6)
        return loader
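Example #7 is the one PyTorch snippet on this page: whatever DataGenerator is handed to torch.utils.data.DataLoader must behave as a map-style Dataset. A minimal sketch under that assumption, treating data_path as a CSV and data_col as the feature column (the "label" column name is an illustrative guess):

import pandas as pd
import torch
from torch.utils.data import Dataset

class DataGenerator(Dataset):
    """Map-style dataset: one CSV row per sample."""

    def __init__(self, data_path, data_col):
        self.frame = pd.read_csv(data_path)
        self.data_col = data_col

    def __len__(self):
        return len(self.frame)

    def __getitem__(self, idx):
        row = self.frame.iloc[idx]
        x = torch.tensor(row[self.data_col], dtype=torch.float32)
        y = torch.tensor(row["label"], dtype=torch.float32)
        return x, y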
Example #8
def main():
    test_df = read_testset(testset_filename)
    test_generator = DataGenerator(list_IDs=test_df.index,
                                   batch_size=batch_size,
                                   img_size=img_size,
                                   img_dir=test_images_dir)
    best_model = keras.models.load_model(MODEL_NAME, compile=False)
    create_submission(best_model, test_generator, test_df)
Example #9
def train_on_train_test_split():
    train_config = get_config()
    bert_config = get_bert_config(train_config)
    cased = train_config.BERT_DIR.split('/')[-1].startswith('cased')
    # lowercase only for uncased checkpoints
    tokenizer = FullTokenizer(bert_config.vocab, do_lower_case=not cased)

    with tf.device('/cpu:0'):
        model = get_bert_base_model(bert_config)

    text, label = load_data(os.path.join(train_config.DATA_DIR, 'train.csv'))
    train_text, val_text, train_label, val_label = train_test_split(
        text, label, test_size=0.055, random_state=59)
    train_gen = DataGenerator(train_text,
                              train_label,
                              tokenizer,
                              batch_size=32)

    val_text = tokenize_examples(val_text, tokenizer, max_len=512)
    val_text = seq_padding(val_text)

    logger = Logger(model=model,
                    val_text=val_text,
                    val_label=(val_label > 0.5).astype(np.float32))

    # OPTIMIZER PARAMs
    lr = 2e-5
    weight_decay = 0.01
    bsz = 32
    decay_steps = 1 * len(train_gen)
    warmup_steps = int(0.1 * decay_steps)

    optimizer = AdamWarmup(
        decay_steps=decay_steps,
        warmup_steps=warmup_steps,
        lr=lr,
        weight_decay=weight_decay,
    )

    parallel_model = multi_gpu_model(model, gpus=4)
    parallel_model.compile(loss='binary_crossentropy', optimizer=optimizer)
    parallel_model.fit_generator(train_gen.__iter__(),
                                 steps_per_epoch=len(train_gen),
                                 epochs=1,
                                 callbacks=[logger],
                                 max_queue_size=100)
Example #10
def main():
    nb_classes = 40
    train_file = './ModelNet40/ply_data_train.h5'
    test_file = './ModelNet40/ply_data_test.h5'

    epochs = 100
    batch_size = 32

    train = DataGenerator(train_file, batch_size, nb_classes, train=True)
    val = DataGenerator(test_file, batch_size, nb_classes, train=False)

    model = PointNet(nb_classes)
    model.summary()
    lr = 0.0001
    adam = Adam(lr=lr)
    model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    if not os.path.exists('./results/'):
        os.mkdir('./results/')
    checkpoint = ModelCheckpoint('./results/pointnet.h5', monitor='val_acc',
                                 save_weights_only=True, save_best_only=True,
                                 verbose=1)
    history = model.fit_generator(train.generator(),
                                  steps_per_epoch=9840 // batch_size,
                                  epochs=epochs,
                                  validation_data=val.generator(),
                                  validation_steps=2468 // batch_size,
                                  callbacks=[checkpoint, onetenth_50_75(lr)],
                                  verbose=1)

    plot_history(history, './results/')
    save_history(history, './results/')
    model.save_weights('./results/pointnet_weights.h5')
Example #11
def train():

    # create dataset
    dataGenerator = DataGenerator("../../data/train2dae.csv", batch_size)
    features_input = dataGenerator.getNFeatures()
    steps_per_epoch = dataGenerator.getSteps()
    #generator = dataGenerator.generate()

    m = model.get_model2(features_input, nhidden)
    decay_rate = learning_rate / NEPOCHS
    optimizer = optimizers.Adam(lr=learning_rate, decay=1 - 0.995)

    callbacks = [
        ModelCheckpoint(filepath="./best_m",
                        monitor='val_loss',
                        save_best_only=True),
        EarlyStopping(monitor='val_loss', patience=2)
    ]

    m.compile(loss="mean_squared_error", optimizer=optimizer, metrics=["mse"])

    m.fit_generator(generator=dataGenerator.generate(),
                    steps_per_epoch=steps_per_epoch,
                    epochs=NEPOCHS,
                    callbacks=callbacks,
                    validation_data=dataGenerator.generate(),
                    validation_steps=steps_per_epoch)
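Examples #11 and #15 use a third flavor of DataGenerator: a plain class whose generate() method is an endless Python generator, with getNFeatures() and getSteps() reporting the input width and the batches per epoch that fit_generator needs. A rough sketch under those assumptions (the input == target pairing mirrors the autoencoder/MSE setup above, but the internals are guesses):

import math
import pandas as pd

class DataGenerator:
    """Plain-generator flavor: generate() yields batches forever."""

    def __init__(self, csv_path, batch_size):
        self.data = pd.read_csv(csv_path).values
        self.batch_size = batch_size

    def getNFeatures(self):
        return self.data.shape[1]

    def getSteps(self):
        return math.ceil(len(self.data) / self.batch_size)

    def generate(self):
        while True:  # fit_generator expects the generator to loop indefinitely
            for start in range(0, len(self.data), self.batch_size):
                batch = self.data[start:start + self.batch_size]
                yield batch, batch  # denoising-autoencoder style: target == input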
Example #12
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("missing or invalid arguments")
        exit(0)

    # create the experiments dirs
    create_dirs([
        config['result_dir'], config['checkpoint_dir'],
        config['checkpoint_dir_lstm']
    ])
    # save the config in a txt file
    save_config(config)
    sess_centralized = tf.Session(config=tf.ConfigProto())
    data = DataGenerator(config)
    model_vae = VAEmodel(config, "Centralized")
    model_vae.load(sess_centralized)
    trainer_vae = vaeTrainer(sess_centralized, model_vae, data, config)
    # here you train your model
    if config['TRAIN_VAE']:
        if config['vae_epochs_per_comm_round'] > 0:
            trainer_vae.train()

    if config['TRAIN_LSTM']:
        # create a lstm model class instance
        lstm_model = lstmKerasModel("Centralized", config)

        # produce the embedding of all sequences for training of lstm model
        # process the windows in sequence to get their VAE embeddings
        lstm_model.produce_embeddings(model_vae, data, sess_centralized)

        # Create a basic model instance
        lstm_nn_model = lstm_model.lstm_nn_model
        lstm_nn_model.summary()  # Display the model's architecture
        # checkpoint path
        checkpoint_path = lstm_model.config['checkpoint_dir_lstm']\
                                        + "cp_{}.ckpt".format(lstm_model.name)
        # Create a callback that saves the model's weights
        cp_callback = tf.keras.callbacks.ModelCheckpoint(
            filepath=checkpoint_path, save_weights_only=True, verbose=1)
        # load weights if possible
        # lstm_model.load_model(lstm_nn_model, config, checkpoint_path)

        # start training
        if config['lstm_epochs_per_comm_round'] > 0:
            lstm_model.train(lstm_nn_model, cp_callback)

    sess_centralized.close()
Example #13
def main():
    nb_classes = 40
    train_file = './ModelNet40/ply_data_train.h5'
    test_file = './ModelNet40/ply_data_test.h5'
    # train_num_points = 9840
    # test_num_points = 2468

    # below variables are for dev propose
    train_num_points = 1648
    test_num_points = 420

    epochs = 100
    batch_size = 32

    train = DataGenerator(train_file, batch_size, nb_classes, train=True)
    val = DataGenerator(test_file, batch_size, nb_classes, train=False)

    model = pointnet2(nb_classes)
    model.summary()

    lr = 0.0001
    adam = Adam(lr=lr)
    model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    if not os.path.exists('./results/'):
        os.mkdir('./results/')

    last_epoch, last_meta = get_last_status(model)

    checkpoint = MetaCheckpoint('./results/pointnet.h5',
                                monitor='val_acc',
                                save_weights_only=True,
                                save_best_only=True,
                                verbose=1,
                                meta=last_meta)

    history = model.fit_generator(
        train.generator(),
        steps_per_epoch=train_num_points // batch_size,
        epochs=epochs,
        validation_data=val.generator(),
        validation_steps=test_num_points // batch_size,
        callbacks=[checkpoint, onetenth_50_75(lr)],
        verbose=1,
        initial_epoch=last_epoch + 1)

    plot_history(history, './results/')
    save_history(history, './results/')
    model.save_weights('./results/pointnet_weights.h5')
Example #14
def predict(model_points, predict_points):

    print(
        "Predicting with {} points on a model trained with {} points.".format(
            predict_points, model_points))

    test_file = "./ModelNet40/ply_data_test.h5"
    test_file = h5py.File(test_file, mode="r")

    nb_classes = 40

    val = DataGenerator(test_file, 32, predict_points, nb_classes, train=False)

    model = PointNet_cls(nb_classes, predict_points)
    model.load_weights("./results/pointnet-" + str(model_points) + ".h5")
    pred = np.argmax(model.predict(val), axis=1)

    labels = np.squeeze(
        [test_file["label"][x] for x in range(test_file["label"].shape[0])])
    labels = np.array([int(x) for x in labels])

    print("Accuracy: {:.5}%\n".format(
        100 * sklearn.metrics.accuracy_score(labels[:pred.shape[0]], pred)))
Example #15
def train():

    # create dataset
    dataGenerator = DataGenerator("../../data/train_woe.csv",
                                  "../../data/labels_train.csv",
                                  batch_size)
    features_input = dataGenerator.getNFeatures()
    steps_per_epoch = dataGenerator.getSteps()
    # generator = dataGenerator.generate()

    m = model.get_model(features_input, nhidden)
    decay_rate = learning_rate / NEPOCHS
    optimizer = optimizers.Adam(lr=learning_rate, decay=decay_rate)

    callbacks = [EarlyStopping(monitor='val_loss', patience=5),
                 ModelCheckpoint(filepath="./best_m", monitor='val_loss',
                                 save_best_only=True)]

    m.compile(loss=binary_crossentropy,
              optimizer=optimizer,
              metrics=[binary_crossentropy, auc_roc])

    m.fit_generator(generator=dataGenerator.generate(),
                    steps_per_epoch=steps_per_epoch,
                    epochs=NEPOCHS,
                    callbacks=callbacks,
                    validation_data=dataGenerator.generate(),
                    validation_steps=steps_per_epoch)

    y_train = m.predict(dataGenerator.getData())
    print(y_train)
Example #16
model = Model(inputs=input_points, outputs=[prediction])


xx = np.random.rand(32,2048, 3) - 0.5
y = model.predict_on_batch(xx)



nb_classes = 40
train_file = '/home/changetest/datasets/Modelnet40/ply_data_train.h5'
test_file = '/home/changetest/datasets/Modelnet40/ply_data_test.h5'

epochs = 100
batch_size = 32

train = DataGenerator(train_file, batch_size, nb_classes, train=True)
val = DataGenerator(test_file, batch_size, nb_classes, train=False)

model.summary()
lr = 0.0001
adam = Adam(lr=lr)
model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])


if not os.path.exists('./results/'):
    os.mkdir('./results/')
checkpoint = ModelCheckpoint('./results/pointnet.h5', monitor='val_acc',
                             save_weights_only=True, save_best_only=True,verbose=1)
model.fit_generator(train.generator(), steps_per_epoch=9840 // batch_size,
                    epochs=epochs, validation_data=val.generator(),
                    validation_steps=2468 // batch_size,
                    callbacks=[checkpoint, onetenth_50_75(lr)],
                    verbose=1)
Example #17
def trainer(train_dir_name, eval_dir_name, out_dir_name):
    '''
    Train the model
    :param train_dir_name: path to training set directory
    :param eval_dir_name: path to evaluation set directory
    :param out_dir_name: output path to save model files
    :return: None
    '''
    if not os.path.exists(out_dir_name):
        os.makedirs(out_dir_name)

    train_features, train_texts = load_data(train_dir_name)
    eval_features, eval_texts = load_data(eval_dir_name)
    steps_per_epoch = len(train_texts) / BATCH_SIZE
    print('Image file format is %s' % IMAGE_FILE_FORMAT)
    print('Keras backend file format is %s' % K.image_data_format())
    print('Training images input shape: {}'.format(train_features.shape))
    print('Evaluation images input shape: {}'.format(eval_features.shape))
    print('Training texts shape: {}'.format(len(train_texts)))
    print('Evaluation texts input shape: {}'.format(len(eval_texts)))
    print('Epoch size: {}'.format(EPOCHS))
    print('Batch size: {}'.format(BATCH_SIZE))
    print('Steps per epoch: {}'.format(int(steps_per_epoch)))
    print('Kernel Initializer: {}'.format(KERNEL_INIT))

    with open(os.path.join(out_dir_name, 'config.txt'), 'w') as fh:
        with redirect_stdout(fh):
            print('Image file format is %s' % IMAGE_FILE_FORMAT)
            print('Keras backend file format is %s' % K.image_data_format())
            print('Training images input shape: {}'.format(
                train_features.shape))
            print('Evaluation images input shape: {}'.format(
                eval_features.shape))
            print('Training texts shape: {}'.format(len(train_texts)))
            print('Evaluation texts input shape: {}'.format(len(eval_texts)))
            print('Epoch size: {}'.format(EPOCHS))
            print('Batch size: {}'.format(BATCH_SIZE))
            print('Steps per epoch: {}'.format(int(steps_per_epoch)))
            print('Kernel Initializer: {}'.format(KERNEL_INIT))

    # Prepare tokenizer to create the vocabulary
    tokenizer = Tokenizer(filters='', split=" ", lower=False)
    # Create the vocabulary
    tokenizer.fit_on_texts([load_doc('../data/code.vocab')])

    # Initialize data generators for training and validation
    train_generator = DataGenerator(train_texts,
                                    train_features,
                                    batch_size=BATCH_SIZE,
                                    tokenizer=tokenizer,
                                    shuffle=True,
                                    image_data_format=IMAGE_FILE_FORMAT)
    validation_generator = DataGenerator(eval_texts,
                                         eval_features,
                                         batch_size=BATCH_SIZE,
                                         tokenizer=tokenizer,
                                         shuffle=True,
                                         image_data_format=IMAGE_FILE_FORMAT)

    # Initialize model
    model = CodeGeneratorModel(IMAGE_SIZE,
                               out_dir_name,
                               image_file_format=IMAGE_FILE_FORMAT,
                               kernel_initializer=KERNEL_INIT)
    model.save_model()
    model.summarize()
    model.summarize_image_model()
    model.plot_model()

    if VALIDATE:
        model.fit_generator(generator=train_generator,
                            steps_per_epoch=steps_per_epoch,
                            callbacks=generate_callbacks(out_dir_name),
                            validation_data=validation_generator)
    else:
        model.fit_generator(generator=train_generator,
                            steps_per_epoch=steps_per_epoch,
                            callbacks=generate_callbacks(out_dir_name))
Example #18
import pandas as pd
import numpy as np
from data_loader import DataGenerator
from model import unet, update_model, model_conv_4_colors, conv_net
from utils import *
from keras.models import load_model
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
import tqdm
import os


saved_encoder_net = 'vgg_cells_last.hdf5'
saved_detector_net = 'detector_net.hdf5'

train_dataset = load_dataset()
train_datagen = DataGenerator.create_train(train_dataset, 10)

model = conv_net()

image = DataGenerator.load_set_from_image(os.path.join(train_path, '000a6c98-bb9b-11e8-b2b9-ac1f6b6435d0'))

#model.load_weights(os.path.join(data_path, saved_detector_net))
'''callbacks = [
    ReduceLROnPlateau(monitor='val_categorical_accuracy', factor=0.5, patience=5,
                      min_delta=0.005, mode='max', cooldown=3, verbose=1),
             ModelCheckpoint(filepath=os.path.join(data_path, saved_detector_net),
                             monitor='loss', save_best_only=True, verbose=1)
]'''
callbacks = [EarlyStopping(monitor='loss', patience=2),
             ModelCheckpoint(filepath=os.path.join(data_path, saved_detector_net),
                             monitor='loss', save_best_only=True, verbose=1)]
Example #19
def main():

    # Check command line arguments.
    #if len(sys.argv) != 2 or sys.argv[1] not in model_names:
    #    print("Must provide name of model.")
    #    print("Options: " + " ".join(model_names))
    #    exit(0)
    #model_name = sys.argv[1]

    # Data preparation.
    nb_classes = 40
    train_file = './ModelNet40/ply_data_train.h5'
    test_file = './ModelNet40/ply_data_test.h5'

    # Hyperparameters.
    number_of_points = 1024
    epochs = 100
    batch_size = 32

    # Data generators for training and validation.
    train = DataGenerator(train_file,
                          batch_size,
                          number_of_points,
                          nb_classes,
                          train=True)
    val = DataGenerator(test_file,
                        batch_size,
                        number_of_points,
                        nb_classes,
                        train=False)

    # Create the model.
    if model_name == "pointnet":
        model = create_pointnet(number_of_points, nb_classes)
    elif model_name == "gapnet":
        model = GAPNet()
    model.summary()

    # Ensure output paths.
    output_path = "logs"
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    output_path = os.path.join(output_path, model_name)
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    output_path = os.path.join(output_path, training_name)
    if os.path.exists(output_path):
        shutil.rmtree(output_path)
    os.mkdir(output_path)

    # Compile the model.
    lr = 0.0001
    adam = Adam(lr=lr)
    model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Checkpoint callback.
    checkpoint = ModelCheckpoint(os.path.join(output_path, "model.h5"),
                                 monitor="val_acc",
                                 save_weights_only=True,
                                 save_best_only=True,
                                 verbose=1)

    # Logging training progress with tensorboard.
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir=output_path,
        histogram_freq=0,
        batch_size=32,
        write_graph=True,
        write_grads=False,
        write_images=True,
        embeddings_freq=0,
        embeddings_layer_names=None,
        embeddings_metadata=None,
        embeddings_data=None,
        update_freq="epoch")

    callbacks = []
    #callbacks.append(checkpoint)
    callbacks.append(onetenth_50_75(lr))
    callbacks.append(tensorboard_callback)

    # Train the model.
    history = model.fit_generator(train.generator(),
                                  steps_per_epoch=9840 // batch_size,
                                  epochs=epochs,
                                  validation_data=val.generator(),
                                  validation_steps=2468 // batch_size,
                                  callbacks=callbacks,
                                  verbose=1)

    # Save history and model.
    plot_history(history, output_path)
    save_history(history, output_path)
    model.save_weights(os.path.join(output_path, "model_weights.h5"))
Example #20
prediction = Flatten()(c)
'''
model = Model(inputs=input_points, outputs=[out_0, prediction])
xx = np.random.rand(32,2048, 3) - 0.5
y = model.predict_on_batch(xx)
'''

model = Model(inputs=input_points, outputs=[prediction])
nb_classes = 40
train_file = '/home/changetest/datasets/Modelnet40/ply_data_train.h5'
test_file = '/home/changetest/datasets/Modelnet40/ply_data_test.h5'

epochs = 100
batch_size = 32

train = DataGenerator(train_file, batch_size, nb_classes, train=True)
val = DataGenerator(test_file, batch_size, nb_classes, train=False)

model.summary()
lr = 0.0001
adam = Adam(lr=lr)
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

if not os.path.exists('./results/'):
    os.mkdir('./results/')
checkpoint = ModelCheckpoint('./results/pointnet.h5',
                             monitor='val_acc',
                             save_weights_only=True,
                             save_best_only=True,
                             verbose=1)
Example #21
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import time
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization, concatenate
import matplotlib
import matplotlib.pyplot as plt

# Generate Dataset
from data_loader import DataGenerator

data = DataGenerator()
data.print()
x_train = data.x_train
x_test = data.x_test
y_train = data.y_train
y_test = data.y_test

# Training Parameters
num_epochs = 10
display_step = 1
batch_size = 4

# Network Parameters
WIDTH = data.WIDTH
HEIGHT = data.HEIGHT
CHANNELS = data.CHANNELS
Example #22
        index = config.cls_classes - i * 50
        s = s[0:30 * index, :]
        l = l[0:30 * index, :]
    x_valid.extend(s)
    y_valid.extend(l)
    del s
x_valid = np.array(x_valid)
y_valid = np.array(y_valid)
print("x_valid: {} | {:.2f} ~ {:.2f}".format(x_valid.shape, np.min(x_valid),
                                             np.max(x_valid)))
print("y_valid: {} | {:.2f} ~ {:.2f}".format(y_valid.shape, np.min(y_valid),
                                             np.max(y_valid)))

training_generator = DataGenerator(x_train,
                                   y_train,
                                   batch_size=config.batch_size,
                                   dim=(config.input_rows, config.input_cols,
                                        config.input_deps),
                                   nb_classes=config.cls_classes)
validation_generator = DataGenerator(x_valid,
                                     y_valid,
                                     batch_size=config.batch_size,
                                     dim=(config.input_rows, config.input_cols,
                                          config.input_deps),
                                     nb_classes=config.cls_classes)

if os.path.exists(os.path.join(config.model_path, config.exp_name + ".txt")):
    os.remove(os.path.join(config.model_path, config.exp_name + ".txt"))
with open(os.path.join(config.model_path, config.exp_name + ".txt"),
          'w') as fh:
    model.summary(positions=[.3, .55, .67, 1.],
                  print_fn=lambda x: fh.write(x + '\n'))
Example #23
import keras
from model_cls import PointNet
from data_loader import DataGenerator

nb_classes = 40
test_file = './ModelNet40/ply_data_test.h5'
epochs = 100
batch_size = 32
model = PointNet(nb_classes)
# evaluate_generator needs a compiled model and, for a plain generator, an explicit step count
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
val = DataGenerator(test_file, batch_size, nb_classes, train=False)
model.load_weights("./results/pointnet.h5")
loss, acc = model.evaluate_generator(val.generator(),
                                     steps=2468 // batch_size,  # ModelNet40 test split size
                                     verbose=1)
print("loss is {}, and acc is {}".format(loss, acc))

Example #24

from __future__ import absolute_import, division, print_function
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization, concatenate
import matplotlib
import matplotlib.pyplot as plt

# Generate Dataset
from data_loader import DataGenerator
data = DataGenerator()
data.print()

# Training Parameters
num_epochs = 10
display_step = 1
batch_size = 4

# Network Parameters
WIDTH = data.WIDTH
HEIGHT = data.HEIGHT
CHANNELS = data.CHANNELS
NUM_INPUTS = WIDTH * HEIGHT * CHANNELS
NUM_OUTPUTS = 2

Example #25
                    learning_rate=5e-4,
                    num_epochs=num_epochs,
                    decay_rate=0.8,
                    decay_steps=1,
                    weights="imagenet",
                    verbose=1,
                    train_image_dir=train_images_dir,
                    model_filename=model_filename)

model.load("epoch2.hdf5")

#model.load(model_filename)  # Use previous checkpoint

if TRAINING:

    df = read_trainset(trainset_filename)
    ss = ShuffleSplit(n_splits=10, test_size=0.1,
                      random_state=816).split(df.index)
    # lets go for the first fold only
    train_idx, valid_idx = next(ss)

    # Train the model
    model.fit_model(df.iloc[train_idx], df.iloc[valid_idx])

test_df = read_testset(testset_filename)
test_generator = DataGenerator(test_df.index, None, 1, img_shape,
                               test_images_dir)
best_model = K.models.load_model(model.model_filename, compile=False)

prediction_df = create_submission(best_model, test_generator, test_df)
Example #26
    infer_config = load_config(config_dir)
    setattr(infer_config, "tokenizer", TOKENIZER)
    setattr(infer_config, "soynlp_scores",
            "/media/scatter/scatterdisk/tokenizer/soynlp_scores.sol.100M.txt")
    infer_preprocessor = DynamicPreprocessor(infer_config)
    infer_preprocessor.build_preprocessor()

    model_config.add_echo = False

    graph = tf.Graph()
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True

    with graph.as_default():
        Model = get_model(model_config.model)
        data = DataGenerator(preprocessor, model_config)
        infer_model = Model(data, model_config)
        infer_sess = tf.Session(config=tf_config, graph=graph)
        infer_sess.run(tf.global_variables_initializer())
        infer_sess.run(tf.local_variables_initializer())

        infer_model.load(infer_sess, model_dir=best_model_dir)

    with open("../reply_matching_model/data/reply_set_new.txt", "r") as f:
        reply_set = [line.strip() for line in f if line]
    indexed_reply_set, reply_set_lengths = zip(
        *[infer_preprocessor.preprocess(r) for r in reply_set])

    def get_result(query, reply):
        preprocessed_query, query_length = infer_preprocessor.preprocess(query)
        preprocessed_reply, reply_length = infer_preprocessor.preprocess(reply)
Example #27
        'batch_size': config.batch_size,
        'n_classes': 2,
        'n_channels': 3,
        'shuffle': False,
        'aug': False
    }

    datalist = pd.read_csv('dataset/train/train.txt',
                           header=None,
                           delimiter=' ')
    X_train, X_test, y_train, y_test = train_test_split(datalist[0].values,
                                                        datalist[1].values,
                                                        stratify=datalist[1],
                                                        test_size=0.2)

    train_generator = DataGenerator(X_train, y_train, **train_params)
    val_generator = DataGenerator(X_test, y_test, **val_params)

    print("Training size =", len(train_generator))
    print("Validation size =", len(val_generator))

    # models
    # add a global spatial average pooling layer
    base_model = get_base_model()
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # let's add a fully-connected layer
    x = Dense(1024, activation='relu')(x)
    # and a logistic layer -- let's say we have 2 classes
    predictions = Dense(2, activation='softmax')(x)
Example #28
'''
Tensorflow Code for a color segmentation network
'''

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import matplotlib
import matplotlib.pyplot as plt
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Import Dataset
from data_loader import DataGenerator
data = DataGenerator()

# Import Models
from .model import unet

# Training Parameters
learning_rate = 0.0001
num_steps = 1000
batch_size = 32
display_step = 100

# Network Parameters
WIDTH = 256
HEIGHT = 256
CHANNELS = 2
NUM_INPUTS = WIDTH * HEIGHT * CHANNELS
Example #29
def get_available_gpus():
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU']


print(get_available_gpus())

# load VAE model
config = process_config('PX4_config.json')
# create the experiments dirs
create_dirs([config['result_dir'], config['checkpoint_dir']])
# create tensorflow session
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# create your data generator
data = DataGenerator(config)
# create a CNN model
model_vae = VAEmodel(config)
# create a CNN model
trainer_vae = vaeTrainer(sess, model_vae, data, config)
model_vae.load(sess)

# here you train your model
if config['TRAIN_VAE']:
    if config['num_epochs_vae'] > 0:
        trainer_vae.train()

# load LSTM model
lstm_model = lstmKerasModel(data)
lstm_model.produce_embeddings(config, model_vae, data, sess)
lstm_nn_model = lstm_model.create_lstm_model(config)
Example #30
'''
Tensorflow Code for a color segmentation network
'''

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf 
import matplotlib
import matplotlib.pyplot as plt
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Import Dataset
from data_loader import DataGenerator
data = DataGenerator()
data.print()

# Training Parameters
learning_rate = 0.0001
num_steps = 10000
batch_size = 32
display_step = 100

# Network Parameters 
WIDTH = 128
HEIGHT = 128
CHANNELS = 3
NUM_INPUTS = WIDTH * HEIGHT * CHANNELS
NUM_OUTPUTS = 2

# Network Varibles and placeholders
X = tf.placeholder(tf.float32, [None, HEIGHT, WIDTH, CHANNELS])  # Input