Example #1
from argparse import ArgumentParser
from datetime import datetime

# existing_directory, load_images_and_labels, model, batch_size and epochs are
# assumed to be defined or imported elsewhere in the original project.


def main():
    parser = ArgumentParser()
    parser.add_argument('sources', help='sources', type=existing_directory)
    parser.add_argument('--model',
                        help='model name',
                        default='model' + datetime.now().isoformat('T'))
    args = parser.parse_args()

    k = 2
    images, labels = load_images_and_labels(args.sources)

    split = len(images) // k  # hold out the first 1/k of the data for testing
    train_data = images[split:]
    train_labels = labels[split:]

    test_data = images[:split]
    test_labels = labels[:split]

    model.fit(train_data,
              train_labels,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              shuffle=True,
              validation_data=(test_data, test_labels))

    with open('bin/{}.json'.format(args.model), 'w+') as file:
        file.write(model.to_json())

    model.save_weights('bin/{}.h5'.format(args.model))
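

# Entry-point guard (assumed; not part of the original snippet):
if __name__ == '__main__':
    main()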
Example #2
from datetime import datetime

from tensorflow import keras

# Project-local modules assumed to live alongside this script.
from get_image_generator import train_generator, validation_generator, test_generator
from model import model
from meta_parameters import EPOCHS, STEPS_PER_EPOCH, VALIDATION_STEPS, BATCH_SIZE

logdir = "./folder" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)

model.fit_generator(
    train_generator,
    steps_per_epoch=STEPS_PER_EPOCH,
    epochs=EPOCHS,
    validation_data=validation_generator,
    validation_steps=VALIDATION_STEPS,
    use_multiprocessing=True,
    callbacks=[tensorboard_callback],
)
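# Note: Model.fit_generator is deprecated in recent TensorFlow releases;
# Model.fit accepts Python generators directly and takes the same arguments.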

model.save_weights('my_model_weights.h5')
print("evaluate", model.metrics_names)
print(model.evaluate_generator(
    test_generator,
    use_multiprocessing=True,
))

# Report the total number of parameters; see
# https://stackoverflow.com/questions/35792278/how-to-find-number-of-parameters-of-a-keras-model
print("number of parameters:", model.count_params())
model.summary()
Example #3
import numpy as np

from keras.optimizers import Adam
from nmt_utils import *
from model import model

m = 10000
Tx = 30
Ty = 10
n_a = 32
n_s = 64
learning_rate = 0.005
batch_size = 100

dataset, human_vocab, machine_vocab, inv_vocab = load_dataset(m)
X, Y, Xoh, Yoh = preprocess_data(dataset, human_vocab, machine_vocab, Tx, Ty)
model = model(Tx, Ty, n_a, n_s, len(human_vocab), len(machine_vocab))
# model.summary()
opt = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, decay=0.001)
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
s0 = np.zeros((m, n_s))
c0 = np.zeros((m, n_s))
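# The targets are passed as a list with one one-hot array per output time step,
# since the model presumably exposes Ty separate softmax outputs.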
outputs = list(Yoh.swapaxes(0, 1))
model.fit([Xoh, s0, c0], outputs, epochs=50, batch_size=batch_size)
model.save_weights('models/model_50.h5')
Example #4
import os
from datetime import datetime

import tensorflow as tf

# FLAGS, model, dataset, checkpoint_callback, start and basedir are assumed to
# be defined earlier in the original script.
reduce_lr_callback = tf.keras.callbacks.ReduceLROnPlateau(
    monitor='loss',
    factor=FLAGS['lr_factor'],
    patience=FLAGS['patience'],
    verbose=0,
    mode='auto',
    min_delta=FLAGS['min_delta'],
    cooldown=0,
    min_lr=FLAGS['min_lr'])

early_stop_callback = tf.keras.callbacks.EarlyStopping(monitor='loss',
                                                       patience=10)

EPOCHS = FLAGS['epochs']

history = model.fit(dataset,
                    epochs=EPOCHS,
                    callbacks=[checkpoint_callback, reduce_lr_callback])

end = datetime.now()
END_TIME = str(end).replace(' ', '_')[:-7]

training_time = str(end - start)
print('Training took {} hour/min/sec'.format(training_time.split('.')[0]))

# Save final model weights for freezing and exporting later
save_model_path = os.path.join(basedir, 'saved_models',
                               'final_{}'.format(END_TIME))
model.save_weights(save_model_path)
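# Because the path has no .h5 or .keras extension, save_weights defaults to the
# TensorFlow checkpoint format (an index file plus data shards).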
Example #5
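 # Fragment of on_epoch_end from a custom keras.callbacks.Callback: save the
 # weights whenever the loss reaches a new low, then print a sample poem.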
 def on_epoch_end(self, epoch, logs=None):
     # save the best weights so far
     if logs['loss'] <= self.lowest:
         self.lowest = logs['loss']
         model.save_weights(config.BEST_MODEL_PATH)
     print(utils.generate_random_poetry(tokenizer, model))
Example #6
import tensorflow as tf
from sklearn.utils import shuffle  # assumption: shuffle(x, y) shuffles both arrays in unison

# x_train, y_train, model, loss_fn, accuracy_fn, optimizer, NeatPrinter and
# gen_indices are assumed to be defined earlier in the original script.
epochs = 100
batch_size = 256
dataset_size = x_train.shape[0]
printer = NeatPrinter(epoch=epochs)
indices = gen_indices(batch_size, dataset_size)

# Custom training loop: shuffle each epoch, step through the mini-batch index
# ranges, compute the loss under a GradientTape, and apply the gradients.
for i in range(1, epochs + 1):
    loss = []
    accuracy = []
    x_train, y_train = shuffle(x_train, y_train)

    for (a, b) in indices:
        x_true = x_train[a:b]
        y_true = y_train[a:b]

        with tf.GradientTape() as tape:
            y_pred = model(x_true, training=True)
            loss_value = loss_fn(y_true, y_pred)
            loss.append(loss_value.numpy())
            acc = accuracy_fn(y_true, y_pred)
            accuracy.append(acc.numpy())

        grads = tape.gradient(loss_value, model.trainable_weights)
        optimizer.apply_gradients(zip(grads, model.trainable_weights))

    loss_val = sum(loss) / len(loss)
    acc_val = sum(accuracy) / len(accuracy)
    printer.print(i, loss_val, acc_val)

model.save_weights('../MODEL DATA/tf-model.h5')
Example #7
from model import model
from properties import *
from keras.callbacks import BaseLogger, ModelCheckpoint

from data_generator import midi_input_generator, generate_song_array, save_array_to_midi

print "Training model"

#print generate_song_array(model)
#save_array_to_midi([generate_song_array(model)], 'Generated_zero.mid')
cp = ModelCheckpoint(model_check_point_file,
                     monitor='loss',
                     verbose=1,
                     save_best_only=True,
                     mode='min')  # lower loss is better
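# BaseLogger is applied to every Keras model automatically, so listing it in
# callbacks below is redundant but harmless.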
model.fit_generator(midi_input_generator(),
                    num_seq_in_epoch,
                    nb_epoch=num_epochs,
                    callbacks=[BaseLogger(), cp])

model.save_weights('net_dump.nw')

save_array_to_midi([generate_song_array(model)], 'Generated.mid')
Example #8
        preprocess_function=preprocess_input)
    reduce_lr_01 = ReduceLROnPlateau(monitor='val_1st_acc',
                                     factor=0.2,
                                     patience=5,
                                     min_lr=0,
                                     mode='max')
    reduce_lr_02 = ReduceLROnPlateau(monitor='val_2nd_acc',
                                     factor=0.2,
                                     patience=5,
                                     min_lr=0.,
                                     mode='max')
    reduce_lr_03 = ReduceLROnPlateau(monitor='val_3rd_acc',
                                     factor=0.2,
                                     patience=5,
                                     min_lr=0,
                                     mode='max')
    reduce_lr_04 = ReduceLROnPlateau(monitor='val_4th_acc',
                                     factor=0.2,
                                     patience=5,
                                     min_lr=0.,
                                     mode='max')
    for i in range(30):
        f = model.fit_generator(
            train_gen,
            steps_per_epoch=7210 // 32,
            epochs=1,
            validation_data=test_gen,
            validation_steps=1000 // 32,
            callbacks=[reduce_lr_01, reduce_lr_02, reduce_lr_03, reduce_lr_04])
        model.save_weights('weights/classifier.h5')
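        # The same weights file is overwritten after each of the 30 passes, so
        # only the most recent pass's weights are kept.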
Example #9
def save(model):
    model.save_weights('my_model.h5')
Example #10
 def on_epoch_end(self, epoch, logs=None):
     if logs['loss'] <= self.lowest:
         self.lowest = logs['loss']
         model.save_weights(settings.BEST_MODEL_PATH)
     for i in range(settings.SHOW_NUM):
         print(utils.generate_random_poetry(tokenizer, model))
Example #11
# INIT_LR, BATCH_SIZE, EPOCHS, the prepared data arrays, and the LrHistory and
# TqdmProgressCallback callbacks are assumed to be defined earlier in the
# original script (keras and numpy as np are imported there as well).
def lr_scheduler(epoch):
    # exponential learning-rate decay starting from INIT_LR
    return INIT_LR * 0.9**epoch


model = model()

model.fit(
    x_train,
    y_train,  # prepared data
    batch_size=BATCH_SIZE,
    epochs=EPOCHS,
    callbacks=[
        keras.callbacks.LearningRateScheduler(lr_scheduler),
        LrHistory(),
        TqdmProgressCallback()
    ],
    validation_data=(x_test, y_test),
    shuffle=True,
    verbose=0,
    initial_epoch=0)

print('SAVING THE MODEL')
model.save_weights("weights.h5")
print('MODEL SAVED')

# make test predictions
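# predict_proba exists only on older Keras Sequential models; in recent versions
# model.predict returns the same class probabilities for a softmax output.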

y_pred_test = model.predict_proba(x_test)

y_pred_test_classes = np.argmax(y_pred_test, axis=1)

y_pred_test_max_probas = np.max(y_pred_test, axis=1)
Example #12
import os

import jdatetime  # Jalali (Persian) calendar dates, used to name the model folder

# data_gen, BATCH_SIZE, train_dir, test_dir, the model and the keras alias `k`
# are assumed to be defined or imported earlier in the original script.
EPOCHS = 20

train_gen = data_gen(train_dir, BATCH_SIZE)
val_gen = data_gen(test_dir, BATCH_SIZE)
train_gen.build_data()
val_gen.build_data()

now = jdatetime.datetime.today()

model_folder_name = '%s-%s[%s-%s]__Model' % (now.month, now.day, now.hour,
                                             now.minute)
os.mkdir('data\\models\\%s' % model_folder_name)

tensorboard_callback = k.callbacks.TensorBoard(
    log_dir='data\\models\\%s' % model_folder_name,
    histogram_freq=1)

adam = k.optimizers.Adam()
model.compile(loss='mean_squared_error', optimizer=adam)

history = model.fit_generator(
    generator=train_gen.next_batch(),
    steps_per_epoch=int(train_gen.n / train_gen.batch_size),
    epochs=EPOCHS,
    validation_data=val_gen.next_batch(),
    validation_steps=int(val_gen.n / val_gen.batch_size),
    verbose=1,
    callbacks=[tensorboard_callback])

model.save_weights('data\\models\\%s\\model.h5' % model_folder_name)
Example #13
import numpy as np
from tqdm import tqdm

# imgs_color, speedx, steer and the compiled model are assumed to be loaded
# earlier in the original script.
# Convert the colour frames to grayscale by averaging over the channel axis,
# then restore a singleton channel dimension.
imgs = np.mean(imgs_color, axis=1)
imgs = imgs[:, None, :, :]

# Train a little bit
nb_epoch = 100
mini_epoch = 10
num_steps = int(nb_epoch / mini_epoch)
for step in tqdm(range(0, num_steps)):
    h = model.fit([speedx, imgs], {'steer_out': steer},
                  batch_size=32,
                  nb_epoch=mini_epoch,
                  verbose=1,
                  validation_split=0.1,
                  shuffle=True)
    # Checkpoint after every mini-run, embedding the latest validation loss in
    # the file name.
    model.save_weights(
        'steer_comma_{0}_{1:4.5}.h5'.format(step, h.history['val_loss'][-1]),
        overwrite=True)

# Make predictions
start = 25000
stop = 27000
preds = model.predict([speedx[start:stop], imgs[start:stop]])
steer_preds = preds.reshape([-1])


# Animation!
def get_point(s, start=0, end=63, height=16):
    X = int(s * (end - start))
    if X < start:
        X = start
    if X > end: