Example #1
# This fragment assumes `tokenizer`, `captions_data`, `images_data`,
# `filenames`, and `vocabulary_size` are defined earlier in the script.
import numpy as np

import mymodel

# Convert raw captions into integer sequences with the fitted tokenizer
texts_data = tokenizer.texts_to_sequences(captions_data)

# Hold out 20% of the examples for testing and another 20% for validation
test_n, val_n = int(len(texts_data) * 0.2), int(len(texts_data) * 0.2)
test_data_text, val_data_text, train_data_text = mymodel.split_test_train_function(texts_data, test_n, val_n)
test_data_image, val_data_image, train_data_image = mymodel.split_test_train_function(images_data, test_n, val_n)
test_fnames, val_fnames, train_fnames = mymodel.split_test_train_function(filenames, test_n, val_n)

# Longest caption, used as the padding length during preprocessing
max_length = np.max([len(text) for text in texts_data])

# Build padded text inputs, image inputs, and next-word targets
X_train_text, X_train_image, y_train_text = mymodel.preprocessing(train_data_text, train_data_image, max_length, vocabulary_size)
X_val_text, X_val_image, y_val_text = mymodel.preprocessing(val_data_text, val_data_image, max_length, vocabulary_size)

model_ = mymodel.create_model(X_train_image, max_length, vocabulary_size)
hist = mymodel.fit_model(model_, X_train_text, X_train_image, y_train_text, X_val_text, X_val_image, y_val_text)


# Reverse map from token index back to word, for decoding predictions
index_word = {index: word for word, index in tokenizer.word_index.items()}

# Evaluate on the test set, keeping a handful of good and bad predictions
nkeep = 5
pred_good, pred_bad, bleus = [], [], []
count = 0
for jpgfnm, image_feature, tokenized_text in zip(test_fnames, test_data_image, test_data_text):
    count += 1
    if count % 200 == 0:
        print(count, "captions evaluated")  # assumed progress log; the original body is cut off here
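    # Hedged sketch of the rest of the loop: decode a caption for the image
    # and score it against the reference with sentence-level BLEU.
    # `mymodel.generate_caption` and the 0.7/0.3 thresholds are assumptions,
    # not part of the original snippet; BLEU requires
    # `from nltk.translate.bleu_score import sentence_bleu`.
    reference = [index_word[i] for i in tokenized_text if i in index_word]
    predicted = mymodel.generate_caption(model_, image_feature, max_length, tokenizer, index_word)
    bleu = sentence_bleu([reference], predicted.split())
    bleus.append(bleu)
    if bleu > 0.7 and len(pred_good) < nkeep:
        pred_good.append((jpgfnm, predicted))
    elif bleu < 0.3 and len(pred_bad) < nkeep:
        pred_bad.append((jpgfnm, predicted))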
Example #2
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras

# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from PIL import Image

from loadr import digits_class_names
import mymodel


print(tf.__version__)

class_names = digits_class_names()

data_format = 'channels_last'

# Grayscale 28x28 input with a single channel
input_shape = [28, 28, 1]

checkpoint_path = "training_digits/cp.ckpt"

# Rebuild the architecture, then restore the trained weights from the checkpoint
model = mymodel.create_model(input_shape, len(class_names))
model.summary()

model.load_weights(checkpoint_path)

# Export architecture + weights as a single HDF5 file
model.save('my_model.h5')
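
A quick round-trip check (a minimal sketch; nothing below appears in the original snippet) is to reload the exported file and run a prediction on a dummy input:

# Reload the saved model and classify a zero-filled dummy image
loaded = keras.models.load_model('my_model.h5')
dummy = np.zeros([1] + input_shape, dtype=np.float32)  # batch of one 28x28x1 image
pred = loaded.predict(dummy)
print("predicted class:", class_names[int(np.argmax(pred[0]))])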
Example #3
import datetime
import json
import os
import time

import tensorflow as tf
from tensorflow import keras
from dvc.api import make_checkpoint  # assumed source of make_checkpoint()

# Callback that saves the model and the epoch's metrics after every epoch.
# The class name is an assumption; the original declaration is cut off above.
class SaveModelCallback(keras.callbacks.Callback):
    def __init__(self, file):
        self.file = file
    def on_epoch_end(self, epoch, logs=None):
        self.model.save(self.file)
        json.dump(logs or {}, open(summary, 'w'))  # `summary` is a path defined outside this snippet
        make_checkpoint()  # signal a DVC checkpoint

#neptune.init('dmpetrov/sandbox')
#neptune.create_experiment(name='exp1', params=params)

# Load MNIST and scale pixel values to [0, 1]
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# `create_model`, `dropout`, `lr`, and `weights_file` come from earlier in the script
model = create_model(dropout)
opt = keras.optimizers.Adam(learning_rate=lr)
model.compile(optimizer=opt,
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Resume from previously saved weights if they exist
if os.path.exists(weights_file):
    model.load_weights(weights_file)

log_dir = "logs/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)

csv_logger = tf.keras.callbacks.CSVLogger(log_file)

start_real = time.time()
start_process = time.process_time()
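
The snippet stops before training begins; a minimal continuation (assuming `epochs` is defined with the other hyperparameters) would pass the callbacks to fit and report both timers:

# Assumed continuation: train with all three callbacks, then print timings
model.fit(x_train, y_train,
          validation_data=(x_test, y_test),
          epochs=epochs,  # assumed to be defined with the other hyperparameters
          callbacks=[SaveModelCallback(weights_file),
                     tensorboard_callback,
                     csv_logger])

print("real time: %.1f s" % (time.time() - start_real))
print("process time: %.1f s" % (time.process_time() - start_process))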