Example #1
def evaluate():
    with tf.Graph().as_default() as g:
        eval_data = FLAGS.eval_data == 'test'
        images, labels = read_record.read_and_decode('./train.tfrecords')
        image_batch, label_batch = cnn.inputs(images, labels, FLAGS.batch_size,
                                              False)

        logits = cnn.cnn_model(image_batch)

        top_k_op = tf.nn.in_top_k(logits, label_batch, 1)

        variable_averages = tf.train.ExponentialMovingAverage(
            cnn.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
        summary_op = tf.summary.merge_all()

        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)

        while True:
            eval_once(saver, summary_writer, top_k_op, summary_op)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
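eval_once is called above but not shown on this page. A minimal sketch of what such a function typically looks like in this TF1 evaluation pattern, assuming FLAGS.checkpoint_dir and FLAGS.num_examples flags that do not appear in the original snippet:

import math

def eval_once(saver, summary_writer, top_k_op, summary_op):
    # hypothetical sketch, not the original implementation: restore the
    # newest checkpoint and measure top-1 precision over the eval set
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        if not (ckpt and ckpt.model_checkpoint_path):
            print('No checkpoint file found')
            return
        saver.restore(sess, ckpt.model_checkpoint_path)

        # the input pipeline uses queues, so queue runners must be started
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
        true_count = 0
        for _ in range(num_iter):
            true_count += np.sum(sess.run(top_k_op))

        precision = true_count / (num_iter * FLAGS.batch_size)
        print('%s: precision @ 1 = %.3f' % (datetime.now(), precision))
        summary_writer.add_summary(sess.run(summary_op))

        coord.request_stop()
        coord.join(threads)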
Example #2
def objective(params):
    m_opt = cnn_model()
    m_opt.new_model(x_train, y_train, 2, params)
    start = timer()
    # add a special logs directory to see what is happening during each iteration
    m_opt.train(on_tpu="dominique-c-a-paul",
                epochs=100,
                batch_size=256,
                tb_logs_dir="gs://data-imr-unisg/logs_hyperopt/")
    run_time = timer() - start
    val_loss = m_opt.hist.history["val_loss"][-1]
    val_accuracy = m_opt.hist.history["val_sparse_categorical_accuracy"][-1]
    val_f1 = m_opt.hist.history["val_f1_score"][-1]
    train_loss = m_opt.hist.history["loss"][-1]
    train_accuracy = m_opt.hist.history["sparse_categorical_accuracy"][-1]
    train_f1 = m_opt.hist.history["f1_score"][-1]

    output_vals = [
        params["conv_layers"], params["conv_filters"], params["dense_layers"],
        params["dense_neurons"], params["dropout_rate_dense"],
        params["learning_rate"], run_time, val_loss, val_accuracy, val_f1,
        train_loss, train_accuracy, train_f1
    ]

    # adding lines to csv
    with open(out_file, 'a') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(output_vals)

    return {"loss": val_loss, "params": params, "status": hyperopt.STATUS_OK}
Example #3
def train():
    with tf.Graph().as_default():
        global_step = tf.Variable(0, trainable=False)

        images, labels = read_record.read_and_decode(FLAGS.data_dir +
                                                     '/train.tfrecords')
        image_batch, label_batch = cnn.inputs(images, labels, FLAGS.batch_size)
        logits = cnn.cnn_model(image_batch)
        loss = cnn.loss(logits, label_batch)
        train_op = cnn.train(loss, global_step, FLAGS.batch_size)
        saver = tf.train.Saver(tf.global_variables())

        summary_op = tf.summary.merge_all()

        init = tf.global_variables_initializer()

        sess = tf.Session(config=tf.ConfigProto(
            log_device_placement=FLAGS.log_device_placement))

        sess.run(init)

        tf.train.start_queue_runners(sess=sess)

        summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)

        loss_list = []
        for step in range(FLAGS.max_steps):
            start_time = time.time()
            _, loss_value = sess.run([train_op, loss])
            duration = time.time() - start_time

            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
            loss_list.append(loss_value)

            if step % 465 == 0:  # 465 steps = one epoch for this dataset
                num_examples_per_step = FLAGS.batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)
                average_loss_value = np.mean(loss_list)
                loss_list.clear()
                format_str = (
                    '%s: epoch %d, loss = %.4f (%.1f examples/sec; %.3f '
                    'sec/batch)')
                print(format_str %
                      (datetime.now(), step // 465, average_loss_value,
                       examples_per_sec, sec_per_batch))

            if step % (465 * 30) == 0:  # checkpoint every 30 epochs
                checkpoint_path = os.path.join(FLAGS.checkpoint_dir,
                                               'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
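The literal 465 in the logging and checkpointing conditions is evidently the number of batches per epoch for this dataset. A hedged sketch of deriving it instead of hard-coding it; num_train_examples is an assumed value that does not appear in the original:

def steps_per_epoch(num_train_examples, batch_size):
    # hypothetical helper replacing the hard-coded 465 above: the number
    # of full batches needed to see the training set once
    return num_train_examples // batch_size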
Example #4
def new_ensemble_model(self, num_models, x_data, y_data, num_classes,
                       config):
    for i in range(num_models):
        self.models[i] = cnn_model()
        self.models[i].new_model(x_data=x_data,
                                 y_data=y_data,
                                 num_classes=num_classes,
                                 config=config)
    self.num_models = num_models
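A hedged usage sketch for new_ensemble_model with majority voting over the members. The cnn_ensemble class name and the assumption that self.models is a dict are mine; predict_classes is the prediction method used elsewhere on this page:

import numpy as np
from scipy import stats

# hypothetical usage; cnn_ensemble, config_file and the data arrays are
# assumptions based on the other snippets on this page
ensemble = cnn_ensemble()
ensemble.new_ensemble_model(num_models=3,
                            x_data=x_train,
                            y_data=y_train,
                            num_classes=2,
                            config=config_file)

# majority vote over each member's predicted classes
all_preds = np.stack(
    [m.predict_classes(x_test) for m in ensemble.models.values()])
y_pred = stats.mode(all_preds, axis=0).mode.squeeze()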
Example #5
def testCNN(self):
    data, labels = np.random.random((100, 180, 1)), np.random.random(100)
    conv_layout = [
        conv_operation(Conv1D, 64, 2, 1, "valid", "relu", MaxPooling1D, 2),
        conv_operation(Conv1D, 32, 2, 1, "valid", "relu", MaxPooling1D, 2)
    ]
    model = cnn_model(conv_layout,
                      input_shape=(180, 1),
                      dense_nb_neurons=[40, 40, 1],
                      dense_activations=[sigmoid, sigmoid, linear])
    model.fit(data, labels, epochs=1)
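conv_operation is used positionally above but never defined on this page. One plausible reconstruction is a namedtuple bundling a convolution's hyperparameters with its pooling step; the field names below are guesses inferred from the call sites, not the original definition:

from collections import namedtuple

# hypothetical reconstruction of conv_operation; every field name is
# inferred from the positional arguments used in the tests above
conv_operation = namedtuple(
    "conv_operation",
    ["layer_type", "filters", "kernel_size", "strides",
     "padding", "activation", "pooling_type", "pool_size"])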
Example #6
    def testReshapePrenet(self):
        data, labels = np.random.random((100, 180)), np.random.random(100)
        conv_layout = [
            conv_operation(Conv1D, 64, 2, 1, "valid", "relu", MaxPooling1D, 2),
            conv_operation(Conv1D, 32, 2, 1, "valid", "relu", MaxPooling1D, 2)
        ]

        model = Sequential()
        model.add(Reshape((180, 1), input_shape=(180, )))

        model = cnn_model(conv_layout,
                          dense_nb_neurons=[40, 40, 1],
                          dense_activations=[sigmoid, sigmoid, linear],
                          pre_model=model)
        model.fit(data, labels, epochs=1)
Example #7
    def init_model(self):
        # initialize model

        self.model = cnn_model(input_shape=self.x_train[0].shape[1:][0],
                               num_classes=self.num_classes,
                               num_features=self.num_features,
                               embedding_matrix=self.embedding_matrix,
                               filters=64,
                               kernel_sizes=[3, 4, 5],
                               dropout_rate=0.4,
                               embedding_trainable=True,
                               l2_lambda=1.0)

        loss = 'sparse_categorical_crossentropy'
        optimizer = adam(lr=1e-3)
        self.model.compile(optimizer=optimizer, loss=loss, metrics=['acc'])
Example #8
def load_ensemble(self, folder_path):
    if not os.path.isdir(folder_path):
        raise ValueError(
            "Folder not found. Please be sure that you have specified a folder and not a file path"
        )
    # os.walk yields (dirpath, dirnames, filenames) tuples, so the original
    # loop compared ".HDF5" against a whole list; iterate the files instead
    for i, filename in enumerate(sorted(os.listdir(folder_path))):
        if filename.endswith(".HDF5"):
            model_file_path = os.path.join(folder_path, filename)
            self.models[i] = cnn_model()
            self.models[i].load_model(model_file_path)
        else:
            print(
                "{} was not considered as it does not appear to be a saved model (.HDF5 extension missing)"
                .format(filename))
    print("{} models were loaded".format(len(self.models)))
def run_custom_network(object_name, data_type, augmented):
    start = timer()
    m1 = cnn_model()
    m1.new_model(x_train, y_train, own_network_config)
    print("Training custom net for {}".format(object_name))
    m1.train(epochs=1000,
             batch_size=256,
             on_tpu="dominique-c-a-paul",
             tb_logs_dir="./out_files/log_files/master_logs/",
             verbose=True)
    y_preds = m1.predict_classes(x_test)
    run_time = timer() - start
    name = "own_network_{}_{}_{}".format(data_type, augmented, object_name)

    write_outputs(x_train=x_train,
                  x_test=x_test,
                  predictions=y_preds,
                  run_time=run_time,
                  name=name,
                  object_name=object_name,
                  method_type="own Network",
                  data_type=data_type,
                  augmented=augmented)
Example #10
test_label_data = np.genfromtxt(test_label_file, delimiter=',', skip_header=1, dtype=None)
# create array to store labels of the testing images
test_labels = np.zeros(shape=(len(test_label_data)))
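# NOTE: test_data is not allocated in this excerpt; it must be a 4D array
# shaped to the FITS images, allocated earlier in the script along the
# lines of (the image dimensions are an assumption, not from the source):
# test_data = np.zeros(shape=(len(test_label_data), height, width, 1))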
# loop through file name/label pairs: assign each label to the label array
# and each file's data to the 4D data array, guaranteeing that entries with
# the same index in both arrays correspond to the same file
for index, row in enumerate(test_label_data):
    filename, label = row
    data = fits.getdata(test_data_dir + '/' + filename.decode("utf-8"))
    test_data[index] = data
    test_labels[index] = int(label)
print('data_loaded')

# define the path to the checkpoint as defined in training script
checkpoint_path = "./checkpoint/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
print('checkpoint_found')
# create a new model instance
model = cnn_model()
print('model_found')
# load the weights from training
model.load_weights(checkpoint_path)
print('weights_loaded')
# evaluate the model
loss, acc = model.evaluate(test_data, test_labels, verbose=1)
print("\nRestored model, accuracy: {:5.2f}%".format(100*acc))

# elapsed time
elapsed = time.perf_counter() - start
print('\nTesting: elapsed %.3f seconds.' % elapsed)
Example #11
* make predictions on new data that is in an evaluation-ready format (names + predictions)
* save our results to the local disk
"""

# these first imports are required to tell python where to look for packages
# in this case we link to the directory containing the preprocessing package
import sys
from os.path import dirname
sys.path.append(dirname("../."))

from cnn import cnn_model
import numpy as np
import pandas as pd

# instantiate a new empty cnn model
m2 = cnn_model()
# load the previously saved model from our disk
m2.load_model(file_path="./example_output_folder/saved_cnn_model.HDF5")

# load the new data
new_images, names = np.load(
    "./example_output_folder/unlabelled_data_image_package_no_labels_0.npy")

# the data can easily be used to output the results in a format that is
# nicer for humans to read or to be read into a database
df = pd.DataFrame(names[0])
# predict and print the classes for previously unseen images
df["predictions"] = m2.predict_classes(new_images)
# we save the results locally
df.to_csv("./example_output_folder/my_cnn_classification_results.csv")
Example #12
# Loading data
X_train, X_test, y_train, y_test = load_train_test_data()
# initialization of lists for collecting values
train_accuracy, test_accuracy, threshold = ([] for i in range(3))
models = [
    "simple_nn", "CNN", "CNN multi-filter", "LSTM", "Bidirectional LSTM",
    "CNN-LSTM", "HAN"
]
df = pd.DataFrame(columns=[
    "NN models", "Training Accuracy", "Testing accuracy", "threshold_score"
])
train_acc, test_acc, thresh = simple_nn(X_train, X_test, y_train, y_test)
train_accuracy.append(train_acc)
test_accuracy.append(test_acc)
threshold.append(thresh)
train_acc, test_acc, thresh = cnn_model(X_train, X_test, y_train, y_test)
train_accuracy.append(train_acc)
test_accuracy.append(test_acc)
threshold.append(thresh)
train_acc, test_acc, thresh = cnn_multi_filter(X_train, X_test, y_train,
                                               y_test)
train_accuracy.append(train_acc)
test_accuracy.append(test_acc)
threshold.append(thresh)
train_acc, test_acc, thresh = lstm(X_train, X_test, y_train, y_test)
train_accuracy.append(train_acc)
test_accuracy.append(test_acc)
threshold.append(thresh)
train_acc, test_acc, thresh = bidirectional_lstm(X_train, X_test, y_train,
                                                 y_test)
train_accuracy.append(train_acc)
Example #13
import tensorflow as tf
import numpy as np
import cnn

c_cnn_model = cnn.cnn_model()

# show the layer-by-layer information about the model (for a Keras model,
# summary() prints directly and returns None, so print() is unnecessary)
c_cnn_model.summary()

# run inference on a random batch of ten 32x32 RGB images
print(c_cnn_model(np.random.random([10, 32, 32, 3]).astype(np.float32)))
Example #14
# these first imports are required to tell python where to look for packages
# in this case we link to the directory containing the preprocessing package
import sys
from os.path import dirname
sys.path.append(
    dirname("/Users/dominiquepaul/xCoding/classification_tool/Main/"))

from cnn import cnn_model
import numpy as np
import pandas as pd

# load the new data
new_images, names = np.load(
    "./example_output_folder/unlabelled_data_image_package_no_labels_0.npy")

# instantiate a new empty cnn model
transfer_model_new = cnn_model()
# load the previously saved model from our disk
transfer_model_new.load_model(
    file_path="./example_output_folder/saved_cnn_model.HDF5")

# again, the data can easily be used to output the results in a format
# that is nicer for humans to read or to be read into a database
df = pd.DataFrame(names[0])
# predict and print the classes for previously unseen images
df["predictions"] = transfer_model_new.predict_classes(new_images)
# we save the results locally
df.to_csv("./example_output_folder/my_transfernet_classification_results.csv")

print("Results:")
print(df)
Example #15
verbose = 1
num_classes = 7
patience = 50
model_base_path = '../models/'
image_size = (48, 48)

# data generator
data_generator = ImageDataGenerator(featurewise_center=False,
                                    featurewise_std_normalization=False,
                                    rotation_range=10,
                                    width_shift_range=0.1,
                                    height_shift_range=0.1,
                                    zoom_range=.1,
                                    horizontal_flip=True)

model = cnn_model(input_shape, num_classes)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# model.summary()

# callbacks
log_file_path = model_base_path + '_emotion_training.log'
csv_logger = CSVLogger(log_file_path, append=False)
early_stop = EarlyStopping('val_loss', patience=patience)
reduce_lr = ReduceLROnPlateau('val_loss',
                              factor=0.1,
                              patience=int(patience / 4),
                              verbose=1)
trained_models_path = model_base_path + 'model'
model_names = trained_models_path + '.{epoch:02d}-{val_accuracy:.2f}.hdf5'
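The snippet defines the data generator, the model, and three callbacks, but stops before wiring them together. A minimal sketch of the training call that would typically follow, assuming x_train/y_train/x_val/y_val arrays that are not shown here:

from keras.callbacks import ModelCheckpoint

# hypothetical continuation: checkpoint to the filename pattern above and
# train with the augmenting generator (the data arrays are assumed)
model_checkpoint = ModelCheckpoint(model_names,
                                   monitor='val_loss',
                                   save_best_only=True)
callbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr]

model.fit_generator(data_generator.flow(x_train, y_train, batch_size=32),
                    steps_per_epoch=len(x_train) // 32,
                    epochs=100,
                    callbacks=callbacks,
                    validation_data=(x_val, y_val))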
Example #16
from cnn import cnn_model
import numpy as np

# load the data that was previously saved
file_paths = [
    './example_output_folder/apparel_image_package_train_val_split_0.npy'
]
x_train, y_train, x_val, y_val, conversion = join_npy_data(
    file_paths, training_data_only=False)

# as there is only one file to read we could also just use:
file_path = './example_output_folder/apparel_image_package_train_val_split_0.npy'
x_train, y_train, x_val, y_val, conversion = np.load(file_path)

# instantiate the empty cnn model
m1 = cnn_model()

# define the general parameters of the model
config_file = {
    "conv_layers": 2,
    "conv_filters": 32,
    "dense_layers": 5,
    "dense_neurons": 10,
    "dropout_rate_dense": 0.2,
    "learning_rate": 1e-04
}

# construct a new model from the configuration file and the data to be used for training
m1.new_model(x_data=x_train, y_data=y_train, config=config_file)
# start training and state where to save the logs
m1.train(epochs=2,