def __init__(self):

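        # Build the train/validation input pipelines from the dataset list and labels files.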
        goods_dataset = GoodsDataset("dataset-181018.list",
                                     "dataset-181018.labels",
                                     settings.IMAGE_SIZE, settings.train_batch,
                                     settings.valid_batch, settings.multiply,
                                     settings.valid_percentage)
        train_set = goods_dataset.get_train_dataset()
        valid_set = goods_dataset.get_valid_dataset()

        input_tensor = keras.layers.Input(shape=(IMAGE_SIZE[0], IMAGE_SIZE[1],
                                                 3))
        base_model = InceptionV3(weights='imagenet',
                                 include_top=False,
                                 pooling='avg',
                                 input_tensor=input_tensor)
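        # Truncate the pre-trained backbone at layer 248 and expose that
        # activation as the output of a standalone feature-extractor model.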
        output_layer_number = 248
        intermediate_layer_model = keras.Model(
            inputs=base_model.input,
            outputs=base_model.layers[output_layer_number].output)

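        # Push every batch through the frozen extractor so later training only
        # fits the new head on precomputed features.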
        def _intermediate_processing(images, labels):
            images = intermediate_layer_model.predict(images, steps=77)
            return images, labels

        self.train_set = train_set.map(
            _intermediate_processing)  #, num_parallel_calls=8)
        self.valid_set = valid_set.map(
            _intermediate_processing)  #, num_parallel_calls=8)

#---------- Example 2

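# Attach a fresh softmax classification head on top of main_model.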
predictions = keras.layers.Dense(settings.num_classes,
                                 activation='softmax')(main_model.output)
new_model = keras.Model(inputs=main_model.input, outputs=predictions)

new_model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy', top_6])

session = keras.backend.get_session()  # TF session handle (presumably for freezing/exporting the graph later)

new_model.summary()  # summary() already prints; wrapping it in print() would just print "None"

# Copy the pre-trained backbone weights into the new model; its layer indices
# are shifted by 248, the same split point as output_layer_number above.
for i in range(len(base_model.layers)):
    new_model.layers[i + 248].set_weights(base_model.layers[i].get_weights())

print("new model inputs")
for node in new_model.inputs:
    print(node.op.name)

print("new model outputs")
for node in new_model.outputs:
    print(node.op.name)

dataset = GoodsDataset("dataset-181018.list", "dataset-181018.labels",
                       (IMAGE_SIZE[0], IMAGE_SIZE[1]), 32, 32, 5, 0.1)
results = new_model.evaluate(dataset.get_valid_dataset(), steps=77)
print(results)

new_model.save("output/inception_{0}.hdf5".format(model_name))

#---------- Example 3

model.compile(  # head of this call is truncated in the source; reconstructed to match the other examples
    optimizer='rmsprop',
    loss='categorical_crossentropy',
    metrics=['accuracy', top_6])

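# Checkpoint only on improvements in validation top-6 accuracy, and log to TensorBoard.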
callbacks = [
    keras.callbacks.ModelCheckpoint(
        "./checkpoints/FINE_TUNE_MODEL_4_DIRECT_inceptionv3-181018-{epoch:02d}-{acc:.3f}-{val_acc:.3f}[{val_top_6:.3f}].hdf5",
        save_best_only=True,
        monitor='val_top_6',
        mode='max'),
    keras.callbacks.TensorBoard(
        log_dir='./tensorboard-incv4',
        write_images=True,
    )
]

goods_dataset = GoodsDataset("dataset-181018.list", "dataset-181018.labels",
                             settings.IMAGE_SIZE, settings.train_batch,
                             settings.valid_batch, settings.multiply,
                             settings.valid_percentage)
train_dataset = goods_dataset.get_train_dataset()
valid_dataset = goods_dataset.get_valid_dataset()

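# Spot-check a single class (label 94): 6 steps of batch 16, i.e. 96 images.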
results = model.evaluate(
    goods_dataset.get_images_for_label(94).batch(16).repeat(), steps=6)
print(results)

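# 30 epochs at 1157 training steps each, validating on 77 batches per epoch.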
model.fit(
    train_dataset.prefetch(2).repeat(),
    callbacks=callbacks,
    epochs=30,
    steps_per_epoch=1157,
    validation_data=valid_dataset.repeat(),
    validation_steps=77,
)  # closing parenthesis missing in the source snippet

#---------- Example 4

    ax2.grid(color='g', linestyle='-', linewidth=0.2)
    ymaxval = max(results['valid_top6'])
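    # Pick the y-axis floor from the best validation top-6 value so the curve fills the plot.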
    ymin = 0.9 if ymaxval > 0.95 else (0.8 if ymaxval > 0.85 else 0.6)
    ax2.set_ylim(ymin, 1.0)
    #plt.show()
    outfile = '_plot_[{}].png'.format(net_model_name)
    plt.savefig(outfile)


#------------
# dataset
from dataset_factory import GoodsDataset
#from dataset_factory_imgaug import GoodsDatasetImgaug as GoodsDataset

goods_dataset = GoodsDataset(settings.dataset_list, settings.labels_list,
                             settings.IMAGE_SIZE, settings.train_batch,
                             settings.valid_batch, settings.multiply,
                             settings.valid_percentage)

train_dataset = goods_dataset.get_train_dataset()
valid_dataset = goods_dataset.get_valid_dataset()

num_epochs = 500
epochs_checkpoint = 20  # interval for saving checkpoints and pb-file
train_steps_per_epoch = 724  #1157
valid_steps_per_epoch = 78  #77
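# repeat() makes both pipelines infinite; epoch length is set by the *_steps_per_epoch values above.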
train_dataset = train_dataset.repeat()
valid_dataset = valid_dataset.repeat()
"""
def model_function(next_element):
	x, y = next_element
	logits, end_points = inception.inception_v3(

#---------- Example 5

flags.DEFINE_string(
    'data_dir', '/tmp/mnist/data',
    'Directory where mnist data will be downloaded'
    ' if the data is not already there')
flags.DEFINE_string('model_dir', '/tmp/mnist/model',
                    'Directory where all models are saved')
flags.DEFINE_integer('batch_size', 100, 'Batch size.')
flags.DEFINE_integer('num_epochs', 1, 'Number of epochs to train.')
flags.DEFINE_float('learning_rate', 0.001, 'Learning rate.')
FLAGS = flags.FLAGS

#----------

from dataset_factory import GoodsDataset
goods_dataset = GoodsDataset("dataset-181018.list", "dataset-181018.labels",
                             settings.IMAGE_SIZE, settings.train_batch,
                             settings.valid_batch, settings.multiply,
                             settings.valid_percentage)
train_dataset = goods_dataset.get_train_dataset()
valid_dataset = goods_dataset.get_valid_dataset()


def train_data():
    #data = dataset.train(FLAGS.data_dir)
    #data = data.cache()
    #data = data.batch(FLAGS.batch_size)
    #return data
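    # prefetch(2) keeps two batches buffered ahead of the training loop.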
    return train_dataset.prefetch(2).repeat()


def eval_data():
    #data = dataset.test(FLAGS.data_dir)
    # body truncated in the source; presumably mirrors train_data() with the validation set
    return valid_dataset.repeat()

callbacks = [
    # Interrupt training if `val_loss` stops improving for over 2 epochs
    # keras.callbacks.EarlyStopping(patience=2, monitor='val_loss'),
    # Write TensorBoard logs to `./logs` directory
    keras.callbacks.ModelCheckpoint(
        "./checkpoints/se_recognizer131018.{epoch:02d}-{val_loss:.2f}.hdf5"),
    keras.callbacks.TensorBoard(
        log_dir='./tensorboard',
        write_images=True,
    )
]

#goods_dataset = GoodsDataset("dataset.list", OUTPUT_FOLDER + "/" + OUTPUT_MODEL_NAME + ".txt",
#    IMAGE_SIZE, 20, 20, 5, 0.1)
goods_dataset = GoodsDataset("dataset-181018.list", "dataset-181018.labels",
                             (IMAGE_SIZE[0], IMAGE_SIZE[1]),
                             settings.train_batch, settings.valid_batch,
                             settings.multiply, settings.valid_percentage)

input_tensor = keras.layers.Input(shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3))
# input_shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3)
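# Train InceptionResNetV2 from scratch (weights=None), sized to the dataset's class count.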
model = InceptionResNetV2(include_top=True,
                          weights=None,
                          classes=goods_dataset.classes_count,
                          input_tensor=input_tensor)

# tf.train.RMSPropOptimizer(0.001),
optimizer = keras.optimizers.RMSprop(lr=0.005)
model.compile(optimizer=optimizer,  # use the RMSprop(lr=0.005) built above; the string 'rmsprop' would silently ignore it
              loss='categorical_crossentropy',
              metrics=['accuracy'])

#----------

import tensorflow as tf
from tensorflow import keras
from tensorflow.python.framework import graph_io
from dataset_factory import GoodsDataset

# tf.enable_eager_execution()


def top_6(y_true, y_pred):
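    """Top-6 categorical accuracy: fraction of samples whose true class is among the 6 highest-scoring predictions."""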
    k = 6
    return tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k)


goods_dataset = GoodsDataset("dataset-181018.list", "dataset-181018.labels",
                             (299, 299), 32, 32, 5, 0.1)

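# Reload a trained checkpoint; custom metrics must be supplied again via custom_objects.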
base_model = keras.models.load_model(
    "./output/inpcetionv3_top60_181018-02-0.876-0.584[0.881]_rnd_adagrad.hdf5",
    custom_objects={'top_6': top_6})

base_model.compile(optimizer='rmsprop',
                   loss='categorical_crossentropy',
                   metrics=['accuracy', top_6])

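# Same single-class sanity check; evaluate() returns [loss, accuracy, top_6] in compile() order.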
results = base_model.evaluate(
    goods_dataset.get_images_for_label(94).batch(16).repeat(), steps=6)
print(results)

# for i, (img, lbl) in enumerate(goods_dataset.get_ambroziya().batch(32).repeat()):
#     l = tf.argmax(lbl)
#     r = tf.math.equal(l, tf.constant(45, dtype=tf.int64))