def __init__(self):
    """Prepare train/valid datasets whose images are pre-encoded by a
    truncated InceptionV3 feature extractor.

    Side effects: loads ImageNet weights and stores the mapped datasets
    on ``self.train_set`` / ``self.valid_set``.
    """
    dataset = GoodsDataset("dataset-181018.list",
                           "dataset-181018.labels",
                           settings.IMAGE_SIZE, settings.train_batch,
                           settings.valid_batch, settings.multiply,
                           settings.valid_percentage)
    raw_train = dataset.get_train_dataset()
    raw_valid = dataset.get_valid_dataset()

    # RGB input placeholder matching the configured image size.
    rgb_input = keras.layers.Input(shape=(IMAGE_SIZE[0], IMAGE_SIZE[1],
                                          3))
    backbone = InceptionV3(weights='imagenet',
                           include_top=False,
                           pooling='avg',
                           input_tensor=rgb_input)
    # Truncate the backbone at layer 248 and use its activations in place
    # of the raw images.
    cutoff_layer = 248
    feature_extractor = keras.Model(
        inputs=backbone.input,
        outputs=backbone.layers[cutoff_layer].output)

    def _to_features(images, labels):
        # NOTE(review): predict() inside a dataset map with steps=77
        # looks unusual — confirm this executes as intended.
        images = feature_extractor.predict(images, steps=77)
        return images, labels

    self.train_set = raw_train.map(
        _to_features)  #, num_parallel_calls=8)
    self.valid_set = raw_valid.map(
        _to_features)  #, num_parallel_calls=8)
# --- Example #2 ---
# Attach a fresh softmax classification head onto the backbone's output.
predictions = keras.layers.Dense(settings.num_classes,
                                 activation='softmax')(main_model.output)
new_model = keras.Model(inputs=main_model.input, outputs=predictions)

new_model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy', top_6])

# Kept for downstream code that expects a live TF session.
session = keras.backend.get_session()

# summary() prints the table itself and returns None; wrapping it in
# print() emitted a stray "None" line.
new_model.summary()

# Copy the pretrained weights layer-by-layer.  The target layers are
# offset by 248 in new_model — presumably matching the truncation point
# used elsewhere in this file; TODO confirm against both architectures.
for i, src_layer in enumerate(base_model.layers):
    new_model.layers[i + 248].set_weights(src_layer.get_weights())

print("new model inputs")
for node in new_model.inputs:
    print(node.op.name)

print("new model outputs")
for node in new_model.outputs:
    print(node.op.name)

# Sanity-check the assembled model on the validation split.
dataset = GoodsDataset("dataset-181018.list", "dataset-181018.labels",
                       (IMAGE_SIZE[0], IMAGE_SIZE[1]), 32, 32, 5, 0.1)
results = new_model.evaluate(dataset.get_valid_dataset(), steps=77)
print(results)

new_model.save("output/inception_{0}.hdf5".format(model_name))
# --- Example #3 ---
        "./checkpoints/FINE_TUNE_MODEL_4_DIRECT_inceptionv3-181018-{epoch:02d}-{acc:.3f}-{val_acc:.3f}[{val_top_6:.3f}].hdf5",
        save_best_only=True,
        monitor='val_top_6',
        mode='max'),
    keras.callbacks.TensorBoard(
        log_dir='./tensorboard-incv4',
        write_images=True,
    )
]

goods_dataset = GoodsDataset("dataset-181018.list", "dataset-181018.labels",
                             settings.IMAGE_SIZE, settings.train_batch,
                             settings.valid_batch, settings.multiply,
                             settings.valid_percentage)
train_dataset = goods_dataset.get_train_dataset()
valid_dataset = goods_dataset.get_valid_dataset()

results = model.evaluate(
    goods_dataset.get_images_for_label(94).batch(16).repeat(), steps=6)
print(results)

model.fit(
    train_dataset.prefetch(2).repeat(),
    callbacks=callbacks,
    epochs=30,
    steps_per_epoch=1157,
    validation_data=valid_dataset.repeat(),
    validation_steps=77,
)
"""
1) num_last_trainable_layers = 60 
# strategy = tf.contrib.distribute.MirroredStrategy()
# config = tf.estimator.RunConfig(train_distribute=strategy)

# keras_estimator = keras.estimator.model_to_estimator(
#   keras_model=model,
#   config=config,
#   model_dir='/tmp/model_dir')

# keras_estimator.train(input_fn=goods_dataset.get_train_dataset, steps=10)

model.fit(
    goods_dataset.get_train_dataset(),
    callbacks=callbacks,
    epochs=60,
    steps_per_epoch=800,  # 20 * 21 * 155 = 65100 < 13174 * 5 = 65870
    validation_data=goods_dataset.get_valid_dataset().repeat(),
    validation_steps=80)

session = keras.backend.get_session()
gr = session.graph.as_graph_def()
# for n in gr.node:
#     print(n.name)

# model.save_weights('./checkpoints/{}'.format(OUTPUT_MODEL_NAME))

# for n in keras.backend.get_session().graph.as_graph_def().node:
#     print(n.name)

frozen_graph = freeze_session(
    keras.backend.get_session(),
    output_names=[out.op.name for out in model.outputs])