Code example #1
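A ResNet identity block written against the TensorFlow 1.x contrib Keras API. The backend is put into training mode with K.set_learning_phase(1) before the graph is built.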
import numpy as np
from tensorflow.contrib.keras.api.keras import layers
from tensorflow.contrib.keras.api.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from tensorflow.contrib.keras.api.keras.models import Model, load_model
from tensorflow.contrib.keras.api.keras.preprocessing import image
from resnet_utils import *
from tensorflow.contrib.keras.api.keras.initializers import glorot_uniform
import tensorflow.contrib.keras.api.keras.backend as K

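# channels_last = NHWC tensor layout; learning phase 1 = training mode,
# so Dropout and BatchNormalization use their training-time behaviour.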
K.set_image_data_format('channels_last')
K.set_learning_phase(1)


def identity_block(X, f, filters, stage, block):
    """Standard ResNet identity block: 1x1 -> fxf -> 1x1 convolutions,
    with the input added back to the main path before the final ReLU."""
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    F1, F2, F3 = filters

    X_shortcut = X

    X = Conv2D(filters=F1,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='valid',
               name=conv_name_base + '2a',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    X = Conv2D(filters=F2,
               kernel_size=(f, f),
               strides=(1, 1),
               padding='same',
               name=conv_name_base + '2b',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    X = Conv2D(filters=F3,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='valid',
               name=conv_name_base + '2c',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)

    # add the shortcut back to the main path, then apply the final ReLU
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    return X
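
A quick sanity check, as a hedged sketch (the input size and filter counts below are illustrative assumptions, not part of the original snippet):

# Hypothetical smoke test: an identity block preserves spatial dimensions,
# and the input channel count must match F3 (here 256).
X_input = Input(shape=(32, 32, 256))
X_out = identity_block(X_input, f=3, filters=[64, 64, 256], stage=2, block='a')
model = Model(inputs=X_input, outputs=X_out)
print(model.output_shape)  # (None, 32, 32, 256)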
Code example #2
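A TensorFlowOnSpark worker function: it trains a Keras MLP on MNIST (from in-memory data in 'tf' input mode, or from a Spark RDD feed in 'spark' mode), checkpoints from worker 0, and exports a SavedModel for inference.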
def main_fun(args, ctx):
    import numpy
    import os
    import tensorflow as tf
    import tensorflow.contrib.keras as keras
    from tensorflow.contrib.keras.api.keras import backend as K
    from tensorflow.contrib.keras.api.keras.models import Sequential, load_model, save_model
    from tensorflow.contrib.keras.api.keras.layers import Dense, Dropout
    from tensorflow.contrib.keras.api.keras.optimizers import RMSprop
    from tensorflow.contrib.keras.python.keras.callbacks import LambdaCallback, TensorBoard

    from tensorflow.python.saved_model import builder as saved_model_builder
    from tensorflow.python.saved_model import tag_constants
    from tensorflow.python.saved_model.signature_def_utils_impl import predict_signature_def

    from tensorflowonspark import TFNode

    cluster, server = TFNode.start_cluster_server(ctx)

    if ctx.job_name == "ps":
        server.join()
    elif ctx.job_name == "worker":

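        # Pull batches from the Spark RDD feed and convert them into
        # (images, labels) numpy arrays, with pixels scaled to [0, 1].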
        def generate_rdd_data(tf_feed, batch_size):
            print("generate_rdd_data invoked")
            while True:
                batch = tf_feed.next_batch(batch_size)
                imgs = []
                lbls = []
                for item in batch:
                    imgs.append(item[0])
                    lbls.append(item[1])
                images = numpy.array(imgs).astype('float32') / 255
                labels = numpy.array(lbls).astype('float32')
                yield (images, labels)

        with tf.device(
                tf.train.replica_device_setter(
                    worker_device="/job:worker/task:%d" % ctx.task_index,
                    cluster=cluster)):

            IMAGE_PIXELS = 28
            batch_size = 100
            num_classes = 10

            # the data, shuffled and split between train and test sets
            if args.input_mode == 'tf':
                from tensorflow.contrib.keras.api.keras.datasets import mnist
                (x_train, y_train), (x_test, y_test) = mnist.load_data()
                x_train = x_train.reshape(60000, 784)
                x_test = x_test.reshape(10000, 784)
                x_train = x_train.astype('float32') / 255
                x_test = x_test.astype('float32') / 255

                # convert class vectors to binary class matrices
                y_train = keras.utils.to_categorical(y_train, num_classes)
                y_test = keras.utils.to_categorical(y_test, num_classes)
            else:  # args.input_mode == 'spark'
                x_train = tf.placeholder(tf.float32,
                                         [None, IMAGE_PIXELS * IMAGE_PIXELS],
                                         name="x_train")
                y_train = tf.placeholder(tf.float32, [None, 10],
                                         name="y_train")

            model = Sequential()
            model.add(Dense(512, activation='relu', input_shape=(784, )))
            model.add(Dropout(0.2))
            model.add(Dense(512, activation='relu'))
            model.add(Dropout(0.2))
            model.add(Dense(10, activation='softmax'))

            model.summary()

            model.compile(loss='categorical_crossentropy',
                          optimizer=RMSprop(),
                          metrics=['accuracy'])

        saver = tf.train.Saver()

        with tf.Session(server.target) as sess:
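            # hook Keras up to the distributed session so its ops
            # run against this worker's server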
            K.set_session(sess)

            def save_checkpoint(epoch, logs=None):
                if epoch == 1:
                    # write the graph definition once, early in training
                    tf.train.write_graph(sess.graph.as_graph_def(),
                                         args.model_dir, 'graph.pbtxt')
                saver.save(sess,
                           os.path.join(args.model_dir, 'model.ckpt'),
                           global_step=epoch * args.steps_per_epoch)

            ckpt_callback = LambdaCallback(on_epoch_end=save_checkpoint)
            tb_callback = TensorBoard(log_dir=args.model_dir,
                                      histogram_freq=1,
                                      write_graph=True,
                                      write_images=True)

            # add callbacks to save model checkpoint and tensorboard events (on worker:0 only)
            callbacks = [ckpt_callback, tb_callback] if ctx.task_index == 0 else None

            if args.input_mode == 'tf':
                # train & validate on in-memory data
                history = model.fit(x_train,
                                    y_train,
                                    batch_size=batch_size,
                                    epochs=args.epochs,
                                    verbose=1,
                                    validation_data=(x_test, y_test),
                                    callbacks=callbacks)
            else:  # args.input_mode == 'spark'
                # train on data read from a generator which is producing data from a Spark RDD
                tf_feed = TFNode.DataFeed(ctx.mgr)
                history = model.fit_generator(
                    generator=generate_rdd_data(tf_feed, batch_size),
                    steps_per_epoch=args.steps_per_epoch,
                    epochs=args.epochs,
                    verbose=1,
                    callbacks=callbacks)

            if args.export_dir and ctx.job_name == 'worker' and ctx.task_index == 0:
                # save a local Keras model, so we can reload it with an inferencing learning_phase
                save_model(model, "tmp_model")

                # reload the model
                K.set_learning_phase(False)
                new_model = load_model("tmp_model")

                # export a saved_model for inferencing
                builder = saved_model_builder.SavedModelBuilder(
                    args.export_dir)
                signature = predict_signature_def(
                    inputs={'images': new_model.input},
                    outputs={'scores': new_model.output})
                builder.add_meta_graph_and_variables(
                    sess=sess,
                    tags=[tag_constants.SERVING],
                    signature_def_map={'predict': signature},
                    clear_devices=True)
                builder.save()

            if args.input_mode == 'spark':
                tf_feed.terminate()
Code example #3
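The tail end of a Grad-CAM script: the class activation map is normalized, rendered as a JET heatmap over the input image, and written to disk. The learning phase is set to 0 (inference) before the model is loaded.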
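    # (tail of grad_cam) clip the CAM at zero and normalise it to [0, 1]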
    cam = np.maximum(cam, 0)
    heatmap = cam / np.max(cam)

    # Return to BGR [0..255] from the preprocessed image
    image = image[0, :]
    image -= np.min(image)
    image = np.minimum(image, 255)

    cam = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
    cam = np.float32(cam) + np.float32(image)
    cam = 255 * cam / np.max(cam)
    return np.uint8(cam), heatmap


preprocessed_input = load_image(find(INPUT, INPUT_FOLDER))
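# Learning phase 0 = inference: Dropout is disabled and BatchNormalization
# uses its moving averages. It must be set before the model is loaded.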
k.set_learning_phase(0)
model = load_model('cifarClassification.h5')
layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])
predictions = model.predict(preprocessed_input)

top_3 = decode_predictions(predictions)[0][0:3]
print('Predicted classes:')
# decode_predictions returns (class_id, class_name, score) tuples
for class_id, class_name, score in top_3:
    print('%s (%s) with probability %.2f' % (class_name, class_id, score))

predicted_class = np.argmax(predictions)
cam, heatmap = grad_cam(model, preprocessed_input, predicted_class, layer_name)
cv2.imwrite(
    OUTPUT_FOLDER + "gradcam_" + INPUT[:-5] + "_" + layer_name + ".jpg", cam)
print('Gradient class activation image saved to ' + OUTPUT_FOLDER)
Code example #4
File: tf-keras-gan.py  Project: vaaale/GAN
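Hyperparameters and the beginning of the discriminator network of a conditional DCGAN; the learning phase is set to True (training) globally before the model graphs are constructed.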
mb_size = 64
Z_dim = 64
y_dim = 10
h_dim = 128

# x_train = x_train / 255.
# x_test = x_test / 255.

logs_path = 'logs'
if not os.path.isdir(logs_path):
    os.makedirs(logs_path)

if not os.path.exists('out/'):
    os.makedirs('out/')

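# Learning phase True = training mode for all Keras layers built below
# (affects BatchNormalization statistics and Dropout).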
K.set_learning_phase(True)


# Discriminator Net model
def discriminator(X_dim, y_dim):
    df_dim = 64
    # X_dim is expected to be the image shape tuple, e.g. (28, 28, 1)
    x_in = Input(shape=X_dim, name='X_input')
    y_in = Input(shape=(y_dim, ), name='Y_input')
    D_h = LeakyReLU(0.2)(Convolution2D(df_dim,
                                       kernel_size=(5, 5),
                                       strides=(2, 2),
                                       padding='same')(x_in))
    D_h = LeakyReLU(0.2)(BatchNormalization()(Convolution2D(
        df_dim * 2, kernel_size=(5, 5), strides=(2, 2), padding='same')(D_h)))
    D_h = LeakyReLU(0.2)(BatchNormalization()(Convolution2D(
        df_dim * 2, kernel_size=(5, 5), strides=(2, 2), padding='same')(D_h)))