Example 1
def train_evaluate(model, hidden_units, train_file, valid_file, ckpt_folder,
                   optimizer, batch_size, max_steps, lr, eval_steps,
                   export_format):

    # Select the Keras model-building function by name.
    if model == 'vgg16base1':
        model_fn = vgg16base1(IMAGE_SHAPE, INPUT_NAME, hidden_units)
    elif model == 'vgg16base2':
        model_fn = vgg16base2(IMAGE_SHAPE, INPUT_NAME, hidden_units)
    elif model == 'basenet':
        model_fn = basenet(IMAGE_SHAPE, INPUT_NAME, hidden_units)
    else:
        raise ValueError('Unsupported model: {0}'.format(model))

    if optimizer == 'Adam':
        optimizer = Adam(lr=lr)

    metrics = ['categorical_accuracy']
    loss = 'categorical_crossentropy'

    model_fn.compile(loss=loss, optimizer=optimizer, metrics=metrics)

    estimator = model_to_estimator(keras_model=model_fn, model_dir=ckpt_folder)

    train_input_fn = lambda: input_fn(
        file=train_file, batch_size=batch_size, train=True)
    valid_input_fn = lambda: input_fn(
        file=valid_file, batch_size=batch_size, train=False)

    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,
                                        max_steps=max_steps)
    eval_spec = tf.estimator.EvalSpec(input_fn=valid_input_fn,
                                      steps=eval_steps)

    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
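Example 1 leaves input_fn undefined. A minimal sketch, assuming TFRecord files holding raw image bytes plus integer labels, and the 7-class one-hot target suggested by Example 10's to_categorical(labels, 7); the feature names and record layout are assumptions:

import tensorflow as tf

def input_fn(file, batch_size, train):
    # Parse one serialized record into ({INPUT_NAME: image}, one_hot_label).
    def _parse(record):
        parsed = tf.parse_single_example(record, {
            'image': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
        })
        image = tf.decode_raw(parsed['image'], tf.uint8)
        image = tf.reshape(image, IMAGE_SHAPE)
        image = tf.cast(image, tf.float32) / 255.0
        return {INPUT_NAME: image}, tf.one_hot(parsed['label'], depth=7)

    dataset = tf.data.TFRecordDataset(file).map(_parse)
    if train:
        # Shuffle and repeat; TrainSpec's max_steps bounds the training run.
        dataset = dataset.shuffle(1000).repeat()
    return dataset.batch(batch_size)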
Example 2
def main(argv=None):
    # input image dimensions
    img_rows, img_cols = 28, 28
    # the data, split between train and test sets
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    # convert class vectors to binary class matrices
    y_train = tf.keras.utils.to_categorical(y_train, 10)
    y_test = tf.keras.utils.to_categorical(y_test, 10)

    model = Sequential()
    model.add(
        Conv2D(20, (5, 5),
               activation='relu',
               padding='valid',
               input_shape=input_shape,
               name='x'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
    model.add(Conv2D(40, (5, 5), activation='relu', padding='valid'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax', name='probabilities'))

    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=tf.keras.optimizers.Adam(0.002),
                  metrics=['accuracy'])

    model_dir = "gs://mnist-estimator/train"
    est_mnist = model_to_estimator(keras_model=model, model_dir=model_dir)

    train_input_fn = tf.estimator.inputs.numpy_input_fn(x={"x_input": x_train},
                                                        y=y_train,
                                                        batch_size=128,
                                                        num_epochs=None,
                                                        shuffle=True)
    train_spec = tf.estimator.TrainSpec(train_input_fn, max_steps=500)

    eval_input_fn = tf.estimator.inputs.numpy_input_fn(x={"x_input": x_test},
                                                       y=y_test,
                                                       num_epochs=1,
                                                       shuffle=False)
    exporter = tf.estimator.FinalExporter('mnist', serving_input_fn)
    eval_spec = tf.estimator.EvalSpec(eval_input_fn,
                                      steps=1,
                                      exporters=[exporter])
    #eval_spec = tf.estimator.EvalSpec(eval_input_fn, steps=1)

    tf.estimator.train_and_evaluate(est_mnist, train_spec, eval_spec)
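The FinalExporter refers to a serving_input_fn that is not shown. A minimal sketch, assuming the model's single image input; in TF 1.x, model_to_estimator keys features by the Keras input tensor name, so the first layer's name 'x' becomes 'x_input', matching the numpy_input_fn dictionaries above:

import tensorflow as tf

def serving_input_fn():
    # Accept already-preprocessed float images at serving time.
    features = {'x_input': tf.placeholder(tf.float32, shape=[None, 28, 28, 1])}
    return tf.estimator.export.ServingInputReceiver(features, features)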
Example 3
def my_train_and_evaluate(model_fn, train_file, valid_file, ckpt_folder, batch_size, max_steps):

    estimator = model_to_estimator(keras_model=model_fn, model_dir=ckpt_folder)

    train_input_fn = lambda: input_fn(file=train_file, batch_size=batch_size, train=True)
    valid_input_fn = lambda: input_fn(file=valid_file, batch_size=batch_size, train=False)

    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=max_steps)
    eval_spec = tf.estimator.EvalSpec(input_fn=valid_input_fn, steps=None)

    tf.logging.set_verbosity(tf.logging.INFO)
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
Example 4
def export_for_serving(model):
    '''
    Converts model to the TensorFlow estimator and saves it to the disk

    :param model: keras model to prepare for serving
    '''
    export_dir = 'export_model/'
    if os.path.exists(export_dir):
        shutil.rmtree(export_dir)

    tf_estimator = model_to_estimator(keras_model=model)
    tf_estimator.export_savedmodel(export_dir,
                                   serving_input_receiver_fn,
                                   strip_default_attrs=True)
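serving_input_receiver_fn plays the same role as serving_input_fn in Example 2. export_savedmodel writes a timestamped subdirectory under export_dir; a hypothetical smoke test of the result, assuming TF 1.x with tf.contrib available:

import glob
import tensorflow as tf

latest_export = sorted(glob.glob('export_model/*'))[-1]  # newest timestamped export
predictor = tf.contrib.predictor.from_saved_model(latest_export)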
Example 5
def keras_estimator(model_dir, config, learning_rate):
    """Creates a CNN using Keras.

    This function creates a CNN using TensorFlow's Keras API. The Keras model
    is converted to a TensorFlow Estimator so that it can be consumed by
    SageMaker's sagemaker.tensorflow.TensorFlow API.

    Args:
      model_dir: (str) File path where training files will be written.
      config: (tf.estimator.RunConfig) Configuration options to save model.
      learning_rate: (float) Gradient Descent learning rate.

    Returns:
      A tf.estimator.Estimator converted from the compiled Keras model.
    """

    # Input layer name must match the feature dictionary feeding the network
    # defined in the input_fn() / _parse_fun()
    inputs = Input(shape=(28, 28, 1), name='image_input')
    x = Conv2D(
        filters=32,
        kernel_size=[3, 3],
        padding='same',
        activation=tf.nn.relu)(inputs)
    x = MaxPool2D(pool_size=(3, 3), strides=2)(x)
    x = Conv2D(
        filters=64,
        kernel_size=[3, 3],
        padding='same',
        activation=tf.nn.relu)(x)
    x = MaxPool2D(pool_size=(2, 2), strides=2)(x)
    x = Flatten()(x)
    x = Dense(128, activation=tf.nn.relu)(x)
    x = Dropout(rate=0.4)(x)
    output = Dense(10, activation=tf.nn.softmax)(x)
    model = Model(inputs, output)

    # Compile model with learning parameters.
    optimizer = Adam(lr=learning_rate)
    model.compile(
        optimizer=optimizer,
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])

    # Converts the Keras model to a TensorFlow Estimator
    estimator = model_to_estimator(
        keras_model=model, model_dir=model_dir, config=config)
    return estimator
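A minimal sketch of driving the returned estimator, assuming MNIST-shaped arrays; the feature key must equal the Input layer name 'image_input', and labels stay integer class ids to match the sparse loss (paths and hyperparameter values are placeholders):

import tensorflow as tf

estimator = keras_estimator(model_dir='/tmp/mnist', config=None, learning_rate=0.001)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'image_input': x_train},  # float32 images, shape (N, 28, 28, 1)
    y=y_train,                   # integer labels for sparse_categorical_crossentropy
    batch_size=128,
    num_epochs=None,
    shuffle=True)
estimator.train(train_input_fn, steps=1000)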
Example 6
def create_estimator(params):
    # Import VGG16 model for transfer learning
    base_model = VGG16(weights='imagenet')
    base_model.summary()

    x = base_model.get_layer('fc2').output

    x = Dropout(params['dropout rate'])(x)

    predictions = Dense(params['num classes'],
                        activation="sigmoid",
                        name="sm_out")(x)

    model = Model(inputs=base_model.input, outputs=predictions)

    # Freeze every layer, then unfreeze the first 23 (the VGG16 backbone up
    # to fc2), leaving only the newly added head frozen.
    for layer in model.layers:
        layer.trainable = False

    for layer in model.layers[0:23]:
        layer.trainable = True

    model.summary()

    model.compile(loss="binary_crossentropy",
                  optimizer=tf.train.AdamOptimizer(params['learning rate'],
                                                   beta1=0.9,
                                                   beta2=0.999),
                  metrics=["categorical_accuracy"])

    if params['isRunOnCloud']:

        run_config = tf.estimator.RunConfig(model_dir=params['output path'])
    else:

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 0.95
        # session_config = tf.contrib.learn.RunConfig(session_config=config)

        run_config = tf.estimator.RunConfig(session_config=config,
                                            model_dir=params['output path'])

    # Convert to Estimator (https://cloud.google.com/blog/products/gcp/new-in-tensorflow-14-converting-a-keras-model-to-a-tensorflow-estimator)
    estimator_model = kes.model_to_estimator(keras_model=model,
                                             config=run_config)

    return estimator_model
Example 7
def model_fn(model_name, hidden_units, ckpt_folder, optimizer, lr):
    if model_name == 'vgg16base1':
        model_fn = vgg16base1(IMAGE_SHAPE, INPUT_NAME, hidden_units)
    elif model_name == 'vgg16base2':
        model_fn = vgg16base2(IMAGE_SHAPE, INPUT_NAME, hidden_units)
    elif model_name == 'basenet':
        model_fn = basenet(IMAGE_SHAPE, INPUT_NAME, hidden_units)
    else:
        raise ValueError('Unsupported model: {0}'.format(model_name))

    if optimizer == 'Adam':
        optimizer = Adam(lr=lr)

    metrics = ['categorical_accuracy']
    loss = 'categorical_crossentropy'

    model_fn.compile(loss=loss, optimizer=optimizer, metrics=metrics)

    estimator = model_to_estimator(keras_model=model_fn, model_dir=ckpt_folder)

    return estimator
Example 8
def train_evaluate():

    #Create a keras model
    network_model = model.network(FLAGS.hidden_units)
    loss = 'sparse_categorical_crossentropy'
    metrics = ['accuracy']
    opt = Adadelta()
    network_model.compile(loss=loss, optimizer=opt, metrics=metrics)

    #Convert the Keras model to a tf estimator
    estimator = model_to_estimator(keras_model=network_model,
                                   model_dir=FLAGS.job_dir)

    #Create training, evaluation, and serving input functions
    train_input_fn = lambda: input_fn(data_file=FLAGS.training_file,
                                      is_training=True,
                                      batch_size=FLAGS.batch_size,
                                      num_parallel_calls=FLAGS.num_parallel_calls)

    valid_input_fn = lambda: input_fn(data_file=FLAGS.validation_file,
                                      is_training=False,
                                      batch_size=FLAGS.batch_size,
                                      num_parallel_calls=FLAGS.num_parallel_calls)

    #Create training and validation specifications
    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,
                                        max_steps=FLAGS.max_steps)

    export_latest = tf.estimator.FinalExporter("image_classifier",
                                               serving_input_fn)

    eval_spec = tf.estimator.EvalSpec(input_fn=valid_input_fn,
                                      steps=None,
                                      throttle_secs=FLAGS.throttle_secs,
                                      exporters=export_latest)

    #Start training
    tf.logging.set_verbosity(FLAGS.verbosity)
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
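model.network is an external factory that is not shown. A minimal sketch, assuming an MNIST-style classifier; the 10-way softmax with integer labels matches the sparse_categorical_crossentropy loss above, while the input shape is an assumption:

import tensorflow as tf

def network(hidden_units):
    # A small dense classifier whose hidden width comes from FLAGS.hidden_units.
    return tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28, 1)),
        tf.keras.layers.Dense(hidden_units, activation='relu'),
        tf.keras.layers.Dense(10, activation='softmax'),
    ])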
Example 9
def main(train_file, valid_file, ckpt_dir):

    batch_size = 64
    print("Starting training: batch_size:{0}".format(batch_size))

    train_input_fn = lambda: input_fn(
        file=train_file, batch_size=batch_size, train=True)
    valid_input_fn = lambda: input_fn(
        file=valid_file, batch_size=batch_size, train=False)

    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,
                                        max_steps=25000)
    eval_spec = tf.estimator.EvalSpec(input_fn=valid_input_fn)

    keras_model = model_fn(image_shape=IMAGE_SHAPE, input_name=INPUT_NAME)
    keras_estimator = model_to_estimator(keras_model=keras_model,
                                         model_dir=ckpt_dir)

    tf.logging.set_verbosity(tf.logging.INFO)

    tf.estimator.train_and_evaluate(keras_estimator, train_spec, eval_spec)
Example 10
def main1(mode, train_file, valid_file, ckpt_dir):

    with h5py.File(train_file, 'r') as data:
        images = data['images'].value
        labels = data['labels'].value

        images = images[0:5000]
        labels = labels[0:5000]

        labels = tf.keras.utils.to_categorical(labels, 7)
        images = images / 255

    assert images.shape[0] == labels.shape[0]

    train_input_fn = lambda: input_fn1(images, labels, train=True)
    keras_model = model_fn(image_shape=IMAGE_SHAPE, input_name='image')
    keras_estimator = model_to_estimator(keras_model=keras_model,
                                         model_dir=ckpt_dir)
    tf.logging.set_verbosity(tf.logging.INFO)

    keras_estimator.train(input_fn=train_input_fn, steps=1000)
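input_fn1 is not shown. A minimal sketch that serves the in-memory arrays through tf.data; the feature key 'image' matches input_name='image' above, and the batch size is an assumption:

import tensorflow as tf

def input_fn1(images, labels, train, batch_size=32):
    dataset = tf.data.Dataset.from_tensor_slices(
        ({'image': images.astype('float32')}, labels))
    if train:
        # Shuffle and repeat so estimator.train can run for a fixed step count.
        dataset = dataset.shuffle(1000).repeat()
    return dataset.batch(batch_size)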
Example 11
def main(mode, train_file, valid_file, ckpt_dir):
    print(train_file, valid_file, ckpt_dir)

    if mode == 'train':
        train_input_fn = lambda: input_fn(filenames=train_file, train=True)
        valid_input_fn = lambda: input_fn(filenames=valid_file, train=False)
        train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,
                                            max_steps=5000)
        eval_spec = tf.estimator.EvalSpec(input_fn=valid_input_fn)
        keras_model = model_fn(image_shape=IMAGE_SHAPE, input_name='image')
        keras_estimator = model_to_estimator(keras_model=keras_model,
                                             model_dir=ckpt_dir)
        tf.logging.set_verbosity(tf.logging.INFO)
        tf.estimator.train_and_evaluate(keras_estimator, train_spec, eval_spec)

    elif mode == 'train_evaluate':
        print('train_evaluate')
    elif mode == 'evaluate':
        print('evaluate')
    elif mode == 'predict':
        print('predict')
    else:
        print('else')
Example 12
def model_to_npu_estimator(keras_model=None,
                           keras_model_path=None,
                           custom_objects=None,
                           model_dir=None,
                           checkpoint_format='saver',
                           config=None,
                           job_start_file=''):
    """Constructs an `NPUEstimator` instance from given keras model.
    """
    tf_estimator = model_to_estimator(
        keras_model=keras_model,
        keras_model_path=keras_model_path,
        custom_objects=custom_objects,
        model_dir=model_dir,
        config=run_config.RunConfig(model_dir=model_dir),
        checkpoint_format=checkpoint_format)

    estimator = NPUEstimator(model_fn=tf_estimator._model_fn,
                             model_dir=model_dir,
                             config=config,
                             job_start_file=job_start_file,
                             warm_start_from=tf_estimator._warm_start_settings)

    return estimator
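A hypothetical call site, assuming a compiled Keras model and the Ascend NPU bridge that provides NPUEstimator and run_config; once built, the wrapper is driven like any tf.estimator.Estimator:

npu_estimator = model_to_npu_estimator(keras_model=model, model_dir='/tmp/npu_ckpt')
npu_estimator.train(input_fn=train_input_fn, max_steps=1000)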
Example 13
def main(model, train_file, valid_file, augment, ckpt_dir, opt, batch_size,
         max_steps, lr):

    if not os.path.exists(train_file):
        print("Training file does not exist")
        return

    if not os.path.exists(valid_file):
        print("Validation file does not exist")
        return

    if not os.path.isdir(ckpt_dir):
        print("Checkpoint directory does not exist !!!")
        return

    if (augment != 1) and (augment != 0):
        print("Wrong augment value")
        return

    if opt == 'Adam':
        optimizer = Adam(lr=lr)
    else:
        print("Unsupported optimizer")
        return

    start_time = strftime('%d-%m-%H%M')
    ckpt_folder = join(ckpt_dir, model + '_' + start_time)

    if model == 'base':
        model_fn = simple_cnn_model_fn(image_shape=IMAGE_SHAPE,
                                       input_name=INPUT_NAME,
                                       optimizer=optimizer)
    elif model == 'vgg16base':
        model_fn = VGG16base(image_shape=IMAGE_SHAPE,
                             input_name=INPUT_NAME,
                             optimizer=optimizer)

    else:
        print("Unsupported model")
        return

    estimator = model_to_estimator(keras_model=model_fn, model_dir=ckpt_folder)

    train_input_fn = lambda: input_fn(
        file=train_file, batch_size=batch_size, train=True, augment=augment)
    valid_input_fn = lambda: input_fn(
        file=valid_file, batch_size=batch_size, train=False, augment=False)

    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,
                                        max_steps=max_steps)
    eval_spec = tf.estimator.EvalSpec(input_fn=valid_input_fn, steps=None)

    tf.logging.set_verbosity(tf.logging.INFO)

    summary_file = join(ckpt_dir, model + '_' + start_time + '.txt')
    with open(summary_file, 'w') as logfile:
        logfile.write("Training run started at: {0}\n".format(strftime('%c')))
        logfile.write("Model trained: {0}\n".format(model))
        logfile.write("Hyperparameters:\n")
        logfile.write("  Optimizer: {0}\n".format(opt))
        logfile.write("  Learning rate: {0}\n".format(lr))
        logfile.write("  Training file: {0}\n".format(train_file))
        logfile.write("  Validation file: {0}\n".format(valid_file))
        logfile.write("  Data augmentation: {0}\n".format('On' if augment ==
                                                          1 else 'Off'))
        logfile.write("  Batch size: {0}\n".format(batch_size))
        logfile.write("  Max steps: {0}\n".format(max_steps))

    # Start training
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
Example 14
def main(**args):
    DATA_DIR = args['data_dir']
    OUTPUT_DIR = args['output_dir']
    KERAS_SAVE_PERIOD = int(args['keras_save_period'])
    TRAIN_BATCH_SIZE = int(args['train_batch_size'])
    EPOCHS = int(args['epochs'])
    STEPS_PER_EPOCH = int(args['steps_per_epoch'])
    VALIDATION_STEPS = int(args['validation_steps'])
    VAL_BATCH_SIZE = int(args['val_batch_size'])
    DROPOUT_RATE = float(args['dropout_rate'])
    LEARNING_RATE = float(args['learning_rate'])
    PLATEAU_FACTOR = float(args['plateau_factor'])
    PLATEAU_PATIENT = int(args['plateau_patient'])
    PLATEAU_MIN_LEARNING_RATE = float(args['plateau_min_learning_rate'])
    global NUMBER_OF_CLASSES, HEIGHT, WIDTH, DEPTH
    NUMBER_OF_CLASSES = int(args['number_of_classes'])
    WIDTH = int(args['width'])
    HEIGHT = int(args['height'])
    DEPTH = int(args['depth'])
    TRAIN_PATH = os.path.join(DATA_DIR, 'train_data')
    VAL_PATH = os.path.join(DATA_DIR, 'val_data')
    KERAS_MODEL_PATH = os.path.join(OUTPUT_DIR, 'keras_model{epoch:02d}.h5')
    ESTIMATOR_PATH = os.path.join(OUTPUT_DIR, 'estimator')
    EXPORT_MODEL_PATH = os.path.join(OUTPUT_DIR, 'export')
    TENSORBOARD_PATH = os.path.join(OUTPUT_DIR, 'logs')
    TRAIN_RESULTS_PATH = os.path.join(OUTPUT_DIR, 'train_results')
    VAL_RESULTS_PATH = os.path.join(OUTPUT_DIR, 'val_results')

    model = build_model(DROPOUT_RATE, LEARNING_RATE)

    data_gen = ImageDataGenerator(preprocessing_function=preprocess_input)
    val_gen = data_gen.flow_from_directory(VAL_PATH,
                                           target_size=(HEIGHT, WIDTH),
                                           color_mode='rgb',
                                           batch_size=VAL_BATCH_SIZE,
                                           shuffle=False)

    train_images_data_generator = ImageDataGenerator(preprocessing_function=preprocess_input,
                                                     rotation_range=180,
                                                     width_shift_range=0.2,
                                                     height_shift_range=0.2,
                                                     zoom_range=0.2,
                                                     shear_range=0.2,
                                                     horizontal_flip=True,
                                                     vertical_flip=True,
                                                     fill_mode='nearest')
    augmented_images_gen = train_images_data_generator.flow_from_directory(TRAIN_PATH,
                                                                           shuffle=True,
                                                                           target_size=(HEIGHT, WIDTH),
                                                                           batch_size=TRAIN_BATCH_SIZE)

    model_checkpoint = ModelCheckpoint(KERAS_MODEL_PATH, period=KERAS_SAVE_PERIOD, save_weights_only=True)
    reduce_plateau = ReduceLROnPlateau(monitor='val_categorical_accuracy',
                                       factor=PLATEAU_FACTOR,
                                       patience=PLATEAU_PATIENT,
                                       verbose=1,
                                       mode='max',
                                       min_lr=PLATEAU_MIN_LEARNING_RATE)
    tensorboard = TensorBoard(log_dir=TENSORBOARD_PATH, histogram_freq=0, write_graph=True, write_images=False)

    model.fit_generator(augmented_images_gen,
                        validation_data=val_gen,
                        epochs=EPOCHS,
                        steps_per_epoch=STEPS_PER_EPOCH,
                        validation_steps=VALIDATION_STEPS,
                        callbacks=[model_checkpoint, reduce_plateau, tensorboard])

    train_val_gen = data_gen.flow_from_directory(TRAIN_PATH,
                                                 target_size=(HEIGHT, WIDTH),
                                                 color_mode='rgb',
                                                 batch_size=VAL_BATCH_SIZE,
                                                 shuffle=False)

    y_train = train_val_gen.classes
    y_train_ids = train_val_gen.filenames
    # we assume STEPS_PER_EPOCH covers one full pass over the training set
    y_train_pred = model.predict_generator(train_val_gen,
                                           steps=int(np.ceil(STEPS_PER_EPOCH * TRAIN_BATCH_SIZE / VAL_BATCH_SIZE)))

    # we assume VALIDATION_STEPS covers one full pass over the validation set
    val_gen.reset()
    y_val = val_gen.classes
    y_val_ids = val_gen.filenames
    y_val_pred = model.predict_generator(val_gen, steps=VALIDATION_STEPS)


    with open(TRAIN_RESULTS_PATH, mode='w') as f:
        csv_writer = csv.writer(f, delimiter=',')
        csv_writer.writerow(['nr', 'image_id', 'true', 'predicted', 'probabilities'])
        for i, example in enumerate(y_train_pred):
            csv_writer.writerow([i + 1, y_train_ids[i], y_train[i], np.argmax(example), list(example)])

    with open(VAL_RESULTS_PATH, mode='w') as f:
        csv_writer = csv.writer(f, delimiter=',')
        csv_writer.writerow(['nr', 'image_id', 'true', 'predicted', 'probabilities'])
        for i, example in enumerate(y_val_pred):
            csv_writer.writerow([i + 1, y_val_ids[i], y_val[i], np.argmax(example), list(example)])

    estimator = model_to_estimator(keras_model=model, model_dir=ESTIMATOR_PATH)
    # https://github.com/tensorflow/tensorflow/issues/26178
    estimator._model_dir = os.path.join(ESTIMATOR_PATH, 'keras')
    estimator.export_savedmodel(EXPORT_MODEL_PATH, serving_input_receiver_fn=serving_input_fn)
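build_model and preprocess_input are not shown. A minimal sketch, assuming a tf.keras application backbone (MobileNetV2 here is an arbitrary placeholder); the categorical_crossentropy loss and categorical_accuracy metric line up with flow_from_directory's one-hot labels and the val_categorical_accuracy monitor above:

import tensorflow as tf

def build_model(dropout_rate, learning_rate):
    base = tf.keras.applications.MobileNetV2(input_shape=(HEIGHT, WIDTH, DEPTH),
                                             include_top=False, weights='imagenet')
    x = tf.keras.layers.GlobalAveragePooling2D()(base.output)
    x = tf.keras.layers.Dropout(dropout_rate)(x)
    outputs = tf.keras.layers.Dense(NUMBER_OF_CLASSES, activation='softmax')(x)
    model = tf.keras.models.Model(base.input, outputs)
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate),
                  loss='categorical_crossentropy',
                  metrics=['categorical_accuracy'])
    return model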
Example 15
        logfile.write("  Training file: {0}\n".format(train_file))
        logfile.write("  Validation file: {0}\n".format(valid_fiel))
        logfile.write("  Data augmentation: {0}\n".format('On' if augment==1 else 'Off'))          
        logfile.write("  Batch size: {0}\n".ormat(batch_size))
        logfile.write("  Max steps: {0}\n".format(max_steps))
 


    train_input_fn = lambda: input_fn(file=train_file, batch_size=batch_size, train=True, augment=True)
    valid_input_fn = lambda: input_fn(file=valid_file, batch_size=batch_size, train=False, augment=False)

    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=max_steps)
    eval_spec = tf.estimator.EvalSpec(input_fn=valid_input_fn)

    
    keras_estimator = model_to_estimator(keras_model=model_fn, model_dir=ckpt_dir)

    tf.logging.set_verbosity(tf.logging.INFO)

    tf.estimator.train_and_evaluate(keras_estimator, train_spec, eval_spec)
    
    with open(join(ckpt_dir, 'run_hyperparameters.txt'), 'w') as logfile:
        logfile.write("Training completed at: {0}\n".format(strftime('%c')))
   

if __name__ == '__main__':
    parser = argparse.ArgumentParser("Training, evaluation workflow")

    parser.add_argument(
        '--model',
        type=str,
Example 16
def create_estimator(params):

    if params['Transfer learning model'] == "VGG16":
        from tensorflow.python.keras.applications.vgg16 import VGG16 as TL_model
        base_model = TL_model(weights='imagenet')

    elif params['Transfer learning model'] == "InceptionV3":
        from tensorflow.python.keras.applications.inception_v3 import InceptionV3 as TL_model
        base_model = TL_model(weights='imagenet',
                              include_top=False,
                              input_shape=[224, 224, 3])

    elif params['Transfer learning model'] == "Nasnet":
        from tensorflow.python.keras.applications.nasnet import nasnet
        base_model = nasnet(weights='imagenet', input_shape=[331, 331, 3])

    # Summary of the imported transfer-learning base model
    base_model.summary()

    if params['Transfer learning model'] == "VGG16":
        x = base_model.get_layer('fc2').output

        x = Dropout(params['dropout rate'])(x)

        x = Dense(1000, activation='relu')(x)

        x = Dropout(params['dropout rate'])(x)

        x = Dense(500, activation='relu')(x)

        x = Dropout(params['dropout rate'])(x)

        predictions = Dense(params['num classes'],
                            activation="softmax",
                            name="sm_out")(x)

    elif params['Transfer learning model'] == "InceptionV3":
        print("Building Inception V3")
        # base_model.trainable = False
        x = base_model.output
        x = GlobalAveragePooling2D(trainable=True)(x)
        x = Dropout(params['dropout rate'])(x)

        x = Dense(1000, activation='relu', trainable=True)(x)

        x = Dropout(params['dropout rate'])(x)

        predictions = Dense(params['num classes'],
                            activation="softmax",
                            trainable=True,
                            name="sm_out")(x)

    model = Model(inputs=base_model.input, outputs=predictions)
    model.summary()

    if params['Transfer learning model'] == "VGG16":
        for layer in model.layers:
            layer.trainable = True

    elif params['Transfer learning model'] == "InceptionV3":
        for layer in model.layers[:249]:
            layer.trainable = False
        for layer in model.layers[249:]:
            layer.trainable = True

    model.compile(
        loss="categorical_crossentropy",
        optimizer=tf.keras.optimizers.Adam(),
        # optimizer=tf.train.AdamOptimizer(params['learning rate'],
        #                                  beta1=0.9,
        #                                  beta2=0.999),
        # optimizer=tf.train.AdadeltaOptimizer(params['learning rate']),
        # optimizer=tf.train.RMSPropOptimizer(params['learning rate']),
        metrics=["categorical_accuracy"])

    if params['isRunOnCloud']:

        run_config = tf.estimator.RunConfig(model_dir=params['output path'])
    else:

        config = tf.compat.v1.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 0.95
        # session_config = tf.contrib.learn.RunConfig(session_config=config)

        run_config = tf.estimator.RunConfig(
            session_config=config,
            model_dir=params['output path'],
            # save_checkpoints_steps=1000
        )

    # Convert to Estimator (https://cloud.google.com/blog/products/gcp/new-in-tensorflow-14-converting-a-keras-model-to-a-tensorflow-estimator)
    estimator_model = kes.model_to_estimator(keras_model=model,
                                             config=run_config)

    return estimator_model
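A hypothetical params dictionary for create_estimator; the key names are taken verbatim from the function body, the values are placeholders:

params = {
    'Transfer learning model': 'InceptionV3',
    'dropout rate': 0.5,
    'num classes': 10,
    'isRunOnCloud': False,
    'output path': '/tmp/transfer_model',
}
estimator = create_estimator(params)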