# Example 1
def create_estimator(params):
    """Build a VGG16 transfer-learning classifier and wrap it as an Estimator.

    Args:
        params: dict with keys 'dropout rate', 'num classes',
            'learning rate', 'isRunOnCloud' and 'output path'.

    Returns:
        A tf.estimator.Estimator wrapping the compiled Keras model.
    """
    # Start from VGG16 pre-trained on ImageNet, including its FC top.
    base_model = VGG16(weights='imagenet')
    base_model.summary()

    # Tap the penultimate FC layer and attach a new classification head.
    x = base_model.get_layer('fc2').output
    x = Dropout(params['dropout rate'])(x)
    predictions = Dense(params['num classes'],
                        activation="softmax",
                        name="sm_out")(x)

    model = Model(inputs=base_model.input, outputs=predictions)

    # Freeze everything, then unfreeze the classifier head (fc1, fc2,
    # dropout and the new softmax output).
    # BUG FIX: the original slice `model.layers[20:23]` stopped before the
    # newly added 'sm_out' layer (index 23 in this model), leaving the
    # randomly initialised output layer frozen so the head could never
    # learn. Slicing to the end includes it.
    for layer in model.layers:
        layer.trainable = False
    for layer in model.layers[20:]:
        layer.trainable = True

    model.summary()

    model.compile(loss="categorical_crossentropy",
                  optimizer=tf.train.AdamOptimizer(params['learning rate'],
                                                   beta1=0.9,
                                                   beta2=0.999),
                  metrics=["accuracy"])

    if params['isRunOnCloud']:
        run_config = tf.estimator.RunConfig(model_dir=params['output path'])
    else:
        # Set up training config according to Intel recommendations
        NUM_PARALLEL_EXEC_UNITS = 4
        session_config = tf.ConfigProto(
            intra_op_parallelism_threads=NUM_PARALLEL_EXEC_UNITS,
            inter_op_parallelism_threads=2,
            allow_soft_placement=True,
            device_count={'CPU': NUM_PARALLEL_EXEC_UNITS})

        os.environ["OMP_NUM_THREADS"] = "4"
        os.environ["KMP_BLOCKTIME"] = "30"
        os.environ["KMP_SETTINGS"] = "1"
        os.environ["KMP_AFFINITY"] = "granularity=fine,verbose,compact,1,0"

        run_config = tf.estimator.RunConfig(session_config=session_config,
                                            model_dir=params['output path'])

    # Convert to Estimator (https://cloud.google.com/blog/products/gcp/
    # new-in-tensorflow-14-converting-a-keras-model-to-a-tensorflow-estimator)
    estimator_model = kes.model_to_estimator(keras_model=model,
                                             config=run_config)

    return estimator_model
def model_estimator(params, model_dir, run_config, retraining_weights,
                    model_id):
    """Get a model as a tf.estimator object.

    Args:
        params: dict of hyperparameters ('input_shape', 'dense_size_a',
            'dense_activation', 'dense_dropout_rate_a', 'dense_size',
            'dense_dropout_rate', 'n_classes', 'optimizer',
            'learning_rate', 'loss', 'metrics').
        model_dir: base directory for estimator checkpoints.
        run_config: tf.estimator.RunConfig for the estimator.
        retraining_weights: optional path to a zip archive holding a Keras
            checkpoint to warm-start from (falsy to skip).
        model_id: suffix appended to model_dir for this model's outputs.

    Returns:
        A tf.estimator.Estimator wrapping the compiled Keras model.
    """
    # ImageNet-pretrained Xception backbone, top layer removed, global
    # average pooling. (NOTE(review): nearby comments said "resnet"; the
    # code actually uses Xception.)
    base_model = Xception(
        weights='imagenet',
        include_top=False,  # Peel off top layer
        pooling='avg',
        input_shape=params['input_shape'])

    # Fully-connected head: dense -> dropout -> dense -> dropout -> output.
    x = base_model.output
    x = Dense(params['dense_size_a'],
              activation=params['dense_activation'],
              name='dense')(x)
    x = Dropout(rate=params['dense_dropout_rate_a'])(x)
    x = Dense(params['dense_size'],
              activation=params['dense_activation'],
              name='dense_preoutput')(x)
    x = Dropout(rate=params['dense_dropout_rate'])(x)
    # Sigmoid output: one independent probability per class.
    output = Dense(params['n_classes'], name='output', activation='sigmoid')(x)

    model = Model(inputs=base_model.input, outputs=output)

    # Get (potentially decaying) learning rate
    optimizer = get_optimizer(params['optimizer'], params['learning_rate'])
    model.compile(optimizer=optimizer,
                  loss=params['loss'],
                  metrics=params['metrics'])

    # Optionally warm-start from a zipped Keras checkpoint.
    if retraining_weights:
        print('in retraining weights')
        with zipfile.ZipFile(retraining_weights, "r") as zip_ref:
            zip_ref.extractall('/tmp/checkpoint')
        retraining_weights_ckpt = '/tmp/checkpoint/keras/keras_model.ckpt'
        print(retraining_weights_ckpt)
        model.load_weights(retraining_weights_ckpt)

    # (Removed a dead no-op `model_id = model_id` assignment.)
    # Return estimator; checkpoints go to model_dir + model_id, matching the
    # original plain string concatenation.
    m_e = model_to_estimator(keras_model=model,
                             model_dir=model_dir + model_id,
                             config=run_config)
    return m_e
# Example 3
def trainer_fn(trainer_fn_args, schema):
    """Build an estimator plus train/eval specs for a TFX trainer.

    Args:
        trainer_fn_args: object exposing train_files, eval_files,
            train_steps, eval_steps and transform_output attributes.
        schema: data schema (unused here; kept for the trainer_fn contract).

    Returns:
        dict with 'estimator', 'train_spec' and 'eval_spec' entries.
    """
    TRAIN_BATCH_SIZE = 20

    tf_transform_output = tft.TFTransformOutput(
        trainer_fn_args.transform_output)

    # Simple dense model over a 100-dim sparse input.
    model = Sequential([
        layers.InputLayer(100, sparse=True, batch_size=20, name='input_1'),
        layers.Dense(100),
        layers.Dense(2, activation='sigmoid')
    ])
    model.compile(optimizer='adam', loss='mse', metrics=['accuracy', 'mse'])

    estimator = model_to_estimator(model)

    # Train spec. BUG FIX: the original first built a TrainSpec around the
    # bare `_input_fn` (which the estimator would call with no arguments,
    # failing at train time) and then immediately overwrote it; that dead
    # spec and a leftover debug `print(train_spec)` have been removed.
    train_input_fn = lambda: _input_fn(trainer_fn_args.train_files,
                                       tf_transform_output)
    train_spec = tf.estimator.TrainSpec(train_input_fn,
                                        max_steps=trainer_fn_args.train_steps)

    # Eval spec (explicit batch size; the train side uses _input_fn's
    # default).
    eval_input_fn = lambda: _input_fn(trainer_fn_args.eval_files,
                                      tf_transform_output,
                                      batch_size=TRAIN_BATCH_SIZE)
    eval_spec = tf.estimator.EvalSpec(eval_input_fn,
                                      steps=trainer_fn_args.eval_steps)

    return {
        'estimator': estimator,
        'train_spec': train_spec,
        'eval_spec': eval_spec
    }
# Example 4
def train_and_eval(args):
    """Compile the model, wrap it as an estimator, and launch train/eval.

    args is a dict providing 'model_dir', 'train_data_files',
    'eval_data_files' and 'batch_size'.
    """
    n_classes = 28
    input_shape = (512, 512, 3)  # 512x512 inputs with 3 color channels

    keras_model = build_model(input_shape=input_shape, n_classes=n_classes)

    # Step-wise learning-rate schedule driven by a global step variable.
    global_step = tf.Variable(0, trainable=False, name='global_step')
    boundaries = [10000, 20000, 27500, 44000]
    values = [5E-4, 2.5E-4, 5E-5, 2E-5, 1E-5]
    schedule = tf.train.piecewise_constant(global_step, boundaries, values)

    # Only the 'scores' head contributes to training; the 'key' head gets a
    # constant-zero loss so it is carried through untouched.
    losses = {
            'scores': 'binary_crossentropy',
            'key': lambda y_true, y_pred: tf.constant(0, dtype=tf.float32)
            }
    # weights chosen to make the average equal to the loss associated with scores
    loss_weights = {'scores': 2.0, 'key': 0.0}

    keras_model.compile(tf.keras.optimizers.Adam(lr=schedule),
                        loss=losses,
                        loss_weights=loss_weights)

    # Input-layer names, needed by the input functions below.
    feature_names = keras_model.input_names

    # Wrap as an estimator and attach the F1 metric.
    model = model_to_estimator(
        keras_model=keras_model,
        config=tf.estimator.RunConfig(model_dir=args['model_dir']))
    model = tf.contrib.estimator.add_metrics(model, f1)

    # Build the train and eval input functions.
    train_input_fn = input_fn(tf.estimator.ModeKeys.TRAIN,
                              args['train_data_files'], args['batch_size'],
                              feature_names, input_shape)
    eval_input_fn = input_fn(tf.estimator.ModeKeys.EVAL,
                             args['eval_data_files'], args['batch_size'],
                             feature_names, input_shape)

    # Exhaust the eval data each round; train until externally stopped.
    eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn, steps=None)
    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,
                                        max_steps=None)

    # Launches training
    tf.estimator.train_and_evaluate(model, train_spec, eval_spec)
# Example 5
def main():
    """Export a trained Keras model (.hdf5) as a TensorFlow SavedModel.

    Parses command-line arguments, loads the model and its JSON side files,
    converts the model to a tf.estimator, and exports a SavedModel with a
    bytes-input serving signature.
    """
    # Parse command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-model",
                        type=str,
                        required=True,
                        help="path to a prediction model (.hdf5 file)")
    parser.add_argument("-class_mapping_json",
                        type=str,
                        required=True,
                        help="path to label_mappings.json")
    parser.add_argument("-pre_processing_json",
                        type=str,
                        required=True,
                        help="path to the image_processing.json")
    parser.add_argument("-output_dir",
                        type=str,
                        required=True,
                        help="Root directory to which model is exported")
    parser.add_argument(
        "-log_outdir",
        type=str,
        required=False,
        default=None,
        help="The directory to write logfiles to (defaults to output_dir)")
    parser.add_argument(
        "-estimator_save_dir",
        type=str,
        required=False,
        help="Directory to which estimator is saved (if not specified)\
              a temporary location is chosen")

    args = vars(parser.parse_args())

    # Configure logging; logs default to the output directory.
    if args['log_outdir'] is None:
        args['log_outdir'] = args['output_dir']

    setup_logging(log_output_path=args['log_outdir'])

    # Echo the effective arguments.
    print("Using arguments:")
    for k, v in args.items():
        print("Arg: %s: %s" % (k, v))

    # BUG FIX: a second, redundant `args = vars(parser.parse_args())` here
    # re-parsed the command line and reset 'log_outdir' back to None,
    # discarding the default applied above; it has been removed (along with
    # an unused local `logger`).

    # Load the trained Keras model and capture its layer names.
    keras_model = load_model_from_disk(args['model'])

    input_names = keras_model.input_names
    output_names = keras_model.output_names

    label_mapping = read_json(args['class_mapping_json'])
    pre_processing = read_json(args['pre_processing_json'])
    estimator = model_to_estimator(keras_model,
                                   model_dir=args['estimator_save_dir'])

    def decode_and_process_image(image):
        # Decode raw bytes to a 3-channel image and apply the configured
        # preprocessing steps.
        image = tf.image.decode_image(image, channels=3)
        image = preprocess_image(image, **pre_processing)
        return image

    def generate_dataset_iterator(image_list):
        """ Dataset Iterator from a list of Image Bytes """
        dataset = tf.data.Dataset.from_tensor_slices(image_list)
        dataset = dataset.map(decode_and_process_image)
        dataset = dataset.batch(128)
        next_example = tf.data.experimental.get_single_element(dataset)
        return next_example

    def serving_input_receiver_fn():
        """
        This is used to define inputs to serve the model.

        :return: ServingInputReciever
        """
        # Input Tensor (list of image bytes)
        list_of_image_bytes = tf.placeholder(shape=[1], dtype=tf.string)
        receiver_tensors = {'image': list_of_image_bytes}

        # Generate an iterator for the images
        image_batch = generate_dataset_iterator(list_of_image_bytes)
        features = {input_names[0]: image_batch}
        return tf.estimator.export.ServingInputReceiver(
            receiver_tensors=receiver_tensors, features=features)

    # Save the model, shipping the label mapping alongside it.
    estimator.export_savedmodel(
        args['output_dir'],
        serving_input_receiver_fn=serving_input_receiver_fn,
        assets_extra={'label_mappings.json': args['class_mapping_json']})
# Small dense binary classifier over 4 input features.
model = models.Sequential([
    layers.Dense(16, activation='relu', input_shape=(4, )),
    layers.Dropout(0.2),
    layers.Dense(1, activation='sigmoid')
])

# BUG FIX: with a single sigmoid output unit, 'categorical_crossentropy'
# degenerates (categorical CE over a one-element axis); the matching loss
# for this head is 'binary_crossentropy'.
# NOTE(review): iris has 3 classes, so a 1-unit sigmoid head looks wrong
# for this dataset in the first place -- confirm the intended task.
model.compile(loss='binary_crossentropy', optimizer='adam')
model.summary()


def input_fn():
    """Return batched, repeating (features, labels) pairs from TFDS iris."""
    ds = tfds.load('iris', split=tfds.Split.TRAIN, as_supervised=True)
    # Wrap the feature tensor into the dict keyed by the model's input
    # layer name.
    ds = ds.map(lambda features, labels: ({'dense_input': features}, labels))
    return ds.batch(32).repeat()


# Sanity check: pull a single batch from the input pipeline and show it.
for features_batch, labels_batch in input_fn().take(1):
    print(features_batch)
    print(labels_batch)

# Wrap the compiled Keras model as an estimator.
# NOTE(review): `output_model` is not defined in this chunk -- presumably
# set earlier in the file; verify.
keras_estimator = estimator.model_to_estimator(keras_model=model,
                                               model_dir=output_model)

# Short train/evaluate smoke run over the same input function.
keras_estimator.train(input_fn=input_fn, steps=25)
eval_result = keras_estimator.evaluate(input_fn=input_fn, steps=10)
print('Eval result: {}'.format(eval_result))
# Example 7
    # Echo the parsed CLI arguments.
    print("Using arguments:")
    for k, v in args.items():
        print("Arg: %s: %s" % (k, v))

    # NOTE(review): re-parsing here discards any defaults filled into
    # `args` above -- looks redundant; confirm against the full function
    # (its header is not visible in this chunk).
    args = vars(parser.parse_args())

    # Load the trained Keras model and capture its layer names.
    keras_model = load_model_from_disk(args['model'])

    input_names = keras_model.input_names
    output_names = keras_model.output_names

    # Load label mapping and preprocessing settings, then wrap the model as
    # an estimator.
    label_mapping = read_json(args['class_mapping_json'])
    pre_processing = read_json(args['pre_processing_json'])
    estimator = model_to_estimator(
        keras_model,
        model_dir=args['estimator_save_dir'])

    def decode_and_process_image(image):
        # Decode JPEG bytes into a 3-channel image tensor, then apply the
        # configured preprocessing.
        # NOTE(review): closes over `pre_processing` from the enclosing
        # (truncated) function -- assumed to be a dict of kwargs for
        # preprocess_image; confirm.
        image = tf.image.decode_jpeg(image, channels=3)
        image = preprocess_image(image, **pre_processing)
        return image

    def generate_dataset_iterator(image_list):
        """ Dataset Iterator from a list of Image Bytes """
        # Build a dataset over the provided image bytes, decode/preprocess
        # each element, and batch up to 128 images.
        dataset = tf.data.Dataset.from_tensor_slices(image_list)
        dataset = dataset.map(decode_and_process_image)
        dataset = dataset.batch(128)
        # get_single_element requires the dataset to yield exactly one
        # element (here: one batch).
        next_example = tf.contrib.data.get_single_element(dataset)
        return next_example