def main():
    '''
    Entry point
    '''
    model_dir = utils.create_model_dir()
    # download_model_from_local_file(model_dir)
    utils.download_model_from_bucket(model_dir)
    protobuf_file_name = utils.get_env_var_or_raise_exception(
        settings.MODEL_PROTOBUF_FILE_NAME_ENV_VAR)
    model_path = model_dir + '/' + protobuf_file_name

    # create model...
    #with GANModel(model_dir) as gan_model:
    with DetectionModel(model_path) as detection_model:
        # ...load the image
        # image_file_name = './svnh_test_images/image_3.jpg'
        # with open(image_file_name, 'rb') as f:
        #     image = f.read()
        image = utils.download_image_from_bucket('grassland-images',
                                                 'image_3.jpg')

        # ...make prediction
        #scores = gan_model.predict(image)
        scores = detection_model.predict(image)

        # print results
        results_json = [{
            'digit': str(score[0]),
            'probability': str(score[1])
        } for score in scores]
        print("Scores: {}".format(json.dumps(results_json)))
Example #2
def search(layers, units, optimizer, learning_rate, activation_fn,
           dropout):
    params['layers'] = layers
    params['units'] = units
    params['optimizer'] = optimizer
    params['learning_rate'] = learning_rate
    params['activation_fn'] = activation_fn
    params['dropout'] = dropout
    print(params)
    new_model_dir = utils.create_model_dir(model_dir, params)
    estimator = get_estimator(params, new_model_dir, feature_column,
                              CONFIG)
    val_x, val_y = train_input_fn(VALIDATION_DATA_MINI)
    validation_monitor = utils.get_validation_monitor(val_x, val_y)
    history = estimator.fit(x=x,
                            y=y,
                            steps=EPOCHS,
                            batch_size=BATCH_SIZE,
                            monitors=[validation_monitor])
    validation_input = create_callable_train_input_fn(
        VALIDATION_DATA_MINI)
    accuracy = evaluate.get_rmse_model(estimator, validation_input)
    global BEST_ACCURACY
    print('rmse: ' + str(accuracy))
    if accuracy < BEST_ACCURACY:
        print('Exporting model...')
        feature_spec = tf.feature_column.make_parse_example_spec(
            feature_column)
        serving_input_fn = build_parsing_serving_input_fn(feature_spec)
        estimator.export_savedmodel(new_model_dir, serving_input_fn)
        BEST_ACCURACY = accuracy
    print('Best rmse: ' + str(BEST_ACCURACY))
    del estimator
    tf.reset_default_graph()
    return accuracy
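search is written as an objective for an external tuner: each call trains one configuration, reports its RMSE, and updates the global best. A minimal driver, assuming a plain grid over two of the knobs (not part of the original source):

import itertools

for layers, units in itertools.product([2, 3], [64, 128]):
    search(layers=layers,
           units=units,
           optimizer='Adam',
           learning_rate=0.001,
           activation_fn='relu',
           dropout=0.5)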
Example #3
def estimator_from_tf_record(shape_in: Tuple[int, int],
                             shape_out: Tuple[int],
                             tf_records_name: str,
                             batch_size: int = 10,
                             epochs: Optional[int] = 10,
                             steps: int = 1,
                             model_dir: str = r'..\tmp\test',
                             consistent_model: bool = True,
                             activate_tb: bool = False):
    """
    Train and evaluate an estimator whose data is read from a TFRecord file.
    :param shape_in: shape of the input features
    :param shape_out: shape of the labels
    :param tf_records_name: name of the TFRecord file to read
    :param batch_size: number of examples per batch
    :param epochs: number of passes over the data per training call
    :param steps: number of train/evaluate rounds
    :param model_dir: directory for checkpoints and exports
    :param consistent_model: whether to reuse the same model directory between runs
    :param activate_tb: launch TensorBoard on the model directory when done
    :return: the trained estimator
    """
    model = create_compiled_model(shape_in=shape_in, shape_out=shape_out)
    model_dir = create_model_dir(model_dir, consistent_model=consistent_model)
    estimator = model_to_estimator(model, model_dir=model_dir)

    for _ in range(steps):
        estimator.train(
            input_fn=lambda: set_input_fn_tf_record(tf_records_name,
                                                    is_train=True,
                                                    shape_in=shape_in,
                                                    shape_out=shape_out,
                                                    batch_size=batch_size,
                                                    num_epochs=epochs)
        )
        result = estimator.evaluate(
            input_fn=lambda: set_input_fn_tf_record(tf_records_name,
                                                    is_train=False,
                                                    shape_in=shape_in,
                                                    shape_out=shape_out,
                                                    batch_size=batch_size)
        )
        print(result)

    if activate_tb:
        launch_tb(model_dir)
    return estimator
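A call might look like the following; the shapes and record name are placeholders for whatever the TFRecords were serialized with:

estimator = estimator_from_tf_record(shape_in=(30, 4),
                                     shape_out=(1,),
                                     tf_records_name='train.tfrecord',
                                     batch_size=32,
                                     epochs=5,
                                     steps=3)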
Example #4
def main():
    '''
    Entry point
    '''
    model_dir = utils.create_model_dir()
    # download_model_from_local_file(model_dir)
    utils.download_model_from_bucket(model_dir)

    # create model...
    with GANModel(model_dir) as gan_model:
        # ...load the image
        # image_file_name = './svnh_test_images/image_3.jpg'
        # with open(image_file_name, 'rb') as f:
        #     image = f.read()
        image = utils.download_image_from_bucket('vb-tf-aws-lambda-images', 'image_3.jpg')

        # ...make prediction
        scores = gan_model.predict(image)

        # print results
        results_json = [{'digit': str(score[0]), 'probability': str(score[1])} for score in scores]
        print "Scores: {}".format(json.dumps(results_json))
Example #5
def train():
    params = {
        'biased': True,
        'n_factors': 40,
        'n_epochs': 50,
        'learning_rate': 0.001,
        'reg': 0.1
    }
    model_dir = './models/svd/'
    print('Training SVD model...')
    print(params)
    print('Loading data...')
    train_data, validation_data, test_data = get_training(
        TRAIN_DATA_FULL, VALIDATION_DATA_FULL, TEST_DATA_FULL)
    print('Factorizing...')
    model = run_svd(train_data, params, svdpp=True)
    # Get rmse from validation
    predictions = model.test(validation_data)
    print('rmse: ' + str(get_rmse_temp(predictions)))
    # Print predictions
    predictions = model.test(test_data)
    print_predictions_temp(predictions, './SVD_data/svdpp.txt')
    # Save model
    surprise.dump.dump(utils.create_model_dir(model_dir, params), algo=model)
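The saved model can be restored with Surprise's matching loader: surprise.dump.load returns a (predictions, algo) tuple, and predictions is None here because only algo was dumped. The path below is a placeholder for whatever utils.create_model_dir returned:

import surprise

_, model = surprise.dump.load('./models/svd/model_dump')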
Example #6
import json
import os
import sys
'''
This is needed so that the script running on AWS will pick up the pre-compiled dependencies
from the vendored folder
'''
current_location = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(current_location, 'vendored'))
'''
The following imports must be placed after picking up of pre-compiled dependencies
'''
from gan_model import GANModel
import utils
'''
Declare global objects living across requests
'''
model_dir = utils.create_model_dir()
utils.download_model_from_bucket(model_dir)
gan_model = GANModel(model_dir)


def get_param_from_url(event, param_name):
    '''
    Retrieve query parameters from a Lambda call. Parameters are passed through the
    event object as a dictionary. We are interested in 'queryStringParameters', since
    the bucket name and the key are passed in the query string

    :param event: the event as input in the Lambda function
    :param param_name: the name of the parameter in the query string
    :return: parameter value or None if the parameter is not in the event dictionary
    '''
    params = event['queryStringParameters']
    if params is None:
        return None
    return params.get(param_name)
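A handler wired around these pieces might look like the following; the handler name, the 'bucket' and 'key' parameter names, and the response shape are assumptions, not from the original source:

def lambda_handler(event, context):
    bucket = get_param_from_url(event, 'bucket')
    key = get_param_from_url(event, 'key')
    image = utils.download_image_from_bucket(bucket, key)
    scores = gan_model.predict(image)  # reuses the warm global model
    results_json = [{'digit': str(score[0]), 'probability': str(score[1])}
                    for score in scores]
    return {'statusCode': 200, 'body': json.dumps(results_json)}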
Example #7
def estimator_from_csv(shape_in: Tuple[int, int],
                       shape_out: Tuple[int],
                       file_csv: str,
                       feature_cols: Union[List[int], int] = 0,
                       batch_size: int = 10,
                       epochs: Optional[int] = 10,
                       steps: int = 1,
                       model_dir: str = r'..\tmp\test',
                       consistent_model: bool = True,
                       activate_tb: bool = False):
    """
    Train and evaluate an estimator whose data is read from a CSV file.
    :param shape_in: shape of the input features
    :param shape_out: shape of the labels
    :param file_csv: path to the CSV file to read
    :param feature_cols: column index (or list of indices) to use as features
    :param batch_size: number of examples per batch
    :param epochs: number of passes over the data per training call
    :param steps: number of train/evaluate rounds
    :param model_dir: directory for checkpoints and exports
    :param consistent_model: whether to reuse the same model directory between runs
    :param activate_tb: launch TensorBoard on the model directory when done
    :return: the trained estimator
    """
    n_in, n_out = shape_in[0], shape_out[0]

    model = create_compiled_model(shape_in=shape_in, shape_out=shape_out)
    model_dir = create_model_dir(model_dir, consistent_model=consistent_model)
    estimator = model_to_estimator(model, model_dir=model_dir)

    d = read_data_from_csv(file_csv)
    raw_trn_data, raw_tst_data = split_data(d)
    trn_fea, trn_lbl = to_supervised(raw_trn_data,
                                     n_in,
                                     n_out,
                                     feature_cols=feature_cols,
                                     label_col=0,
                                     is_train=True)
    tst_fea, tst_lbl = to_supervised(raw_tst_data,
                                     n_in,
                                     n_out,
                                     feature_cols=feature_cols,
                                     label_col=0,
                                     is_train=False)

    for _ in range(steps):
        estimator.train(
            input_fn=lambda: set_input_fn_csv(trn_fea,
                                              trn_lbl,
                                              batch_size=batch_size,
                                              num_epochs=epochs)
        )
        result = estimator.evaluate(
            input_fn=lambda: set_input_fn_csv(tst_fea,
                                              tst_lbl,
                                              batch_size=batch_size)
        )
        print(result)

    if activate_tb:
        launch_tb(model_dir)
    return estimator
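For example (the file name, shapes and feature columns are placeholders):

estimator = estimator_from_csv(shape_in=(30, 4),
                               shape_out=(1,),
                               file_csv='data.csv',
                               feature_cols=[1, 2, 3],
                               batch_size=32)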
Example #8
def estimator_from_model_fn(shape_in: Union[Tuple[int, int], List[Tuple[int, int]]],
                            shape_out: Tuple[int],
                            tf_records_name: str,
                            batch_size: int = 10,
                            epochs: int = 10,
                            model_dir: str = r'.\tmp\test',
                            consistent_model: bool = True,
                            activate_tb: bool = True,
                            n_checkpoints: int = 1,
                            model_fn=model_fn_default,
                            network_fn=create_multichannel_model,
                            learning_rate: Optional[float] = None,
                            batch_norm: bool = False):
    """
    Train, evaluate and predict with a custom model_fn Estimator reading from TFRecord.
    :param shape_in: shape (or list of shapes, one per channel) of the input features
    :param shape_out: shape of the labels
    :param tf_records_name: name of the TFRecord file to read
    :param batch_size: number of examples per batch
    :param epochs: total number of training epochs, split across checkpoints
    :param model_dir: directory for checkpoints and exports
    :param consistent_model: whether to reuse the same model directory between runs
    :param activate_tb: launch TensorBoard on the model directory when done
    :param n_checkpoints: number of train/evaluate/predict rounds
    :param model_fn: model function passed to the Estimator
    :param network_fn: function that builds the network, forwarded through params
    :param learning_rate: optional learning rate forwarded through params
    :param batch_norm: whether the network uses batch normalization
    :return: the trained estimator
    """
    model_dir = create_model_dir(model_dir, consistent_model=consistent_model)

    params = {
        'network_fn': network_fn,
        'network_params': {
            'shape_in': shape_in,
            'shape_out': shape_out,
            'batch_norm': batch_norm,
        },
    }

    train_epochs = epochs // n_checkpoints

    if learning_rate is not None:
        params['learning_rate'] = learning_rate

    estimator = est.Estimator(
        model_fn=model_fn,
        model_dir=model_dir,
        params=params
    )

    for _ in range(n_checkpoints):
        estimator.train(
            input_fn=lambda: set_input_fn_tf_record(tf_records_name,
                                                    is_train=True,
                                                    shape_in=shape_in,
                                                    shape_out=shape_out,
                                                    batch_size=batch_size,
                                                    num_epochs=train_epochs),
        )

        result = estimator.evaluate(
            input_fn=lambda: set_input_fn_tf_record(tf_records_name,
                                                    is_train=False,
                                                    shape_in=shape_in,
                                                    shape_out=shape_out,
                                                    batch_size=batch_size)
        )

        print(result)

        prd = estimator.predict(
            input_fn=lambda: set_input_fn_tf_record(tf_records_name,
                                                    is_train=False,
                                                    shape_in=shape_in,
                                                    shape_out=shape_out,
                                                    batch_size=batch_size)
        )

        # show the first five predictions from the generator
        n = 5
        for i in prd:
            print(i, f' len: {len(i)}')
            n -= 1
            if n == 0:
                break

    if activate_tb:
        launch_tb(model_dir)
    return estimator
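A corresponding call, again with placeholder shapes and record name; the epochs are split evenly across the n_checkpoints train/evaluate rounds:

estimator = estimator_from_model_fn(shape_in=[(30, 4), (30, 2)],
                                    shape_out=(1,),
                                    tf_records_name='train.tfrecord',
                                    epochs=20,
                                    n_checkpoints=4,
                                    learning_rate=1e-3)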