Code Example #1
import os

import numpy as np
import torch

# model_fn, predict_fn and convert_and_pad are the project's own inference helpers;
# `here` is assumed to point at the directory containing this test file.


def test_predict():
    test_review = 'The simplest pleasures in life are the best, and this film is one of them. Combining a rather basic storyline of love and adventure this movie transcends the usual weekend fair with wit and unmitigated charm.'
    model_dir = os.path.join(here, './data/modelDir')

    model = model_fn(model_dir)
    print(model)

    data_X, data_len = convert_and_pad(model.word_dict, test_review)
    print('data_X ')
    print(data_X)
    print('data_len')
    print(data_len)

    # Using data_X and data_len we construct an appropriate input tensor. Remember
    # that our model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    print('data_pack (shape: {})'.format(data_pack.shape))
    print(data_pack)
    data_pack = data_pack.reshape(1, -1)

    print('data_pack reshaped (shape: {})'.format(data_pack.shape))
    print(data_pack)

    data = torch.from_numpy(data_pack)

    print('data (shape: {})'.format(data.shape))
    print(data)
    # predict_fn is given the raw review text (the same string as test_review above);
    # any conversion to the model's input format is expected to happen inside predict_fn.
    input_data = test_review
    result = predict_fn(input_data, model)
    print(result)
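
The helpers used above (model_fn, convert_and_pad, predict_fn, and the `here` path) come from the project's own inference code and are not shown in this example. As a rough illustration only, convert_and_pad presumably looks up each review word in word_dict and pads or truncates the result to a fixed length (500, per the comment above). The sketch below is an assumption, not the project's actual implementation:

def convert_and_pad_sketch(word_dict, words, pad=500):
    """Hypothetical stand-in: encode words as integer ids and pad/truncate to `pad` tokens."""
    NOWORD, INFREQ = 0, 1                 # assumed convention: 0 = padding, 1 = out-of-vocabulary
    encoded = [NOWORD] * pad
    for i, word in enumerate(words[:pad]):
        encoded[i] = word_dict.get(word, INFREQ)
    return encoded, min(len(words), pad)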
Code Example #2
# Smoke test: uses os, `here`, model_fn and predict_fn as in the previous example.
def test_serve():
    model_dir = os.path.join(here, './data/modelDir')

    model = model_fn(model_dir)
    print(model)

    # input = input_fn(pickle.dumps(u"Best movie ever"), 'text/plain')
    # print(input)

    input_data = "Best movie ever"
    predict_fn(input_data=input_data, model=model)
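
The commented-out lines above hint at an input_fn, which in SageMaker-style serving code deserializes the request body before predict_fn runs. A minimal sketch for plain-text bodies, following the usual SageMaker PyTorch convention rather than code shown in this project:

def input_fn_sketch(serialized_input_data, content_type):
    """Hypothetical deserializer: accept UTF-8 text bodies and return them as a Python string."""
    if content_type == 'text/plain':
        if isinstance(serialized_input_data, bytes):
            return serialized_input_data.decode('utf-8')
        return serialized_input_data
    raise ValueError('Unsupported content type: ' + content_type)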
Code Example #3
File: pca_vgg.py  Project: diegoami/DA_ML_Capstone
    # (snippet starts mid-file: the argparse parser and the torch / torch.nn imports are
    #  defined earlier, including the --data-dir and --model-dir arguments used below)
    parser.add_argument('--img-width', type=int, default=320, metavar='N',
                        help='width of image (default: 320)')
    parser.add_argument('--img-height', type=int, default=180, metavar='N',
                        help='height of image (default: 180)')
    parser.add_argument('--batch-size', type=int, default=8, metavar='N',
                        help='input batch size for training (default: 8)')

    args = parser.parse_args()

    print(f'Data Dir: {args.data_dir}')
    print(f'Model Dir: {args.model_dir}')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    use_gpu = torch.cuda.is_available()
    model = model_fn(args.model_dir)
    model_info = get_model_info(args.model_dir)

    # drop the last fully-connected layer so the classifier outputs feature vectors
    new_classifier = nn.Sequential(*list(model.classifier.children())[:-1])
    model.classifier = new_classifier

    model.eval()  # evaluation mode (disables dropout and batch-norm updates)

    # retrieve the data loader and dataset size for the image directory
    dataloader, dataset_size = get_data_loaders(img_dir=args.data_dir, img_height=args.img_height,
                                                img_width=args.img_width, batch_size=args.batch_size)

    X, y = get_feature_matrix_from_dataset(dataloader)
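
get_feature_matrix_from_dataset is project-specific and not shown here. Since the last classifier layer was stripped above, it presumably runs each batch through the truncated VGG model and stacks the resulting feature vectors and labels into NumPy arrays (e.g. as input for PCA). A sketch under that assumption, not the project's actual code:

import numpy as np
import torch

def get_feature_matrix_sketch(model, dataloader, device):
    """Hypothetical helper: collect penultimate-layer features and labels over a dataloader."""
    features, labels = [], []
    model.eval()
    with torch.no_grad():
        for inputs, targets in dataloader:
            outputs = model(inputs.to(device))   # forward pass through the truncated network
            features.append(outputs.cpu().numpy().reshape(len(inputs), -1))
            labels.append(targets.numpy())
    return np.vstack(features), np.concatenate(labels)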
Code Example #4
    """ Call Predict function on a loaded LSTM model"""

    if request.method == "POST":
        LOG.info("I am a post")
        if request.form:
            LOG.info("I have form data")
            #print(request.form['kommentar'])
        if request.data:
            LOG.info("I have data")
            LOG.info(request.data)
        if request.json:
            LOG.info("I have json")
            # Do stuff with the data...
            return jsonify({"message": "OK"})
        else:
            LOG.info("fail")

    data = request.data
    LOG.info("Form data is: \n %s" % data.decode('utf-8'))

    # get an output prediction from the pretrained model, model
    result = predict_fn(data.decode('utf-8'), model)
    LOG.info("Prediction value is: %s" % result)
    return str(result)


if __name__ == "__main__":
    # load pretrained model as model
    model = model_fn("./model")
    app.run(host='0.0.0.0', port=80, debug=True) # specify port=80
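
The route decorator for the handler above is not shown, so the URL path in the example below is purely illustrative; what the snippet does show is that the endpoint accepts a raw text body and returns the model's prediction as a string. A minimal client-side check, assuming a hypothetical "/predict" route:

import requests

# Send a raw-text review to the running Flask app (port 80, as configured above).
response = requests.post('http://localhost:80/predict', data='Best movie ever'.encode('utf-8'))
print(response.text)   # the stringified prediction returned by predict_fn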