        scores = app.net.classify(image)
        indices, predictions = app.net.top_k_prediction(scores, 5)
        # In addition to the prediction text, we will also produce the length
        # for the progress bar visualization.
        max_score = scores[indices[0]]
        meta = [(p, '%.5f' % scores[i]) for i, p in zip(indices, predictions)]
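        # meta pairs each of the top-k labels with its score formatted to five
        # decimals, e.g. [('label_a', '0.41230'), ...] (labels illustrative).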
        logging.info('result: %s', str(meta))
    except Exception as err:
        logging.info('Classification error: %s', err)
        return (False, 'Oops, something went wrong while classifying the'
                       ' image. Maybe try another one?')
    # If everything is successful, return the results
    endtime = time.time()
    return (True, meta, '%.3f' % (endtime-starttime))

if __name__ == '__main__':
    gflags.FLAGS(sys.argv)
    # try to make the upload directory.
    try:
        os.makedirs(UPLOAD_FOLDER)
    except OSError:
        # The upload directory may already exist.
        pass
    logging.getLogger().setLevel(logging.INFO)
    app.net = jeffnet.JeffNet(net_file=FLAGS.net_file,
                              meta_file=FLAGS.meta_file)
    #app.run(host='0.0.0.0')
    http_server = HTTPServer(WSGIContainer(app))
    http_server.listen(5001)
    IOLoop.instance().start()

def make_app(argv, debug=False):
    app = Flask(__name__)
    #CORS(app, support_credentials=True)
    # TODO
    # @cross_origin(supports_credentials=True)
    # def login():
    #    return jsonify({'success': 'ok'})
    app.debug = debug

    parser = Parser().getParser()
    #argv = sys.argv[1:]
    args, _ = parser.parse_known_args()

    # args = parser.parse_args()

    if (args.train == 1) and (args.task != 'fewrel'):
        app.net = train_and_fit(args)

    if (args.infer == 1) and (args.task != 'fewrel'):
        app.inferer = infer_from_trained(args.model_path,
                                         detect_entities=True,
                                         args=args)

    if args.task == 'fewrel':
        fewrel = FewRel(args.model_path, args)
        meta_input, e1_e2_start, meta_labels, outputs = fewrel.evaluate()

    def find_best_prediction(out):
        best_pred = max(out, key=itemgetter(2))
        return best_pred[0], best_pred[1], best_pred[2]
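    # Illustrative only: if out = [("sent", "relA", 0.2), ("sent", "relB", 0.9)]
    # (assumed (sentence, relation, probability) triples, matching the indexing
    # used below), find_best_prediction(out) returns ("sent", "relB", 0.9).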

    def get_best_predictions(data, inputtype="simplejson"):
        if inputtype == "simplejson":
            sen_name = "sentext"
        elif inputtype == "texoo":
            sen_name = "text"

        for line in data:
            #logger.info("sentence"+ str(line[sen_name]))
            out = app.inferer.infer_sentence(line[sen_name],
                                             detect_entities=True)
            logger.info("out: " + str(out))
            line["sentence"], line["pred"], line["prob"] = \
                find_best_prediction(out)
        return data

    def get_all_predictions(data, inputtype="simplejson"):
        new_data = []
        if inputtype == "simplejson":
            sen_name = "sentext"
        elif inputtype == "texoo":
            sen_name = "text"

        for line in data:
            logger.info("sentence: " + str(line[sen_name]))
            out = app.inferer.infer_sentence(line[sen_name],
                                             detect_entities=True)
            logger.info("out: " + str(out))

            if len(out) == 0:
                logger.info("no predictions returned for this sentence")
                line["sentence"], line["pred"], line["prob"] = None, None, None
                new_data.append(line)
            else:
                for pred in out:
                    logger.info("pred : " + str(pred))
                    newline = copy.deepcopy(line)
                    newline["sentence"], newline["pred"], newline["prob"] = \
                        pred[0], pred[1], pred[2]
                    logger.info("newline : " + str(newline))
                    new_data.append(newline)

        logger.info("new_data:\n" + str(new_data) + "\n")
        return new_data
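    # Note: unlike get_best_predictions, this fans the input out, appending one
    # deep-copied line per prediction; a sentence with three predictions yields
    # three entries, and a sentence with none yields a single entry whose
    # "sentence", "pred" and "prob" fields are None.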

    # takes new texoo json for each line
    # ignores annotations and generates new ones
    @app.route('/api/importtexoo', methods=['POST'])
    def get_input_importtexoo():
        print("request.data: ", request.data)
        logger.info("request.data:" + str(request.data))

        jsonInput = request.get_json(force=True)
        # jsonInput = { "options": {"returnAllPredictions": False},
        #             "data": [
        #                 {'length': 12,
        #                 'documentRef': 2,
        #                 'uid': 123,
        #                 'text': "I  love  Easter Sunday as a fashion moment because every church goer is ready to praise while dressed to the nines in their best Spring-inspired looks .",
        #                 'begin': 0,
        #                 'class': "thisClass",
        #                 'type': "type",
        #                 "tokens": None,
        #                 "empty": None,
        #                 "language": "ENg",
        #                 "sentences": None,
        #                 'source': "source",
        #                 'id': None,
        #                 "title": "Output for bert-relex-api.demo.datexis.com",
        #
        #                 "annotations": [
        #                     {"relationArguments": [
        #                         {"arg1": "blq"},{"arg2": "ble"}]},
        #                     {"relationArguments": [
        #                         {"arg1": "blj"}, {"arg2": "blg"}]}
        #                     ]
        #                 }
        #             ]
        #          }

        if jsonInput["options"]["returnAllPredictions"]:
            data = get_all_predictions(jsonInput["data"], "texoo")
        else:
            data = get_best_predictions(jsonInput["data"], "texoo")

        return make_result_json(data, "texoo")

    @app.route('/api/importjson', methods=['POST'])
    def get_input_importjson():
        print("request.data: ", request.data)
        logger.info("request.data:" + str(request.data))

        jsonInput = request.get_json(force=True)
        #jsonInput = '{ "options": {"returnAllPredictions": false},' \
        #            '"data": [' \
        #            '{"sentext": "I  love  Easter Sunday as a fashion moment because every church goer is ready to praise while dressed to the nines in their best Spring-inspired looks ."},' \
        #           ' {"sentext": "Wear  them with basics and sparse accessories ."}' \
        #            ']}'
        #jsonInput = json.loads(jsonInput)

        if jsonInput["options"]["returnAllPredictions"]:
            data = get_all_predictions(jsonInput["data"])
        else:
            data = get_best_predictions(jsonInput["data"])

        return make_result_json(data)
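    # Illustrative client call for this route (host and port are assumptions;
    # the payload mirrors the commented example above):
    #   curl -X POST http://localhost:5000/api/importjson \
    #       -H "Content-Type: application/json" \
    #       -d '{"options": {"returnAllPredictions": false},
    #            "data": [{"sentext": "Wear them with basics and sparse accessories ."}]}'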

    return app
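
# A minimal usage sketch, not part of the original module: build the Flask app
# with make_app and serve it with the same Tornado pattern used above (the
# port and argv handling here are assumptions).
#
#     app = make_app(sys.argv[1:], debug=False)
#     http_server = HTTPServer(WSGIContainer(app))
#     http_server.listen(5000)
#     IOLoop.instance().start()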