Esempio n. 1
0
def predict(captcha_image):
    """Run the frozen captcha CNN on one image and decode the predicted text.

    Args:
        captcha_image: a single preprocessed image vector shaped to fit the
            frozen graph's 'prefix/p_x' placeholder (assumed to be the
            flattened image — TODO confirm against the training pipeline).

    Returns:
        The decoded captcha string produced by ``vec2text``.
    """
    from load import load_graph
    import argparse

    # NOTE(review): calling parse_args() inside a library function is fragile —
    # any unknown flag on sys.argv (e.g. from the web server) makes argparse
    # exit the process. Consider parse_known_args() or plain parameters.
    parser = argparse.ArgumentParser()
    parser.add_argument("--frozen_model_filename", default="results/frozen_model.pb", type=str,
                        help="Frozen model file to import")
    parser.add_argument("--gpu_memory", default=.2, type=float, help="GPU memory per process")
    args = parser.parse_args()

    # TODO: this hard-coded path silently ignores --frozen_model_filename.
    graph = load_graph('/Users/alpha/github/flask/flasky/app/cnn/model/frozen_model.pb')
    x = graph.get_tensor_by_name('prefix/p_x:0')
    y = graph.get_tensor_by_name('prefix/p_y:0')
    keep_prob = graph.get_tensor_by_name('prefix/keep_prob:0')
    print(x, y, keep_prob)

    print('Starting Session, setting the GPU memory usage to %f' % args.gpu_memory)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory)
    sess_config = tf.ConfigProto(gpu_options=gpu_options)

    out_put = graph.get_tensor_by_name("prefix/out_put:0")
    # Renamed from `predict`, which shadowed this function's own name.
    predict_op = tf.argmax(tf.reshape(out_put, [-1, MAX_CAPTCHA, CHAR_SET_LEN]), 2)

    # Context manager closes the session (and releases its GPU memory)
    # instead of leaking one Session object per call.
    with tf.Session(graph=graph, config=sess_config) as sess:
        text_list = sess.run(predict_op, feed_dict={x: [captcha_image], keep_prob: 1})

    # Re-encode the predicted character indices as a one-hot vector so the
    # shared vec2text() helper can decode it to a string.
    text = text_list[0].tolist()
    vector = np.zeros(MAX_CAPTCHA * CHAR_SET_LEN)
    for i, n in enumerate(text):
        vector[i * CHAR_SET_LEN + n] = 1
    return vec2text(vector)
Esempio n. 2
0
def predict(captcha_image):
    """Run the frozen captcha CNN on one image and decode the predicted text.

    Args:
        captcha_image: a single preprocessed image vector shaped to fit the
            frozen graph's 'prefix/p_x' placeholder (assumed to be the
            flattened image — TODO confirm against the training pipeline).

    Returns:
        The decoded captcha string produced by ``vec2text``.
    """
    from load import load_graph
    import argparse

    # NOTE(review): calling parse_args() inside a library function is fragile —
    # any unknown flag on sys.argv (e.g. from the web server) makes argparse
    # exit the process. Consider parse_known_args() or plain parameters.
    parser = argparse.ArgumentParser()
    parser.add_argument("--frozen_model_filename", default="results/frozen_model.pb", type=str,
                        help="Frozen model file to import")
    parser.add_argument("--gpu_memory", default=.2, type=float, help="GPU memory per process")
    args = parser.parse_args()

    # TODO: this hard-coded path silently ignores --frozen_model_filename.
    graph = load_graph('/Users/alpha/github/flask/flasky/app/cnn/model/frozen_model.pb')
    x = graph.get_tensor_by_name('prefix/p_x:0')
    y = graph.get_tensor_by_name('prefix/p_y:0')
    keep_prob = graph.get_tensor_by_name('prefix/keep_prob:0')
    print(x, y, keep_prob)

    print('Starting Session, setting the GPU memory usage to %f' % args.gpu_memory)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory)
    sess_config = tf.ConfigProto(gpu_options=gpu_options)

    out_put = graph.get_tensor_by_name("prefix/out_put:0")
    # Renamed from `predict`, which shadowed this function's own name.
    predict_op = tf.argmax(tf.reshape(out_put, [-1, MAX_CAPTCHA, CHAR_SET_LEN]), 2)

    # Context manager closes the session (and releases its GPU memory)
    # instead of leaking one Session object per call.
    with tf.Session(graph=graph, config=sess_config) as sess:
        text_list = sess.run(predict_op, feed_dict={x: [captcha_image], keep_prob: 1})

    # Re-encode the predicted character indices as a one-hot vector so the
    # shared vec2text() helper can decode it to a string.
    text = text_list[0].tolist()
    vector = np.zeros(MAX_CAPTCHA * CHAR_SET_LEN)
    for i, n in enumerate(text):
        vector[i * CHAR_SET_LEN + n] = 1
    return vec2text(vector)
Esempio n. 3
0
    # --- CLI: frozen graph location and per-process GPU memory fraction ---
    parser = argparse.ArgumentParser()
    parser.add_argument("--frozen_model_filename",
                        default="results/frozen_model.pb",
                        type=str,
                        help="Frozen model file to import")
    parser.add_argument("--gpu_memory",
                        default=.2,
                        type=float,
                        help="GPU memory per process")
    args = parser.parse_args()

    ##################################################
    # Tensorflow part
    ##################################################
    print('Loading the model')
    graph = load_graph(args.frozen_model_filename)
    # Input and prediction tensors of the imported graph; "prefix/" is
    # presumably the name scope load_graph imports under — confirm in load.py.
    x = graph.get_tensor_by_name('prefix/Placeholder/inputs_placeholder:0')
    y = graph.get_tensor_by_name('prefix/Accuracy/predictions:0')

    print('Starting Session, setting the GPU memory usage to %f' %
          args.gpu_memory)
    # Cap TensorFlow's GPU allocation to the requested fraction.
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=args.gpu_memory)
    sess_config = tf.ConfigProto(gpu_options=gpu_options)
    # Long-lived session, presumably shared with request handlers defined
    # elsewhere in the file — TODO confirm.
    persistent_sess = tf.Session(graph=graph, config=sess_config)
    ##################################################
    # END Tensorflow part
    ##################################################

    print('Starting the API')
    app.run()
Esempio n. 4
0
        default='',
        help=
        "Path to image folder. This is where the images from the run will be saved."
    )
    # -u: overwrite the model file instead of refusing to clobber it.
    parser.add_argument('-u',
                        action='store_true',
                        dest='update',
                        help="Overwrite/update the specified model file.")
    args = parser.parse_args()

    # Validate the model path asynchronously before loading anything.
    ioloop = asyncio.get_event_loop()
    ioloop.run_until_complete(check_model_path(args))
    ioloop.close()

    print("Loading from {}".format(args.model))
    graph = load_graph(os.path.join(args.model))
    # Tensors of the imported graph: input, dropout keep-probability,
    # and the prediction output.
    X = graph.get_tensor_by_name('prefix/X:0')
    keep_prob = graph.get_tensor_by_name('prefix/keep_prob:0')
    prediction = graph.get_tensor_by_name('prefix/output:0')
    sess = tf.Session(graph=graph)

    # Optional run recording: recreate the image folder from scratch so a
    # new run never mixes with images from a previous one.
    if args.image_folder != '':
        print("Creating image folder at {}".format(args.image_folder))
        if not os.path.exists(args.image_folder):
            os.makedirs(args.image_folder)
        else:
            shutil.rmtree(args.image_folder)
            os.makedirs(args.image_folder)
        print("RECORDING THIS RUN ...")
    else:
        print("NOT RECORDING THIS RUN ...")
Esempio n. 5
0
if __name__ == "__main__":
    # CLI flags for the frozen model path and the GPU memory fraction.
    parser = argparse.ArgumentParser()
    parser.add_argument("--frozen_model_filename", default="results/frozen_model.pb", type=str,
                        help="Frozen model file to import")
    parser.add_argument("--gpu_memory", default=.2, type=float, help="GPU memory per process")
    args = parser.parse_args()

    ##################################################
    # Tensorflow part
    ##################################################
    print('Loading the model')
    # graph = load_graph(args.frozen_model_filename)
    # x = graph.get_tensor_by_name('prefix/Placeholder/inputs_placeholder:0')
    # y = graph.get_tensor_by_name('prefix/Accuracy/predictions:0')

    # NOTE(review): the hard-coded path below ignores --frozen_model_filename.
    graph = load_graph('/Users/alpha/github/flask/flasky/app/cnn/model/frozen_model.pb')
    # x = graph.get_tensor_by_name('prefix/inputs_placeholder:0')
    # y = graph.get_tensor_by_name('prefix/predictions:0')

    # Fresh placeholders (flattened 60x160 image; 4 characters over a
    # 57-symbol charset). NOTE(review): these are created in the default
    # graph, not inside `graph`, so they are not connected to the frozen
    # model loaded above — confirm this is intended.
    X = tf.placeholder(tf.float32, [None, 60 * 160])
    Y = tf.placeholder(tf.float32, [None, 4 * 57])
    keep_prob = tf.placeholder(tf.float32)

    print('Starting Session, setting the GPU memory usage to %f' % args.gpu_memory)
    # Cap TensorFlow's GPU allocation to the requested fraction.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory)
    sess_config = tf.ConfigProto(gpu_options=gpu_options)
    persistent_sess = tf.Session(graph=graph, config=sess_config)
    ##################################################
    # END Tensorflow part
    ##################################################
        type=int,
        default=5,
        help='number of topology suggestions to print out (sorted by depth).')
    # -p/--print-only: skip computation and just display an earlier result.
    parser.add_argument(
        '--print-only',
        '-p',
        action='store_true',
        help='only read and print from existing CSV result file.')
    args = parser.parse_args()

    # Create folders if not exist
    mkdirp(RESULT_FOLDER)
    mkdirp(CACHE_FOLDER)

    # Per-(site, name) cache file — presumably a pickle of the loaded
    # graph, refreshed when --reload is given; confirm in load.load_graph.
    fname = os.path.join(CACHE_FOLDER, args.site + '-' + args.name + '.pick')
    G = load.load_graph(args.site, args.name, fname, args.reload)

    # Either run the analysis and write the CSV, or read an existing one.
    csvname = os.path.join(RESULT_FOLDER, args.site + "-" + args.name + '.csv')
    if not args.print_only:
        df = check_all(G,
                       args.site,
                       args.name,
                       kappa=args.kappa,
                       reduction=not args.no_reduction,
                       margin=args.margin)
        df.to_csv(csvname)
    elif not os.path.isfile(csvname):
        # --print-only was requested but there is nothing to print.
        print("No CSV result file to read from")
        exit(1)
    else:
        df = pd.read_csv(csvname)
Esempio n. 7
0
                        help="Frozen model file to import")
    # Fraction of GPU memory this process may claim.
    parser.add_argument("--gpu_memory",
                        default=.2,
                        type=float,
                        help="GPU memory per process")
    args = parser.parse_args()

    ##################################################
    # Tensorflow part
    ##################################################
    print('Loading the model')
    # graph = load_graph(args.frozen_model_filename)
    # x = graph.get_tensor_by_name('prefix/Placeholder/inputs_placeholder:0')
    # y = graph.get_tensor_by_name('prefix/Accuracy/predictions:0')

    # NOTE(review): the hard-coded path below ignores --frozen_model_filename.
    graph = load_graph(
        '/Users/alpha/github/flask/flasky/app/cnn/model/frozen_model.pb')
    # x = graph.get_tensor_by_name('prefix/inputs_placeholder:0')
    # y = graph.get_tensor_by_name('prefix/predictions:0')

    # Fresh placeholders (flattened 60x160 image; 4 characters over a
    # 57-symbol charset). NOTE(review): created in the default graph, not
    # inside `graph`, so not wired to the frozen model — confirm intended.
    X = tf.placeholder(tf.float32, [None, 60 * 160])
    Y = tf.placeholder(tf.float32, [None, 4 * 57])
    keep_prob = tf.placeholder(tf.float32)

    print('Starting Session, setting the GPU memory usage to %f' %
          args.gpu_memory)
    # Cap TensorFlow's GPU allocation to the requested fraction.
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=args.gpu_memory)
    sess_config = tf.ConfigProto(gpu_options=gpu_options)
    persistent_sess = tf.Session(graph=graph, config=sess_config)
    ##################################################
    # END Tensorflow part
from load import load_graph

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
"""
Adapted from https://gist.github.com/morgangiraud/4a062f31e8a7b71a030c2ced3277cc20#file-medium-tffreeze-3-py
"""

if __name__ == '__main__':
    # CLI: path of the frozen TensorFlow graph to inspect.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "-m", dest='model_filename', type=str,
        help="frozen model .pb file to import")
    cli_args = arg_parser.parse_args()

    graph = load_graph(cli_args.model_filename)

    # Dump every operation name so the tensor lookups below can be verified.
    for operation in graph.get_operations():
        print(operation.name)

    input_tensor = graph.get_tensor_by_name('prefix/X:0')
    dropout_tensor = graph.get_tensor_by_name('prefix/keep_prob:0')
    output_tensor = graph.get_tensor_by_name('prefix/output:0')

    # Smoke-test the imported graph on random noise shaped like one batch.
    with tf.Session(graph=graph) as session:
        result = session.run(
            output_tensor,
            feed_dict={input_tensor: np.random.randn(40, 66, 200, 3) * 100,
                       dropout_tensor: 1.0})
        print(result)
Esempio n. 9
0
    
    return json_data
##################################################
# END API part
##################################################

if __name__ == "__main__":
    # CLI: frozen graph location and per-process GPU memory fraction.
    cli = argparse.ArgumentParser()
    cli.add_argument("--frozen_model_filename", default="results/frozen_model.pb",
                     type=str, help="Frozen model file to import")
    cli.add_argument("--gpu_memory", default=.2, type=float,
                     help="GPU memory per process")
    opts = cli.parse_args()

    ##################################################
    # Tensorflow part
    ##################################################
    print('Loading the model')
    graph = load_graph(opts.frozen_model_filename)
    # Module-level tensors — presumably read by the API handlers defined
    # earlier in the file, so their names must stay `x`/`y`.
    x = graph.get_tensor_by_name('prefix/Placeholder/inputs_placeholder:0')
    y = graph.get_tensor_by_name('prefix/Accuracy/predictions:0')

    print('Starting Session, setting the GPU memory usage to %f' % opts.gpu_memory)
    # Long-lived session capped to the requested GPU memory fraction.
    persistent_sess = tf.Session(
        graph=graph,
        config=tf.ConfigProto(
            gpu_options=tf.GPUOptions(
                per_process_gpu_memory_fraction=opts.gpu_memory)))
    ##################################################
    # END Tensorflow part
    ##################################################

    print('Starting the API')
    app.run()