Code Example #1
File: script.py Project: samimideksa/All-In-One
# Requires: grpc, concurrent.futures, dlib, time, plus the project-local
# helpers load_model, images_demo, video_demo, Greeter, all_in_one_pb2_grpc
# and _ONE_DAY_IN_SECONDS defined elsewhere in script.py.
def serve():
    # Start a gRPC server backed by a thread pool and expose it on port 50051.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    all_in_one_pb2_grpc.add_AllInOneServicer_to_server(Greeter(), server)
    server.add_insecure_port('[::]:50051')
    server.start()
    # Load the multi-task face model (architecture JSON + frozen weights).
    model = load_model(
        "/home/samuel/projects/All-In-One/allinonemodels/allinone.json",
        "/home/samuel/projects/All-In-One/allinonemodels/freeze2.h5",
        ["age_estimation", "smile", "gender_probablity"])
    model.summary()
    detector = dlib.get_frontal_face_detector()
    # Image demo
    images_demo(model,
                "/home/samuel/projects/All-In-One/grpc/test/test_images/",
                detector)
    # Video demo (the webcam variant is left commented out)
    # webcam_demo(model, detector)
    video_demo(
        model,
        "/home/samuel/projects/All-In-One/grpc/test/test_videos/video_demo3.mp4",
        detector)
    # Block until interrupted, then shut the server down.
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)
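
Since serve() listens on insecure port 50051, a standard gRPC channel can reach it. The sketch below is hedged: the stub name AllInOneStub follows gRPC's Python codegen convention for the service registered above, but the RPC method Analyze and request type ImageRequest are hypothetical placeholders, since the project's .proto file is not shown.

import grpc
import all_in_one_pb2
import all_in_one_pb2_grpc

def run_client():
    # Plain-text channel matching the server's add_insecure_port('[::]:50051').
    with grpc.insecure_channel('localhost:50051') as channel:
        # Stub name inferred from add_AllInOneServicer_to_server.
        stub = all_in_one_pb2_grpc.AllInOneStub(channel)
        # Hypothetical RPC and message; substitute the names from the real .proto.
        response = stub.Analyze(all_in_one_pb2.ImageRequest(data=b'...'))
        print(response)

if __name__ == '__main__':
    run_client()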
Code Example #2
def main():
    print("loading model")
    model = load_model(
        "/home/samuel/projects/All-In-One/allinonemodels/allinone.json",
        "/home/samuel/projects/All-In-One/allinonemodels/freeze2.h5",
        ["age_estimation", "smile", "gender_probablity"])
    model.summary()
    print("loaded model")
    detector = dlib.get_frontal_face_detector()
    images_demo(model, "/home/samuel/projects/All-In-One/Images/", detector)
Code Example #3
# Requires: os, sys, rosbag, plus the project's parser, cfg, load_model,
# buffered_message_generator and msg_loop.
def main():
    global args
    args = parser.parse_args()

    if not os.path.isdir(args.out):
        os.mkdir(args.out)

    # Collect the topic lists from the comma-separated CLI arguments.
    odom_topics = args.odom_topics.split(",") if args.odom_topics is not None else []
    cam_topics = args.cam_topics.split(",") if args.cam_topics is not None else []
    velo_topics = args.velo_topics.split(",") if args.velo_topics is not None else []
    topics = odom_topics + cam_topics + velo_topics

    import json
    # 'cfg' is a module-level configuration dict in the original project.
    print(json.dumps(cfg, sort_keys=True, indent=2))
    model = load_model()
    offset = 0
    for b in args.bags.split(","):
        print("start bag", b)
        # The bag filename encodes the date and car name: "<date>T<time>_<car>_...".
        bag_name = b.split('/')[-1]
        car_name = bag_name.split('_')[1]
        date = int(bag_name.split('_')[0].split('T')[0])
        sys.stdout.flush()
        bag = rosbag.Bag(b)
        msg_it = iter(
            buffered_message_generator(bag, args.tolerance, topics,
                                       odom_topics))
        offset = msg_loop(args.out, args.rate, args.frame_limit, topics,
                          velo_topics, cam_topics, odom_topics, msg_it, offset,
                          model, car_name, date)
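
For orientation, the rosbag API that a helper like buffered_message_generator presumably wraps is Bag.read_messages. A minimal sketch, with the bag path and topic names as placeholders:

import rosbag

bag = rosbag.Bag('/path/to/example.bag')
# read_messages yields (topic, message, timestamp) tuples in time order.
for topic, msg, t in bag.read_messages(topics=['/odom', '/image_raw']):
    print(topic, t.to_sec())
bag.close()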
Code Example #4
File: app.py Project: ahcognmm/nlp
from flask import Flask, flash, render_template, redirect, url_for, request
import demo

model = demo.load_model()

app = Flask(__name__)


@app.route('/')
def hello_world():
    return redirect('/classify')


@app.route('/classify', methods=['GET', 'POST'])
def classify_system():
    error = None
    if request.method == 'POST':
        comment = request.form['comment']
        if comment == '':
            # Vietnamese: "You haven't entered any data. Please re-enter."
            error = 'Bạn chưa nhập dữ liệu. Hãy nhập lại'
            return render_template('UI_demo.html', error=error)
        else:
            predict = demo.classify_one_comment(model, comment)[-1]
            print(predict)
            if predict == 0:
                emotional = "Tích cực"  # Vietnamese: "Positive"
            else:
                emotional = "Tiêu cực"  # Vietnamese: "Negative"
            return render_template('UI_demo.html',
                                   error=error,
                                   comment=comment,
                                   emotional=emotional)  # assumed final kwarg; the snippet was truncated here
    # Assumed GET fallback: render the empty form (without it the view returns None).
    return render_template('UI_demo.html', error=error)
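
To try the app locally, a standard Flask entry point can be appended (a minimal sketch; the host, port, and debug settings are illustrative):

if __name__ == '__main__':
    # Development server only; use a WSGI server such as gunicorn in production.
    app.run(host='127.0.0.1', port=5000, debug=True)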
Code Example #5
from demo import load_model, detect
import glob
import time

MODEL = 'faster_rcnn_inception_resnet_v2_atrous_coco_11_06_2017'

sess = load_model(MODEL)
THRESHOLD = 0.7

TEST_IMAGE_PATHS = glob.glob("/root/data/demo/van_big/*.jpg")

for img_path in TEST_IMAGE_PATHS:
    tic = time.time()
    outputs = detect(sess, img_path, thresh=THRESHOLD)
    print("Detection Time {0:.2f} sec".format(time.time() - tic))
    for output in outputs:
        score = output['score']
        class_name = output['class']
        x = output['x']
        y = output['y']
        width = output['width']
        height = output['height']

        print("'{}' detected with confidence {} in [{}, {}, {}, {}]\n".format(class_name.upper(),\
                                                                           score, x, y, width,\
                                                                           height))
Code Example #6
File: script.py Project: samimideksa/All-In-One
    # ... tail of serve(), identical to the end of Code Example #1 ...


if __name__ == '__main__':
    model = load_model(
        "/home/samuel/projects/All-In-One/allinonemodels/allinone.json",
        "/home/samuel/projects/All-In-One/allinonemodels/freeze2.h5",
        ["age_estimation", "smile", "gender_probablity"])
    model.summary()
    detector = dlib.get_frontal_face_detector()
    # Image demo
    images_demo(model,
                "/home/samuel/projects/All-In-One/grpc/test/test_images/",
                detector)
    # Video demo (the webcam variant is left commented out)
    # webcam_demo(model, detector)
    video_demo(
        model,
        "/home/samuel/projects/All-In-One/grpc/test/test_videos/video_demo3.mp4",
        detector)
    # Finally start the gRPC server (which loads the model and runs the demos again).
    serve()
Code Example #7
# Requires: torch, numpy as np, tqdm, pjoin (os.path.join), plus the project's
# get_parser, SMPL_Layer, TrainingOptionParser, load_test_anim, TopologyLoader,
# load_model, run_single_mesh and the chamfer/vertex error metrics.
def main():
    parser = get_parser()
    args = parser.parse_args()
    device = torch.device(args.device)

    smpl = SMPL_Layer().to(device)

    train_parser = TrainingOptionParser()
    model_args = train_parser.load(pjoin(args.model_path, 'args.txt'))

    test_pose, test_loc = load_test_anim(args.pose_file, device)
    test_shape = torch.tensor(np.load('./eval_constant/test_shape.npy'),
                              device=device)

    topo_loader = TopologyLoader(device=device, debug=False)
    smpl_topo_begin, len_topo_smpl = topo_loader.load_smpl_group(
        './dataset/Meshes/SMPL/topology/', is_train=False)

    env_model, res_model = load_model(device,
                                      model_args,
                                      topo_loader,
                                      args.model_path,
                                      envelope_only=False)

    res_weight = []
    res_skeleton = []
    res_verts = []
    res_verts_lbs = []

    gt_skeleton = smpl.get_offset(test_shape)
    gt_verts = []

    print('Evaluating model...')
    for i in tqdm(range(test_shape.shape[0])):
        # Rest-pose placeholder: 72 = 24 SMPL joints x 3 axis-angle parameters.
        pose_ph = torch.zeros((1, 72), device=device)
        t_pose = smpl.forward(pose_ph, test_shape[[i]])[0][0]
        # t_pose = t_pose[topo_loader.v_masks[i]]
        # Ground-truth vertices for every test pose under this body shape.
        gt_vs = smpl.forward(test_pose,
                             test_shape[[i]].expand(test_pose.shape[0], -1))[0]
        gt_vs = gt_vs[:, topo_loader.v_masks[i]]
        gt_verts.append(gt_vs)

        weight, skeleton, vs, vs_lbs, _, _ = run_single_mesh(t_pose,
                                                             smpl_topo_begin +
                                                             i,
                                                             test_pose,
                                                             env_model,
                                                             res_model,
                                                             requires_lbs=True)
        res_weight.append(weight)
        res_skeleton.append(skeleton)
        res_verts.append(vs)
        res_verts_lbs.append(vs_lbs)

    err_weight = []
    err_avg_verts = []
    err_max_verts = []
    err_lbs_verts = []
    err_j2j = []
    err_j2b = []
    err_b2b = []

    print('Aggregating error...')
    for i in tqdm(range(test_shape.shape[0])):
        mask = topo_loader.v_masks[i]
        weight_gt = smpl.weights[mask]
        err_weight.append(chamfer_weight(res_weight[i], weight_gt))

        err_vert = vert_distance(res_verts[i], gt_verts[i])
        err_lbs = vert_distance(res_verts_lbs[i], gt_verts[i])
        err_avg_verts.append(err_vert[0])
        err_max_verts.append(err_vert[1])
        err_lbs_verts.append(err_lbs[0])

        err_j2j.append(
            chamfer_j2j(res_skeleton[i], gt_skeleton[i], parent_smpl))
        err_j2b.append(
            chamfer_j2b(res_skeleton[i], gt_skeleton[i], parent_smpl))
        err_b2b.append(
            chamfer_b2b(res_skeleton[i], gt_skeleton[i], parent_smpl))

    err_weight = np.array(err_weight).mean()
    err_avg_verts = np.array(err_avg_verts).mean()
    err_max_verts = np.array(err_max_verts).mean()
    err_lbs_verts = np.array(err_lbs_verts).mean()
    err_j2j = np.array(err_j2j).mean()
    err_j2b = np.array(err_j2b).mean()
    err_b2b = np.array(err_b2b).mean()
    print('Skinning Weight L1 = %.7f' % err_weight)
    print('Vertex Mean Loss L2 = %.7f' % err_avg_verts)
    print('Vertex Max Loss L2 = %.7f' % err_max_verts)
    print('Envelope Mean Loss L2 = %.7f' % err_lbs_verts)
    print('CD-J2J = %.7f' % err_j2j)
    print('CD-J2B = %.7f' % err_j2b)
    print('CD-B2B = %.7f' % err_b2b)
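
A typical invocation, assuming the flag spellings match the attribute names read from args (the exact flags are defined in get_parser(), which is not shown; the paths are placeholders):

python eval.py --model_path <trained_model_dir> --pose_file <test_pose_file> --device cuda:0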