Code Example #1
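A two-stage inference pipeline: infer_phase_1 returns detected boxes, resized crops, filenames, and detector scores, and infer_phase_2 classifies each crop as a left or right hand. Depending on the LOCAL flag, detections are either streamed to the judger_hand grader or drawn onto the images and saved locally. The helpers infer_phase_1, infer_phase_2, draw_bounding_box_on_image, CLASS_TO_COLOR, and LOCAL are defined elsewhere in the source file.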
import os

import numpy as np
import skimage.io

import judger_hand


def pipeline(vis=False):
    # Phase 1 detects hands; phase 2 classifies each resized crop as left/right.
    resized_image_list, box_list, filename_list, image_list, score_list = infer_phase_1(vis)
    classes_list, new_score_list = infer_phase_2(np.array(resized_image_list))

    print(len(resized_image_list))  # number of detections from phase 1

    if not LOCAL:
        ans_writer = judger_hand.get_output_file_object()

        for i, resized_image in enumerate(resized_image_list):
            box = box_list[i]
            class_ = classes_list[i]
            score_ = new_score_list[i][class_]
            # One detection per line: filename x0 y0 x1 y1 class score.
            # Note: the phase-1 detector score is written, not the phase-2
            # classifier confidence computed above.
            str_ = '%s %d %d %d %d %d %f\n' % (
                filename_list[i], int(box[0]), int(box[1]), int(box[2]), int(box[3]), class_,
                score_list[i])
            ans_writer.write(str_.encode())

        score, err = judger_hand.judge()
        if err is not None:  # in case we failed to judge your submission
            print(err)
        else:
            print("score", score)
    else:
        if not os.path.isdir("./outputs"):
            os.mkdir("./outputs")

        for i, image in enumerate(image_list):
            filename = filename_list[i].replace(os.path.sep, '/').split("/")[-1]
            box = box_list[i]
            score_ = new_score_list[i][classes_list[i]]

            class_ = "L" if classes_list[i] == 0 else "R"

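            # draw_bounding_box_on_image expects (ymin, xmin, ymax, xmax).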
            draw_bounding_box_on_image(image, int(box[1]), int(box[0]), int(box[3]), int(box[2]),
                                       color=CLASS_TO_COLOR[class_], use_normalized_coordinates=False,
                                       display_str_list=[class_ + ": {0:.3f}, {1:.3f}".format(score_list[i], score_)])
            skimage.io.imsave("./outputs/{}.jpg".format(filename), image)
Code Example #2
File: transfer.py  Project: vickyliin/hand-detection
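An entry point that wires TensorFlow/Keras setup, model loading, and transfer training together, then submits predictions through judger_hand when no local output path is given. parse_args, inference_train, and the utils/model helpers come from the surrounding project.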
import sys


def main(args=sys.argv[1:]):
    args, tf_args, generator_args, nms_args, inference_args, transfer_args = parse_args(
        args)
    from utils import set_tf_environ
    set_tf_environ(**vars(tf_args))

    import tensorflow as tf
    import keras.backend as K
    from utils import get_session, get_name, record_hyperparameters
    from model import build_model, model_path
    K.set_session(get_session())

    import judger_hand
    from model import load_model

    # Fall back to the grader's file list and output object when no CLI
    # inputs/output were given.
    imgs = inference_args.inputs or judger_hand.get_file_names()
    output = inference_args.output or judger_hand.get_output_file_object()
    model = load_model(inference_args.weights, vars(nms_args), compile=True)
    sep = ',' if inference_args.output else ' '  # local files use commas; the grader expects spaces

    name = get_name(transfer_args.__dict__.pop('name'), 'transfer')
    log_dir, model_dir = model_path(name)
    print(name)
    writer = tf.summary.FileWriter(log_dir)
    record_hyperparameters(args, writer)

    with open('%s/config.yml' % model_dir, 'w') as f:
        f.write(model.to_yaml())

    try:
        buff = inference_train(model,
                               imgs,
                               output=output,
                               sep=sep,
                               **vars(generator_args),
                               **vars(transfer_args),
                               name=name,
                               writer=writer)
    except KeyboardInterrupt:
        pass  # allow manual interruption and fall through to judging

    if not inference_args.output:
        score, err = judger_hand.judge()
        if err is not None:  # in case we failed to judge your submission
            raise Exception(err)
        print('score', score)
        return score

    return model, name
Code Example #3
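A leaner variant of the previous entry point: build an input generator, run inference, and either judge the submission or return the local output file. parse_args, make_generator, and inference are project-local helpers.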
import os
import sys


def main(args=sys.argv[1:]):
    tf_args, generator_args, nms_args, inference_args = parse_args(args)[1:]
    from utils import set_tf_environ
    set_tf_environ(**vars(tf_args))

    import judger_hand
    from model import load_model
    imgs = inference_args.inputs or judger_hand.get_file_names()
    f = inference_args.output or judger_hand.get_output_file_object()
    input_generator = make_generator(imgs, **vars(generator_args))
    model = load_model(inference_args.weights, vars(nms_args))
    sep = ',' if inference_args.output else ' '
    base_dir = os.path.dirname(f.name) if inference_args.output else None
    inference(model, input_generator, f, sep, base_dir)

    if not inference_args.output:
        score, err = judger_hand.judge()
        if err is not None:  # in case we failed to judge your submission
            raise Exception(err)
        print('score', score)
        return score

    return f
Code Example #4
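A model-free baseline that finds hands by skin color. When color segmentation finds nothing, it falls back to the previous image's boxes so every file still gets a prediction. find_hand_by_color is defined elsewhere in the file; the trailing docstring shows an optional webcam demo.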
import skimage.io

import judger_hand

test_files = judger_hand.get_file_names()
output_f = judger_hand.get_output_file_object()
bbox_ = {'bbox': {}}  # previous image's boxes, kept as a fallback
for fname in test_files:
    img = skimage.io.imread(fname)
    bbox, img = find_hand_by_color(img)
    if not bbox['bbox']:
        # Color segmentation found nothing: reuse the previous boxes.
        bbox = bbox_
    for hand, box in bbox['bbox'].items():
        hand = 0 if hand == 'L' else 1  # class ids: 0 = left, 1 = right
        out = '%s %d %d %d %d %d 1.0 \n' % (fname, box[0], box[1], box[2],
                                            box[3], hand)
        print(out)
        output_f.write(out.encode())
    bbox_ = bbox
judger_hand.judge()
""" For demo use
# Camera
camera = cv2.VideoCapture(0)

while(1):
    # Capture frame from camera
    ret, frame = camera.read()
    frame = cv2.bilateralFilter(frame,5,50,100)
    bbox, frame = find_hand_by_color(frame, color_range=[[0, 140, 80], [255,180,128]], height=300)
    
    cv2.imshow('Hand Detection',frame)
    interrupt=cv2.waitKey(10)
"""
Code Example #5
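A fragment from inside a TensorFlow object-detection session loop (hence the indentation): run the detection graph, convert normalized boxes to pixel coordinates, and write one line per detection. The quoted block shows the optional matplotlib visualization.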
            # Run one forward pass of the detection graph.
            (boxes, scores, classes, num) = sess.run(
                [detection_boxes, detection_scores, detection_classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})
            
            """
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_np,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=8)
            plt.figure(figsize=IMAGE_SIZE)
            plt.imshow(image_np)
            plt.show()
            """

            # Boxes come back normalized as (ymin, xmin, ymax, xmax);
            # convert them to pixel coordinates.
            for box, score, cls in zip(boxes[0], scores[0], classes[0]):
                x0 = int(box[1] * image.width)
                x1 = int(box[3] * image.width)
                y0 = int(box[0] * image.height)
                y1 = int(box[2] * image.height)
                # Class ids from the detection graph are 1-based; the grader wants 0-based.
                result = '{} {} {} {} {} {} {}\n'.format(img, x0, y0, x1, y1, int(cls - 1), score)
                result = result.encode('utf-8')
                f.write(result)
        score, err = judger_hand.judge()
        if err is not None:  # in case we failed to judge your submission
            print(err)

Code Example #6
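A YOLO-based entry point: build the model from a JSON config, load trained weights, classify which hand is present per image with predict_hands.predictHands_prod, then predict and write boxes. The .mp4 branch annotates videos locally instead of writing to the grader. YOLO, draw_boxes, and predict_hands come from the surrounding project.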
import json

import cv2
import numpy as np
from tqdm import tqdm

import judger_hand


def _main_(args):

    config_path = args.conf
    weights_path = args.weights
    image_path = args.input

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)

    ###############################
    #   Make the model
    ###############################

    yolo = YOLO(architecture=config['model']['architecture'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])

    ###############################
    #   Load trained weights
    ###############################

    print(weights_path)
    yolo.load_weights(weights_path)

    ###############################
    #   Predict bounding boxes
    ###############################

    #pairs = read_data.getRealTestPairs()
    imgs = judger_hand.get_file_names()
    #imgs = ['data/DeepQ-Vivepaper/data/air/img/img_00000.png', 'data/DeepQ-Vivepaper/data/air/img/img_00001.png']
    pairs = [[img, ''] for img in imgs]
    hand_labels = predict_hands.predictHands_prod(
        pairs)  # 0: left, 1: right, 2: both
    f = judger_hand.get_output_file_object()
    #f = open('test_output.txt', 'wb')
    for index, pair in enumerate(pairs):
        hand_label = hand_labels[index]
        image_path = pair[0]
        if image_path[-4:] == '.mp4':
            video_out = image_path[:-4] + '_detected' + image_path[-4:]

            video_reader = cv2.VideoCapture(image_path)

            nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
            frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
            frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))

            video_writer = cv2.VideoWriter(video_out,
                                           cv2.VideoWriter_fourcc(*'MPEG'),
                                           50.0, (frame_w, frame_h))

            for i in tqdm(range(nb_frames)):
                _, image = video_reader.read()

                boxes = yolo.predict(image)
                image = draw_boxes(image, boxes, config['model']['labels'])

                video_writer.write(np.uint8(image))

            video_reader.release()
            video_writer.release()
        else:
            image = cv2.imread(image_path)
            boxes = yolo.predict(image, hand_label)
            image = draw_boxes(image, boxes, config['model']['labels'])
            for box in boxes:
                # Convert the normalized center/size box to pixel corners.
                x0 = int((box.x - box.w / 2) * image.shape[1])
                x1 = int((box.x + box.w / 2) * image.shape[1])
                y0 = int((box.y - box.h / 2) * image.shape[0])
                y1 = int((box.y + box.h / 2) * image.shape[0])
                # Clip to the image bounds before writing.
                x0 = np.clip(x0, 0, image.shape[1])
                x1 = np.clip(x1, 0, image.shape[1])
                y0 = np.clip(y0, 0, image.shape[0])
                y1 = np.clip(y1, 0, image.shape[0])
                f.write((image_path + ' ' + str(x0) + ' ' + str(y0) + ' ' +
                         str(x1) + ' ' + str(y1) + ' ' + str(box.get_label()) +
                         ' ' + str(box.get_score()) + '\n').encode('ascii'))
    score, err = judger_hand.judge()
    print('score:', score)
    print('err:', err)
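All six examples share the same submission protocol: obtain input paths from judger_hand.get_file_names(), write one detection per line as "filename x0 y0 x1 y1 class score" (class 0 = left, 1 = right) to the object returned by judger_hand.get_output_file_object(), and finish with judger_hand.judge().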