Code Example #1
File: transfer.py Project: vickyliin/hand-detection
import sys

def main(args=sys.argv[1:]):
    args, tf_args, generator_args, nms_args, inference_args, transfer_args = parse_args(args)
    # Set TF environment variables before tensorflow/keras are imported below.
    from utils import set_tf_environ
    set_tf_environ(**vars(tf_args))

    import tensorflow as tf
    import keras.backend as K
    from utils import get_session, get_name, record_hyperparameters
    from model import build_model, model_path
    K.set_session(get_session())

    import judger_hand
    from model import load_model

    imgs = inference_args.inputs or judger_hand.get_file_names()
    output = inference_args.output or judger_hand.get_output_file_object()
    model = load_model(inference_args.weights, vars(nms_args), compile=True)
    sep = ',' if inference_args.output else ' '

    name = get_name(transfer_args.__dict__.pop('name'), 'transfer')
    log_dir, model_dir = model_path(name)
    print(name)
    writer = tf.summary.FileWriter(log_dir)
    record_hyperparameters(args, writer)

    with open('%s/config.yml' % model_dir, 'w') as f:
        f.write(model.to_yaml())

    try:
        buff = inference_train(model,
                               imgs,
                               output=output,
                               sep=sep,
                               **vars(generator_args),
                               **vars(transfer_args),
                               name=name,
                               writer=writer)
    except KeyboardInterrupt:
        pass

    if not inference_args.output:
        score, err = judger_hand.judge()
        print('score', score)
        if err is not None:  # in case we failed to judge your submission
            raise Exception(err)
        return score

    return model, name
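
Note: every example on this page follows the same judger_hand submission protocol. A minimal sketch of that flow (the detect() stub is hypothetical; only the judger_hand calls and the output line format come from the examples):

import judger_hand

def detect(img_path):
    # Hypothetical stand-in for a real model's inference; returns
    # (x0, y0, x1, y1, label, score) tuples for one image.
    return [(0, 0, 10, 10, 0, 1.0)]

f = judger_hand.get_output_file_object()
for img_path in judger_hand.get_file_names():
    for x0, y0, x1, y1, label, score in detect(img_path):
        # One detection per line: path, box corners, class label, confidence.
        f.write(('%s %d %d %d %d %d %f\n' %
                 (img_path, x0, y0, x1, y1, label, score)).encode())

score, err = judger_hand.judge()  # (score, error message or None)
print('score', score)
if err is not None:
    raise Exception(err)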
Code Example #2
import os
import sys

def main(args=sys.argv[1:]):
    tf_args, generator_args, nms_args, inference_args = parse_args(args)[1:]
    # Set TF environment variables before any tensorflow import.
    from utils import set_tf_environ
    set_tf_environ(**vars(tf_args))

    import judger_hand
    from model import load_model
    imgs = inference_args.inputs or judger_hand.get_file_names()
    f = inference_args.output or judger_hand.get_output_file_object()
    input_generator = make_generator(imgs, **vars(generator_args))
    model = load_model(inference_args.weights, vars(nms_args))
    sep = ',' if inference_args.output else ' '
    base_dir = os.path.dirname(f.name) if inference_args.output else None
    inference(model, input_generator, f, sep, base_dir)

    if not inference_args.output:
        score, err = judger_hand.judge()
        print('score', score)
        if err is not None:  # in case we failed to judge your submission
            raise Exception(err)
        return score

    else:
        return f
Code Example #3
    # ... (excerpt from find_hand_by_color(img); the preceding code builds
    # candidate_box as a list of (score, box) tuples) ...
    candidate_box = sorted(candidate_box, key=lambda x: -x[0])[:2]  # keep the two highest-scoring boxes
    bbox = {'bbox': {}}
    for _, box in candidate_box:
        coor = [box[0], box[1], box[0] + box[2], box[1] + box[3]]
        if box[0] + box[2] > 0.6 * img.shape[1]:  # right edge past 60% of image width
            bbox['bbox']['R'] = coor
        else:
            bbox['bbox']['L'] = coor
    print(img.shape)
    print(candidate_box)
    print(bbox)
    return bbox, img


test_files = judger_hand.get_file_names()
output_f = judger_hand.get_output_file_object()
bbox_ = {'bbox': {}}
for fname in test_files:
    img = skimage.io.imread(fname)
    bbox, img = find_hand_by_color(img)
    if not bbox['bbox']:
        bbox = bbox_
    for hand, box in bbox['bbox'].items():
        hand = 0 if hand == 'L' else 1
        out = '%s %d %d %d %d %d 1.0 \n' % (fname, box[0], box[1], box[2],
                                            box[3], hand)
        print(out)
        output_f.write(out.encode())
    bbox_ = bbox
judger_hand.judge()
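
Example #3 assigns a detected box to the right hand whenever its right edge passes 60% of the image width. The heuristic in isolation (a sketch; the function name is ours):

def hand_side(box, img_width):
    # box is (x, y, w, h); 'R' if the right edge is past 60% of the width.
    x, y, w, h = box
    return 'R' if x + w > 0.6 * img_width else 'L'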
Code Example #4

# ## Loading label map

# In[ ]:


# (requires: from object_detection.utils import label_map_util)
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
    label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)


# In[ ]:


imgs = judger_hand.get_file_names()


# In[ ]:


IMAGE_SIZE = (12, 8)


# In[ ]:


def read_image_by_filename(img_path):
    # requires: import numpy as np; from PIL import Image
    img = Image.open(img_path)
    return np.array(img)
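
A likely use of the pieces above (an assumption based on the TensorFlow object detection tutorial this notebook appears to follow): IMAGE_SIZE is a matplotlib figure size in inches, and read_image_by_filename feeds plt.imshow:

import matplotlib.pyplot as plt

img = read_image_by_filename(imgs[0])
plt.figure(figsize=IMAGE_SIZE)
plt.imshow(img)
plt.show()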
Code Example #5
    # ... (tail of a Keras model-builder excerpt: a dense regression head on a CNN) ...
    x = Dropout(drop_out_ratio)(x)
    x = Dense(256, activation='relu')(x)
    x = Dense(64, activation='relu')(x)
    output = Dense(8, activation='linear')(x)  # 8 linear outputs, presumably two boxes x 4 coordinates
    model = Model(cnn.input, output)

    return model


exist_model = build_exist_model()
exist_model.load_weights(exist_model_path)

bbx_model = build_bbx_model()
bbx_model.load_weights(bbx_model_path)

img_paths = judger_hand.get_file_names()
f = judger_hand.get_output_file_object()

for i, img_path in enumerate(img_paths):
    img_dump_path = os.path.join(test_output_img_path, str(i) + '_bbox.png')
    lab_dump_path = os.path.join(test_output_label_path,
                                 str(i) + '_label.json')

    answers = inference(exist_model, bbx_model, img_path, img_dump_path,
                        lab_dump_path)

    for box in answers:
        # The excerpt is cut off mid-statement; completed to match the
        # format string: path, box corners, class label, confidence.
        to_write = '%s %d %d %d %d %d %f\n' % (img_path, box[0], box[1],
                                               box[2], box[3], box[4], box[5])
        f.write(to_write.encode())
Code Example #6
    # ... (excerpt: LOCAL is set from the parsed command-line arguments above) ...
    else:
        LOCAL = False

    if LOCAL:
        print("LOCAL")
        TOTAL_TEST_IMAGE_PATHS = []
        for path in args.infer_data:
            TOTAL_TEST_IMAGE_PATHS += [
                os.path.join(path, filename)
                for idx, filename in enumerate(os.listdir(path))
                if idx % args.step == 0  # keep every args.step-th file
            ]
        print(len(TOTAL_TEST_IMAGE_PATHS))

        # shuffle(TOTAL_TEST_IMAGE_PATHS)

        for chunk in [TOTAL_TEST_IMAGE_PATHS[x:x + chunk_size] for x in
                      range(0, len(TOTAL_TEST_IMAGE_PATHS), chunk_size)]:
            TEST_IMAGE_PATHS = chunk
            pipeline(True)
            tf.reset_default_graph()
            K.clear_session()

    else:
        print("Not LOCAL")

        try:
            TEST_IMAGE_PATHS = judger_hand.get_file_names()
            print("Total image:", len(TEST_IMAGE_PATHS))
            pipeline(False)
        except ImportError:
            print("You need to install judger_hand")
Code Example #7
def _main_(args):

    config_path = args.conf
    weights_path = args.weights
    image_path = args.input

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)

    ###############################
    #   Make the model
    ###############################

    yolo = YOLO(architecture=config['model']['architecture'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])

    ###############################
    #   Load trained weights
    ###############################

    print(weights_path)
    yolo.load_weights(weights_path)

    ###############################
    #   Predict bounding boxes
    ###############################

    #pairs = read_data.getRealTestPairs()
    imgs = judger_hand.get_file_names()
    #imgs = ['data/DeepQ-Vivepaper/data/air/img/img_00000.png', 'data/DeepQ-Vivepaper/data/air/img/img_00001.png']
    pairs = [[img, ''] for img in imgs]
    hand_labels = predict_hands.predictHands_prod(
        pairs)  # 0: left, 1: right, 2: both
    f = judger_hand.get_output_file_object()
    #f = open('test_output.txt', 'wb')
    for index, pair in enumerate(pairs):
        hand_label = hand_labels[index]
        image_path = pair[0]
        if image_path.endswith('.mp4'):
            video_out = image_path[:-4] + '_detected' + image_path[-4:]

            video_reader = cv2.VideoCapture(image_path)

            nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
            frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
            frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))

            video_writer = cv2.VideoWriter(video_out,
                                           cv2.VideoWriter_fourcc(*'MPEG'),
                                           50.0, (frame_w, frame_h))

            for i in tqdm(range(nb_frames)):
                _, image = video_reader.read()

                boxes = yolo.predict(image)
                image = draw_boxes(image, boxes, config['model']['labels'])

                video_writer.write(np.uint8(image))

            video_reader.release()
            video_writer.release()
        else:
            image = cv2.imread(image_path)
            boxes = yolo.predict(image, hand_label)
            image = draw_boxes(image, boxes, config['model']['labels'])
            for box in boxes:
                # Convert the normalized center-format box to pixel corners.
                x0 = int((box.x - box.w / 2) * image.shape[1])
                x1 = int((box.x + box.w / 2) * image.shape[1])
                y0 = int((box.y - box.h / 2) * image.shape[0])
                y1 = int((box.y + box.h / 2) * image.shape[0])
                x0 = np.clip(x0, 0, image.shape[1])
                x1 = np.clip(x1, 0, image.shape[1])
                y0 = np.clip(y0, 0, image.shape[0])
                y1 = np.clip(y1, 0, image.shape[0])
                # One detection per line: path, corners, label, score.
                f.write(('%s %d %d %d %d %d %s\n' % (
                    image_path, x0, y0, x1, y1,
                    box.get_label(), box.get_score())).encode('ascii'))
    score, err = judger_hand.judge()
    print('score:', score)
    print('err:', err)
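
The corner arithmetic in example #7 converts YOLO-style normalized center boxes to clipped pixel corners. As a standalone helper (a sketch; the box attributes mirror the example):

import numpy as np

def to_pixel_corners(box, image_shape):
    # box.x, box.y, box.w, box.h are normalized center/size in [0, 1];
    # returns integer (x0, y0, x1, y1) clipped to the image bounds.
    h, w = image_shape[:2]
    x0 = int(np.clip((box.x - box.w / 2) * w, 0, w))
    x1 = int(np.clip((box.x + box.w / 2) * w, 0, w))
    y0 = int(np.clip((box.y - box.h / 2) * h, 0, h))
    y1 = int(np.clip((box.y + box.h / 2) * h, 0, h))
    return x0, y0, x1, y1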