Example #1
def test():
    os.system('rm -rf tmp/pos/*')
    os.system('rm -rf tmp/neg/*')
    os.system('rm -rf tmp/part/*')

    logger.info('Load WIDER')
    train_data, val_data = load_wider()
    print("TRAIN DATA: ", len(train_data))
    # pick a random training image and its ground-truth boxes
    img_path, bboxes = train_data[np.random.choice(len(train_data))]
    bboxes = np.asarray(bboxes)
    img = cv2.imread(img_path, cv2.IMREAD_COLOR)
    detector = JfdaDetector(cfg.PROPOSAL_NETS['r'])

    negatives, positives, part = proposal(img, bboxes, detector)
    logger.info('%d gt_bboxes', len(bboxes))
    logger.info('%d negatives, %d positives, %d part', len(negatives),
                len(positives), len(part))
    # dump the cropped samples so they can be inspected visually
    for i, (data, bbox_target) in enumerate(positives):
        cv2.imwrite('tmp/pos/%03d.png' % i, data)
    for i, data in enumerate(negatives):
        cv2.imwrite('tmp/neg/%03d.png' % i, data)
    for i, (data, bbox_target) in enumerate(part):
        cv2.imwrite('tmp/part/%03d.png' % i, data)
    cv2.imwrite('tmp/test.png', img)
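
The write loops above follow a simple dump-to-disk pattern: every cropped sample goes into a per-class folder under tmp/ with a zero-padded index. Below is a minimal, self-contained sketch of that pattern which replaces the os.system('rm -rf ...') calls with pure-Python directory handling; dump_samples and the synthetic crops are illustrative only and not part of the code above.

import os
import shutil

import cv2
import numpy as np

def dump_samples(samples, out_dir):
    # recreate out_dir from scratch, then write each sample as 000.png, 001.png, ...
    if os.path.isdir(out_dir):
        shutil.rmtree(out_dir)
    os.makedirs(out_dir)
    for i, img in enumerate(samples):
        cv2.imwrite(os.path.join(out_dir, '%03d.png' % i), img)

if __name__ == '__main__':
    # three random 24x24 BGR crops stand in for real proposal outputs
    fake_crops = [np.random.randint(0, 256, (24, 24, 3), dtype=np.uint8)
                  for _ in range(3)]
    dump_samples(fake_crops, 'tmp/pos')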
Example #2
def gen_wider():
    logger.info('loading WIDER')
    train_data, val_data = load_wider()
    #train_data, val_data = load_scutbrainwashcheat()
    #train_data, val_data = load_cheat()
    logger.info('total images, train: %d, val: %d', len(train_data),
                len(val_data))
    # each entry is (img_path, bboxes); sum the bbox counts to get total faces
    train_faces = functools.reduce(lambda acc, x: acc + len(x[1]), train_data,
                                   0)
    val_faces = functools.reduce(lambda acc, x: acc + len(x[1]), val_data, 0)
    logger.info('total faces, train: %d, val: %d', train_faces, val_faces)

    def gen(data, db_names):
        # drop any databases left over from a previous run
        for db_name in db_names:
            remove_if_exists(db_name)
        logger.info('fill queues')
        # one input queue per reader worker, plus a bounded output queue for the writer
        q_in = [multiprocessing.Queue() for _ in range(cfg.WORKER_N)]
        q_out = multiprocessing.Queue(1024)
        fill_queues(data, q_in)

        # one reader process per input queue, all feeding the shared q_out
        readers = [multiprocessing.Process(target=wider_reader_func,
                                           args=(q_in[i], q_out))
                   for i in range(cfg.WORKER_N)]
        for p in readers:
            p.start()
        writer = multiprocessing.Process(target=wider_writer_func,
                                         args=(q_out, db_names))
        writer.start()
        for p in readers:
            p.join()

        # give in-flight results time to land in q_out, then signal the writer to stop
        time.sleep(1)
        q_out.put(('finish', []))
        writer.join()

    logger.info('writing train data, %d images', len(train_data))
    db_names = [
        'data/%snet_positive_train' % cfg.NET_TYPE,
        'data/%snet_negative_train' % cfg.NET_TYPE,
        'data/%snet_part_train' % cfg.NET_TYPE
    ]
    gen(train_data, db_names)
    logger.info('writing val data, %d images', len(val_data))
    db_names = [
        'data/%snet_positive_val' % cfg.NET_TYPE,
        'data/%snet_negative_val' % cfg.NET_TYPE,
        'data/%snet_part_val' % cfg.NET_TYPE
    ]
    gen(val_data, db_names)
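
gen() above is a fan-out/fan-in pipeline: each reader process consumes its own input queue and pushes results onto a shared output queue, and a single writer drains that queue until it receives a ('finish', ...) sentinel. The following is a stripped-down, runnable sketch of the same pattern; reader, writer, and WORKER_N here are placeholders, not wider_reader_func / wider_writer_func from the code above.

import multiprocessing

WORKER_N = 2

def reader(q_in, q_out):
    # consume items from this worker's private queue until the None sentinel
    while True:
        item = q_in.get()
        if item is None:
            break
        q_out.put(('data', item * item))  # stand-in for real processing

def writer(q_out):
    # drain the shared output queue until the 'finish' sentinel arrives
    while True:
        tag, payload = q_out.get()
        if tag == 'finish':
            break
        print('writer got', payload)

if __name__ == '__main__':
    q_in = [multiprocessing.Queue() for _ in range(WORKER_N)]
    q_out = multiprocessing.Queue(1024)

    # round-robin the work items into the per-worker queues, then add sentinels
    for item in range(10):
        q_in[item % WORKER_N].put(item)
    for q in q_in:
        q.put(None)

    readers = [multiprocessing.Process(target=reader, args=(q_in[i], q_out))
               for i in range(WORKER_N)]
    for p in readers:
        p.start()
    w = multiprocessing.Process(target=writer, args=(q_out,))
    w.start()

    for p in readers:
        p.join()
    q_out.put(('finish', []))  # all readers are done; tell the writer to stop
    w.join()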