class DarknetTests(unittest.TestCase):
    def setUp(self):
        rmtree(TMP_DIR, ignore_errors=True)
        self.wider = Wider(WIDER_DIR)

    def test_sanity(self):
        self.wider.export(TMP_DIR, 'darknet')
Example #2
def create_dataset(dataset_name, data_dir):
    if not dataset_name:
        dataset_name = 'mafa'
    if dataset_name == 'widerface':
        from morghulis.widerface import Wider
        ds = Wider(data_dir)
    elif dataset_name == 'fddb':
        from morghulis.fddb import FDDB
        ds = FDDB(data_dir)
    elif dataset_name == 'afw':
        from morghulis.afw import AFW
        ds = AFW(data_dir)
    elif dataset_name == 'pascal_faces':
        from morghulis.pascal_faces import PascalFaces
        ds = PascalFaces(data_dir)
    elif dataset_name == 'mafa':
        from morghulis.mafa import Mafa
        ds = Mafa(data_dir)
    elif dataset_name == 'caltech':
        from morghulis.caltech_faces import CaltechFaces
        ds = CaltechFaces(data_dir)
    elif dataset_name == 'ufdd':
        from morghulis.ufdd import UFDD
        ds = UFDD(data_dir)
    else:
        logging.error('Invalid dataset name %s', dataset_name)
        raise ValueError('Invalid dataset name %s' % dataset_name)
    return ds
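
A minimal usage sketch for create_dataset (illustrative only: '/data/mafa' is a placeholder path, and the export(output_dir, format) call mirrors the exporter snippets elsewhere in this listing):

# Illustrative usage of create_dataset: wrap the MAFA data and export it to darknet format.
ds = create_dataset('mafa', '/data/mafa')        # placeholder data directory
ds.export('/data/mafa/darknet', 'darknet')       # export(output_dir, format), as in the other examples
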
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset',
                        dest='dataset',
                        action='store',
                        required=True,
                        help='widerface, fddb, afw or pascal_faces')
    parser.add_argument('--output_dir',
                        dest='output_dir',
                        action='store',
                        required=True,
                        help='directory where the dataset will be downloaded to')
    args = parser.parse_args()
    dataset = args.dataset
    output_dir = args.output_dir

    if dataset == 'widerface':
        from morghulis.widerface import Wider
        ds = Wider(output_dir)
    elif dataset == 'fddb':
        from morghulis.fddb import FDDB
        ds = FDDB(output_dir)
    elif dataset == 'afw':
        from morghulis.afw import AFW
        ds = AFW(output_dir)
    elif dataset == 'pascal_faces':
        from morghulis.pascal_faces import PascalFaces
        ds = PascalFaces(output_dir)
    else:
        logging.error('Invalid dataset name %s', dataset)
        raise ValueError('Invalid dataset name %s' % dataset)

    ds.download()
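
A hedged sketch of the same download flow driven programmatically rather than through argparse (the '/tmp/fddb' path is illustrative):

# Illustrative: download FDDB into a placeholder directory, mirroring
# what `--dataset fddb --output_dir /tmp/fddb` does above.
from morghulis.fddb import FDDB
ds = FDDB('/tmp/fddb')
ds.download()
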
Example #4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', dest='dataset', action='store', required=True, help='widerface, fddb or afw')
    parser.add_argument('--format', dest='format', action='store', required=True, help='darknet, tensorflow, caffe or coco')
    parser.add_argument('--data_dir', dest='data_dir', action='store', required=True, help='root directory of the source dataset')
    parser.add_argument('--output_dir', dest='output_dir', action='store', required=True, help='directory where the converted annotations will be written')
    args = parser.parse_args()
    dataset = args.dataset
    _format = args.format
    data_dir = args.data_dir
    output_dir = args.output_dir

    if dataset == 'widerface':
        from morghulis.widerface import Wider
        ds = Wider(data_dir)
    elif dataset == 'fddb':
        from morghulis.fddb import FDDB
        ds = FDDB(data_dir)
    elif dataset == 'afw':
        from morghulis.afw import AFW
        ds = AFW(data_dir)
    else:
        logging.error('Invalid dataset name %s', dataset)
        raise ValueError('Invalid dataset name %s' % dataset)

    ds.export(output_dir, _format)

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--input',
                        dest='input',
                        action='store',
                        required=True,
                        help='input tf record containing the predictions')
    parser.add_argument('--data_dir',
                        dest='data_dir',
                        action='store',
                        required=True,
                        help='root directory of the WIDER FACE dataset')
    parser.add_argument('--output_dir',
                        dest='output_dir',
                        action='store',
                        required=True,
                        help='directory where the per-event detection files will be written')
    args = parser.parse_args()

    tfrecord = args.input
    data_dir = args.data_dir
    output_dir = args.output_dir
    ds = Wider(data_dir)
    events = ds.events()

    for filename, bboxes in read_detections_from(tfrecord):  # assumes (filename, bboxes) pairs are yielded
        event_id = filename.split('_')[0]
        event = events[event_id]
        item_name = os.path.splitext(os.path.basename(filename))[0]
        result_filename = '{}/{}.txt'.format(event, item_name)
        target_file = os.path.join(output_dir, result_filename)
        ensure_dir(target_file)
        with open(target_file, 'w') as dest:
            dest.write('{}\n'.format(item_name))
            total = len(bboxes)
            dest.write('{}\n'.format(total))
            for bbox in bboxes:
                dest.write('{} {} {} {} {}\n'.format(*bbox))
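
The loop above assumes read_detections_from yields (filename, bboxes) pairs, where each bbox carries five values (the format string writes five fields, presumably x, y, w, h and a confidence score). A hypothetical stand-in for local testing, not the real TFRecord parser, could look like this:

def read_detections_from(tfrecord):
    # Hypothetical stand-in: the real function parses the predictions TFRecord.
    # Yields (filename, bboxes) pairs; each bbox is written out as "x y w h score".
    fake_predictions = {
        '0_Parade_marchingband_1_465.jpg': [(10.0, 20.0, 30.0, 40.0, 0.98)],
    }
    for filename, bboxes in fake_predictions.items():
        yield filename, bboxes
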
class WiderTests(unittest.TestCase):
    def setUp(self):
        self.wider = Wider(WIDER_DIR)

    def test_train_set(self):
        train_set = [image for image in self.wider.train_set()]
        self.assertEqual(6, len(train_set))

    def test_val_set(self):
        val_set = [image for image in self.wider.val_set()]
        self.assertEqual(4, len(val_set))

    def test_faces(self):
        soldier_drilling = [
            image for image in self.wider.train_set()
            if 'Soldier_Drilling' in image.filename
        ]
        image = soldier_drilling[0]
        self.assertEqual(4, len(image.faces))

    def test_face_attributes(self):
        press_conference = [
            image for image in self.wider.train_set()
            if 'Press_Conference' in image.filename
        ]
        image = press_conference[0]
        self.assertEqual(1, len(image.faces))
        face = image.faces[0]
        self.assertEqual(400, face.x1)
        self.assertEqual(150, face.y1)
        self.assertEqual(208, face.w)
        self.assertEqual(290, face.h)
        self.assertEqual(0, face.blur)
        self.assertEqual(1, face.expression)
        self.assertEqual(2, face.illumination)
        self.assertEqual(0, face.invalid)
        self.assertEqual(4, face.occlusion)
        self.assertEqual(5, face.pose)

    def test_image(self):
        soldier_drilling = [
            image for image in self.wider.train_set()
            if 'Soldier_Drilling' in image.filename
        ]
        image = soldier_drilling[0]
        self.assertEqual(1024, image.width)
        self.assertEqual(682, image.height)
        self.assertEqual('JPEG', image.format)

    def test_events(self):
        events = self.wider.events()
        print(events)
        self.assertEqual(6, len(events))
        self.assertEqual('26--Soldier_Drilling', events['26'])
        self.assertEqual('12--Group', events['12'])

    def setUp(self):
        rmtree(TMP_DIR, ignore_errors=True)
        self.wider = Wider(WIDER_DIR)
        self.caffeExporter = CaffeExporter(self.wider)
Example #8
    def setUp(self):
        rmtree(TMP_DIR, ignore_errors=True)
        self.wider = Wider(WIDER_DIR)
        self.darknetExporter = DarknetExporter(self.wider)

    def setUp(self):
        self.wider = Wider(WIDER_DIR)
Example #10
    def setUp(self):
        rmtree(TMP_DIR, ignore_errors=True)
        self.wider = Wider(WIDER_DIR)
        self.tfExporter = TensorflowExporter(self.wider)

    def setUp(self):
        rmtree(TMP_DIR, ignore_errors=True)
        self.wider = Wider(WIDER_DIR)