Example #1
import os
import sys

root_path = os.path.abspath('..')
if root_path not in sys.path:
    sys.path.append(root_path)


import config
from faces_loader import load_faces, build_dataset
from ssd_utils import output_encoder
from ssd_utils.metrics import MeanAveragePrecision
from utils import import_by_name, MeanAveragePrecisionCallback

test_image_paths, test_bnd_boxes = load_faces(split='test')

test_data = build_dataset(test_image_paths, test_bnd_boxes,
                          image_size=config.IMAGE_SIZE,
                          batch_size=config.BATCH_SIZE)

meanAP_metric = MeanAveragePrecision()
results = {'architecture': [],
           'train_samples': [],
           'train_type': []}

for run in range(1, config.NUM_RUNS + 1):
    results['run_{}'.format(run)] = []

for architecture in config.ARCHITECTURES:
    model_class = import_by_name('ssd_utils.networks.' + architecture)
    model = model_class(num_classes=len(config.CLASSES))
    anchors = model.get_anchors(image_shape=config.IMAGE_SIZE + (3,))
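    # NOTE: the example is truncated here; the rest of the loop presumably
    # loads each run's saved weights and evaluates mean average precision
    # on test_data, filling in the results dict declared above.

The examples on this page resolve network classes from dotted paths with
utils.import_by_name. That helper's source is not included here; a minimal
sketch of such a helper, assuming it simply resolves a module attribute by
its dotted name (not the project's actual implementation), would be:

import importlib

def import_by_name(dotted_name):
    """Resolve a dotted path like 'ssd_utils.networks.SomeNet' to an object."""
    module_name, attr_name = dotted_name.rsplit('.', 1)
    module = importlib.import_module(module_name)
    return getattr(module, attr_name)

With a helper like this, import_by_name('ssd_utils.networks.' + architecture)
returns the architecture's class, as used in the loops above and below.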

Example #2
import os
import sys

root_path = os.path.abspath('..')
if root_path not in sys.path:
    sys.path.append(root_path)

import config
from faces_loader import load_faces, build_dataset
from ssd_utils.ssd_loss import SSDLoss
from utils import import_by_name, train_test_split_tensors, MeanAveragePrecisionCallback

# Load train and validation data
train_image_paths, train_bnd_boxes = load_faces(split='train')
valid_image_paths, valid_bnd_boxes = load_faces(split='valid')

valid_data = build_dataset(valid_image_paths, valid_bnd_boxes,
                           image_size=config.IMAGE_SIZE,
                           batch_size=config.BATCH_SIZE)

for run in range(1, config.NUM_RUNS + 1):
    weights_dir = 'weights_{}'.format(run)
    history_dir = 'history_{}'.format(run)

    os.makedirs(weights_dir, exist_ok=True)
    os.makedirs(history_dir, exist_ok=True)

    for architecture in config.ARCHITECTURES:
        model_class = import_by_name('ssd_utils.networks.' + architecture)

        model_name = architecture.lower() + '_pretrained'
        model_file = model_name + '.h5'
        model_path = os.path.join(weights_dir, model_file)
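        # NOTE: truncated in the source. A plausible continuation, assuming a
        # standard Keras training setup (the SSDLoss arguments and callback
        # signature below are assumptions, not shown in this snippet):
        #
        #     model = model_class(num_classes=len(config.CLASSES))
        #     model.compile(optimizer='adam', loss=SSDLoss(...))
        #     model.fit(train_data, validation_data=valid_data,
        #               callbacks=[MeanAveragePrecisionCallback(...)])
        #     model.save_weights(model_path)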
Example #3
import os
import sys

root_path = os.path.abspath('..')
if root_path not in sys.path:
    sys.path.append(root_path)

import config_pretraining_ablation as config
from faces_loader import load_faces, build_dataset
from ssd_utils.ssd_loss import SSDLoss
from utils import import_by_name, train_test_split_tensors, MeanAveragePrecisionCallback

# Load train and validation data
train_image_paths, train_bnd_boxes = load_faces(split='train')
valid_image_paths, valid_bnd_boxes = load_faces(split='valid')
fake_image_paths, fake_bnd_boxes = load_faces(
    root=os.path.join('..', 'data', 'faces_fake'),
    split='fake')

valid_data = build_dataset(valid_image_paths,
                           valid_bnd_boxes,
                           image_size=config.IMAGE_SIZE,
                           batch_size=config.BATCH_SIZE)

for run in range(1, config.NUM_RUNS + 1):
    weights_dir = 'weights_pretraining_ablation_{}'.format(run)
    history_dir = 'history_pretraining_ablation_{}'.format(run)

    os.makedirs(weights_dir, exist_ok=True)
    os.makedirs(history_dir, exist_ok=True)

    for architecture in config.ARCHITECTURES:
        model_class = import_by_name('ssd_utils.networks.' + architecture)

        for prop_fake_samples in config.PROP_FAKE_SAMPLES:
            num_fake_samples = int(prop_fake_samples * config.NUM_REAL_SAMPLES)
            print('\n\nINFO: Considering {} real and {} fake samples'.format(
                config.NUM_REAL_SAMPLES, num_fake_samples))
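            # NOTE: truncated in the source. The imports suggest the loop
            # goes on to subsample num_fake_samples fake examples (e.g. via
            # train_test_split_tensors), mix them with the real training
            # paths and boxes, build the training dataset, and train as in
            # the previous example, but that code is not shown here.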