def main(dataset, net_config, img_h, img_w, _run):
    # Add all of the config into the helper class
    for key in net_config:
        setattr(a, key, net_config[key])

    setattr(a, 'EXP_OUT', EXP_OUT)
    setattr(a, 'RUN_id', _run._id)

    output_dir = create_directories(_run._id, ex)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.99)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        # load the dataset class
        data = get_dataset(dataset['name'])
        data = data(img_h=img_h, img_w=img_w, **dataset)
        cGAN_model = get_model('cascGAN')
        if a.checkpoint is not None:
            ckp = os.path.join(a.EXP_OUT, str(a.checkpoint))
        else:
            ckp = None
        trainFlag = (a.mode == 'train')
        model = cGAN_model(sess,
                           dataset_name=dataset['name'],
                           image_size=img_h,
                           checkpoint_dir=output_dir,
                           data=data,
                           data_desc=data.get_data_description(),
                           is_training=trainFlag,
                           checkpoint=ckp,
                           vgg_checkpoint=net_config['vgg_checkpoint'])

        if a.mode == 'train':
            model.train(a)
        else:
            model.transformDatasets(a, data)
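
The helper object `a` that the config is copied into is not defined in these snippets; a minimal sketch of such a shared argument holder, assuming it is just a plain namespace the TensorFlow models read their hyperparameters from:

import argparse

# Hypothetical stand-in for the shared config holder `a`; the real object is
# presumably defined at module level in the experiment script.
a = argparse.Namespace()
setattr(a, 'mode', 'train')        # net_config entries land here via setattr
setattr(a, 'checkpoint', None)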
Example No. 2
def evaluate_temperature_scaling(experiments, temperatures):
    for exp_id in experiments:
        exp = ExperimentData(exp_id)
        dataset = exp.get_record()['info']['dataset']
        data = get_dataset(dataset['name'])
        data_description = list(data.get_data_description())
        if exp.get_record()['config']['method'] == 'new_class':
            data_description[2] += 1
        model = get_model(exp.get_record()['config']['modelname'])

        # get the label_flip specs
        label_flip = None
        if 'label_flip' in dataset['augmentation']:
            label_flip = dataset['augmentation']['label_flip']

        def evaluation(parameters):
            with model(data_description=data_description, **parameters) as net:
                import_weights_into_network(net, exp_id)
                return measure_metrics(
                    net,
                    data(**dataset).get_testset(),
                    exp.get_record()['config']['uncertainty_metrics'],
                    label_flip=label_flip)

        result = grid_search(evaluation, {'temperature_scaling': temperatures},
                             exp.get_record()['config']['net_config'])
        info = exp.get_record()['info']
        info['temperature_grid_search'] = force_bson_encodeable(result)
        exp.update_record({'info': info})
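
The grid_search helper used above is not shown in these snippets; a minimal sketch of the behaviour assumed here, where every searched parameter combination is merged into the base net_config before invoking the evaluation function (names and return structure are assumptions):

from itertools import product

def grid_search(evaluation, search_parameters, base_config):
    # Evaluate every combination of the searched parameters on top of the
    # base configuration and collect the results.
    keys = list(search_parameters)
    results = []
    for values in product(*(search_parameters[k] for k in keys)):
        params = dict(base_config)
        params.update(dict(zip(keys, values)))
        results.append({'parameters': dict(zip(keys, values)),
                        'measurements': evaluation(params)})
    return results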
Example No. 3
def main(modelname, net_config, evaluation_data, starting_weights, _run):
    """Load weigths from training experiments and evaluate network against specified
    data."""
    model = get_model(modelname)
    with model(**net_config) as net:
        import_weights_into_network(net, starting_weights)
        measurements, confusion_matrix = evaluate(net, evaluation_data)
        _run.info['measurements'] = measurements
        _run.info['confusion_matrix'] = confusion_matrix
Example No. 4
def all_synthia(modelname, net_config, evaluation_data, starting_weights,
                _run):
    """Load weigths from training experiments and evaluate network against specified
    data."""
    model = get_model(modelname)
    with model(**net_config) as net:
        import_weights_into_network(net, starting_weights)
        measurements = evaluate_on_all_synthia_seqs(net, evaluation_data)
        _run.info['measurements'] = measurements
Example No. 5
def train_ambiguous(modelname, net_config, dataset, starting_weights, method,
                    num_iterations, uncertainty_metrics, _run):
    # Set up the directories for diagnostics
    output_dir = create_directories(_run._id, ex)

    data = get_dataset(dataset['name'])
    data_description = list(data.get_data_description())
    num_classes = data_description[2]

    args = False
    if isinstance(method, list):
        args = method[1:]
        method = method[0]
    # augment the class labels
    if method == 'flip_classes':
        # randomly map two classes onto each other to make them ambiguous
        classes = np.random.choice(list(range(num_classes)),
                                   size=2,
                                   replace=False)
        dataset.setdefault('augmentation',
                           {})['label_flip'] = (classes[0], classes[1],
                                                np.random.rand())
    elif method == 'new_class':
        # randomly label a given class as a new, non-existent class
        data_description[2] = num_classes + 1
        if args:
            old_class = args[0]
            print(args)
        else:
            old_class = np.random.choice(list(range(num_classes)))
        dataset.setdefault('augmentation',
                           {})['label_flip'] = (old_class, num_classes,
                                                np.random.rand())
    elif method == 'merge':
        # randomly merge two classes together
        classes = np.random.choice(list(range(num_classes)),
                                   size=2,
                                   replace=False)
        dataset.setdefault('augmentation',
                           {})['label_merge'] = (classes[0], classes[1])
    _run.info.setdefault('dataset', {}).update(dataset)

    model = get_model(modelname)
    with model(data_description=data_description,
               output_dir=output_dir,
               **net_config) as net:
        data = data(**dataset)
        train_network(net, output_dir, data, num_iterations, starting_weights,
                      ex)
        _run.info['measurements'] = measure_metrics(
            net,
            data.get_testset(),
            uncertainty_metrics,
            label_flip=dataset['augmentation'].get('label_flip', None))
        net.close()
    print(threading.enumerate())
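
The (old_class, new_class, probability) tuples built above are stored as a 'label_flip' augmentation spec; a sketch of how such a spec could be applied to a label map, assuming per-pixel flipping (the actual augmentation lives inside the dataset classes):

import numpy as np

def apply_label_flip(labels, label_flip, rng=np.random):
    # Relabel pixels of old_class as new_class with the given probability.
    old_class, new_class, prob = label_flip
    labels = labels.copy()
    flip_mask = (labels == old_class) & (rng.rand(*labels.shape) < prob)
    labels[flip_mask] = new_class
    return labels

# e.g. relabel roughly 30% of the class-3 pixels as class 7
flipped = apply_label_flip(np.random.randint(0, 10, (64, 64)), (3, 7, 0.3))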
Example No. 6
def measure(modelname, net_config, dataset, starting_weights,
            uncertainty_metrics, _run):
    model = get_model(modelname)
    data = get_dataset(dataset['name'])
    data_description = list(data.get_data_description())
    if 'num_classes' in dataset:
        data_description[2] = dataset['num_classes']
    with model(data_description=data_description, **net_config) as net:
        data = data(**dataset)
        import_weights_into_network(net, starting_weights)
        _run.info['measurements'] = measure_metrics(net, data.get_testset(),
                                                    uncertainty_metrics)
Example No. 7
def main(starting_weights, modelname, net_config, evaluation_data,
         search_parameters, _run):
    model = get_model(modelname)

    def evaluation(parameters):
        with model(**parameters) as net:
            import_weights_into_network(net, starting_weights)
            measurements, _ = evaluate(net, evaluation_data)
        return measurements

    _run.info['results'] = grid_search(evaluation, search_parameters,
                                       net_config)
def main(dataset, net_config, _run):
    # Add all of the config into the helper class
    for key in net_config:
        setattr(a, key, net_config[key])

    setattr(a, 'EXP_OUT', EXP_OUT)
    setattr(a, 'RUN_id', _run._id)

    output_dir = create_directories(_run._id, ex)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        data = get_dataset(dataset['name'])
        data = data(dataset['image_input_dir'], **dataset)
        data_id = dataset['image_input_dir'].split('/')[-1].split('_')[0]
        setattr(a, 'DATA_id', data_id)
        disc_model = get_model('simDisc')
        ckp = None
        if net_config['checkpoint'] is not None:
            ckp = os.path.join(a.EXP_OUT, str(net_config['checkpoint']))
        fe_ckp = None
        if net_config['feature_extractor'] is not None:
            fe_ckp = os.path.join(a.EXP_OUT,
                                  str(net_config['feature_extractor']))

        model = disc_model(sess=sess,
                           image_size=a.input_image_size,
                           batch_size=a.batch_size,
                           df_dim=a.ndf,
                           input_c_dim=3,
                           checkpoint_dir=output_dir,
                           data=data,
                           momentum=a.batch_momentum,
                           arch=net_config['arch'],
                           checkpoint=ckp,
                           feature_extractor=fe_ckp)
        if a.mode == "train":
            tmp = model.train(a)
            _run.info['predictions'] = tmp
            _run.info['mean_predictions'] = np.mean(tmp, axis=0)
        elif a.mode == "predict":
            input_list = glob.glob(os.path.join(a.predict_dir, "target_*.png"))
            synth_list = glob.glob(os.path.join(a.predict_dir, "synth_*.png"))
            segm_list = glob.glob(os.path.join(a.predict_dir, "input_*.png"))

            input_list.sort(
                key=lambda x: int(x.partition('_')[-1].partition('.')[0]))
            synth_list.sort(
                key=lambda x: int(x.partition('_')[-1].partition('.')[0]))
            segm_list.sort(
                key=lambda x: int(x.partition('_')[-1].partition('.')[0]))

            model.predict(a, input_list, synth_list, segm_list)
Example No. 9
def uncertainty_benchmark(modelname, net_config, dataset, starting_weights,
                          benchmark, uncertainty_metrics, _run):
    model = get_model(modelname)
    data = get_dataset(dataset['name'])
    with model(data_description=data.get_data_description(),
               **net_config) as net:
        data = data(**dataset)
        import_weights_into_network(net, starting_weights)
        for metric in uncertainty_metrics:
            measurements = evaluate_uncertainty(net,
                                                data.get_testset(),
                                                metric,
                                                benchmark=benchmark)
            _run.info.setdefault('measurements', {})[metric] = measurements
Example No. 10
def main(modelname, dataset, net_config, _run):
    # Set up the directories for diagnostics
    output_dir = create_directories(_run._id, ex)

    # load the dataset class, but don't instantiate it
    data = get_dataset(dataset['name'])

    # create the network
    model = get_model(modelname)
    with model(data_description=data.get_data_description(),
               output_dir=output_dir,
               **net_config) as net:
        # now we can load the dataset inside the scope of the network graph
        data = data(**dataset)
        train_and_evaluate(net, output_dir, data)
Example No. 11
def fit_and_evaluate(net_config, evaluation_data, starting_weights, _run):
    """Load weigths from training experiments and evalaute fusion against specified
    data."""
    dataset = get_dataset(evaluation_data['dataset'])

    # evaluate individual experts
    model = get_model(net_config['expert_model'])
    confusion_matrices = {}
    for expert in net_config['num_channels']:
        model_config = deepcopy(net_config)
        model_config['modality'] = expert
        model_config['prefix'] = net_config['prefixes'][expert]
        with model(data_description=dataset.get_data_description(),
                   **model_config) as net:
            data = dataset(**evaluation_data)
            import_weights_into_network(
                net, starting_weights[model_config['prefix']])
            m, conf_mat = net.score(data.get_measureset())
            confusion_matrices[expert] = conf_mat
            print('Evaluated network {} on {} measurement set:'.format(
                expert, evaluation_data['dataset']))
            print("INFO now getting test results")
            m, _ = net.score(data.get_testset())
            print('total accuracy {:.3f} IoU {:.3f}'.format(
                m['total_accuracy'], m['mean_IoU']))
        _run.info.setdefault('measurements', {}).setdefault(expert, m)
    _run.info['confusion_matrices'] = confusion_matrices

    # now evaluate bayes mix
    with BayesFusion(data_description=dataset.get_data_description(),
                     confusion_matrices=confusion_matrices,
                     **net_config) as net:
        data = dataset(**evaluation_data)
        import_weights_into_network(net, starting_weights)
        measurements, confusion_matrix = net.score(data.get_testset())
        _run.info['measurements']['fusion'] = measurements
        _run.info['confusion_matrix'] = confusion_matrix

    print('Evaluated Bayes Fusion on {} data:'.format(
        evaluation_data['dataset']))
    print('total accuracy {:.3f} IoU {:.3f}'.format(
        measurements['total_accuracy'], measurements['mean_IoU']))

    # There seems to be a problem with capturing the print output, flush to be sure
    stdout.flush()
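
BayesFusion above combines the experts by means of their confusion matrices; the following is a generic illustration of that kind of fusion, not the actual BayesFusion implementation: each confusion matrix is turned into per-class likelihoods p(prediction | true class) and the experts' per-pixel predictions are fused under a uniform prior.

import numpy as np

def bayes_fuse(predictions, confusion_matrices, num_classes):
    # predictions: dict expert -> integer label map of shape (H, W)
    first = next(iter(predictions.values()))
    log_post = np.zeros(first.shape + (num_classes,))
    for expert, pred in predictions.items():
        conf = confusion_matrices[expert].astype(float)
        # assumed convention: rows = true class, columns = predicted class
        likelihood = conf / conf.sum(axis=1, keepdims=True)
        log_post += np.log(likelihood[:, pred].transpose(1, 2, 0) + 1e-12)
    return np.argmax(log_post, axis=-1)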
Example No. 12
def main(modelname, net_config, dataset, starting_weights, input_folder, _run):
    # Set up the directories for diagnostics
    output_dir = create_directories(_run._id, ex)

    # load the data for the data description
    data_desc = get_dataset(dataset['name'])

    # load images in input_folder
    eval_image_paths = load_list_path(input_folder)

    # create the network
    model = get_model(modelname)
    with model(data_description=data_desc.get_data_description(),
               output_dir=output_dir,
               **net_config) as net:
        net.import_weights(filepath=starting_weights)
        print("INFO: Imported weights succesfully")
        predict_output(net, output_dir, eval_image_paths, data_desc)
Example No. 13
def evaluate(net_config, evaluation_data, modelname, starting_weights, _run):
    """Load weigths from training experiments and evalaute fusion against specified
    data."""
    data = get_dataset(evaluation_data['dataset'])

    model = get_model(modelname)
    # now evaluate average mix
    with model(data_description=data.get_data_description(),
               **net_config) as net:
        data = data(**evaluation_data)
        import_weights_into_network(net, starting_weights)
        measurements, confusion_matrix = net.score(data.get_set_data(test_set))
        _run.info['measurements'] = measurements
        _run.info['confusion_matrix'] = confusion_matrix

    print('Evaluated on {} data:'.format(evaluation_data['dataset']))
    print('total accuracy {:.3f} IoU {:.3f}'.format(
        measurements['total_accuracy'], measurements['mean_IoU']))

    # There seems to be a problem with capturing the print output, flush to be sure
    stdout.flush()
def main(dataset, net_config, _run):
    # Add all of the config into the helper class
    for key in net_config:
        setattr(a, key, net_config[key])

    setattr(a, 'EXP_OUT', EXP_OUT)
    setattr(a, 'RUN_id', _run._id)

    output_dir = create_directories(_run._id, ex)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        # load the dataset class
        data = get_dataset(dataset['name'])
        data = data(**dataset)
        cGAN_model = get_model('cGAN')
        if a.checkpoint is not None:
            ckp = os.path.join(a.EXP_OUT, str(a.checkpoint))
        else:
            ckp = None
        model = cGAN_model(sess, image_size=a.input_image_size, batch_size=a.batch_size,
                        output_size=a.input_image_size, dataset_name=dataset['name'],
                        checkpoint_dir=output_dir, data=data,
                        data_desc=data.get_data_description(), momentum=a.batch_momentum,
                        feature_matching=net_config['feature_matching'],
                        L1_lambda=float(a.l1_weight/a.gan_weight), gf_dim=a.ngf,
                        df_dim=a.ndf, use_grayscale=net_config['use_grayscale'],
                        noise_std_dev=a.noise_std_dev,
                        checkpoint=ckp, gen_type=net_config['type'])

        if a.mode == 'train':
            tmp = model.train(a)
            _run.info['predictions'] = tmp
            _run.info['mean_predictions'] = np.mean(tmp, axis=0)
        elif a.mode == 'valid':
            tmp = model.validate(a)
            _run.info['predictions'] = tmp
            _run.info['mean_predictions'] = np.mean(tmp, axis=0)
        else:
            model.transformDatasets(a)
Example No. 15
def main(dataset, net_config, _run):
    # Add all of the config into the helper class
    for key in net_config:
        setattr(a, key, net_config[key])

    setattr(a, 'EXP_OUT', EXP_OUT)
    setattr(a, 'RUN_id', _run._id)

    output_dir = create_directories(_run._id, ex)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        # load the dataset class
        data = get_dataset(dataset['name'])
        data = data(**dataset)
        cycleGAN_model = get_model('cycleGAN')
        if a.checkpoint is not None:
            ckp = os.path.join(a.EXP_OUT, str(a.checkpoint))
        else:
            ckp = None
        model = cycleGAN_model(sess,
                               image_size=a.input_image_size,
                               batch_size=a.batch_size,
                               dataset_name=dataset['name'],
                               checkpoint_dir=output_dir,
                               data=data,
                               data_desc=data.get_data_description(),
                               checkpoint=ckp)

        if a.mode == 'train':
            tmp = model.train(a)
            # _run.info['predictions'] = tmp
            # _run.info['mean_predictions'] = np.mean(tmp, axis=0)
        elif a.mode == 'valid':
            tmp = model.validate(a)
            _run.info['predictions'] = tmp
            _run.info['mean_predictions'] = np.mean(tmp, axis=0)
        else:
            model.transformDatasets(a)
Example No. 16
def uncertainty_parameter_search(modelname, net_config, dataset,
                                 starting_weights, search_parameters,
                                 benchmark, uncertainty_metrics, _run):
    model = get_model(modelname)
    data = get_dataset(dataset['name'])

    def evaluation(parameters):
        with model(data_description=data.get_data_description(),
                   **parameters) as net:
            measure_set = data(**dataset).get_measureset()
            import_weights_into_network(net, starting_weights)
            return {
                metric: evaluate_uncertainty(net,
                                             measure_set,
                                             metric,
                                             benchmark=benchmark,
                                             print_results=False)
                for metric in uncertainty_metrics
            }

    _run.info['results'] = grid_search(evaluation, search_parameters,
                                       net_config)
Example No. 17
def also_load_config(modelname, net_config, evaluation_data, starting_weights,
                     _run):
    """In case of only a single training experiment, we also load the exact network
    config from this experiment as a default"""
    # Load the training experiment
    training_experiment = ExperimentData(starting_weights)

    model_config = training_experiment.get_record()['config']['net_config']
    model_config.update(net_config)
    model_config['gpu_fraction'] = 0.94

    # save this
    print('Running with net_config:')
    print(model_config)

    # Create the network
    model = get_model(modelname)
    with model(**model_config) as net:
        # import the weights
        import_weights_into_network(net, starting_weights)

        measurements, confusion_matrix = evaluate(net, evaluation_data)
        _run.info['measurements'] = measurements
        _run.info['confusion_matrix'] = confusion_matrix
def collect_data(net_config, dataset, starting_weights, save_to, _run):
    data = get_dataset(**dataset)
    model = get_model(net_config['expert_model'])

    predictions = {}
    for expert in net_config['prefixes']:
        model_config = deepcopy(net_config)
        model_config['modality'] = expert
        model_config['prefix'] = net_config['prefixes'][expert]
        with model(data_description=data.get_data_description(),
                   **model_config) as net:
            import_weights_into_network(net, starting_weights[model_config['prefix']])
            predictions['measure_%s' % expert] = net.predict(data.get_measureset())
            predictions['test_%s' % expert] = net.predict(data.get_testset())

    # add also gt labels
    predictions['measure_gt'] = data.get_measureset(tf_dataset=False)['labels']
    predictions['test_gt'] = data.get_testset(tf_dataset=False)['labels']

    # outpath = path.join(save_to, _run._id)
    outpath = save_to
    if not path.exists(outpath):
        mkdir(outpath)
    np.savez_compressed(path.join(outpath, 'predictions.npz'), **predictions)
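
The compressed archive written above can be read back with np.load; a small usage sketch, assuming save_to points at the directory used above (the expert name 'rgb' is an illustrative assumption):

import numpy as np
from os import path

preds = np.load(path.join(save_to, 'predictions.npz'))
measure_rgb = preds['measure_rgb']   # expert predictions on the measure set
measure_gt = preds['measure_gt']     # corresponding ground-truth labels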
Example No. 19
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-acc",
                        "--accumulation-steps",
                        type=int,
                        default=1,
                        help="Number of batches to process")
    parser.add_argument("--seed", type=int, default=42, help="Random seed")
    parser.add_argument("-v", "--verbose", action="store_true")
    parser.add_argument("--fast", action="store_true")
    parser.add_argument("-dd",
                        "--data-dir",
                        type=str,
                        required=True,
                        help="Data directory for INRIA sattelite dataset")
    parser.add_argument("-m",
                        "--model",
                        type=str,
                        default="resnet34_fpncat128",
                        help="")
    parser.add_argument("-b",
                        "--batch-size",
                        type=int,
                        default=8,
                        help="Batch Size during training, e.g. -b 64")
    parser.add_argument("-e",
                        "--epochs",
                        type=int,
                        default=100,
                        help="Epoch to run")
    # parser.add_argument('-es', '--early-stopping', type=int, default=None, help='Maximum number of epochs without improvement')
    # parser.add_argument('-fe', '--freeze-encoder', type=int, default=0, help='Freeze encoder parameters for N epochs')
    # parser.add_argument('-ft', '--fine-tune', action='store_true')
    parser.add_argument("-lr",
                        "--learning-rate",
                        type=float,
                        default=1e-3,
                        help="Initial learning rate")
    parser.add_argument(
        "--disaster-type-loss",
        type=str,
        default=None,  # [["ce", 1.0]],
        action="append",
        nargs="+",
        help="Criterion for classifying disaster type",
    )
    parser.add_argument(
        "--damage-type-loss",
        type=str,
        default=None,  # [["bce", 1.0]],
        action="append",
        nargs="+",
        help=
        "Criterion for classifying presence of building with particular damage type",
    )

    parser.add_argument("-l",
                        "--criterion",
                        type=str,
                        default=None,
                        action="append",
                        nargs="+",
                        help="Criterion")
    parser.add_argument("--mask4",
                        type=str,
                        default=None,
                        action="append",
                        nargs="+",
                        help="Criterion for mask with stride 4")
    parser.add_argument("--mask8",
                        type=str,
                        default=None,
                        action="append",
                        nargs="+",
                        help="Criterion for mask with stride 8")
    parser.add_argument("--mask16",
                        type=str,
                        default=None,
                        action="append",
                        nargs="+",
                        help="Criterion for mask with stride 16")
    parser.add_argument("--mask32",
                        type=str,
                        default=None,
                        action="append",
                        nargs="+",
                        help="Criterion for mask with stride 32")
    parser.add_argument("--embedding", type=str, default=None)

    parser.add_argument("-o",
                        "--optimizer",
                        default="RAdam",
                        help="Name of the optimizer")
    parser.add_argument(
        "-c",
        "--checkpoint",
        type=str,
        default=None,
        help="Checkpoint filename to use as initial model weights")
    parser.add_argument("-w",
                        "--workers",
                        default=8,
                        type=int,
                        help="Num workers")
    parser.add_argument("-a",
                        "--augmentations",
                        default="safe",
                        type=str,
                        help="Level of image augmentations")
    parser.add_argument("--transfer", default=None, type=str, help="")
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--size", default=512, type=int)
    parser.add_argument("--fold", default=0, type=int)
    parser.add_argument("-s",
                        "--scheduler",
                        default="multistep",
                        type=str,
                        help="")
    parser.add_argument("-x", "--experiment", default=None, type=str, help="")
    parser.add_argument("-d",
                        "--dropout",
                        default=0.0,
                        type=float,
                        help="Dropout before head layer")
    parser.add_argument("-pl", "--pseudolabeling", type=str, required=True)
    parser.add_argument("-wd",
                        "--weight-decay",
                        default=0,
                        type=float,
                        help="L2 weight decay")
    parser.add_argument("--show", action="store_true")
    parser.add_argument("--dsv", action="store_true")
    parser.add_argument("--balance", action="store_true")
    parser.add_argument("--only-buildings", action="store_true")
    parser.add_argument("--freeze-bn", action="store_true")
    parser.add_argument("--crops",
                        action="store_true",
                        help="Train on random crops")
    parser.add_argument("--post-transform", action="store_true")

    args = parser.parse_args()
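    # Illustrative invocation (paths and loss names are assumptions): because
    # the criterion flags use action="append" with nargs="+", a call such as
    #   python train.py -dd /data/xview2 -pl /data/pseudolabels -l ce 1.0 -l focal 0.25
    # parses to args.criterion == [['ce', '1.0'], ['focal', '0.25']], which the
    # loops further below unpack into (loss_name, loss_weight) pairs.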
    set_manual_seed(args.seed)

    data_dir = args.data_dir
    num_workers = args.workers
    num_epochs = args.epochs
    learning_rate = args.learning_rate
    model_name = args.model
    optimizer_name = args.optimizer
    image_size = args.size, args.size
    fast = args.fast
    augmentations = args.augmentations
    fp16 = args.fp16
    scheduler_name = args.scheduler
    experiment = args.experiment
    dropout = args.dropout
    segmentation_losses = args.criterion
    verbose = args.verbose
    show = args.show
    accumulation_steps = args.accumulation_steps
    weight_decay = args.weight_decay
    fold = args.fold
    balance = args.balance
    only_buildings = args.only_buildings
    freeze_bn = args.freeze_bn
    train_on_crops = args.crops
    enable_post_image_transform = args.post_transform
    disaster_type_loss = args.disaster_type_loss
    train_batch_size = args.batch_size
    embedding_criterion = args.embedding
    damage_type_loss = args.damage_type_loss
    pseudolabels_dir = args.pseudolabeling

    # Compute batch size for validation
    if train_on_crops:
        valid_batch_size = max(1,
                               (train_batch_size *
                                (image_size[0] * image_size[1])) // (1024**2))
    else:
        valid_batch_size = train_batch_size
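    # Worked example (assumed values): with train_batch_size=8 and
    # image_size=(512, 512), 8 * 512 * 512 // 1024**2 == 2, so validation on
    # full-size images runs with batches of 2 when training on crops.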

    run_train = num_epochs > 0

    model: nn.Module = get_model(model_name, dropout=dropout).cuda()

    if args.transfer:
        transfer_checkpoint = fs.auto_file(args.transfer)
        print("Transfering weights from model checkpoint", transfer_checkpoint)
        checkpoint = load_checkpoint(transfer_checkpoint)
        pretrained_dict = checkpoint["model_state_dict"]

        transfer_weights(model, pretrained_dict)

    if args.checkpoint:
        checkpoint = load_checkpoint(fs.auto_file(args.checkpoint))
        unpack_checkpoint(checkpoint, model=model)

        print("Loaded model weights from:", args.checkpoint)
        report_checkpoint(checkpoint)

    if freeze_bn:
        torch_utils.freeze_bn(model)
        print("Freezing bn params")

    runner = SupervisedRunner(input_key=INPUT_IMAGE_KEY, output_key=None)
    main_metric = "weighted_f1"
    cmd_args = vars(args)

    current_time = datetime.now().strftime("%b%d_%H_%M")
    checkpoint_prefix = f"{current_time}_{args.model}_{args.size}_fold{fold}"

    if fp16:
        checkpoint_prefix += "_fp16"

    if fast:
        checkpoint_prefix += "_fast"

    if pseudolabels_dir:
        checkpoint_prefix += "_pseudo"

    if train_on_crops:
        checkpoint_prefix += "_crops"

    if experiment is not None:
        checkpoint_prefix = experiment

    log_dir = os.path.join("runs", checkpoint_prefix)
    os.makedirs(log_dir, exist_ok=False)

    config_fname = os.path.join(log_dir, f"{checkpoint_prefix}.json")
    with open(config_fname, "w") as f:
        train_session_args = vars(args)
        f.write(json.dumps(train_session_args, indent=2))

    default_callbacks = [
        CompetitionMetricCallback(input_key=INPUT_MASK_KEY,
                                  output_key=OUTPUT_MASK_KEY,
                                  prefix="weighted_f1"),
        ConfusionMatrixCallback(
            input_key=INPUT_MASK_KEY,
            output_key=OUTPUT_MASK_KEY,
            class_names=[
                "land", "no_damage", "minor_damage", "major_damage",
                "destroyed"
            ],
            ignore_index=UNLABELED_SAMPLE,
        ),
    ]

    if show:
        default_callbacks += [
            ShowPolarBatchesCallback(draw_predictions,
                                     metric=main_metric + "_batch",
                                     minimize=False)
        ]

    train_ds, valid_ds, train_sampler = get_datasets(
        data_dir=data_dir,
        image_size=image_size,
        augmentation=augmentations,
        fast=fast,
        fold=fold,
        balance=balance,
        only_buildings=only_buildings,
        train_on_crops=train_on_crops,
        crops_multiplication_factor=1,
        enable_post_image_transform=enable_post_image_transform,
    )

    if run_train:
        loaders = collections.OrderedDict()
        callbacks = default_callbacks.copy()
        criterions_dict = {}
        losses = []

        unlabeled_train = get_pseudolabeling_dataset(
            data_dir,
            include_masks=True,
            image_size=image_size,
            augmentation="medium_nmd",
            train_on_crops=train_on_crops,
            enable_post_image_transform=enable_post_image_transform,
            pseudolabels_dir=pseudolabels_dir,
        )

        train_ds = train_ds + unlabeled_train

        print("Using online pseudolabeling with ", len(unlabeled_train),
              "samples")

        loaders["train"] = DataLoader(
            train_ds,
            batch_size=train_batch_size,
            num_workers=num_workers,
            pin_memory=True,
            drop_last=True,
            shuffle=True,
        )

        loaders["valid"] = DataLoader(valid_ds,
                                      batch_size=valid_batch_size,
                                      num_workers=num_workers,
                                      pin_memory=True)

        # Create losses
        for criterion in segmentation_losses:
            if isinstance(criterion, (list, tuple)) and len(criterion) == 2:
                loss_name, loss_weight = criterion
            else:
                loss_name, loss_weight = criterion[0], 1.0

            cd, criterion, criterion_name = get_criterion_callback(
                loss_name,
                prefix="segmentation",
                input_key=INPUT_MASK_KEY,
                output_key=OUTPUT_MASK_KEY,
                loss_weight=float(loss_weight),
            )
            criterions_dict.update(cd)
            callbacks.append(criterion)
            losses.append(criterion_name)
            print(INPUT_MASK_KEY, "Using loss", loss_name, loss_weight)

        if args.mask4 is not None:
            for criterion in args.mask4:
                if isinstance(criterion, (list, tuple)):
                    loss_name, loss_weight = criterion
                else:
                    loss_name, loss_weight = criterion, 1.0

                cd, criterion, criterion_name = get_criterion_callback(
                    loss_name,
                    prefix="mask4",
                    input_key=INPUT_MASK_KEY,
                    output_key=OUTPUT_MASK_4_KEY,
                    loss_weight=float(loss_weight),
                )
                criterions_dict.update(cd)
                callbacks.append(criterion)
                losses.append(criterion_name)
                print(OUTPUT_MASK_4_KEY, "Using loss", loss_name, loss_weight)

        if args.mask8 is not None:
            for criterion in args.mask8:
                if isinstance(criterion, (list, tuple)):
                    loss_name, loss_weight = criterion
                else:
                    loss_name, loss_weight = criterion, 1.0

                cd, criterion, criterion_name = get_criterion_callback(
                    loss_name,
                    prefix="mask8",
                    input_key=INPUT_MASK_KEY,
                    output_key=OUTPUT_MASK_8_KEY,
                    loss_weight=float(loss_weight),
                )
                criterions_dict.update(cd)
                callbacks.append(criterion)
                losses.append(criterion_name)
                print(OUTPUT_MASK_8_KEY, "Using loss", loss_name, loss_weight)

        if args.mask16 is not None:
            for criterion in args.mask16:
                if isinstance(criterion, (list, tuple)):
                    loss_name, loss_weight = criterion
                else:
                    loss_name, loss_weight = criterion, 1.0

                cd, criterion, criterion_name = get_criterion_callback(
                    loss_name,
                    prefix="mask16",
                    input_key=INPUT_MASK_KEY,
                    output_key=OUTPUT_MASK_16_KEY,
                    loss_weight=float(loss_weight),
                )
                criterions_dict.update(cd)
                callbacks.append(criterion)
                losses.append(criterion_name)
                print(OUTPUT_MASK_16_KEY, "Using loss", loss_name, loss_weight)

        if args.mask32 is not None:
            for criterion in args.mask32:
                if isinstance(criterion, (list, tuple)):
                    loss_name, loss_weight = criterion
                else:
                    loss_name, loss_weight = criterion, 1.0

                cd, criterion, criterion_name = get_criterion_callback(
                    loss_name,
                    prefix="mask32",
                    input_key=INPUT_MASK_KEY,
                    output_key=OUTPUT_MASK_32_KEY,
                    loss_weight=float(loss_weight),
                )
                criterions_dict.update(cd)
                callbacks.append(criterion)
                losses.append(criterion_name)
                print(OUTPUT_MASK_32_KEY, "Using loss", loss_name, loss_weight)

        if disaster_type_loss is not None:
            callbacks += [
                ConfusionMatrixCallback(
                    input_key=DISASTER_TYPE_KEY,
                    output_key=DISASTER_TYPE_KEY,
                    class_names=DISASTER_TYPES,
                    ignore_index=UNKNOWN_DISASTER_TYPE_CLASS,
                    prefix=f"{DISASTER_TYPE_KEY}/confusion_matrix",
                ),
                AccuracyCallback(
                    input_key=DISASTER_TYPE_KEY,
                    output_key=DISASTER_TYPE_KEY,
                    prefix=f"{DISASTER_TYPE_KEY}/accuracy",
                    activation="Softmax",
                ),
            ]

            for criterion in disaster_type_loss:
                if isinstance(criterion, (list, tuple)):
                    loss_name, loss_weight = criterion
                else:
                    loss_name, loss_weight = criterion, 1.0

                cd, criterion, criterion_name = get_criterion_callback(
                    loss_name,
                    prefix=DISASTER_TYPE_KEY,
                    input_key=DISASTER_TYPE_KEY,
                    output_key=DISASTER_TYPE_KEY,
                    loss_weight=float(loss_weight),
                    ignore_index=UNKNOWN_DISASTER_TYPE_CLASS,
                )
                criterions_dict.update(cd)
                callbacks.append(criterion)
                losses.append(criterion_name)
                print(DISASTER_TYPE_KEY, "Using loss", loss_name, loss_weight)

        if damage_type_loss is not None:
            callbacks += [
                # MultilabelConfusionMatrixCallback(
                #     input_key=DAMAGE_TYPE_KEY,
                #     output_key=DAMAGE_TYPE_KEY,
                #     class_names=DAMAGE_TYPES,
                #     prefix=f"{DAMAGE_TYPE_KEY}/confusion_matrix",
                # ),
                AccuracyCallback(
                    input_key=DAMAGE_TYPE_KEY,
                    output_key=DAMAGE_TYPE_KEY,
                    prefix=f"{DAMAGE_TYPE_KEY}/accuracy",
                    activation="Sigmoid",
                    threshold=0.5,
                )
            ]

            for criterion in damage_type_loss:
                if isinstance(criterion, (list, tuple)):
                    loss_name, loss_weight = criterion
                else:
                    loss_name, loss_weight = criterion, 1.0

                cd, criterion, criterion_name = get_criterion_callback(
                    loss_name,
                    prefix=DAMAGE_TYPE_KEY,
                    input_key=DAMAGE_TYPE_KEY,
                    output_key=DAMAGE_TYPE_KEY,
                    loss_weight=float(loss_weight),
                )
                criterions_dict.update(cd)
                callbacks.append(criterion)
                losses.append(criterion_name)
                print(DAMAGE_TYPE_KEY, "Using loss", loss_name, loss_weight)

        if embedding_criterion is not None:
            cd, criterion, criterion_name = get_criterion_callback(
                embedding_criterion,
                prefix="embedding",
                input_key=INPUT_MASK_KEY,
                output_key=OUTPUT_EMBEDDING_KEY,
                loss_weight=1.0,
            )
            criterions_dict.update(cd)
            callbacks.append(criterion)
            losses.append(criterion_name)
            print(OUTPUT_EMBEDDING_KEY, "Using loss", embedding_criterion)

        callbacks += [
            CriterionAggregatorCallback(prefix="loss", loss_keys=losses),
            OptimizerCallback(accumulation_steps=accumulation_steps,
                              decouple_weight_decay=False),
        ]

        optimizer = get_optimizer(optimizer_name,
                                  get_optimizable_parameters(model),
                                  learning_rate,
                                  weight_decay=weight_decay)
        scheduler = get_scheduler(scheduler_name,
                                  optimizer,
                                  lr=learning_rate,
                                  num_epochs=num_epochs,
                                  batches_in_epoch=len(loaders["train"]))
        if isinstance(scheduler, CyclicLR):
            callbacks += [SchedulerCallback(mode="batch")]

        print("Train session    :", checkpoint_prefix)
        print("  FP16 mode      :", fp16)
        print("  Fast mode      :", args.fast)
        print("  Epochs         :", num_epochs)
        print("  Workers        :", num_workers)
        print("  Data dir       :", data_dir)
        print("  Log dir        :", log_dir)
        print("Data             ")
        print("  Augmentations  :", augmentations)
        print("  Train size     :", len(loaders["train"]), len(train_ds))
        print("  Valid size     :", len(loaders["valid"]), len(valid_ds))
        print("  Image size     :", image_size)
        print("  Train on crops :", train_on_crops)
        print("  Balance        :", balance)
        print("  Buildings only :", only_buildings)
        print("  Post transform :", enable_post_image_transform)
        print("  Pseudolabels   :", pseudolabels_dir)
        print("Model            :", model_name)
        print("  Parameters     :", count_parameters(model))
        print("  Dropout        :", dropout)
        print("Optimizer        :", optimizer_name)
        print("  Learning rate  :", learning_rate)
        print("  Weight decay   :", weight_decay)
        print("  Scheduler      :", scheduler_name)
        print("  Batch sizes    :", train_batch_size, valid_batch_size)
        print("  Criterion      :", segmentation_losses)
        print("  Damage type    :", damage_type_loss)
        print("  Disaster type  :", disaster_type_loss)
        print(" Embedding      :", embedding_criterion)

        # model training
        runner.train(
            fp16=fp16,
            model=model,
            criterion=criterions_dict,
            optimizer=optimizer,
            scheduler=scheduler,
            callbacks=callbacks,
            loaders=loaders,
            logdir=os.path.join(log_dir, "opl"),
            num_epochs=num_epochs,
            verbose=verbose,
            main_metric=main_metric,
            minimize_metric=False,
            checkpoint_data={"cmd_args": cmd_args},
        )

        # Training is finished. Let's run predictions using best checkpoint weights
        best_checkpoint = os.path.join(log_dir, "main", "checkpoints",
                                       "best.pth")

        model_checkpoint = os.path.join(log_dir, "main", "checkpoints",
                                        f"{checkpoint_prefix}.pth")
        clean_checkpoint(best_checkpoint, model_checkpoint)

        del optimizer, loaders
def main(modelname, net_config, gan_config, disc_config, datasetSem,
         datasetGAN, datasetDisc, starting_weights, flag_measure, output_mat,
         flag_entropy, thresholds, start, _run):
    for key in gan_config:
        setattr(a, key, gan_config[key])
    for key in disc_config:
        setattr(b, key, disc_config[key])
    setattr(a, 'EXP_OUT', EXP_OUT)
    setattr(a, 'RUN_id', _run._id)
    setattr(b, 'EXP_OUT', EXP_OUT)
    setattr(b, 'RUN_id', _run._id)
    disc_data_path = os.path.join(datasetDisc['image_input_dir'],
                                  str(gan_config['checkpoint']) + "_full")
    data_id = str(gan_config['checkpoint'])
    setattr(b, 'DATA_id', data_id)
    # Set up the directories for diagnostics
    output_dir = create_directories(_run._id, ex)

    # load the data for the data description
    data_desc = get_dataset(datasetSem['name'])

    model = get_model(modelname)
    net = model(data_description=data_desc.get_data_description(),
                output_dir=output_dir,
                **net_config)
    # net.import_weights(filepath=starting_weights)
    print("INFO: SemSegNet Imported weights succesfully")

    GAN_graph = tf.Graph()
    with GAN_graph.as_default():
        # create the network
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
        GAN_sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

        if gan_config['type'] == 'cascRef':
            dataGAN = get_dataset('cityscapes_cascGAN')
            cGAN_model = get_model('cascGAN')
            if a.checkpoint is not None:
                ckp = os.path.join(a.EXP_OUT, str(a.checkpoint))
            modelGAN = cGAN_model(
                GAN_sess,
                dataset_name='cityscapes_cascGAN',
                image_size=disc_config['input_image_size'],
                checkpoint_dir=output_dir,
                data_desc=dataGAN.get_data_description(),
                is_training=False,
                checkpoint=ckp,
                vgg_checkpoint=
                "/cluster/work/riner/users/haldavid/Checkpoints/VGG_Model/imagenet-vgg-verydeep-19.mat"
            )
        else:
            # load the dataset class
            dataGAN = get_dataset(datasetGAN['name'])
            # data = data(**datasetGAN)
            cGAN_model = get_model('cGAN')
            modelGAN = cGAN_model(
                GAN_sess,
                checkpoint_dir=output_dir,
                data_desc=dataGAN.get_data_description(),
                feature_matching=gan_config['feature_matching'],
                checkpoint=os.path.join(a.EXP_OUT, str(a.checkpoint)),
                gen_type=gan_config['type'],
                use_grayscale=gan_config['use_grayscale'])
        print("INFO: Generative model imported weights succesfully")

    Disc_graph = tf.Graph()
    with Disc_graph.as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
        sessD = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        dataD = get_dataset(datasetDisc['name'])
        dataD = dataD(disc_data_path, **datasetDisc)
        disc_model = get_model('simDisc')

        disc_checkpoint = None
        if disc_config['checkpoint'] is not None:
            disc_checkpoint = os.path.join(a.EXP_OUT,
                                           str(disc_config['checkpoint']))
        modelDiff = disc_model(sess=sessD,
                               checkpoint_dir=output_dir,
                               pos_weight=disc_config['pos_weight'],
                               data=dataD,
                               arch=disc_config['arch'],
                               use_grayscale=disc_config['use_grayscale'],
                               checkpoint=disc_checkpoint,
                               use_segm=disc_config['use_segm'],
                               batch_size=disc_config['batch_size'],
                               feature_extractor=os.path.join(
                                   a.EXP_OUT, str(a.checkpoint)))

        if disc_config['checkpoint'] is None:
            print("INFO: Begin training simDisc")
            tmp = modelDiff.train(b)
            _run.info['simDisc_predictions'] = tmp
            _run.info['simDisc_mean_predictions'] = np.mean(tmp, axis=0)
            _run.info['simDisc_stdDev'] = np.std(tmp, axis=0)
            print("INFO: Finished training simDisc")
        else:
            print("INFO: Init and loaded checpoint for simDisc")

    if flag_measure:
        benchmarks = ['measure']
    else:
        benchmarks = ['wilddash', 'posneg', 'valid', 'measure']
    data_SemSeg = data_desc(**datasetSem)

    _run.info['thresholds'] = thresholds

    ###########################################################################
    # mapping from Deeplab classes to Adapnet classes
    original_labelinfo = {
        0: {
            'name': 'road',
            'mapping': 'road'
        },
        1: {
            'name': 'sidewalk',
            'mapping': 'sidewalk'
        },
        2: {
            'name': 'building',
            'mapping': 'building'
        },
        3: {
            'name': 'wall',
            'mapping': 'building'
        },
        4: {
            'name': 'fence',
            'mapping': 'fence'
        },
        5: {
            'name': 'pole',
            'mapping': 'pole'
        },
        6: {
            'name': 'traffic light',
            'mapping': 'void'
        },
        7: {
            'name': 'traffic sign',
            'mapping': 'traffic sign'
        },
        8: {
            'name': 'vegetation',
            'mapping': 'vegetation'
        },
        9: {
            'name': 'terrain',
            'mapping': 'vegetation'
        },
        10: {
            'name': 'sky',
            'mapping': 'sky'
        },
        11: {
            'name': 'person',
            'mapping': 'person'
        },
        12: {
            'name': 'rider',
            'mapping': 'person'
        },
        13: {
            'name': 'car',
            'mapping': 'vehicle'
        },
        14: {
            'name': 'truck',
            'mapping': 'vehicle'
        },
        15: {
            'name': 'bus',
            'mapping': 'vehicle'
        },
        16: {
            'name': 'train',
            'mapping': 'vehicle'
        },
        17: {
            'name': 'motorcycle',
            'mapping': 'vehicle'
        },
        18: {
            'name': 'bicycle',
            'mapping': 'bicycle'
        },
        255: {
            'name': 'void',
            'mapping': 'void'
        }
    }

    labelinfo = {
        0: {
            'name': 'void',
            'color': [0, 0, 0]
        },
        1: {
            'name': 'sky',
            'color': [70, 130, 180]
        },
        2: {
            'name': 'building',
            'color': [70, 70, 70]
        },
        3: {
            'name': 'road',
            'color': [128, 64, 128]
        },
        4: {
            'name': 'sidewalk',
            'color': [244, 35, 232]
        },
        5: {
            'name': 'fence',
            'color': [190, 153, 153]
        },
        6: {
            'name': 'vegetation',
            'color': [107, 142, 35]
        },
        7: {
            'name': 'pole',
            'color': [153, 153, 153]
        },
        8: {
            'name': 'vehicle',
            'color': [0, 0, 142]
        },
        9: {
            'name': 'traffic sign',
            'color': [220, 220, 0]
        },
        10: {
            'name': 'person',
            'color': [220, 20, 60]
        },
        11: {
            'name': 'bicycle',
            'color': [119, 11, 32]
        }
    }

    label_lookup = [
        next(i for i in labelinfo if labelinfo[i]['name'] == k['mapping'])
        for _, k in original_labelinfo.items()
    ]
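    # label_lookup is built in the iteration order of original_labelinfo, so
    # entry i maps Deeplab class i to its Adapnet id for i = 0..18 (e.g.
    # 'road' -> 3, 'wall' -> 2 ('building')); it is applied further below as
    # np.asarray(label_lookup, dtype='int32')[dl_labels].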

    base_path = path.join(DATA_BASEPATH, 'fishyscapes_newfog')
    if 'TMPDIR' in environ:
        print('INFO loading dataset into machine ... ')
        # first load the zipfile into a closer memory location, then load all the
        # images
        zip = zipfile.ZipFile(path.join(base_path, 'testset.zip'), 'r')
        localtmp = environ['TMPDIR']
        zip.extractall(localtmp)
        zip.close()
        base_path = localtmp

    print('DONE loading dataset into machine ... ')

    ###########################################################################

    set_size = 1000
    h_orig = 1024
    w_orig = 2048

    sub_size = 100

    semseg_path = "/cluster/work/riner/users/blumh/fishyscapes_deeplab_predictions_newfog"
    out_path = "/cluster/work/riner/users/blumh/resultsDH"

    for k in range(start, (start + 2)):
        kb = k * sub_size
        if k > 0:
            print('Done %d images' % (kb))
            stdout.flush()
        img_array = np.zeros((sub_size, 256, 256, 3))
        segm_array = np.zeros((sub_size, 256, 256, 3))
        for i in range(sub_size):
            img = cv2.imread(
                path.join(base_path, 'testset',
                          str(i + kb) + '_rgb.png'))
            dl_labels = np.expand_dims(cv2.imread(
                path.join(semseg_path,
                          str(i + kb) + '_predict.png'))[:, :, 0],
                                       axis=0)

            cs_labels = np.asarray(label_lookup, dtype='int32')[dl_labels]

            lookup = np.array([
                labelinfo[i]['color']
                for i in range(max(labelinfo.keys()) + 1)
            ]).astype(int)
            segm = np.array(lookup[cs_labels[:]]).astype('uint8')[..., ::-1]

            #mask = cv2.imread(path.join(base_path, str(i)+'_mask.png'), cv2.IMREAD_ANYDEPTH)
            # blob['labels'] = cv2.imread(labels_filename, cv2.IMREAD_ANYDEPTH)
            # # apply label mapping
            # blob['labels'] = np.asarray(self.label_lookup, dtype='int32')[blob['labels']]

            img_array[i, ...] = cv2.resize(img, (256, 256),
                                           interpolation=cv2.INTER_LINEAR)
            segm_array[i, ...] = cv2.resize(segm[0, ...], (256, 256),
                                            interpolation=cv2.INTER_NEAREST)

        with GAN_sess.as_default():
            with GAN_graph.as_default():
                synth_images = modelGAN.transform(a, segm_array)

        with sessD.as_default():
            with Disc_graph.as_default():
                simMat = modelDiff.transform(img_array, synth_images,
                                             segm_array)

        for i in range(sub_size):
            # filename = path.join(out_path,str(i+kb)+'_rgb.png')
            # cv2.imwrite(filename,cv2.resize(img_array[i,...], (2048, 1024),interpolation=cv2.INTER_LINEAR))
            # filename = path.join(out_path,str(i+kb)+'_segm.png')
            # cv2.imwrite(filename,cv2.resize(segm_array[i,...], (2048, 1024),interpolation=cv2.INTER_NEAREST))
            filename = path.join(out_path, str(i + kb) + '_dissim.png')
            cv2.imwrite(filename, simMat[i, ...])
            filename = path.join(out_path, str(i + kb) + '_dissim.npy')
            np.save(
                filename,
                cv2.resize(simMat[i, ...], (2048, 1024),
                           interpolation=cv2.INTER_LINEAR))
Example No. 21
def main(modelname, net_config, gan_config, disc_config, datasetSem,
         datasetGAN, datasetDisc, starting_weights, flag_measure, output_mat,
         flag_entropy, thresholds, start, _run):
    for key in gan_config:
        setattr(a, key, gan_config[key])
    for key in disc_config:
        setattr(b, key, disc_config[key])
    setattr(a, 'EXP_OUT', EXP_OUT)
    setattr(a, 'RUN_id', _run._id)
    setattr(b, 'EXP_OUT', EXP_OUT)
    setattr(b, 'RUN_id', _run._id)
    disc_data_path = os.path.join(datasetDisc['image_input_dir'],
                                  str(gan_config['checkpoint']) + "_full")
    data_id = str(gan_config['checkpoint'])
    setattr(b, 'DATA_id', data_id)
    # Set up the directories for diagnostics
    output_dir = create_directories(_run._id, ex)

    # load the data for the data description
    data_desc = get_dataset(datasetSem['name'])

    model = get_model(modelname)
    net = model(data_description=data_desc.get_data_description(),
                output_dir=output_dir,
                **net_config)
    net.import_weights(filepath=starting_weights)
    print("INFO: SemSegNet Imported weights succesfully")

    GAN_graph = tf.Graph()
    with GAN_graph.as_default():
        # create the network
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
        GAN_sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

        if gan_config['type'] == 'cascRef':
            dataGAN = get_dataset('cityscapes_cascGAN')
            cGAN_model = get_model('cascGAN')
            if a.checkpoint is not None:
                ckp = os.path.join(a.EXP_OUT, str(a.checkpoint))
            modelGAN = cGAN_model(
                GAN_sess,
                dataset_name='cityscapes_cascGAN',
                image_size=disc_config['input_image_size'],
                checkpoint_dir=output_dir,
                data_desc=dataGAN.get_data_description(),
                is_training=False,
                checkpoint=ckp,
                vgg_checkpoint=
                "/cluster/work/riner/users/haldavid/Checkpoints/VGG_Model/imagenet-vgg-verydeep-19.mat"
            )
        else:
            # load the dataset class
            dataGAN = get_dataset(datasetGAN['name'])
            # data = data(**datasetGAN)
            cGAN_model = get_model('cGAN')
            modelGAN = cGAN_model(
                GAN_sess,
                checkpoint_dir=output_dir,
                data_desc=dataGAN.get_data_description(),
                feature_matching=gan_config['feature_matching'],
                checkpoint=os.path.join(a.EXP_OUT, str(a.checkpoint)),
                gen_type=gan_config['type'],
                use_grayscale=gan_config['use_grayscale'])
        print("INFO: Generative model imported weights succesfully")

    Disc_graph = tf.Graph()
    with Disc_graph.as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
        sessD = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        dataD = get_dataset(datasetDisc['name'])
        dataD = dataD(disc_data_path, **datasetDisc)
        disc_model = get_model('simDisc')

        disc_checkpoint = None
        if disc_config['checkpoint'] is not None:
            disc_checkpoint = os.path.join(a.EXP_OUT,
                                           str(disc_config['checkpoint']))
        modelDiff = disc_model(sess=sessD,
                               checkpoint_dir=output_dir,
                               pos_weight=disc_config['pos_weight'],
                               data=dataD,
                               arch=disc_config['arch'],
                               use_grayscale=disc_config['use_grayscale'],
                               checkpoint=disc_checkpoint,
                               use_segm=disc_config['use_segm'],
                               batch_size=disc_config['batch_size'],
                               feature_extractor=os.path.join(
                                   a.EXP_OUT, str(a.checkpoint)))

        if disc_config['checkpoint'] is None:
            print("INFO: Begin training simDisc")
            tmp = modelDiff.train(b)
            _run.info['simDisc_predictions'] = tmp
            _run.info['simDisc_mean_predictions'] = np.mean(tmp, axis=0)
            _run.info['simDisc_stdDev'] = np.std(tmp, axis=0)
            print("INFO: Finished training simDisc")
        else:
            print("INFO: Init and loaded checpoint for simDisc")

    if flag_measure:
        benchmarks = ['measure']
    else:
        benchmarks = ['wilddash', 'posneg', 'valid', 'measure']
    data_SemSeg = data_desc(**datasetSem)

    # thresholds = [0.2,0.4,0.6,0.8]
    # thresholds = [0.85,0.9,0.95,0.99]
    _run.info['thresholds'] = thresholds
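    # evaluate the dissimilarity scores on every benchmark split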
    for set in benchmarks:
        if set == "measure":
            dataset = data_SemSeg.get_measureset(tf_dataset=False)
        elif set == "valid":
            dataset = data_SemSeg.get_validation_set(tf_dataset=False)
        else:
            data = get_dataset(set)
            head, _ = os.path.split(datasetDisc['image_input_dir'])
            data = data(os.path.join(head, set))
            dataset = data.get_validation_set(tf_dataset=False)

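        # run the semantic segmentation network on this split; returns predicted
        # segmentations, RGB inputs, evaluation masks, ground-truth segmentations,
        # label maps and softmax probabilities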
        sem_seg_images, rgb_images, masks, gt_seg_images, seg_seg_labels, output_probs = predict_output(
            net, output_dir, dataset, data_SemSeg, flag_entropy,
            net_config['num_classes'])

        with GAN_sess.as_default():
            with GAN_graph.as_default():
                if gan_config['type'] == 'cascRef':
                    synth_images = modelGAN.transform(a, seg_seg_labels)
                else:
                    synth_images = modelGAN.transform(a, sem_seg_images)

        with sessD.as_default():
            with Disc_graph.as_default():
                simMat = modelDiff.transform(rgb_images, synth_images,
                                             sem_seg_images)


        # Perhaps we also need to pass the number of patches per dimension.
        simMatSSIM = computePatchSSIM(rgb_images, synth_images,
                                      datasetDisc['ppd'])

        temp_iou = computeIOU(simMat, masks, thresholds)
        temp_pr = computePRvalues(simMat, masks, thresholds)
        _run.info[set + '_IOU'] = temp_iou
        _run.info[set + '_PRvals'] = temp_pr
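        # F1 is the harmonic mean of precision (temp_pr[1]) and recall (temp_pr[2]) at each threshold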
        _run.info[set + '_F1score'] = 2 * np.asarray(temp_pr[1]) * np.asarray(
            temp_pr[2]) / (np.asarray(temp_pr[1]) + np.asarray(temp_pr[2]))

        # _run.info[set+'_SSIM_IOU'] = computeIOU(simMatSSIM, masks, thresholds)
        # _run.info[set+'_SSIM_PRvals'] = computePRvalues(simMatSSIM, masks)

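        # Shannon entropy of the softmax output serves as a per-pixel uncertainty baseline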
        if flag_entropy and set != 'posneg':
            entropy = ShannonEntropy(output_probs)
            # _run.info[set+'_meanVarEntropyOoD'] = [np.mean(entropy[masks.astype(bool)]),np.var(entropy[masks.astype(bool)],ddof=1)]
            # _run.info[set+'_meanVarEntropyID'] = [np.mean(entropy[~masks.astype(bool)]),np.var(entropy[~masks.astype(bool)],ddof=1)]
            temp_iou = computeIOU(entropy, masks, thresholds)
            temp_pr = computePRvalues(entropy, masks, thresholds)
            _run.info[set + '_entropy_IOU'] = temp_iou
            _run.info[set + '_entropy_PRvals'] = temp_pr
            _run.info[set + '_entropy_F1score'] = 2 * np.asarray(
                temp_pr[1]) * np.asarray(temp_pr[2]) / (
                    np.asarray(temp_pr[1]) + np.asarray(temp_pr[2]))

        k = masks.shape[0]

        if output_mat and set != 'posneg':

            if not os.path.exists(os.path.join(output_dir, set)):
                os.makedirs(os.path.join(output_dir, set))
            if set == 'measure':
                k = 25

            matrix_path = os.path.join(output_dir, set, "mskMat.npy")
            np.save(matrix_path, masks[0:k, ...])

            matrix_path = os.path.join(output_dir, set, "simMat.npy")
            np.save(matrix_path, simMat[0:k, ...])

            # matrix_path = os.path.join(output_dir,set,"ssmMat.npy")
            # np.save(matrix_path, simMatSSIM[0:k, ...])

            matrix_path = os.path.join(output_dir, set, "rgbMat.npy")
            np.save(matrix_path, rgb_images[0:k, ...])

            matrix_path = os.path.join(output_dir, set, "synMat.npy")
            np.save(matrix_path, synth_images[0:k, ...])

            matrix_path = os.path.join(output_dir, set, "semMat.npy")
            np.save(matrix_path, sem_seg_images[0:k, ...])

            matrix_path = os.path.join(output_dir, set, "gtsMat.npy")
            np.save(matrix_path, gt_seg_images[0:k, ...])

            if flag_entropy:
                matrix_path = os.path.join(output_dir, set, "entMat.npy")
                np.save(matrix_path, entropy)
Exemplo n.º 22
def main(modelname, net_config, gan_config, disc_config, datasetSem,
         datasetGAN, datasetDisc, starting_weights, flag_measure, output_mat,
         flag_entropy, thresholds, start, _run):
    for key in gan_config:
        setattr(a, key, gan_config[key])
    for key in disc_config:
        setattr(b, key, disc_config[key])
    setattr(a, 'EXP_OUT', EXP_OUT)
    setattr(a, 'RUN_id', _run._id)
    setattr(b, 'EXP_OUT', EXP_OUT)
    setattr(b, 'RUN_id', _run._id)
    disc_data_path = os.path.join(datasetDisc['image_input_dir'],
                                  str(gan_config['checkpoint']) + "_full")
    data_id = str(gan_config['checkpoint'])
    setattr(b, 'DATA_id', data_id)
    # Set up the directories for diagnostics
    output_dir = create_directories(_run._id, ex)

    # load the data for the data description
    data_desc = get_dataset(datasetSem['name'])

    model = get_model(modelname)
    net = model(data_description=data_desc.get_data_description(),
                output_dir=output_dir,
                **net_config)
    # net.import_weights(filepath=starting_weights)
    print("INFO: SemSegNet Imported weights succesfully")

    GAN_graph = tf.Graph()
    with GAN_graph.as_default():
        # create the network
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
        GAN_sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

        if gan_config['type'] == 'cascRef':
            dataGAN = get_dataset('cityscapes_cascGAN')
            cGAN_model = get_model('cascGAN')
            if a.checkpoint is not None:
                ckp = os.path.join(a.EXP_OUT, str(a.checkpoint))
            else:
                ckp = None
            modelGAN = cGAN_model(
                GAN_sess,
                dataset_name='cityscapes_cascGAN',
                image_size=disc_config['input_image_size'],
                checkpoint_dir=output_dir,
                data_desc=dataGAN.get_data_description(),
                is_training=False,
                checkpoint=ckp,
                vgg_checkpoint=
                "/cluster/work/riner/users/haldavid/Checkpoints/VGG_Model/imagenet-vgg-verydeep-19.mat"
            )
        else:
            # load the dataset class
            dataGAN = get_dataset(datasetGAN['name'])
            # data = data(**datasetGAN)
            cGAN_model = get_model('cGAN')
            modelGAN = cGAN_model(
                GAN_sess,
                checkpoint_dir=output_dir,
                data_desc=dataGAN.get_data_description(),
                feature_matching=gan_config['feature_matching'],
                checkpoint=os.path.join(a.EXP_OUT, str(a.checkpoint)),
                gen_type=gan_config['type'],
                use_grayscale=gan_config['use_grayscale'])
        print("INFO: Generative model imported weights succesfully")

    Disc_graph = tf.Graph()
    with Disc_graph.as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
        sessD = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        dataD = get_dataset(datasetDisc['name'])
        dataD = dataD(disc_data_path, **datasetDisc)
        disc_model = get_model('simDisc')

        disc_checkpoint = None
        if disc_config['checkpoint'] is not None:
            disc_checkpoint = os.path.join(a.EXP_OUT,
                                           str(disc_config['checkpoint']))
        modelDiff = disc_model(sess=sessD,
                               checkpoint_dir=output_dir,
                               pos_weight=disc_config['pos_weight'],
                               data=dataD,
                               arch=disc_config['arch'],
                               use_grayscale=disc_config['use_grayscale'],
                               checkpoint=disc_checkpoint,
                               use_segm=disc_config['use_segm'],
                               batch_size=disc_config['batch_size'],
                               feature_extractor=os.path.join(
                                   a.EXP_OUT, str(a.checkpoint)))

        if disc_config['checkpoint'] is None:
            print("INFO: Begin training simDisc")
            tmp = modelDiff.train(b)
            _run.info['simDisc_predictions'] = tmp
            _run.info['simDisc_mean_predictions'] = np.mean(tmp, axis=0)
            _run.info['simDisc_stdDev'] = np.std(tmp, axis=0)
            print("INFO: Finished training simDisc")
        else:
            print("INFO: Init and loaded checpoint for simDisc")

    if flag_measure:
        benchmarks = ['measure']
    else:
        benchmarks = ['wilddash', 'posneg', 'valid', 'measure']
    data_SemSeg = data_desc(**datasetSem)

    _run.info['thresholds'] = thresholds

    ###########################################################################
    # mapping from Mapillary classes to Deeplab classes
    if start == 0:
        with open('/cluster/work/riner/users/haldavid/config.json') as f:
            config = json.load(f)
    else:
        with open(
                '/Volumes/Netti HD /Master Thesis/mapillary/config.json') as f:
            config = json.load(f)

    def map_class(name):
        """Map a Mapillary class onto one of the 19 cityscapes classes or void."""
        direct_mapping = {
            'construction--barrier--fence': 'fence',
            'construction--barrier--wall': 'wall',
            'construction--flat--road': 'road',
            'construction--flat--sidewalk': 'sidewalk',
            'construction--structure--building': 'building',
            'human--person': 'person',
            'nature--sky': 'sky',
            'nature--terrain': 'terrain',
            'nature--vegetation': 'vegetation',
            'object--support--pole': 'pole',
            'object--support--utility-pole': 'pole',
            'object--traffic-light': 'traffic light',
            'object--traffic-sign--front': 'traffic sign',
            'object--vehicle--bicycle': 'bicycle',
            'object--vehicle--bus': 'bus',
            'object--vehicle--car': 'car',
            'object--vehicle--motorcycle': 'motorcycle',
            'object--vehicle--on-rails': 'train',
            'object--vehicle--truck': 'truck',
        }
        if name in direct_mapping:
            return direct_mapping[name]
        elif name.startswith('human--rider'):
            return 'rider'
        elif name.startswith('marking'):
            return 'road'
        else:
            return 'void'
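    # e.g. map_class('object--vehicle--bus') returns 'bus'; any name starting
    # with 'marking' maps to 'road', and everything unlisted becomes 'void'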

    original_labels_mapi = {
        i: {
            'name': v['name'],
            'color': v['color'],
            'mapping': map_class(v['name'])
        }
        for i, v in enumerate(config['labels'])
    }

    # array to look up the label_id of a given color
    color_map = np.ndarray(shape=(256**3), dtype='int32')
    color_map[:] = -1
    for c, v in original_labels_mapi.items():
        rgb = v['color']
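        # encode the RGB colour as a single 24-bit integer index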
        rgb = rgb[0] * 65536 + rgb[1] * 256 + rgb[2]
        color_map[rgb] = c

    # apply same label mapping as for original cityscapes
    labelinfo_mapi = {
        -1: {
            'name': 'void',
            'color': [0, 0, 0]
        },
        0: {
            'name': 'road',
            'color': [128, 64, 128]
        },
        1: {
            'name': 'sidewalk',
            'color': [244, 35, 232]
        },
        2: {
            'name': 'building',
            'color': [70, 70, 70]
        },
        3: {
            'name': 'wall',
            'color': [70, 70, 70]
        },
        4: {
            'name': 'fence',
            'color': [190, 153, 153]
        },
        5: {
            'name': 'pole',
            'color': [153, 153, 153]
        },
        6: {
            'name': 'traffic light',
            'color': [0, 0, 0]
        },
        7: {
            'name': 'traffic sign',
            'color': [220, 220, 0]
        },
        8: {
            'name': 'vegetation',
            'color': [107, 142, 35]
        },
        9: {
            'name': 'terrain',
            'color': [107, 142, 35]
        },
        10: {
            'name': 'sky',
            'color': [70, 130, 180]
        },
        11: {
            'name': 'person',
            'color': [220, 20, 60]
        },
        12: {
            'name': 'rider',
            'color': [220, 20, 60]
        },
        13: {
            'name': 'car',
            'color': [0, 0, 142]
        },
        14: {
            'name': 'truck',
            'color': [0, 0, 142]
        },
        15: {
            'name': 'bus',
            'color': [0, 0, 142]
        },
        16: {
            'name': 'train',
            'color': [0, 0, 142]
        },
        17: {
            'name': 'motorcycle',
            'color': [0, 0, 142]
        },
        18: {
            'name': 'bicycle',
            'color': [119, 11, 32]
        }
    }

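    # for each Mapillary class (in its original order), look up the label id
    # that its cityscapes-style mapping points to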
    label_lookup_mapi = [
        next(i for i in labelinfo_mapi
             if labelinfo_mapi[i]['name'] == v['mapping'])
        for v in original_labels_mapi.values()
    ]

    lookup_mapi = np.array([
        labelinfo_mapi[i]['color']
        for i in range(max(labelinfo_mapi.keys()) + 1)
    ]).astype(int)

    ###########################################################################
    # mapping from Deeplab classes to Adapnet classes
    original_labelinfo_dl = {
        0: {
            'name': 'road',
            'mapping': 'road'
        },
        1: {
            'name': 'sidewalk',
            'mapping': 'sidewalk'
        },
        2: {
            'name': 'building',
            'mapping': 'building'
        },
        3: {
            'name': 'wall',
            'mapping': 'building'
        },
        4: {
            'name': 'fence',
            'mapping': 'fence'
        },
        5: {
            'name': 'pole',
            'mapping': 'pole'
        },
        6: {
            'name': 'traffic light',
            'mapping': 'void'
        },
        7: {
            'name': 'traffic sign',
            'mapping': 'traffic sign'
        },
        8: {
            'name': 'vegetation',
            'mapping': 'vegetation'
        },
        9: {
            'name': 'terrain',
            'mapping': 'vegetation'
        },
        10: {
            'name': 'sky',
            'mapping': 'sky'
        },
        11: {
            'name': 'person',
            'mapping': 'person'
        },
        12: {
            'name': 'rider',
            'mapping': 'person'
        },
        13: {
            'name': 'car',
            'mapping': 'vehicle'
        },
        14: {
            'name': 'truck',
            'mapping': 'vehicle'
        },
        15: {
            'name': 'bus',
            'mapping': 'vehicle'
        },
        16: {
            'name': 'train',
            'mapping': 'vehicle'
        },
        17: {
            'name': 'motorcycle',
            'mapping': 'vehicle'
        },
        18: {
            'name': 'bicycle',
            'mapping': 'bicycle'
        },
        255: {
            'name': 'void',
            'mapping': 'void'
        }
    }

    labelinfo = {
        0: {
            'name': 'void',
            'color': [0, 0, 0]
        },
        1: {
            'name': 'sky',
            'color': [70, 130, 180]
        },
        2: {
            'name': 'building',
            'color': [70, 70, 70]
        },
        3: {
            'name': 'road',
            'color': [128, 64, 128]
        },
        4: {
            'name': 'sidewalk',
            'color': [244, 35, 232]
        },
        5: {
            'name': 'fence',
            'color': [190, 153, 153]
        },
        6: {
            'name': 'vegetation',
            'color': [107, 142, 35]
        },
        7: {
            'name': 'pole',
            'color': [153, 153, 153]
        },
        8: {
            'name': 'vehicle',
            'color': [0, 0, 142]
        },
        9: {
            'name': 'traffic sign',
            'color': [220, 220, 0]
        },
        10: {
            'name': 'person',
            'color': [220, 20, 60]
        },
        11: {
            'name': 'bicycle',
            'color': [119, 11, 32]
        }
    }

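    # translate each Deeplab class id into the corresponding Adapnet label id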
    label_lookup_dl = [
        next(i for i in labelinfo if labelinfo[i]['name'] == k['mapping'])
        for _, k in original_labelinfo_dl.items()
    ]

    lookup = np.array([
        labelinfo[i]['color'] for i in range(max(labelinfo.keys()) + 1)
    ]).astype(int)

    # base_path = path.join(DATA_BASEPATH, 'fishyscapes_newfog')
    # if 'TMPDIR' in environ:
    #     print('INFO loading dataset into machine ... ')
    #     # first load the zipfile into a closer memory location, then load all the
    #     # images
    #     zip = zipfile.ZipFile(path.join(base_path, 'testset.zip'), 'r')
    #     localtmp = environ['TMPDIR']
    #     zip.extractall(localtmp)
    #     zip.close()
    #     base_path = localtmp
    #
    # print('DONE loading dataset into machine ... ')

    ###########################################################################

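    # number of Mapillary evaluation images processed in this run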
    sub_size = 100

    if start == 0:
        input_path = "/cluster/work/riner/users/blumh/mapillary_evaluation_set"
        out_path = path.join(
            "/cluster/work/riner/users/haldavid/MapillaryResultsCropped",
            disc_config['arch'])
    else:
        input_path = "/Users/David/Downloads/mapillary_evaluation_set"
        out_path = path.join("/Users/David/Desktop/out", disc_config['arch'])

    if not os.path.exists(out_path):
        os.makedirs(out_path)

    # image buffers of shape (N, H, W, C)
    img_array = np.zeros((sub_size, 256, 256, 3))
    segm_array = np.zeros((sub_size, 256, 256, 3))
    gt_array = np.zeros((sub_size, 256, 256, 3))
    mask_array = np.zeros((sub_size, 256, 256))
    out_mask_array = np.zeros((sub_size, 256, 256))

    for i in range(sub_size):
        #RGB
        img = cv2.imread(path.join(input_path, str(i) + '_rgb.png'))
        # Deeplab prediction: class ids are stored in the first channel
        dl_labels = np.expand_dims(cv2.imread(
            path.join(input_path,
                      str(i) + '_predict.png'))[:, :, 0],
                                   axis=0)
        cs_labels = np.asarray(label_lookup_dl, dtype='int32')[dl_labels]
        segm = np.array(lookup[cs_labels[:]]).astype('uint8')[..., ::-1]

        gt_labels = cv2.imread(path.join(input_path,
                                         str(i) + '_segm.png'))[..., ::-1]
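        # pack the RGB colour into a single 24-bit integer so it can index color_map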
        gt_labels = gt_labels.dot(np.array([65536, 256, 1], dtype='int32'))
        gt_segm = color_map[gt_labels]
        # apply mapping

        gt_segm = np.asarray(label_lookup_mapi, dtype='int32')[gt_segm]

        gt_segm = np.array(lookup_mapi[gt_segm[:]]).astype('uint8')[..., ::-1]

        mask = cv2.imread(path.join(input_path, str(i) + '_mask.png'))[..., 0]

        # Crop all images in the same manner: from 4:3 aspect down to 4:2
        crop_margin = int(img.shape[0] / 6)
        img = img[crop_margin:-crop_margin, ...]
        segm = segm[0, crop_margin:-crop_margin, ...]
        gt_segm = gt_segm[crop_margin:-crop_margin, ...]
        mask = mask[crop_margin:-crop_margin, ...]

        img_array[i, ...] = cv2.resize(img, (256, 256),
                                       interpolation=cv2.INTER_LINEAR)
        segm_array[i, ...] = cv2.resize(segm, (256, 256),
                                        interpolation=cv2.INTER_NEAREST)
        gt_array[i, ...] = cv2.resize(gt_segm, (256, 256),
                                      interpolation=cv2.INTER_NEAREST)
        mask_array[i, ...] = cv2.resize(mask, (256, 256),
                                        interpolation=cv2.INTER_NEAREST)

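    # resynthesize images from the segmentations; transform_withD also returns
    # the discriminator prediction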
    with GAN_sess.as_default():
        with GAN_graph.as_default():
            synth_images, pred_descriminator = modelGAN.transform_withD(
                a, segm_array, img_array)

    with sessD.as_default():
        with Disc_graph.as_default():
            simMat = modelDiff.transform(img_array, synth_images, segm_array)

    for i in range(sub_size):
        gt_mask = error_mask(segm_array[i], gt_array[i])
        out_mask = np.logical_or(gt_mask, mask_array[i])
        out_mask_array[i, ...] = out_mask

        # filename = path.join(out_path,str(i)+'_dissim.npy')
        # np.save(filename,cv2.resize(simMat[i,...], (2048, 1024),interpolation=cv2.INTER_LINEAR))
        # if disc_config['arch']=='arch13':
        #     filename = path.join(out_path,str(i)+'_discsim.npy')
        #     np.save(filename,cv2.resize(pred_descriminator[i,...], (2048, 1024),interpolation=cv2.INTER_NEAREST))
        #     filename = path.join(out_path,str(i)+'_mask.npy')
        #     np.save(filename,cv2.resize(out_mask, (2048, 1024),interpolation=cv2.INTER_NEAREST))
        #     filename = path.join(out_path,str(i)+'_rgb.npy')
        #     np.save(filename,cv2.resize(img_array[i,...], (2048, 1024),interpolation=cv2.INTER_LINEAR))

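    # save the stacked result matrices for the whole subset (i still holds the last loop index)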
    filename = path.join(out_path, str(i) + '_dissim.npy')
    np.save(filename, simMat)
    if disc_config['arch'] == 'arch13':
        filename = path.join(out_path, str(i) + '_discsim.npy')
        np.save(filename, pred_descriminator)
        filename = path.join(out_path, str(i) + '_mask.npy')
        np.save(filename, out_mask_array)
        filename = path.join(out_path, str(i) + '_rgb.npy')
        np.save(filename, img_array)
        filename = path.join(out_path, str(i) + '_synth.npy')
        np.save(filename, synth_images)
        filename = path.join(out_path, str(i) + '_segm.npy')
        np.save(filename, segm_array)
        filename = path.join(out_path, str(i) + '_gt.npy')
        np.save(filename, gt_array)
Exemplo n.º 23
def main(modelname, net_config, gan_config, disc_config, datasetSem,
         datasetGAN, datasetDisc, starting_weights, input_folder, sets, _run):
    for key in gan_config:
        setattr(a, key, gan_config[key])

    setattr(a, 'EXP_OUT', EXP_OUT)
    setattr(a, 'RUN_id', _run._id)

    # Set up the directories for diagnostics
    output_dir = create_directories(_run._id, ex)

    # load the data for the data description
    data_desc = get_dataset(datasetSem['name'])
    dataFlag = (input_folder is not None)

    if dataFlag:
        # load images in input_folder
        eval_image_paths = load_list_path(input_folder)
    elif sets == "measure":
        data = data_desc(**datasetSem)
        dataset = data.get_measureset(tf_dataset=False)
    else:
        data = data_desc(**datasetSem)
        dataset = data.get_validation_set(tf_dataset=False)

    model = get_model(modelname)
    net = model(data_description=data_desc.get_data_description(),
                output_dir=output_dir,
                **net_config)
    net.import_weights(filepath=starting_weights)
    print("INFO: Imported weights succesfully")

    GAN_graph = tf.Graph()
    with GAN_graph.as_default():
        # create the network
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
        GAN_sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        # load the dataset class
        dataGAN = get_dataset(datasetGAN['name'])
        # data = data(**datasetGAN)
        cGAN_model = get_model('cGAN')
        modelGAN = cGAN_model(GAN_sess,
                              checkpoint_dir=output_dir,
                              data_desc=dataGAN.get_data_description(),
                              checkpoint=os.path.join(a.EXP_OUT,
                                                      str(a.checkpoint)))
        print("INFO: GAN Imported weights succesfully")

    # tf.reset_default_graph()
    Disc_graph = tf.Graph()
    with Disc_graph.as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
        sessD = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        dataD = get_dataset(datasetDisc['name'])
        dataD = dataD(datasetDisc['image_input_dir'], **datasetDisc)
        disc_model = get_model('simDisc')
        modelDiff = disc_model(sess=sessD,
                               checkpoint_dir=output_dir,
                               data=dataD,
                               is_training=False,
                               checkpoint=os.path.join(
                                   a.EXP_OUT, str(disc_config['checkpoint'])))
        print("INFO: Disc Imported weights succesfully")

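    # name the output folder after the semantic segmentation, GAN and discriminator
    # run ids plus the evaluated set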
    ss_run_id = starting_weights.split('/')[-2]
    gan_run_id = str(a.checkpoint)
    folder_name = ss_run_id + "_" + gan_run_id + "_" + str(
        disc_config['checkpoint']) + "_" + sets
    base_output_path = os.path.join(a.file_output_dir, folder_name)
    if not os.path.exists(base_output_path):
        os.makedirs(base_output_path)

    if dataFlag:
        sem_seg_images, rgb_images = predict_output(net,
                                                    output_dir,
                                                    eval_image_paths,
                                                    data_desc,
                                                    dataFlag=dataFlag)
    else:
        sem_seg_images, rgb_images, masks, sem_seg_GT = predict_output(
            net, output_dir, dataset, data, dataFlag=dataFlag)
        matrix_path = os.path.join(base_output_path, "mskMat.npy")
        np.save(matrix_path, masks)
        matrix_path = os.path.join(base_output_path, "gtsMat.npy")
        np.save(matrix_path, sem_seg_GT)
    print("Done with prediction of semantic segmentation")

    with GAN_sess.as_default():
        with GAN_graph.as_default():
            synth_images = modelGAN.transform(a, sem_seg_images)
            print("Done with prediction of GAN")

    with sessD.as_default():
        with Disc_graph.as_default():
            simMat = modelDiff.transform(rgb_images, synth_images,
                                         sem_seg_images)

    matrix_path = os.path.join(base_output_path, "simMat.npy")
    np.save(matrix_path, simMat)

    matrix_path = os.path.join(base_output_path, "rgbMat.npy")
    np.save(matrix_path, rgb_images)

    matrix_path = os.path.join(base_output_path, "synMat.npy")
    np.save(matrix_path, synth_images)

    matrix_path = os.path.join(base_output_path, "semMat.npy")
    np.save(matrix_path, sem_seg_images)
Exemplo n.º 24
def model_from_checkpoint(model_checkpoint: str,
                          tta: Optional[str] = None,
                          activation_after="model",
                          model=None,
                          report=True,
                          classifiers=True) -> Tuple[nn.Module, Dict]:
    checkpoint = torch.load(model_checkpoint, map_location="cpu")
    model_name = model or checkpoint["checkpoint_data"]["cmd_args"]["model"]

    score = float(checkpoint["epoch_metrics"]["valid"]["weighted_f1"])
    loc = float(
        checkpoint["epoch_metrics"]["valid"]["weighted_f1/localization_f1"])
    dmg = float(checkpoint["epoch_metrics"]["valid"]["weighted_f1/damage_f1"])
    fold = int(checkpoint["checkpoint_data"]["cmd_args"]["fold"])

    if report:
        print(model_checkpoint, model_name)
        report_checkpoint(checkpoint)

    model = get_model(model_name, pretrained=False, classifiers=classifiers)

    model.load_state_dict(checkpoint["model_state_dict"], strict=False)
    del checkpoint

    if activation_after == "model":
        model = ApplySoftmaxTo(model, OUTPUT_MASK_KEY)

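    # optionally wrap the model in test-time augmentation wrappers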
    if tta == "multiscale":
        print(f"Using {tta}")
        model = MultiscaleTTA(model,
                              outputs=[OUTPUT_MASK_KEY],
                              size_offsets=[-256, -128, +128, +256],
                              average=True)

    if tta == "flip":
        print(f"Using {tta}")
        model = HFlipTTA(model, outputs=[OUTPUT_MASK_KEY], average=True)

    if tta == "flipscale":
        print(f"Using {tta}")
        model = HFlipTTA(model, outputs=[OUTPUT_MASK_KEY], average=True)
        model = MultiscaleTTA(model,
                              outputs=[OUTPUT_MASK_KEY],
                              size_offsets=[-256, -128, +128, +256],
                              average=True)

    if tta == "multiscale_d4":
        print(f"Using {tta}")
        model = D4TTA(model, outputs=[OUTPUT_MASK_KEY], average=True)
        model = MultiscaleTTA(model,
                              outputs=[OUTPUT_MASK_KEY],
                              size_offsets=[-256, -128, +128, +256],
                              average=True)

    if activation_after == "tta":
        model = ApplySoftmaxTo(model, OUTPUT_MASK_KEY)

    info = {
        "model": fs.id_from_fname(model_checkpoint),
        "model_name": model_name,
        "fold": fold,
        "score": score,
        "localization": loc,
        "damage": dmg,
    }
    return model, info