Example #1
def main(direc):  # NOTE: 'direc' is unused; the paths below are hardcoded
    path_plotting = '/hdd/dropbox/Dropbox/grad/results/bn_repr/scatter'
    maybe_makedirs(path_plotting)
    log.debug('Save all plots to %s' % path_plotting)

    experiments = [
        '/hdd/logs_overnight_training/newer/overnight_0403/fullgta5-ema/extract/fullgta5-ema',
        '/hdd/logs_overnight_training/newer/overnight_0403/fulluda-custombatch-lambda-0.005-ema/extract/fulluda-custombatch-lambda-0.005-ema'
    ]

    embeddings_and_labels = read_embeddings(experiments, subsample=3000)

    plot_embeddings(embeddings_and_labels,
                    experiment_names=['GTA5-single', 'UADA'],
                    path_plotting=path_plotting)
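All of the examples on this page revolve around the helper maybe_makedirs, which is project-internal and never shown. A minimal sketch of what it plausibly does, assuming force_dir=True means the argument is itself the directory to create, while file-like paths otherwise only get their parent directory created (consistent with the mixed call sites in the examples below); the real helper may differ:

import os

def maybe_makedirs(path, force_dir=False):
    # Hypothetical reconstruction; the real project-internal helper may differ.
    # Assumption: force_dir=True -> `path` is the directory to create;
    # otherwise a path with a file extension only gets its parent dir created.
    if force_dir or not os.path.splitext(path)[1]:
        directory = path
    else:
        directory = os.path.dirname(path)
    os.makedirs(directory, exist_ok=True)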
Example #2
def main(direc):
    path = direc
    path_to_problem_def = '/home/mps/Documents/rob/training22/problem_gta5_19.json'
    path_plotting = join(path, 'plots/')
    maybe_makedirs(path_plotting)
    log.debug('Save all plots to %s' % path_plotting)

    segm_label_names, segm_colors = parse_problem_def(path_to_problem_def)

    all_experiments = find_all_experiments(path)
    for i, experiment_name in enumerate(all_experiments):
        log.debug('\n\nExperiment %15s %5i/%5i' % (experiment_name, i + 1, len(all_experiments)))
        embeddings, decisions, domain_labels, domain_label_names, segm_labels = \
            read_embeddings(path, experiment_name)

        acc = train_knn(embeddings, domain_labels)
        print('kNN domain-classification accuracy:', acc)

        retrievals = retrieve_plot(embeddings, domain_labels, domain_label_names)
        # NOTE: this path is the same on every iteration, so each experiment
        # overwrites the previous experiment's retrievals.csv.
        np.savetxt(fname=join(path, 'retrievals.csv'), X=retrievals, delimiter=',')
Example #3
def write_settings_to_file(settings, log_dir_provided=None):
    """
    Writes all the settings to a file 'settings.txt' in the log_dir
    :param settings:
    :param log_dir_provided
    :return:
    """

    log_dir = log_dir_provided if log_dir_provided else settings.log_dir
    # vars(args).items() returns (key,value) tuples from args.__dict__
    # and sorted uses first element of tuples to sort
    settings_dict = collections.OrderedDict(sorted(vars(settings).items()))

    maybe_makedirs(log_dir, force_dir=True)

    # write configuration for future reference
    settings_filename = join(log_dir, 'settings.txt')
    # assert not exists(settings_filename), (f"Previous settings.txt found in "
    #                                        f"{log_dir}. Rename it manually and restart training.")
    with open(settings_filename, 'w') as f:
        for i, key in enumerate(settings_dict):
            print(f"{i:2} : {key} : {settings_dict[key]}", file=f)
Example #4
def main(argv):
    t_start = time.time()
    # Parse the arguments
    ssargs = SemanticSegmentationArguments()
    ssargs.add_evaluate_arguments()
    args = ssargs.parse_args(argv)

    _add_extra_args(args)

    # vars(args).items() returns (key,value) tuples from args.__dict__
    # and sorted uses first element of tuples to sort
    args_dict = collections.OrderedDict(sorted(vars(args).items()))
    params = args

    # Set up the logger
    logger = setup_logger(args.log_dir, 'eval')
    logger.warning('Hello evaluation')
    logger.debug('\n'.join(('%30s : %s' % (key, value) for key, value in args_dict.items())))

    # Prepare the labels to be used for printing
    labels = params.evaluation_problem_def['cids2labels']
    void_exists = -1 in params.evaluation_problem_def['lids2cids']
    labels = labels[:-1] if void_exists else labels

    eval_fn = eval_fn_original

    system = SemanticSegmentation({'eval': eval_fn}, model_fn, params)
    all_metrics = system.evaluate()

    # Print and save the confusion matrix
    output_filename = params.confusion_matrix_filename if params.confusion_matrix_filename \
        else os.path.join(system.eval_res_dir, 'confusion_matrix.txt')
    maybe_makedirs(output_filename)
    with open(output_filename, 'w') as f:
        print_all_metrics(all_metrics, labels, printfile=f)
        print('Wrote confusion matrix to %s' % output_filename)
    print(f'Took {time.time() - t_start:.1f} seconds to run')
Example #5
def main(dataset, tvt_set, output_dims, reshape, base_path):

    assert tvt_set in ['train', 'val', 'test']

    print(
        f"work on dataset {dataset} and split {tvt_set} and reshape {reshape} to {output_dims}"
    )
    if dataset == 'cityscapes':
        train_path = join(base_path, f'leftImg8bit/{tvt_set}/*/*.png')
        train_image_fnames = glob.glob(train_path)
        print('debug:train_image_fnames:', len(train_image_fnames))
        train_label_fnames = [
            ef.replace('leftImg8bit.png', 'gtFine_labelIds.png')
            for ef in train_image_fnames
        ]
        train_label_fnames = [
            lf.replace('leftImg8bit/', 'gtFine/') for lf in train_label_fnames
        ]
        print('debug:', train_image_fnames[0], train_label_fnames[0])

        image_fnames = train_image_fnames
        label_fnames = train_label_fnames

        tfrecord_fname = join(base_path,
                              f'new_tfrecords/{tvt_set}Fine.tfrecords')
    elif dataset == 'camvid':
        train_path = join(base_path, 'LabeledApproved_full')
        train_label_fnames = glob.glob(train_path + '/*.png')
        print('debug:train_label_fnames:', len(train_label_fnames))
        train_image_fnames = [
            ef.replace('_L', '') for ef in train_label_fnames
        ]
        train_image_fnames = [
            ef.replace('LabeledApproved_full', '701_StillsRaw_full')
            for ef in train_image_fnames
        ]
        print('debug:', train_image_fnames[0], train_label_fnames[0])

        image_fnames = train_image_fnames
        label_fnames = train_label_fnames

        tfrecord_fname = join(base_path, 'tfrecords/trainFine.tfrecords')
    elif dataset == 'gta5':
        # path_out = '/home/mps/Documents/rob/datasets/gta5'
        split = loadmat('split.mat')[tvt_set + 'Ids']
        split = np.squeeze(split).tolist()

        if tvt_set == 'test':
            split = split[:-4]

        image_fnames = glob.glob(join(base_path, 'images/*.png'))
        label_fnames = [ef.replace('images', 'labels') for ef in image_fnames]

        image_fnames = [image_fnames[i] for i in split]
        label_fnames = [label_fnames[i] for i in split]

        tfrecord_fname = join(base_path,
                              f'new_tfrecords/{tvt_set}Fine.tfrecords')

    elif dataset == 'mapillary':
        print(f'start on Mapillary {tvt_set}')
        if tvt_set == 'val':
            return
        tvt_folder = 'training' if tvt_set == 'train' else 'validation'
        image_fnames = glob.glob(join(base_path, tvt_folder, 'images',
                                      '*.jpg'))
        label_fnames = [
            imf.replace('images', 'labels').replace('.jpg', '.png')
            for imf in image_fnames
        ]

        tfrecord_fname = join(base_path,
                              f'new_tfrecords/{tvt_set}Fine.tfrecords')

    elif dataset == 'apollo':
        if tvt_set == 'train':
            return

        image_fnames, label_fnames = get_apollo_im_label_fnames(base_path)

        tfrecord_fname = join(base_path,
                              f'new_tfrecords/{tvt_set}Fine.tfrecords')
        maybe_makedirs(tfrecord_fname)

    elif dataset == 'wilddash':
        image_fnames = glob.glob(join(base_path, 'wd_val_01/*_100000.png'))
        label_fnames = [
            im_fname.replace('.png', '_labelIds.png')
            for im_fname in image_fnames
        ]

        tfrecord_fname = join(base_path, 'valFine.tfrecords')
    elif dataset == 'bdd':
        image_fnames = glob.glob(join(base_path, 'images/val/*.jpg'))
        label_fnames = [
            im_fname.replace('images',
                             'labels').replace('.jpg', '_train_id.png')
            for im_fname in image_fnames
        ]

        for x, y in zip(image_fnames, label_fnames):
            print(x)
            print(y)
            print('\n')
        tfrecord_fname = join(base_path, 'tfrecords_384/valFine.tfrecords')
    else:
        # assert False is stripped under `python -O`; raise explicitly instead
        raise ValueError(f'Unknown dataset: {dataset}')

    maybe_makedirs(tfrecord_fname)  # ensure the output dir exists for every dataset branch
    with tf.python_io.TFRecordWriter(tfrecord_fname) as tfrecord_writer:
        packet = {
            'image_fnames': image_fnames,
            'label_fnames': label_fnames,
            'format': 'png',
            'dims': output_dims
        }
        add_to_tfrecords(packet, tfrecord_writer, reshape=reshape)
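add_to_tfrecords is project-internal and not shown. A sketch of how such a packet might be consumed with the same TF1 API; the feature keys, the PIL-based resizing, and the raw-bytes serialization are all assumptions:

import tensorflow as tf
from PIL import Image

def add_to_tfrecords(packet, tfrecord_writer, reshape=False):
    # Hypothetical reconstruction: write one tf.train.Example per image/label pair.
    for im_fname, lb_fname in zip(packet['image_fnames'], packet['label_fnames']):
        image, label = Image.open(im_fname), Image.open(lb_fname)
        if reshape:
            image = image.resize(packet['dims'], Image.BILINEAR)
            # Nearest neighbour for labels, so no invalid ids are interpolated in.
            label = label.resize(packet['dims'], Image.NEAREST)
        example = tf.train.Example(features=tf.train.Features(feature={
            'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image.tobytes()])),
            'label': tf.train.Feature(bytes_list=tf.train.BytesList(value=[label.tobytes()])),
        }))
        tfrecord_writer.write(example.SerializeToString())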
Example #6
def plot_comparison_table(filenames,
                          plot_keys,
                          coloring_types,
                          out_dir,
                          dims=(768, 384)):
    """

    :param filenames: dict of filenames. Key is the label type and value is a list of path to labels
    :param plot_keys: which keys of the dict filenames to plot
    :param coloring_types: for each plot_key this should mention what coloring type is used
    :param out_dir: output dir to save the comparison figure. if empty string, then display interactively
    :param dims: dimensions to resize all images and labels too. Note that in format (WIDTH, HEIGHT)
    :return:
    """
    # Some basic assertions to start with
    assert 'Image' in filenames, "The filenames dict must contain at least an 'Image' key"
    for color_type in coloring_types:
        assert color_type in ['lid', 'cid', 'lid_map', 'lid_apo']
    assert len(filenames['Image']) > 0
    assert len(plot_keys) == len(coloring_types)
    if dims[0] < dims[1]:
        print(
            f'WARNING: the dims are interpreted as width={dims[0]} and height={dims[1]}'
        )
    assert out_dir != ''
    out_dir_pred = join(out_dir, 'preds/')
    maybe_makedirs(out_dir_pred, force_dir=True)
    print(
        f'we have filenames for keys {filenames.keys()} and we are plotting {plot_keys}'
    )

    # Set up constants for plotting
    num_col = len(plot_keys) + 1  # +1 for the images column
    num_row = 5

    # Make a pyplot figure
    f, axarr = plt.subplots(num_row, num_col)

    count_row = 0  # Count in which row we are plotting
    for filename in filenames['Image']:
        # Code is structured as follows:
        # -1 Loop over image filenames
        # -2 extract the image code
        # -3 make sure that all other keys also have an image with that code
        # -4 only if all other filenames are found, then plot the row
        im_code = basename(filename).replace('_leftImg8bit.png',
                                             '').replace('.jpg', '')

        def find_other_filenames():
            """
            Find all the filenames whose code appears in all plot_keys
            :return:
            """
            comparison_fnames = []
            for key in plot_keys:
                matching_fnames = list(
                    filter(lambda filepath: im_code in filepath,
                           filenames[key]))

                if len(matching_fnames) == 0:
                    print(
                        f'Image code {im_code} not found in list with key {key}'
                    )
                    return None
                elif len(matching_fnames) == 1:
                    comparison_fnames.append(matching_fnames[0])
                else:
                    assert False, f'Found multiple hits on code {im_code}'
            return comparison_fnames

        comparison_files = find_other_filenames()
        if comparison_files is None:
            # Apparently, the image code was not found in all methods
            continue
        else:
            count_row += 1  # Only increment the count_row if we actually found a match in all plot_keys
        if count_row > num_row:
            break

        input_image = Image.open(filename).resize(dims)
        axarr[count_row - 1, 0].imshow(input_image)
        input_image.save(join(out_dir_pred, f'{count_row}_image.png'))
        del input_image

        for n_col, (fname_comparison, color_type) in enumerate(
                zip(comparison_files, coloring_types)):
            label = Image.open(fname_comparison).resize(dims)

            # Convert the labels according to their color type and palette
            if color_type == 'lid':
                label_array = np.take(palette_lid, np.array(label), axis=0)
            elif color_type == 'lid_map':
                lids2cids_map = [
                    -1, -1, -1, 4, -1, -1, 3, -1, -1, -1, -1, -1, -1, 0, -1, 1,
                    -1, 2, -1, 11, 12, 12, 12, -1, -1, -1, -1, 10, -1, 9, 8,
                    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5,
                    -1, 5, 6, -1, 7, -1, 18, -1, 15, 13, -1, 17, 16, -1, -1,
                    14, -1, -1, -1, -1
                ]
                label_cid = np.take(lids2cids_map, np.array(label), axis=0)
                label_array = np.take(palette_cid, label_cid, axis=0)
            elif color_type == 'cid':
                label_array = np.take(palette_cid, np.array(label), axis=0)
            elif color_type == 'lid_apo':
                with open(
                        '/home/mps/Documents/rob/datasets/problem_uda_apollo.json'
                ) as fp:
                    lids2cids_apo = json.load(fp)['lids2cids']
                label_cid = np.take(lids2cids_apo, np.array(label), axis=0)
                label_array = np.take(palette_cid, label_cid, axis=0)
            else:
                assert False, 'Color type not recognised'

            axarr[count_row - 1, n_col + 1].imshow(label_array)
            Image.fromarray((label_array * 255).astype(np.uint8)).save(
                join(out_dir_pred, f'{count_row}_{n_col}.png'))

    # All the pyplot magic :)
    col_names = ['Image'] + plot_keys
    for n_row, axrow in enumerate(axarr):
        for n_col, ax in enumerate(axrow):
            if n_row == 0:
                ax.set_title(col_names[n_col])

            ax.get_xaxis().set_ticklabels([])
            ax.get_yaxis().set_ticklabels([])

            ax.tick_params(
                which='both',       # both major and minor ticks are affected
                bottom=False,       # ticks along the bottom edge are off
                top=False,          # ticks along the top edge are off
                left=False,
                right=False,
                labelbottom=False)  # labels along the bottom edge are off
    plt.tight_layout()
    plt.subplots_adjust(wspace=0, hspace=0)

    # Either show image or save it
    # Unreachable while the out_dir assertion above is in place; also note that
    # string comparison needs ==, not identity via `is`.
    if out_dir == '':
        plt.show()
    else:
        print(f'Start saving to {out_dir}')
        f.savefig(join(out_dir, 'comparison.pdf'),
                  dpi=1000,
                  format='pdf',
                  transparent=True)
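A usage sketch for plot_comparison_table with placeholder paths; 'Image' is the one mandatory key and the other keys are free-form method names (these particular paths and keys are assumptions):

from glob import glob

filenames = {
    'Image': glob('/data/cityscapes/leftImg8bit/val/*/*.png'),
    'Label': glob('/data/cityscapes/gtFine/val/*/*_labelIds.png'),
    'Ours': glob('/logs/run1/predictions/*.png'),
}
plot_comparison_table(filenames,
                      plot_keys=['Label', 'Ours'],
                      coloring_types=['lid', 'cid'],
                      out_dir='/tmp/comparison')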
Example #7
                              ['lid', 'lid', 'lid'],
                              out_dir=out_dir)
    elif dataset == 'mapillary':
        filenames['Image'] = glob(
            '/hdd/datasets/mapillary/validation/images/*.jpg')
        filenames['Label'] = list(
            map(lambda x: im2lbl_path(x, 'mapillary'), filenames['Image']))
        filenames['Source-only'] = glob(
            '/hdd/logs_overnight_training/newer/overnight_0403/fullgta5-ema/predictions_mapillary/*.png'
        )
        filenames['UADA'] = glob(
            '/hdd/logs_overnight_training/newer/overnight_0403/fulluda-custombatch-lambda-0.005-ema/predictions_mapillary/*.png'
        )

        out_dir = '/hdd/dropbox/Dropbox/grad/results/comparison_figures/mapillary_unseen'
        maybe_makedirs(out_dir, force_dir=True)

        plot_comparison_table(filenames, ['Label', 'Source-only', 'UADA'],
                              ['lid_map', 'cid', 'cid'],
                              out_dir=out_dir)

    elif dataset == 'apollo':
        import random
        random.seed(124)
        image_fnames, label_fnames = get_apollo_im_label_fnames(
            '/hdd/datasets/apolloscape/original')
        filenames['Image'] = random.sample(image_fnames, 200)
        filenames['Label'] = label_fnames
        filenames['Source-only'] = glob(
            '/hdd/logs_overnight_training/newer/overnight_0509/fullgta-1/predict_apollo/*.png'
        )
Example #8
def main(basedir):
    out_dir = join(basedir, 'movie')
    maybe_makedirs(out_dir, force_dir=True)

    predict_dirs = sorted([x[0] for x in os.walk(basedir) if 'predictions' in x[0]])
    num_preds = len(predict_dirs)

    image_fnames = glob.glob(join(basedir, '*.png'))
    image_codedict = {code: filename for code, filename in get_im_code(image_fnames)}

    predictions_codedicts = [{code: filename for code, filename in get_im_code(glob.glob(join(predict_dir, '*.png')))}
                             for predict_dir in predict_dirs]

    all_fnames = []
    for code, im_filename in image_codedict.items():
        if all((code in codedict for codedict in predictions_codedicts)):
            all_fnames.append([code, im_filename] + [codedict[code] for codedict in predictions_codedicts])

    for num_row, fname_row in enumerate(all_fnames):
        frame_num = fname_row[0][-5:]
        if not frame_num.isdigit():
            frame_num = 'UNK'
        if 'stuttgart_00' in basedir and frame_num != 'UNK' and int(frame_num) > 400:
            continue

        f, axarr = plt.subplots(2, 2, figsize=(12, 8))
        f.subplots_adjust(wspace=0, hspace=0)
        axarr[0, 1].imshow(Image.open(fname_row[1]).resize(dims))
        axarr[0, 1].set_title('Input image')
        axarr[0, 1].legend(handles=custom_handles, loc=7, bbox_to_anchor=(0.9, -1.43), ncol=10, fontsize='small', markerscale=5)


        base_font = 14
        axarr[0, 0].imshow(zero_image)
        axarr[0, 0].text(10, 50, 'Domain Agnostic Normalization Layer', fontdict={'family': 'serif', 'size': base_font})
        axarr[0, 0].text(10, 70, 'for Unsupervised Adversarial Domain adaptation', fontdict={'family': 'serif', 'size': base_font})
        if False:  # toggle on to show author info (hidden for blind review)
            axarr[0,0].text(10, 110, 'R. Romijnders, P. Meletis, G. Dubbelman', fontdict={'family': 'serif', 'size': int(0.8*base_font)})
            axarr[0, 0].text(10, 130, 'Mobile Perception Systems (SPS-VCA)', fontdict={'family': 'serif', 'size': int(0.8*base_font)})
            axarr[0, 0].text(10, 150, 'TU/e Eindhoven', fontdict={'family': 'serif', 'size': int(0.8*base_font)})
        else:
            axarr[0, 0].text(10, 110, 'Info removed for blind review', fontdict={'family': 'serif', 'size': int(0.8 * base_font)})
        axarr[0, 0].text(10, 170, 'June, 2018', fontdict={'family': 'serif', 'size': int(0.8*base_font)})
        axarr[0, 0].text(300, 190, f'frame {frame_num}', fontdict={'family': 'serif', 'size': int(0.7*base_font)})

        for num_pred in range(num_preds):
            label = np.array(Image.open(fname_row[num_pred+2]).resize(dims))
            # Mask out the Mercedes
            label = np.clip(label + mercedes_mask, 0, 20)
            label_array = np.take(palette_cid, label, axis=0)
            axarr[1, num_pred].imshow(label_array)

        for n_row, axrow in enumerate(axarr):
            for n_col, ax in enumerate(axrow):
                ax.get_xaxis().set_ticklabels([])
                ax.get_yaxis().set_ticklabels([])
                if n_row == 1:
                    ax.set_title(col_names[n_col])

                ax.tick_params(
                    which='both',       # both major and minor ticks are affected
                    bottom=False,       # ticks along the bottom edge are off
                    top=False,          # ticks along the top edge are off
                    left=False,
                    right=False,
                    labelbottom=False)  # labels along the bottom edge are off
        plt.tight_layout()
        f.savefig(join(out_dir, f'{fname_row[0]}.png'), format='png')
        plt.close('all')
        print(f'{num_row + 1:5d} out of {len(all_fnames):6d}')
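The per-frame figures saved above can then be stitched into a video. A sketch using ffmpeg through subprocess; the invocation and frame rate are assumptions, since the assembly step is not part of this snippet:

import subprocess
from os.path import join

# Hypothetical follow-up: requires ffmpeg on the PATH; 17 fps matches the
# Cityscapes demo sequences but is only a guess here.
subprocess.run([
    'ffmpeg', '-framerate', '17',
    '-pattern_type', 'glob', '-i', join(out_dir, '*.png'),
    '-c:v', 'libx264', '-pix_fmt', 'yuv420p',
    join(out_dir, 'movie.mp4'),
], check=True)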
Example #9
    def evaluate(self):
        log.debug(f"\nWriting results in {self._settings.eval_res_dir}.\n")
        maybe_makedirs(self._settings.eval_res_dir)

        write_settings_to_file(self._settings, self._settings.eval_res_dir)

        if self._settings.enable_xla:
            session_config = tf.ConfigProto()
            session_config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
        else:
            session_config = None

        runconfig = tf.estimator.RunConfig(model_dir=self._settings.log_dir,
                                           session_config=session_config,
                                           keep_checkpoint_max=2)

        # create a local estimator
        self._create_estimator(runconfig)

        # get labels needed for online printing
        labels = self._settings.evaluation_problem_def['cids2labels']
        void_exists = -1 in self._settings.evaluation_problem_def['lids2cids']
        labels = labels[:-1] if void_exists else labels

        def yield_all_metrics():
            for i, checkpoint_path in enumerate(
                    self.get_all_model_checkpoint_paths()):
                log.debug('Checkpoint %i/%i' %
                          (i + 1, self._settings.eval_all_ckpts))
                # metrics contains only confusion matrix for now (and loss and global step)
                metrics = self._estimator.evaluate(
                    input_fn=self._input_fns['eval'],
                    steps=self._settings.num_eval_steps,
                    # if None latest in model_dir will be used
                    checkpoint_path=checkpoint_path,
                    name=split(self._settings.eval_res_dir)[1][-2:])

                # deal with void in evaluation lids2cids
                if -1 in self._settings.evaluation_problem_def['lids2cids']:
                    assert set(metrics.keys()) == {
                        'global_step', 'loss', 'confusion_matrix'
                    }, ('internal error: only confusion matrix metric is supported for mapping to '
                        'a new problem definition for now. Change to training problem definition.'
                        )
                    metrics['confusion_matrix'] = metrics[
                        'confusion_matrix'][:-1, :-1]

                # transform to different evaluation problem definition
                # if self._settings.training_problem_def != self._settings.evaluation_problem_def:
                #     metrics = self._map_metrics_to_evaluation_problem_def(metrics)
                #
                #     # deal with void in training_cids2evaluation_cids
                #     if -1 in self._settings.evaluation_problem_def['training_cids2evaluation_cids']:
                #         assert set(metrics.keys()) == {'global_step', 'loss', 'confusion_matrix'}, (
                #             'internal error: only confusion matrix metric is supported for mapping to '
                #             'a new problem definition for now. Change to training problem definition.')
                #         metrics['confusion_matrix'] = metrics['confusion_matrix'][:-1, :-1]

                # online print the summary of metrics to terminal
                print_metrics_from_confusion_matrix(
                    metrics['confusion_matrix'], labels)
                yield metrics

        return list(yield_all_metrics())
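evaluate() returns one metrics dict per checkpoint. A sketch of ranking checkpoints by mean IoU derived from the confusion matrix (standard per-class IoU formula; the `system` object is the one constructed in Example #4):

import numpy as np

def miou_from_confusion_matrix(cm):
    # Per-class IoU = TP / (TP + FP + FN), averaged over classes.
    cm = np.asarray(cm, dtype=np.float64)
    tp = np.diag(cm)
    denom = cm.sum(axis=0) + cm.sum(axis=1) - tp
    return float(np.mean(tp / np.maximum(denom, 1e-12)))

all_metrics = system.evaluate()
best = max(all_metrics, key=lambda m: miou_from_confusion_matrix(m['confusion_matrix']))
print(best['global_step'], miou_from_confusion_matrix(best['confusion_matrix']))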