Example #1
    def __init__(self, data={}, n_gpus=1, data_shape=None, **config):
        self.datasets = data
        self.data_shape = data_shape
        self.n_gpus = n_gpus
        self.graph = tf.get_default_graph()
        self.name = self.__class__.__name__.lower()  # get child name

        # Update config: start from the base-class defaults, overlay the
        # subclass defaults, then the user-provided entries
        self.config = dict_update(self._default_config,
                                  getattr(self, 'default_config', {}))
        self.config = dict_update(self.config, config)

        required = getattr(self, 'required_config_keys', [])
        if self.datasets:
            required += self.required_baseconfig
        for r in required:
            assert r in self.config, \
                'Required configuration entry: \'{}\''.format(r)
        assert set(self.datasets) <= self.dataset_names, \
            'Unknown dataset name: {}'.format(set(self.datasets)-self.dataset_names)
        assert n_gpus > 0, 'TODO: CPU-only training is currently not supported.'

        if data_shape is None:
            self.data_shape = {
                i: s['shape']
                for i, s in self.input_spec.items()
            }

        with tf.variable_scope('', reuse=tf.AUTO_REUSE):
            self._build_graph()
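
A minimal sketch of how a concrete model could hook into this constructor, assuming the
method above belongs to the repository's model base class; the subclass name and config
entries below are hypothetical, only the attribute names (`default_config`,
`required_config_keys`, `input_spec`) and the `_model` override follow the code shown here.

# Hypothetical subclass, for illustration only: these attributes are the hooks consumed
# by the constructor above (config merging, required-key checks, data_shape fallback).
class ToyDetector(BaseModel):
    input_spec = {'image': {'shape': [None, None, None, 1], 'type': tf.float32}}
    default_config = {'kernel_size': 3}        # merged over the base defaults
    required_config_keys = ['kernel_size']     # checked by the assertions above

    def _model(self, inputs, mode, **config):
        # ... build and return the prediction ops for `inputs['image']` ...
        raise NotImplementedError
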
    def _model(self, inputs, mode, **config):
        assert mode != Mode.TRAIN

        config_path = Path(DATA_PATH, 'weights', config['config']).as_posix()
        with open(config_path, 'rb') as f:
            original_config = pickle.load(f).__dict__
        config = tools.dict_update(original_config, config)
        namespace = Namespace(**config)

        image = inputs['image'] / 255.0
        ops = build_networks(namespace, image, False)
        ops = {k: tf.expand_dims(v, axis=0) for k, v in ops.items()}
        ret = {
            'keypoints': ops['kpts'],
            'scores': ops['scores'],
            'descriptors': ops['feats']
        }
        return ret

    def __init__(self, **config):
        # Update config
        self.config = dict_update(getattr(self, 'default_config', {}), config)

        self.dataset = self._init_dataset(**self.config)
        self.split_names = getattr(self, 'split_names',
                                   self.default_split_names)

        self.tf_splits = {}
        self.tf_it = {}
        self.tf_next = {}
        with tf.device('/cpu:0'):
            for n in self.split_names:
                self.tf_splits[n] = self._get_data(self.dataset, n,
                                                   **self.config)
                prefetched = self.tf_splits[n].prefetch(
                    self.config.get('prefetch', 1))
                self.tf_it[n] = prefetched.make_initializable_iterator()
                self.tf_next[n] = self.tf_it[n].get_next()
        self.end_set = tf.errors.OutOfRangeError

        sess_config = tf.ConfigProto()
        sess_config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=sess_config)
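
A usage sketch for the dataset side: `SomeDataset` and the `'training'` split name are
assumptions for illustration, but `tf_it`, `tf_next`, `end_set`, and `sess` are exactly
the attributes built in the constructor above.

# Drain one split with the session and iterators created above (hypothetical subclass).
dataset = SomeDataset(**config)
dataset.sess.run(dataset.tf_it['training'].initializer)
try:
    while True:
        data = dataset.sess.run(dataset.tf_next['training'])  # one element as a dict of arrays
except dataset.end_set:  # i.e. tf.errors.OutOfRangeError
    pass
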
def homography_adaptation(image, net, config):
    """Perfoms homography adaptation.
    Inference using multiple random warped patches of the same input image for robust
    predictions.
    Arguments:
        image: A `Tensor` with shape `[N, H, W, 1]`.
        net: A function that takes an image as input, performs inference, and outputs the
            prediction dictionary.
        config: A configuration dictionary containing optional entries such as the number
            of sampled homographies `'num'`, the aggregation method `'aggregation'`.
    Returns:
        A dictionary which contains the aggregated detection probabilities.
    """

    probs = net(image)['prob']
    counts = tf.ones_like(probs)
    images = image

    probs = tf.expand_dims(probs, axis=-1)
    counts = tf.expand_dims(counts, axis=-1)
    images = tf.expand_dims(images, axis=-1)

    shape = tf.shape(image)[1:3]
    config = dict_update(homography_adaptation_default_config, config)

    def step(i, probs, counts, images):
        # Sample image patch
        H = sample_homography(shape, **config['homographies'])
        H_inv = invert_homography(H)
        warped = H_transform(image, H, interpolation='BILINEAR')
        count = H_transform(tf.expand_dims(tf.ones(tf.shape(image)[:3]), -1),
                            H_inv,
                            interpolation='NEAREST')[..., 0]

        # Predict detection probabilities
        warped_shape = tf.to_int32(
            tf.to_float(shape) * config['homographies']['patch_ratio'])
        input_warped = tf.image.resize_images(warped, warped_shape)
        prob = net(input_warped)['prob']
        prob = tf.image.resize_images(tf.expand_dims(prob, axis=-1),
                                      shape)[..., 0]
        prob_proj = H_transform(tf.expand_dims(prob, -1),
                                H_inv,
                                interpolation='BILINEAR')[..., 0]

        probs = tf.concat([probs, tf.expand_dims(prob_proj, -1)], axis=-1)
        counts = tf.concat([counts, tf.expand_dims(count, -1)], axis=-1)
        images = tf.concat([images, tf.expand_dims(warped, -1)], axis=-1)
        return i + 1, probs, counts, images

    _, probs, counts, images = tf.while_loop(
        lambda i, p, c, im: tf.less(i, config['num'] - 1),
        step, [0, probs, counts, images],
        parallel_iterations=1,
        back_prop=False,
        shape_invariants=[
            tf.TensorShape([]),
            tf.TensorShape([None, None, None, None]),
            tf.TensorShape([None, None, None, None]),
            tf.TensorShape([None, None, None, 1, None])
        ])

    counts = tf.reduce_sum(counts, axis=-1)
    max_prob = tf.reduce_max(probs, axis=-1)
    mean_prob = tf.reduce_sum(probs, axis=-1) / counts

    if config['aggregation'] == 'max':
        prob = max_prob
    elif config['aggregation'] == 'sum':
        prob = mean_prob
    else:
        raise ValueError('Unknown aggregation method: {}'.format(
            config['aggregation']))

    if config['filter_counts']:
        prob = tf.where(tf.greater_equal(counts, config['filter_counts']),
                        prob, tf.zeros_like(prob))

    return {
        'prob': prob,
        'counts': counts,
        'mean_prob': mean_prob,
        'input_images': images,
        'H_probs': probs
    }  # debug
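
For reference, a configuration sketch covering the entries read by `homography_adaptation`
above; the key names come from the code, the values are illustrative placeholders (the
actual defaults live in `homography_adaptation_default_config`).

example_config = {
    'num': 10,               # total number of predictions; the loop adds num - 1 warped passes
    'aggregation': 'sum',    # 'max' keeps the per-pixel maximum, 'sum' the count-normalized mean
    'filter_counts': 0,      # 0 disables zeroing out pixels seen by too few homographies
    'homographies': {
        'patch_ratio': 0.5,  # scales the resolution of the warped patch fed to `net`
        # remaining entries are forwarded to sample_homography(...)
    },
}
# outputs = homography_adaptation(image, net, example_config)
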
    with open(args.config, 'r') as f:
        config = yaml.load(f)
    keys = '*' if args.keys == '*' else args.keys.split(',')

    if args.as_dataset:
        base_dir = Path(DATA_PATH, export_name)
    else:
        base_dir = Path(EXPER_PATH, 'exports')
        base_dir = Path(base_dir, ((exper_name + '/') if exper_name else '') +
                        export_name)
    base_dir.mkdir(parents=True, exist_ok=True)

    if exper_name:
        # Update only the model config (not the dataset)
        with open(Path(EXPER_PATH, exper_name, 'config.yaml'), 'r') as f:
            config['model'] = tools.dict_update(
                yaml.load(f)['model'], config.get('model', {}))
        checkpoint_path = Path(EXPER_PATH, exper_name)
        if config.get('weights', None):
            checkpoint_path = Path(checkpoint_path, config['weights'])
    else:
        if config.get('weights', None):
            checkpoint_path = Path(DATA_PATH, 'weights', config['weights'])
        else:
            checkpoint_path = None
            logging.info('No weights provided.')
    logging.info(f'Starting export with configuration:\n{pformat(config)}')

    with get_model(config['model']['name'])(
            data_shape={'image': [None, None, None,
                                  config['model']['image_channels']]},
            **config['model']) as net: