Example #1
    def init_config(self, config_file: str) -> None:
        # Load the configuration file from disk
        config_file_path = Path(config_file)
        log.check(
            config_file_path.exists(),
            message=f'Config file "{config_file_path}" doesn\'t exist',
        )
        with config_file_path.open() as config_file_fh:
            try:
                # safe_load() returns None for an empty file
                config_raw = yaml.safe_load(config_file_fh) or {}
            except yaml.YAMLError as exc:
                log.error(
                    f'Configuration file "{config_file_path}" parse error\n{exc}'
                )
                return

        # Remember which config file we loaded
        self.config.config_file = str(config_file_path)
        # Load config values from the file
        for key, val in config_raw.items():
            # Don't load non_file_fields
            if key in self.config.non_file_fields:
                continue
            try:
                getattr(self.config, key)
            except AttributeError:
                # Warn when an unknown configuration key is loaded
                log.warn(f'Loaded unknown configuration "{key}"')
            setattr(self.config, key, val)
        print("Finished loading config")
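For reference, glog's `check()` raises `log.FailedCheckException` when its condition is false (see Example #16), so a missing config file aborts loading with the message above. A minimal sketch of that failure path; `app` stands for a hypothetical instance of the class this method belongs to:

# 'app' and the path are hypothetical, for illustration only.
try:
    app.init_config('/nonexistent/config.yaml')
except log.FailedCheckException:
    pass  # check() raised because the config file does not exist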
Example #2
    def create(cls, session, path=None, name=None, size=None):
        """
        Factory method for creating an OpticalFlowModel.
        USAGE:

        This will create the 'latest' model:
        >>> model = OpticalFlowModel.create(session)

        This will create a model for a given name:
        >>> model = OpticalFlowModel.create(session, name='pwc_512x512')

        This will create a model for a specific local path:
        >>> model = OpticalFlowModel.create(
        ...     session, path='/home/whoami/mymodels/pwc_512x512/')
        """
        log.check(
            path is None or name is None,
            message=('Do not specify both |path| and |name|. '
                     'It is sufficient to specify just one of the two.'))
        if path is None:
            if name is None:
                path = ModelPathRegistry().get_latest()
            else:
                path = ModelPathRegistry().get(name)
        # TODO(vasiliy): size must be estimated in some kind of smart
        # way, either by this factory method (e.g. using an extra options
        # arg), or by the model itself.
        # Or the TODO at the top of the file should be addressed.
        if size is None:
            log.warning('Size is hardcoded to a default value! Until the '
                        'associated TODO is addressed, your results will not '
                        'be great.')
            size = (256, 256)
        return cls(session, size, path)
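As a usage note, passing both arguments trips the mutual-exclusivity check above. A sketch, reusing the hypothetical path and name from the docstring:

# Raises log.FailedCheckException: |path| and |name| are mutually exclusive.
model = OpticalFlowModel.create(session,
                                path='/home/whoami/mymodels/pwc_512x512/',
                                name='pwc_512x512')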
Example #3
    def get_raw_flow(self, level):
        """
        Returns an estimate of flow without the context network applied to it.
        Larger levels correspond to finer-resolution estimates.
        """
        log.check(0 <= level < len(self.estimator_net))
        return self.estimator_net[level].get_flow()
Example #4
def _maybe_add_batch_norm_or_activation_fn(entree,
                                           use_batch_norm=False,
                                           is_training=None,
                                           activation_fn=tf.nn.leaky_relu,
                                           reuse=None,
                                           scope=None,
                                           bn_epsilon=1e-2,
                                           bn_decay=0.9):
    """
    Either returns the provided activation function (e.g. ReLU) applied to the
    input, or applies batch norm and activation function and returns it.
    """
    if use_batch_norm:
        log.check(is_training is not None,
                  ('You specified that batch_norm should be used, but '
                   '"is_training" parameter was not set!'))
        return batch_norm(entree,
                          epsilon=bn_epsilon,
                          decay=bn_decay,
                          activation_fn=activation_fn,
                          is_training=is_training,
                          reuse=reuse,
                          scope=scope,
                          updates_collections=None)
    else:
        return activation_fn(entree)
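A note on an assumption here: the `batch_norm` being called is presumably `tf.contrib.layers.batch_norm` from TensorFlow 1.x, whose `epsilon`, `decay`, `activation_fn`, `is_training`, `reuse`, `scope`, and `updates_collections` parameters match this call; the snippet does not show the import.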
Example #5
    def __init__(self, session, size, model_path):
        self.session = session
        self.height = size[1]
        self.width = size[0]
        self.x1 = tf.placeholder(dtype=tf.uint8,
                                 shape=[None, self.height, self.width, 3],
                                 name='x1')
        self.x2 = tf.placeholder(dtype=tf.uint8,
                                 shape=[None, self.height, self.width, 3],
                                 name='x2')
        # TODO: figure out how to do this in a nicer way. Some networks were
        # trained without batch_norm -- in those cases, is_training is not
        # needed.
        self.is_training = None  # tf.placeholder(dtype=tf.bool, name='is_training')

        model_options_filename = os.path.join(model_path, 'checkpoint',
                                              'options.pkl')
        model_checkpoint_filename = os.path.join(model_path, 'checkpoint',
                                                 'model.ckpt')
        log.check(os.path.exists(model_options_filename),
                  "Did not find '{}'".format(model_options_filename))
        log.check(
            os.path.exists(os.path.dirname(model_checkpoint_filename)),
            "Did not find '{}'".format(
                os.path.dirname(model_checkpoint_filename)))

        pwc_options = PWCNetOptions.load(model_options_filename,
                                         self.is_training)
        self.net = PWCNet(x1=self.x1, x2=self.x2, options=pwc_options)
        self.output_flow_op = self.net.get_output_flow()
        tf.train.Saver().restore(self.session, model_checkpoint_filename)
Example #6
    def next(self):
        log.check(not self._done,
                  "Should check 'done()' prior to calling this function!")
        image = cv2.imread(self.filenames[0])
        image = cv2.resize(image, (int(image.shape[1] * self._scale),
                                   int(image.shape[0] * self._scale)))
        self.filenames = self.filenames[1:]
        self._done = len(self.filenames) == 0
        return image
Example #7
    def _verify_shapes(c1, c2, uv):
        log.check_eq(c1.shape[1:], c2.shape[1:],
                     "Shape of the two feature maps must match!")
        log.check_eq(len({c1.dtype, c2.dtype, uv.dtype}), 1,
                     "All datatypes must match")
        log.check(all(len(nd) == 4 for nd in [c1.shape, c2.shape, uv.shape]),
                  "Inputs must be passed as NxHxWxC")
        log.check_eq(uv.shape[3], 2,
                     "Expected a flow tensor with 2 channels in the last dim")
        log.check_eq(2 * int(uv.shape[1]), c1.shape[1])
        log.check_eq(2 * int(uv.shape[2]), c1.shape[2])
Example #8
def main():
    args = parse_args()
    # Load a pair of images or video.
    if args.video is not None:
        feeder = VideoFeeder(args.video, scale=args.input_scale)
    else:
        feeder = ImageFeeder([args.image1, args.image2],
                             scale=args.input_scale)

    # Peek at the first image (not strictly necessary, but useful to figure
    # out the size).
    image1 = feeder.next()
    log.check(image1 is not None,
              "Could not get any images from specified input!")

    # Normally one should create the network with an explicit size, rather
    # than inferring it from image dimensions; guessing is just a fallback.
    if args.size is None:
        args.size = guess_network_size(width=image1.shape[1],
                                       height=image1.shape[0])

    session_config = tf.ConfigProto()
    session_config.gpu_options.allow_growth = True
    sess = tf.Session(config=session_config)

    # Expensive: usually should do only once for the entire video sequence, or
    # for a collection of images.
    print("Loading a model from {}".format(args.model))
    flow_solver = OpticalFlowModel.create(session=sess,
                                          name=args.model,
                                          size=args.size)

    while not feeder.done():
        image2 = feeder.next()

        t1 = time.time()
        flow = flow_solver.run(image1, image2)
        t2 = time.time()
        print("Elapsed {} secs".format(t2 - t1))

        uv = compute_flow_color(u=flow[:, :, 0], v=flow[:, :, 1])
        if args.visualize:
            cv2.imshow("images", np.concatenate([image1, image2], axis=1))
            cv2.imshow("image", uv)
            key = cv2.waitKey(args.wait_time)
            if key == 27:  # escape to exit right away.
                exit(1)

        if args.output_filename is not None:
            cv2.imwrite(args.output_filename, uv)
            print("Wrote output to {}".format(args.output_filename))

        image1 = image2
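For orientation, a hypothetical invocation of this demo; `parse_args()` is not shown, so the flag names below are inferred from the `args.*` accesses above and are not confirmed:

# Hypothetical CLI usage (flag names inferred, not confirmed):
#   python demo.py --video clip.mp4 --model pwc_512x512 --visualize
#   python demo.py --image1 a.png --image2 b.png --output_filename flow.png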
Example #9
def _endpoint_huber_loss_at_scale(prediction, groundtruth, weights=None):
    # Per-pixel weights are not supported here.
    log.check(weights is None)
    HUBER_DELTA = 0.25
    if prediction.shape != groundtruth.shape:
        # Bring the groundtruth down to the prediction's resolution.
        groundtruth = resample_flow(groundtruth, prediction.shape)
    return tf.reduce_mean(
        tf.losses.huber_loss(prediction, groundtruth, delta=HUBER_DELTA))
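Note that `tf.losses.huber_loss` is quadratic for errors below `delta` (0.25 here) and linear above it, so large endpoint errors influence the loss less than they would under a pure L2 penalty.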
Example #10
    def next(self):
        log.check(not self._done,
                  "Should check 'done()' prior to calling this function!")

        retval, image = self.cap.retrieve()
        log.check(
            retval,
            "Failed to retrieve a frame from the video. This is a logic "
            "error in VideoFeeder.")
        image = cv2.resize(image, (int(image.shape[1] * self._scale),
                                   int(image.shape[0] * self._scale)))
        self._done = not self.cap.grab()
        return image
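For context: OpenCV's `VideoCapture.grab()` advances to the next frame and reports success, while `retrieve()` decodes the most recently grabbed frame. That is why this class (see also the constructor in Example #18) always grabs one frame ahead and uses the result to set `_done` before the next `retrieve()`.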
Example #11
    def make_variable_dict(mappings, aliases):
        builtin_vars = dict()
        for k, v in mappings:
            glog.check(k not in builtin_vars, "Duplicated builtin variable %s" % k)
            builtin_vars[k] = v
        alias_vars = dict()
        for new, old in aliases:
            if new in builtin_vars:
                glog.warning("Aliasing builtin variable %s to %s" % (new, old))
            glog.check(new not in alias_vars, "Duplicated alias for %s" % new)
            alias_vars[new] = builtin_vars[old]
        builtin_vars.update(alias_vars)
        return builtin_vars
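A brief usage sketch of make_variable_dict, assuming it can be called as a plain function; the variable names below are made up for illustration:

# Two builtin variables plus one alias; the alias takes the builtin's
# value via builtin_vars[old].
variables = make_variable_dict(mappings=[('width', 640), ('height', 480)],
                               aliases=[('w', 'width')])
assert variables == {'width': 640, 'height': 480, 'w': 640}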
Example #12
    def setup_from_vertice(self,
                           vertices,
                           time_segment,
                           derivative_to_optimise,
                           max_continuity,
                           weights=np.ones(10)):
        """
        Set up the polynomial trajectory generation problem from input vertices.

        :param vertices: input vertices, a set of vertex nodes containing constraints.
        :type vertices: vertice class
        :param time_segment: target time stamp for each vertex.
        :type time_segment: list, np.array
        :param derivative_to_optimise: max derivative to take into account.
        :type derivative_to_optimise: int
        :param max_continuity: derivative degree up to which continuity will be ensured.
        :type max_continuity: int
        :param weights: weight of each derivative.
        :type weights: list, np.array
        """
        log.check(
            symbols.POSITION <= derivative_to_optimise
            <= self.max_derivative_to_optimize)

        self.vertices = vertices
        self.time_segment = time_segment

        self.Nv = vertices.size()  # Number of vertices
        self.Ns = self.Nv - 1  # Number of segments

        self.derivative_to_optimize = derivative_to_optimise
        self.nb_variables = self.Ns * (self.N + 1)
        self.max_continuity = max_continuity

        self.polyCoeffSet = np.zeros((self.dim, self.N + 1, self.Ns))
        self.set_weight_mask(weights)
        self.Q = self._setup_cost_matrix(derivative_to_optimise)
        self.A, self.b = self._setup_constraints_matrix(max_continuity)

        self.opts_settings = {
            'ipopt.max_iter': 100,
            'ipopt.print_level': 0,
            'print_time': 0,
            'ipopt.acceptable_tol': 1e-8,
            'ipopt.acceptable_obj_change_tol': 1e-6
        }
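The `opts_settings` keys (`ipopt.max_iter`, `ipopt.print_level`, `print_time`, ...) look like Ipopt options in the style accepted by CasADi's `nlpsol` interface, though the solver call itself is not shown in this snippet.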
Example #13
    def _initialize_flow_estimators(pyramid1, pyramid2, options):
        num_levels = pyramid1.num_levels()
        options = PWCNet._ensure_estimator_options_isdict(options, num_levels)
        log.check(isinstance(options, dict))

        batch_size = tf.shape(pyramid1.get_level(0))[0]

        # Reverse the levels, since we iterate from the coarsest level to the
        # finest one below.
        pyramid_levels_to_use = sorted(options.keys(), reverse=True)

        # Get the shape of the coarsest level in the pyramid, and downscale it
        # by 2 to get the initial (null) flow shape.
        smallest_level_shape = \
            pyramid1.get_level(pyramid_levels_to_use[0]).shape.as_list()
        initial_flow_shape = [
            batch_size,
            smallest_level_shape[1] // 2,
            smallest_level_shape[2] // 2,
            2,
        ]
        # Initialize zero flow.
        last_flow = tf.zeros(initial_flow_shape, dtype=tf.float32)
        # The very last (0-th) level of the pyramid is not used in the paper;
        # instead the flow is just upsampled by 2x. If it were used, the input
        # feature maps would have to be raw images (i.e. NxHxWx3).
        estimator_net = []
        for lvl in pyramid_levels_to_use:
            estimator_net.append(
                OpticalFlowEstimator(x1=pyramid1.get_level(lvl),
                                     x2=pyramid2.get_level(lvl),
                                     uv=last_flow,
                                     opt=options[lvl]))
            last_flow = estimator_net[-1].get_flow()
        return estimator_net
Example #14
def create_dataset(kind, path):
    """ 
    'Factory'-like function for creating a dataset of the specified kind. 
    USAGE:
    >>> datasets = [
    ...     create_dataset('FlyingChairsDataset', '/data/flying_chairs/'),
    ...     create_dataset('FlyingChairs2Dataset', '/data/flying_chairs2/'),
    ...     create_dataset('SintelDataset', '/data/sintel/'),
    ... ]
    >>> for d in datasets:
    ...     print(d.train_files(), d.val_files(), d.all_files())
    """
    log.check(kind in valid_datasets())
    if kind == FlyingChairsDataset.__name__:
        return FlyingChairsDataset(path)
    if kind == FlyingChairs2Dataset.__name__:
        return FlyingChairs2Dataset(path)
    if kind == SDHomDataset.__name__:
        return SDHomDataset(path)
    if kind == SintelDataset.__name__:
        return SintelDataset(path)
    # Fall back to resolving the dataset class by name in this module.
    return getattr(sys.modules[__name__], kind)(path)
Example #15
import glog as log

log.info("It works.")
log.warn("Something not ideal")
log.error("Something went wrong")
log.fatal("AAAAAAAAAAAAAAA!")

log.check(False)
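Note that the final `log.check(False)` does not merely log: it raises `log.FailedCheckException`, as the test in Example #16 demonstrates.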
Example #16
    def test_check(self):
        log.check(True)
        self.assertRaises(log.FailedCheckException, log.check, False)
Example #17
import glog as log

log.info("It works.")
log.warn("Something not ideal")
log.error("Something went wrong")
log.fatal("AAAAAAAAAAAAAAA!")

log.check(False)
Example #18
    def __init__(self, filename, scale=1.0):
        self.cap = cv2.VideoCapture(filename)
        log.check(self.cap.isOpened())
        # If the first 'grab' doesn't succeed, then we're already done :(
        self._done = not self.cap.grab()
        self._scale = scale
Example #19
    def test_check(self):
        log.check(True)
        self.assertRaises(log.FailedCheckException, log.check, False)