Code Example #1
    def run_test(self, test_loader, loss_fn):
        logger.check_eq(self.done, False, 'Done already!')
        if self.cuda:
            self.net.cuda()
            print('run() is using cuda')
        else:
            print('run() is not using cuda')

        logger.info('Network Architecture:')
        print(str(self.net))
        sys.stdout.flush()
        self.count_parameter_num(True)

        logger.info('{} Hyperparameters:'.format(
            self.solver.__class__.__name__))
        print(str(self.solver.defaults))
        sys.stdout.flush()

        #self.test(epoch=0, loader=test_loader, loss_fn=loss_fn)
        #for epoch in range(1, self.total_epochs+1): # Main loop of runner. Test after each training epoch.
        time0 = time.time()

        self.test(epoch=1, loader=test_loader, loss_fn=loss_fn)
        print('time for one epoch is: ', time.time() - time0)
        self.invoke_epoch_callback()

        #self.snapshot('')
        #self.save_stats()
        self.done = True
Code Example #2
File: utils.py Project: BlenderCN-Org/flowthrone
def read_pfm(filename):
    """ Reads binary .pfm file into a numpy array (cv::Mat) """
    with open(filename, 'rb') as fid:
        # The file is opened in binary mode, so decode the header lines
        # before comparing them against strings (required on Python 3).
        header = fid.readline().rstrip().decode('ascii')
        log.check_eq(header, 'PF', 'Maybe this is not a PFM file?')

        dim_match = re.match(r'^(\d+)\s(\d+)\s$',
                             fid.readline().decode('ascii'))
        if dim_match:
            width, height = map(int, dim_match.groups())
        else:
            raise Exception('Malformed PFM header.')

        scale = float(fid.readline().rstrip())
        endian = '<' if scale < 0 else '>'  # negative scale: little-endian

        data = np.fromfile(fid, endian + 'f')

    # PFM rows are stored bottom-to-top; flip to top-to-bottom order and
    # keep the first two channels (used here as the u/v flow components).
    data = np.reshape(data, (height, width, 3))
    data = np.flipud(data)
    return data[:, :, 0:2]
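For context, a minimal usage sketch (glog is imported as in the other snippets on this page; 'flow.pfm' is a hypothetical path):

import glog as log

uv = read_pfm('flow.pfm')  # HxWx2 float array
log.check_eq(len(uv.shape), 3, 'Expected an HxWxC array.')
log.check_eq(uv.shape[-1], 2, 'Expected a two-channel (u, v) flow field.')
print('u range: [{:.2f}, {:.2f}]'.format(uv[:, :, 0].min(), uv[:, :, 0].max()))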
Code Example #3
    def run(self, train_loader, test_loader, loss_fn):
        
        try:
            from visualize import make_dot
            y = self.net.forward(Variable(torch.from_numpy(test_loader.dataset[0]['image'])))
            g = make_dot(y)
            g.engine='dot'
            g.format='pdf'
            print(g.render(filename=os.path.join(self.log_dir, 'net.gv')))
        except Exception:
            logger.warn('failed to draw net.')
        

        logger.check_eq(self.done, False, 'Done already!')
        if self.cuda:
            self.net.cuda()

        logger.info('Network Architecture:')
        print(str(self.net))
        sys.stdout.flush()

        logger.info('{} Hyperparameters:'.format(self.solver.__class__.__name__))
        print(str(self.solver.defaults))
        sys.stdout.flush()

        logger.info('Initial test with random initialized parameters:')
        self.test(epoch=0, loader=test_loader, loss_fn=loss_fn)
        for epoch in range(1, self.total_epochs+1):
            self.train(epoch=epoch, loader=train_loader, loss_fn=loss_fn)
            self.test(epoch=epoch, loader=test_loader, loss_fn=loss_fn)
            self.invoke_epoch_callback()
        self.save_stats()
        self.done = True
Code Example #4
    def discrete_nl_dynamics_rk4(self, x0, u, noise=False):
        """
        Perform a time step in continuous quaternion dynamic based on RGK4 integrator.

        :param x0: state
        :type x0: ca.MX
        :param u: control input
        :type u: ca.Mx
        :param noise: add noise on computed dot(x0)
        :type noise: Boolean
        :return: dot(x0), time derivative
        :rtype: ca.DM
        """

        log.check_eq(x0.shape[0], self.Nx,
                     "Wrong state dimension in RGK4 integrator.")
        log.check_eq(u.shape[0], self.Nu,
                     "Wrong input dimension in RGK4 integrator.")

        out = self.rk4_integrator(x=x0, u=ca.vertcat(u))

        if not noise:
            return out["xDot"]
        else:
            return self._add_noise(out["xDot"])
Code Example #5
 def _verify_shapes(c1, c2, uv):
     log.check_eq(c1.shape[1:], c2.shape[1:],
                  "Shape of the two feature maps must match!")
     log.check_eq(len(set([c1.dtype, c2.dtype, uv.dtype])), 1,
                  "All datatypes must match")
     log.check(all([4 == len(nd) for nd in [c1.shape, c2.shape, uv.shape]]),
               "Must be passed as NxHxWxC")
     log.check_eq(uv.shape[3], 2, "Did not pass flow where flow was expected?")
     log.check_eq(2 * int(uv.shape[1]), c1.shape[1])
     log.check_eq(2 * int(uv.shape[2]), c1.shape[2])
Code Example #6
 def _make_network_spine(self, inp):
     log.check_eq(len(self.options.NUM_FILTERS),
                  len(self.options.NUM_DILATIONS),
                  'NUM_FILTERS and NUM_DILATIONS must be equally sized!')
     layers = [inp]
     for i in range(len(self.options.NUM_FILTERS)):
         layers.append(
             self._conv2d(layers[-1],
                          num_outputs=self.options.NUM_FILTERS[i],
                          rate=self.options.NUM_DILATIONS[i]))
     return layers
Code Example #7
 def __init__(self, data_path, shuffle_point_order='no', rand_rot=True):
     self.shuffle_point_order = shuffle_point_order
     logger.info('loading: ' + data_path)
     self.all_data = np.load(data_path)['data']
     self.rand_rot = rand_rot
     if shuffle_point_order == 'preprocess':
         for pc in self.all_data:
             np.random.shuffle(pc)
     logger.info('loaded: data ' + str(self.all_data.shape))
     logger.check_eq(len(self.all_data.shape), 3,
                     'data field should be of size BxNx3!')
     logger.check_eq(self.all_data.shape[-1], 3,
                     'the last dimension of the data field should be 3!')
Code Example #8
    def _get_angular_error_loss(net, loss_weights, groundtruths, weights=None):
        """ Returns scaled angular error losses. """
        losses = []
        log.check_eq(len(groundtruths), len(loss_weights))
        log.check_eq(len(net.estimator_net), len(loss_weights) - 1,
                     "You do not have an appropriate number of loss weights.")
        RELATIVE_WEIGHT = 5.0

        with tf.name_scope('angular_loss'):
            for i, w in enumerate(loss_weights):
                if i < len(loss_weights) - 1:
                    prediction = net.estimator_net[i].get_flow()
                else:
                    if net.options.use_context_net is False:
                        log.warn(
                            'Context network is not set up, so there is no ' +
                            'need to penalize flow at the finest resolution.')
                        break
                    prediction = net.get_output_flow()

                dim = prediction.shape.as_list()[1]
                gt_at_scale = groundtruths[dim]
                loss_name = '{}x{}'.format(dim, dim)

                log.check_eq(gt_at_scale.shape.as_list()[1],
                             prediction.shape.as_list()[1])
                log.check_eq(gt_at_scale.shape.as_list()[2],
                             prediction.shape.as_list()[2])

                loss = tf_utils.angular_loss_at_scale(
                    prediction, gt_at_scale, weights) * w * RELATIVE_WEIGHT
                tf.summary.scalar(loss_name, loss)
                losses.append(loss)
        return losses
Code Example #9
    def __init__(self, pkl_path, shuffle_point_order='no'):
        self.shuffle_point_order = shuffle_point_order

        logger.info('loading: ' + pkl_path)

        raw_data = np.load(pkl_path, encoding='bytes',
                           allow_pickle=True).item()
        self.all_data = raw_data[b'data']  # [BxNx3]
        if shuffle_point_order == 'preprocess':
            for i in range(self.all_data.shape[0]):
                np.random.shuffle(self.all_data[i])
        self.all_label = np.asarray(raw_data[b'label'], dtype=np.int64)

        logger.info('pkl loaded: data ' + str(self.all_data.shape) +
                    ', label ' + str(self.all_label.shape))

        logger.check_eq(len(self.all_data.shape), 3,
                        'data field should be of size BxNx3!')
        logger.check_eq(self.all_data.shape[-1], 3,
                        'the last dimension of the data field should be 3!')
        logger.check_eq(len(self.all_label.shape), 1,
                        'label field should be one dimensional!')
        logger.check_eq(
            self.all_data.shape[0], self.all_label.shape[0],
            'data field and label field should have the same size along the first dimension!'
        )
Code Example #10
    def _initialize_scaled_groundtruth(self, pwcnet, groundtruth):
        # Initializes a map from size (height or width) to 'groundtruth' flow.
        self.groundtruth = {}

        dim = groundtruth.shape.as_list()[1]
        self.groundtruth[dim] = groundtruth

        # At this point we can apply some scaling factors to the flow.
        for estimator in pwcnet.estimator_net:
            p_shape = estimator.get_flow().shape.as_list()
            scale_x = p_shape[1] / float(groundtruth.shape.as_list()[1])
            scale_y = p_shape[2] / float(groundtruth.shape.as_list()[2])
            log.check_eq(scale_x, scale_y)

            dim = p_shape[1]
            self.groundtruth[dim] = tf_utils.resample_flow(
                groundtruth, p_shape)
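A note on the resampling step: when a flow field is resized, the flow vectors themselves must be scaled by the same spatial factor, otherwise the groundtruth at coarser scales would be inconsistent. A numpy/OpenCV sketch of that idea (tf_utils.resample_flow is project code and may differ in detail):

import cv2

def resample_flow_np(flow, new_h, new_w):
    h, w = flow.shape[:2]
    out = cv2.resize(flow, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
    out[:, :, 0] *= new_w / float(w)  # scale u by the width ratio
    out[:, :, 1] *= new_h / float(h)  # scale v by the height ratio
    return out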
Code Example #11
    def _add_noise(self, x):
        """
        Add Gaussian noise input state.

        :param x: state vector
        :type x: ca.DM
        :return: Noisy state vector
        :rtype: ca.DM
        """
        log.check_eq(
            x.shape[0], self.W.shape[0],
            "State dimension different than provided noise vector dimension.")

        for idx in range(self.W.shape[0]):
            w = (2.0 * self.W[idx] * np.random.random_sample() - self.W[idx])
            x[idx] = x[idx] + w

        return x
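The loop above draws each perturbation uniformly from [-W[idx], W[idx]); an equivalent vectorized sketch (assuming x and W are plain 1-D numpy arrays rather than CasADi DM objects):

import numpy as np

def add_uniform_noise(x, W):
    # One independent sample per state dimension, uniform in [-W, W).
    return x + np.random.uniform(-W, W)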
Code Example #12
    def __init__(self):
        self.node_eval_threads = {}
        self.topic_eval_threads = {}
        self.eval_rate_s = rospy.get_param('~eval_rate_s', default=0.5)

        self.node_names = rospy.get_param('~node_names')
        for i in range(0, len(self.node_names)):
            self.node_names[i] = rosgraph.names.script_resolve_name(
                '/', self.node_names[i])
        node_eval_mode = rospy.get_param('~node_eval_mode')
        if self.node_names is not None or node_eval_mode is not None:
            glog.check_eq(len(self.node_names), len(node_eval_mode))
            self.eval_mode = {}
            for name, mode in zip(self.node_names, node_eval_mode):
                self.eval_mode[name] = mode
            for node_name in self.node_names:
                self.node_eval_threads[node_name] = {}
        self.plot_dir = os.path.join(
            rospy.get_param('~plot_dir', '.'),
            datetime.datetime.now().strftime('%x').replace('/', '-') + '-' +
            datetime.datetime.now().strftime('%X').replace(':', '-'))
        if not os.path.exists(self.plot_dir):
            os.mkdir(self.plot_dir)
        print("Saving results to " + self.plot_dir)

        self.topic_names = rospy.get_param('~topic_names', default=None)
        topic_eval_mode = rospy.get_param('~topic_eval_mode', default=None)
        if self.topic_names is not None or topic_eval_mode is not None:
            glog.check_eq(len(self.topic_names), len(topic_eval_mode))
            self.topic_eval_mode = {}
            for name, mode in zip(self.topic_names, topic_eval_mode):
                self.topic_eval_mode[name] = mode
            for topic in self.topic_names:
                self.topic_eval_threads[topic] = {}

        self.sys_eval_mode = rospy.get_param('~sys_eval_mode', default=None)
        self.sys_eval_threads = {}

        self.master = rosgraph.Master(ID)
        self.node_pid = {}
        self.plot_threads = {}
        self.start_eval()
Code Example #13
    def _get_endpoint_error_loss(net,
                                 loss_weights,
                                 groundtruths,
                                 weights=None,
                                 loss_type=None):
        """
        Returns endpoint error loss. Options are:
         - L2
         - Huber
         - L2 weighted by uncertainty, like eqn 8 in:
           https://arxiv.org/pdf/1703.04977.pdf
        """
        losses = []
        log.check_eq(len(groundtruths), len(loss_weights))
        log.check_eq(len(net.estimator_net) + 1, len(loss_weights),
                     ("You do not have an appropriate number of loss weights. "
                      "Should have {}".format(1 + len(net.estimator_net))))
        with tf.name_scope('endpoint_loss'):
            for i, w in enumerate(loss_weights):
                if i < len(loss_weights) - 1:
                    prediction = net.estimator_net[i].get_flow()
                else:
                    if net.options.use_context_net is False:
                        log.warn(
                            'Context network is not set up, so there is no ' +
                            'need to penalize flow at the finest resolution.')
                        break
                    prediction = net.get_output_flow()

                dim = prediction.shape.as_list()[1]
                loss_name = '{}x{}'.format(dim, dim)

                gt_at_scale = groundtruths[dim]
                log.check_eq(gt_at_scale.shape.as_list()[1],
                             prediction.shape.as_list()[1])
                log.check_eq(gt_at_scale.shape.as_list()[2],
                             prediction.shape.as_list()[2])

                if loss_type == 'HUBER':
                    loss = tf_utils.endpoint_huber_loss_at_scale(
                        prediction, gt_at_scale, weights) * w
                elif loss_type == 'L2':
                    loss = tf_utils.endpoint_loss_at_scale(
                        prediction, gt_at_scale, weights) * w
                else:
                    log.fatal("Unrecognized loss type -- should specify "
                              "{'HUBER', 'L2' 'WEIGHTED'}.")
                tf.summary.scalar(loss_name, loss)
                losses.append(loss)
        return losses
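For intuition, the quantity behind the L2 branch is the average endpoint error: the Euclidean distance between predicted and groundtruth flow vectors, averaged over pixels. A plain-numpy sketch (not the project's tf_utils implementation, which also supports per-pixel weights):

import numpy as np

def average_endpoint_error(pred, gt):
    # pred, gt: NxHxWx2 flow fields; distance taken over the last axis.
    return np.mean(np.sqrt(np.sum((pred - gt) ** 2, axis=-1)))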
Code Example #14
    def run(self, train_loader, test_loader, loss_fn):
        logger.check_eq(self.done, False, 'Done already!')
        if self.cuda:
            self.net.cuda()
            print('run() is using cuda')
        else:
            print('run() is not using cuda')

        logger.info('Network Architecture:')
        print(str(self.net))
        sys.stdout.flush()
        self.count_parameter_num(True)

        logger.info('{} Hyperparameters:'.format(
            self.solver.__class__.__name__))
        print(str(self.solver.defaults))
        sys.stdout.flush()

        #self.test(epoch=0, loader=test_loader, loss_fn=loss_fn)
        for epoch in range(
                1, self.total_epochs +
                1):  # Main loop of runner. Test after each training epoch.
            self.train_epoch = epoch
            time0 = time.time()
            self.train(epoch=epoch, loader=train_loader, loss_fn=loss_fn)
            print('time for one epoch is: ', time.time() - time0)

            if (epoch - 1) % self.epoch_per_snapshot == 0:
                self.test(epoch=epoch, loader=test_loader, loss_fn=loss_fn)
            self.invoke_epoch_callback()

        np.save(os.path.join(self.log_dir, 'batches_loss_train'),
                self.batches_loss)
        np.save(os.path.join(self.log_dir, 'batches_loss_test'),
                self.batches_loss_test)

        self.snapshot('')
        self.save_stats()
        self.done = True
Code Example #15
def main(args):
    files = get_input_files(args)
    print "Found {} files.".format(len(files))
    target_size = [args.target_size, args.target_size]

    record_writer = tf.python_io.TFRecordWriter(args.output_filename)
    remaining_files = set(range(len(files)))
    verbose = False
    for idx in range(args.num_examples):
        if len(remaining_files) == 0:
            print "Exhausted all files; will reset sampling from beginning."
            remaining_files = set(range(len(files)))
        fn_idx = random.choice(list(remaining_files))
        remaining_files -= {fn_idx}
        if idx % 100 == 0:
            print "Writing example {} to {}".format(idx, args.output_filename)

        triplet = dataset_utils.read_triplet(
            files[fn_idx][0], files[fn_idx][1], files[fn_idx][2])
        flow, img1, img2 = dataset_utils.generate_example(
            triplet, target_size, scale_range=[0.1, .25])

        img1 = img1.astype(dtype=np.uint8)
        img2 = img2.astype(dtype=np.uint8)
        flow = flow.astype(dtype=np.float32)

        for item in [img1, img2, flow]:
            log.check_eq(args.target_size, item.shape[0])
            log.check_eq(args.target_size, item.shape[1])
       
        if verbose:
            print("Flow range x: {:.2f}, {:.2f}, y: {:.2f}, {:.2f}".format(
                np.min(flow[:, :, 0]), np.max(flow[:, :, 0]),
                np.min(flow[:, :, 1]), np.max(flow[:, :, 1])))

        example = dataset_utils.as_tf_example(img1, img2, flow)
        record_writer.write(example)
    record_writer.close()  # flush buffered records to disk
    print("Wrote {} examples to {}".format(args.num_examples,
                                           args.output_filename))
Code Example #16
    def _make_network_spine(self, entree):
        """
        Returns a sequence of layers forming the 'spine' of the network.
        The input layer is the first element in the returned list, and the last
        element is the the output 'layer'. Spatial dimensions of the feature
        map are preserved.
        """
        outputs = [entree]
        for outs in self.options.NUM_FILTERS[0:-1]:
            outputs.append(
                self._conv2d_rectified(outputs[-1], num_outputs=outs))
        # Add the final layer without ReLU following it.
        log.check_eq(
            2,
            self.options.NUM_FILTERS[-1],
            message=('Last filter in the list must have "num_outputs=2", '
                     'since its output is optical flow field.'))

        outputs.append(
            self._conv2d(entree=outputs[-1],
                         num_outputs=self.options.NUM_FILTERS[-1]))

        return outputs
Code Example #17
 def _verify_input(entree):
     log.check_eq(len(entree.shape), 4,
                  ("Input must be NHWC (i.e. must have 4 dimensions) -- "
                   "slim behaves poorly otherwise."))
     log.check_eq(entree.shape[-1], 3, "Input should have three channels.")
Code Example #18
#!/usr/bin/python

# import libraries
try:
    import sys
    import glog
except ImportError as e:
    print(e)
    print("please install glog first")
    sys.exit(1)

# check that the major Python version is 3
PYTHON_VERSION = sys.version_info[0]
glog.check_eq(PYTHON_VERSION, 3, "only support python version 3, your version is %d" % PYTHON_VERSION)

class Blob(object):
    def __init__(self, shape, data_format):
        self._shape = shape
        self._data_format = data_format

class Backbone(object):
    def __init__(self, input_blob_list):
        self._input_blob_list = input_blob_list

if __name__ == "__main__":
    # The original snippet is truncated here; the values below are assumed
    # purely for illustration.
    shape = (1, 3, 224, 224)
    blob = Blob(shape, "NCHW")
    backbone = Backbone([blob])
Code Example #19
 def test_check_eq(self):
     log.check_eq(1, 1)
     self.assertRaises(log.FailedCheckException, log.check_eq, 1, 2)
Code Example #20
    logfile = os.path.join(args.model_path, 'log.txt')

    ########################################
    # construct the net
    ########################################
    net = model.FCN32(num_classes=1)
    if not args.no_cuda:
        net.cuda()

    if args.resume >= 0:
        tmp = torch.load(
            os.path.join(args.model_path,
                         'model_epoch{:04d}.pth'.format(args.resume)))
        net.load_state_dict(tmp['net'])
        start_epoch = tmp['epoch'] + 1
        glog.check_eq(args.resume, tmp['epoch'], 'epoch number inconsistent')
        glog.info(
            'model trained up to epoch {:4d} with loss={:0.4f} loaded'.format(
                start_epoch - 1, tmp['loss']))
    else:
        start_epoch = 0
        best_acc = 0.0
        glog.info('train a model from scratch')
        if os.path.exists(logfile):
            os.remove(logfile)  # clean up the log file
    net.train()  # training mode

    ########################################
    # load data and make it multiple of batch size
    ########################################
    transform = transforms.Compose([
        # The original snippet is truncated here; ToTensor() is a minimal
        # assumed placeholder transform.
        transforms.ToTensor(),
    ])
Code Example #21
File: glog_test.py Project: benley/python-glog
 def test_check_eq(self):
     log.check_eq(1, 1)
     self.assertRaises(log.FailedCheckException, log.check_eq, 1, 2)
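As this test shows, check_eq raises log.FailedCheckException when the two values differ; a minimal sketch of that behavior:

import glog as log

log.check_eq(2 + 2, 4)  # passes silently
try:
    log.check_eq(1, 2, 'one is not two')
except log.FailedCheckException:
    print('check failed, as expected')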
Code Example #22
 def _verify_inputs(x1, x2, groundtruth, weights=None):
     log.check_eq(4, len(x1.shape))
     log.check_eq(4, len(x2.shape))
     log.check_eq(4, len(groundtruth.shape))
     log.check_eq(x1.shape[1:4], x2.shape[1:4])
     log.check_eq(x1.shape[1:3], groundtruth.shape[1:3])
     log.check_eq(2, groundtruth.shape[3])
     if weights is not None:
         log.check_eq(4, len(weights.shape))
         log.check_eq(x1.shape[1:3], weights.shape[1:3])
Code Example #23
 def __init__(self, N, dim):
     log.check_eq(N % 2, 0)
     super(TrajectoryTools, self).__init__()
     self.N = N
     self.dim = dim
     self.max_derivative_to_optimize = N / 2.0 - 1
Code Example #24
 def _verify_left_is_half_scale_of_right(shape1, shape2):
     log.check_eq(len(shape1), len(shape2))
     log.check_eq(len(shape1), 4)
     log.check_eq(shape1[1], shape2[1] / 2)
     log.check_eq(shape1[2], shape2[2] / 2)
Code Example #25
 def _verify_input(x1, x2):
     log.check_eq(x1.dtype, tf.uint8)
     log.check_eq(x2.dtype, tf.uint8)
     log.check_eq(4, len(x1.shape))
     log.check_eq(4, len(x2.shape))
     log.check_eq(x1.shape[1:4].as_list(), x2.shape[1:4].as_list())
     log.check_eq(3, x1.shape[3], message="Must be a three channel image.")
Code Example #26
 def _verify_input(entree, uv):
     log.check_eq(4, len(entree.shape))
     log.check_eq(4, len(uv.shape))
     log.check_eq(uv.shape[1:3], entree.shape[1:3])
     log.check_eq(2, uv.shape[3])