Example #1
    def __init__(self, options):
        super(Projections, self).__init__()
        # homography M and its inverse used to project points between views
        M, M_inv = get_homography(resize=options.resize, no_mapping=False)
        self.M, self.M_inv = torch.from_numpy(M), torch.from_numpy(M_inv)
        # sample vertical coordinates from 160 to 710 in steps of 10
        start = 160
        delta = 10
        num_heights = (720-start)//delta
        self.y_d = (torch.arange(start,720,delta)-80).double() / 2.5
        self.ones = torch.ones(num_heights).double()
        # project the sampled rows through the homography and build the default
        # (2nd-order) design matrix; it is overridden below according to options.order
        self.y_prime = (self.M[1,1:2]*self.y_d + self.M[1,2:])/(self.M[2,1:2]*self.y_d+self.M[2,2:])
        self.y_eval = 255 - self.y_prime
        self.Y = torch.stack((self.y_eval**2, self.y_eval, self.ones), 1)

        if options.order == 0:
            self.Y = self.ones.unsqueeze(1)  # order 0: a single column of ones
        elif options.order == 1:
            self.Y = torch.stack((self.y_eval, self.ones), 1)
        elif options.order == 2:
            self.Y = torch.stack((self.y_eval**2, self.y_eval, self.ones), 1)
        elif options.order == 3:
            self.Y = torch.stack((self.y_eval**3, self.y_eval**2, self.y_eval, self.ones), 1)
        else:
            raise NotImplementedError(
                    'Requested order {} for polynomial fit is not implemented'.format(options.order))
        # expand the constant tensors to the batch dimension
        self.Y = self.Y.unsqueeze(0).repeat(options.batch_size, 1, 1)
        self.ones = torch.ones(options.batch_size, num_heights, 1).double()
        self.y_prime = self.y_prime.unsqueeze(0).repeat(options.batch_size, 1).unsqueeze(2)
        self.M_inv = self.M_inv.unsqueeze(0).repeat(options.batch_size, 1, 1)

        # use gpu
        self.M = self.M.cuda()
        self.M_inv = self.M_inv.cuda()
        self.y_prime = self.y_prime.cuda()
        self.Y = self.Y.cuda()
        self.ones = self.ones.cuda()
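
The projection step above can be reproduced in isolation. A minimal, self-contained sketch (illustration only: H below is a made-up placeholder homography, not the matrix returned by get_homography; the sampling constants are copied from the snippet):

import torch

# Placeholder homography (hypothetical values, for illustration only).
H = torch.tensor([[1.0, 0.0,  0.0],
                  [0.0, 1.0,  0.0],
                  [0.0, 1e-3, 1.0]], dtype=torch.float64)

# Sample row coordinates exactly as in the snippet above.
y_d = (torch.arange(160, 720, 10, dtype=torch.float64) - 80) / 2.5
# Perspective mapping of the rows, matching the y_prime expression above.
y_prime = (H[1, 1] * y_d + H[1, 2]) / (H[2, 1] * y_d + H[2, 2])
y_eval = 255 - y_prime
ones = torch.ones_like(y_eval)
# Stack powers of y_eval into the design matrix of a 2nd-order polynomial fit.
Y = torch.stack((y_eval**2, y_eval, ones), dim=1)
print(Y.shape)  # torch.Size([56, 3])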
Example #2
    def __init__(self, args):
        super().__init__()
        self.nclasses = args.nclasses

        # define sizes and perspective transformation
        resize = args.resize
        size = torch.Size([args.batch_size, args.nclasses, args.resize, 2*args.resize])
        M, _ = get_homography(args.resize, args.no_mapping)
        M = torch.from_numpy(M).unsqueeze_(0).expand([args.batch_size, 3, 3]).float()

        # Define network
        out_channels = args.nclasses + int(not args.end_to_end)

        self.net = Networks.define_model(mod=args.mod, layers=args.layers, 
                                         in_channels=args.channels_in,
                                         out_channels=out_channels, 
                                         pretrained=args.pretrained, pool=args.pool)
        # Init activation
        self.activation = activation_layer(args.activation_layer, args.no_cuda)
        # Init grid generator
        self.grid = ProjectiveGridGenerator(size, M, args.no_cuda)
        # Init LS layer
        self.ls_layer = Weighted_least_squares(size, args.nclasses, args.order, 
                args.no_cuda, args.reg_ls, args.use_cholesky)

        # mask configuration: row/column indices and a triangular corner mask
        # used to mask out border regions of the (resize x 2*resize) output
        zero_rows = ceil(args.resize*args.mask_percentage)
        self.idx_row = torch.linspace(0, zero_rows-1, zero_rows).long()
        n_row = 13
        # column indices along the left and right borders
        self.idx_col1 = Variable(torch.linspace(1, n_row, n_row+1).long())
        self.idx_col2 = Variable(torch.linspace(0, n_row, n_row+1).long())+2*resize-(n_row+1)
        # mirrored triangles covering the top-left and top-right corners
        idx_mask = (np.arange(resize)[:, None] < np.arange(2*resize)-(resize+10))*1
        idx_mask = np.flip(idx_mask, 1).copy() + idx_mask
        self.idx_mask = Variable(torch.from_numpy(idx_mask)) \
                .type(torch.ByteTensor).expand(
                        args.batch_size, args.nclasses, resize, 2*resize)

        self.end_to_end = args.end_to_end
        self.pretrained = args.pretrained
        self.classification_branch = args.clas
        if self.classification_branch:
            size_enc = (32, 64)
            chan = 128
            self.line_classification = Classification('line', size=size_enc, 
                    channels_in=chan, resize=resize)
            self.horizon_estimation = Classification('horizon', size=size_enc, 
                    channels_in=chan, resize=resize)

        # Place on GPU if specified
        if not args.no_cuda:
            self.idx_row = self.idx_row.cuda()
            self.idx_col1 = self.idx_col1.cuda()
            self.idx_col2 = self.idx_col2.cuda()
            self.idx_mask = self.idx_mask.cuda()
            if self.classification_branch:
                self.line_classification = self.line_classification.cuda()
                self.horizon_estimation = self.horizon_estimation.cuda()
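
The triangular corner mask from the "# mask configuration" block can be inspected on its own. A small sketch (NumPy only; the resize value below is arbitrary, not taken from args):

import numpy as np

resize = 256  # arbitrary illustration value; the model uses args.resize
# Ones where column > row + resize + 10: a triangle in the top-right corner.
idx_mask = (np.arange(resize)[:, None] < np.arange(2*resize) - (resize + 10)) * 1
# Adding the horizontally flipped copy mirrors it onto the top-left corner.
idx_mask = np.flip(idx_mask, 1).copy() + idx_mask
print(idx_mask.shape)      # (256, 512)
print(idx_mask[0].sum())   # first row: widest masked span in both corners
print(idx_mask[-1].sum())  # 0, the last row is not masked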