Example #1
def run_per_epoch_evaluations(self, data_loader):
        print('fitting linear probe')
        representations = []
        targets = []
        for batch in data_loader:
            batch = [element.to(self.device) for element in batch]
            loss, view2d, view3d = self.process_batch(batch, optim=None)
            representations.append(view2d)
            targets.append(batch[-1])
            if len(representations) * len(
                    view2d) >= self.args.linear_probing_samples:
                break
        representations = torch.cat(representations, dim=0)
        targets = torch.cat(targets, dim=0)
        if len(representations) >= representations.shape[-1]:
            # torch.lstsq(B, A) solves A @ X = B; the first `dim` rows of X hold the solution.
            X, _ = torch.lstsq(targets, representations)
            sol = X[:representations.shape[-1]]
            pred = representations @ sol
            mean_absolute_error = (pred - targets).abs().mean()
            self.writer.add_scalar('linear_probe_mae',
                                   mean_absolute_error.item(),
                                   self.optim_steps)
        else:
            raise ValueError(
                f'We have fewer linear_probing_samples ({len(representations)}) than the representation dimension ({representations.shape[-1]}). Linear probing cannot be used.'
            )

        print('finished fitting linear probe')
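A note on the call above: torch.lstsq has been deprecated in favor of torch.linalg.lstsq, which takes its arguments in (A, B) order and returns the solution without padding rows. A minimal sketch of the same probe fit with the newer API, using made-up shapes:

import torch

# Hypothetical stand-ins for the tensors collected above.
representations = torch.randn(512, 128)   # (num_samples, dim)
targets = torch.randn(512, 4)             # (num_samples, target_dim)

# torch.linalg.lstsq(A, B) solves A @ X = B and returns a (dim, target_dim) solution,
# so no slicing of the first `dim` rows is needed.
sol = torch.linalg.lstsq(representations, targets).solution
pred = representations @ sol
mean_absolute_error = (pred - targets).abs().mean()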
Example #2
def get_fitted_line(nn_pos):
    '''
    nn_pos : 2 x l tensor of point coordinates (row 0: x, row 1: y).
    Returns a 3-element tensor (a, b, c) for the implicit line a*x + b*y + c = 0,
    picking whichever fitted parameterization (y = a*x + c or x = b*y + c) has the
    smaller sum of squared point-to-line distances.
    '''
    x = nn_pos.new(nn_pos.shape).zero_()  # 2 x l
    x[0] = nn_pos[0]
    x[1, :] = 1.0
    y = nn_pos[1, :].unsqueeze(1)  # l x 1
    try:
        sol, _ = torch.lstsq(y, x.permute(1, 0))[:2]  # a,c
    except RuntimeError:
        sol1 = nn_pos.new(3).zero_()
        sol1[0] = 0
        sol1[1] = 1
        sol1[2] = 0
    else:
        sol1 = nn_pos.new(3).zero_()
        sol1[0] = sol[0]
        sol1[1] = -1
        sol1[2] = sol[1]

    x = nn_pos.new(nn_pos.shape).zero_()  # 2 x l
    x[0] = nn_pos[1]
    x[1, :] = 1.0
    y = nn_pos[0, :].unsqueeze(1)  # l x 1
    try:
        sol, _ = torch.lstsq(y, x.permute(1, 0))[:2]  # b,c
    except RuntimeError:
        sol2 = nn_pos.new(3).zero_()
        sol2[0] = 1
        sol2[1] = 0
        sol2[2] = 0
    else:
        sol2 = nn_pos.new(3).zero_()
        sol2[0] = -1
        sol2[1] = sol[0]
        sol2[2] = sol[1]

    normal = sol1[:2].unsqueeze(1)
    norm = normal.norm()
    loss = nn_pos.mul(normal).sum(dim=0)
    loss = (loss + sol1[2]) / norm
    loss = loss.pow(2)  # l
    loss = loss.sum()
    loss1 = loss.clone()

    normal = sol2[:2].unsqueeze(1)
    norm = normal.norm()
    loss = nn_pos.mul(normal).sum(dim=0)
    loss = (loss + sol2[2]) / norm
    loss = loss.pow(2)  # l
    loss = loss.sum()
    loss2 = loss.clone()

    if loss1 > loss2:
        return sol2
    else:
        return sol1
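A small usage sketch of the helper above (my own toy data; it needs a PyTorch version that still ships torch.lstsq). The function returns a 3-element tensor (a, b, c) describing the implicit line a*x + b*y + c ≈ 0, so points near the fitted line should give values close to zero:

import torch

# Points roughly on the line y = 2x + 1, stacked as a 2 x l tensor (row 0: x, row 1: y).
xs = torch.linspace(0.0, 1.0, 20)
nn_pos = torch.stack([xs, 2.0 * xs + 1.0 + 0.01 * torch.randn_like(xs)])

a, b, c = get_fitted_line(nn_pos)
residual = (a * nn_pos[0] + b * nn_pos[1] + c).abs().mean()
print(residual)   # should be close to zero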
Example #3
 def fit(self, X, y):
     X = X.rename(None)
     y = y.rename(None).view(-1, 1)
     assert X.shape[0] == y.shape[0], "Number of X and y rows don't match"
     if self.fit_intercept:
         X = torch.cat([torch.ones(X.shape[0], 1), X], dim=1)
     # Solving Xw = y with Normal equations:
     # X^{T}Xw = X^{T}y
     lhs = X.T @ X
     rhs = X.T @ y
     if self.alpha == 0:
         self.w, _ = torch.lstsq(rhs, lhs)
     else:
         ridge = self.alpha * torch.eye(lhs.shape[0])
         self.w, _ = torch.lstsq(rhs, lhs + ridge)
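Because the snippet already forms the normal equations, the same fit can be written against the non-deprecated API; here is a standalone sketch with made-up data, using torch.linalg.solve under the assumption that the (ridge-regularized) Gram matrix is invertible. Note the reversed argument order: torch.lstsq(rhs, lhs) corresponds to solving lhs @ w = rhs.

import torch

# Hypothetical data: 100 samples, 5 features, one target column.
X = torch.randn(100, 5)
y = torch.randn(100, 1)
alpha = 0.1

X1 = torch.cat([torch.ones(X.shape[0], 1), X], dim=1)   # intercept column
lhs = X1.T @ X1
rhs = X1.T @ y
ridge = alpha * torch.eye(lhs.shape[0])
w = torch.linalg.solve(lhs + ridge, rhs)                 # (lhs + ridge) @ w = rhs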
Example #4
    def fit(self, episodes):
        # sequence_length * batch_size x feature_size
        featmat = self._feature(episodes).view(-1, self.feature_size)
        # sequence_length * batch_size x 1
        returns = episodes.returns.view(-1, 1)

        # Remove padded (all-zero) timesteps that only exist because episode lengths vary
        flat_mask = episodes.mask.flatten()
        flat_mask_nnz = torch.nonzero(flat_mask)
        featmat = featmat[flat_mask_nnz].view(-1, self.feature_size)
        returns = returns[flat_mask_nnz].view(-1, 1)

        reg_coeff = self._reg_coeff
        XT_y = torch.matmul(featmat.t(), returns)
        XT_X = torch.matmul(featmat.t(), featmat)
        for _ in range(5):
            try:
                coeffs, _ = torch.lstsq(XT_y, XT_X + reg_coeff * self._eye)

                # An extra round of increasing regularization eliminated
                # inf or nan in the least-squares solution most of the time
                if torch.isnan(coeffs).any() or torch.isinf(coeffs).any():
                    raise RuntimeError

                break
            except RuntimeError:
                reg_coeff *= 10
        else:
            raise RuntimeError(
                'Unable to solve the normal equations in '
                '`LinearFeatureBaseline`. The matrix X^T*X (with X the design '
                'matrix) is not full-rank, regardless of the regularization '
                '(maximum regularization: {0}).'.format(reg_coeff))
        self.weight.copy_(coeffs.flatten())
Example #5
 def optimize_core(cores, mu, direction):
     sse = 0
     for index in range(cores[mu].shape[1]):
         idx = torch.where(X[:, mu] == index)[0]
         leftside = lefts[mu][0, idx, :]
         rightside = rights[mu][:, idx, 0]
         lhs = rightside.t()[:, :, None]
         rhs = leftside[:, None, :]
         A = torch.reshape(lhs * rhs, [len(idx), -1]) * ws[idx, None]
         b = y[idx] * ws[idx]
         sol = torch.lstsq(b, A)[0][:A.shape[1], :]
         residuals = torch.norm(A.matmul(sol)[:, 0] - b)**2
         cores[mu][:, index, :] = torch.reshape(
             sol, cores[mu][:, index, :].shape)  #.t()
         sse += residuals
     # Update product chains for next core
     if direction == 'right':
         x0.left_orthogonalize(mu)
         lefts[mu + 1] = torch.einsum(
             'ijk,kjl->ijl', (lefts[mu], cores[mu][:, X[:, mu], :]))
     else:
         x0.right_orthogonalize(mu)
         rights[mu - 1] = torch.einsum(
             'ijk,kjl->ijl', (cores[mu][:, X[:, mu], :], rights[mu]))
     return sse
Example #6
    def _fit_generic(self, x, y):
        assert type(x) == type(y)
        if not hasattr(self, '_X'):
            self._design_matrix(x)
        if isinstance(x, torch.Tensor):
            # torch.lstsq returns _X\y as [max(m, n), k] (with _X of shape [m, n] and y [m, k])
            # rather than [n, k]; the first n rows hold the solution and, when m > n, the
            # remaining rows carry the residual information.
            self._params = torch.lstsq(y.view(-1, 1),
                                       self._X).solution[:self._nfuncs]
        elif isinstance(x, np.ndarray):
            sol, residuals, rank, singular_vals = np.linalg.lstsq(self._X,
                                                                  y.reshape(
                                                                      -1, 1),
                                                                  rcond=None)
            self._params = sol
        else:
            raise ValueError(complain, type(x), type(y))

        preds = self._forward(x)
        _sse = _sum_sq_err(y, preds)
        return {
            'sse': _sse,
            'model_score': _parameter_score(self._params),
            'params': self._params
        }
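The comment about torch.lstsq's return shape can be made concrete with a small sketch (hypothetical shapes; it requires a PyTorch version that still provides torch.lstsq):

import torch

A = torch.randn(10, 3)        # m = 10, n = 3
y = torch.randn(10, 1)        # k = 1

out, _ = torch.lstsq(y, A)    # out has shape [max(m, n), k] = [10, 1]
sol = out[:3]                 # first n rows are the least-squares solution
# For full-rank A with m > n, the squared entries of the remaining rows sum to the RSS:
rss = out[3:].pow(2).sum()
print(torch.allclose(rss, ((A @ sol - y) ** 2).sum(), atol=1e-4))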
Example #7
def lstsq_fit(coords, coords_ref, dist_thr=1.9, ca_dist=3.8):
    """
    Perform a least-squares fit of coords onto coords_ref
    """
    n = coords.shape[0]
    coords_out = torch.clone(coords)
    device = coords_out.device
    coords_out = coords_out.to('cpu')
    assignment, sel = assign_anchors(coords_ref, coords, dist_thr=dist_thr)
    # Not yet implemented on gpu so go to cpu:
    if coords_ref.is_cuda:
        coords_ref = coords_ref.to('cpu')
    if coords.is_cuda:
        coords = coords.to('cpu')
    # Topology
    anchors = coords_ref[assignment]
    pdist = torch.cdist(anchors, anchors)
    sequential = torch.diagonal(pdist, offset=1)
    sigma_ca = 0.1
    topology = torch.exp(-(sequential - ca_dist) / (2 * sigma_ca**2))
    toposel = torch.nonzero(topology > .5, as_tuple=True)[0]
    sel = sel[toposel]
    assignment = assignment[toposel]
    ##########
    if coords[sel].shape[0] > 3:
        X, _ = torch.lstsq(coords_ref[assignment].T, coords[sel].T)
        coords_out[sel] = (coords[sel].T.mm(X[:n])).T
        n_assigned = len(sel)
        print(
            f"lstsq_fit: n_assigned: {n_assigned}/{n} at less than {dist_thr} Å"
        )
    coords_out = coords_out.to(device)
    return coords_out
Example #8
def solve(A, b, out=None, bias=True):
    '''
    Solves for x to minimize ||Ax - b||^2
    for some matrix A and vector b.
    x is returned as a linear layer (either with or without a bias term).
    Will update out if given, otherwise the output will be a new linear layer.
    :param A: N x D pytorch tensor (N samples, D features)
    :param b: N x K pytorch tensor
    :param out: instance of torch.nn.Linear(D, K)
    :param bias: learn a bias term in addition to the weights
    :return: torch.nn.Linear(D, K) instance whose weights (and bias) solve Ax=b
    '''
    # A: N x D
    # b: N x K
    # x: D x K (first D rows of the torch.lstsq output)

    # Optionally append a column of ones so that the last solved coefficient is the bias.
    if bias:
        A = torch.cat([A, torch.ones(*(A.size()[:-1] + (1, ))).type_as(A)], -1)

    x, _ = torch.lstsq(b, A)

    d = A.size(-1) - (1 if bias else 0)  # number of weight coefficients

    if out is None:
        out = nn.Linear(d, b.size(-1), bias=bias).to(A.device)

    out.weight.data.copy_(x[:d].t())

    if bias:
        out.bias.data.copy_(x[d])

    return out
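A usage sketch for solve() with my own toy data (again assuming a PyTorch version that still provides torch.lstsq): recover known weights and bias from noiseless observations.

import torch

torch.manual_seed(0)
A = torch.randn(100, 4)                        # 100 samples, 4 features
w_true = torch.tensor([[1.0, -2.0, 0.5, 3.0]])
b = A @ w_true.t() + 0.25                      # 100 x 1 targets with bias 0.25

layer = solve(A, b, bias=True)                 # torch.nn.Linear(4, 1)
print(layer.weight.data)                       # ~ w_true
print(layer.bias.data)                         # ~ 0.25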
Example #9
def fit_least_squares(tokenizer,
                      sentence_model,
                      word2vec_model,
                      top_k_terms=100):
    """
    Huggingface solved this using

    1. Take the top KK most frequent words V in the vocabulary of a word2vec model
    2. Obtain embeddings for each word using word2vec, Φword(V)
    3. Obtain embeddings for each word using S-BERT, Φsent(V)
    4. Learn a least-squares linear projection matrix with L2 regularization from Φsent(V) to Φword(V)

    Apply this transformation for this
        c^=argc∈Cmax​cos(Φsent​(x)Z,Φsent​(c)Z)
    """

    # top K words
    top_k_words = word2vec_model.index2entity[:top_k_terms]

    # generate word embeddings for top k words
    word_embeddings = [word2vec_model[word] for word in top_k_words]
    # generate sentence embeddings for top k words
    sentence_embeddings = get_sentence_vectors(top_k_words, tokenizer,
                                               sentence_model)
    Z, qr = torch.lstsq(torch.from_numpy(np.array(word_embeddings)),
                        sentence_embeddings)
    # fit least squares
    # Z, residuals, rank, s = np.linalg.lstsq(word_embeddings, sentence_embeddings.detach().numpy())
    return Z, qr  #, residuals, rank, s
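The docstring above mentions L2 regularization, while the call itself is an ordinary least-squares fit. If a ridge-regularized projection is wanted, it can be obtained from the normal equations; a sketch with made-up shapes (lam is a hypothetical regularization strength):

import torch

# Hypothetical embeddings: 100 words, 768-dim sentence vectors, 300-dim word vectors.
S = torch.randn(100, 768)    # sentence embeddings of the top-K words
W = torch.randn(100, 300)    # word2vec embeddings of the same words
lam = 1.0

# Ridge solution of S @ Z ≈ W:  Z = (S^T S + lam*I)^{-1} S^T W
Z = torch.linalg.solve(S.T @ S + lam * torch.eye(S.shape[1]), S.T @ W)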
Example #10
def find_qbezier_fit(pts, t):
    r"""

    Parameters
    ----------
    pts : torch.Tensor
        of shape [pts_n, spatial_dims_n]
    t : torch.Tensor
        of shape [pts_n]

    Returns
    -------
    control_points : torch.Tensor
        of shape [3, spatial_dims_n]
    error : scalar
        root-mean-square residual of the fit (derived from the extra rows returned by torch.lstsq)
    """
    T = torch.stack([torch.ones_like(t), t, t.pow(2)]).permute(1, 0)
    del t
    M = T.new_tensor([[1, 0, 0], [-2, 2, 0], [1, -2, 1]])
    M = T.matmul(M)
    del T

    try:
        control_points, _ = torch.lstsq(pts, M)
        del pts, M, _
        error = control_points[3:].pow(2).mean().sqrt().item()
        control_points = control_points[:3]
        return control_points, error
    except RuntimeError:
        return None, np.inf
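A quick usage sketch with my own toy data (requires a PyTorch version that still ships torch.lstsq): sample a known quadratic Bezier curve and recover its control points.

import torch

# Known control points of a quadratic Bezier in 2D.
P = torch.tensor([[0.0, 0.0], [1.0, 2.0], [2.0, 0.0]])
t = torch.linspace(0.0, 1.0, 50)
# B(t) = [1, t, t^2] @ M @ P, with the same basis matrix M used above.
T = torch.stack([torch.ones_like(t), t, t.pow(2)]).permute(1, 0)
M = torch.tensor([[1.0, 0.0, 0.0], [-2.0, 2.0, 0.0], [1.0, -2.0, 1.0]])
pts = T @ M @ P

control_points, error = find_qbezier_fit(pts, t)
print(control_points)   # ~ P
print(error)            # ~ 0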
Example #11
    def fit(self, episodes):
        # sequence_length * batch_size x feature_size
        featmat = self._feature(episodes).view(-1, self.feature_size)
        # sequence_length * batch_size x 1
        returns = episodes.returns.view(-1, 1)

        reg_coeff = self._reg_coeff
        eye = torch.eye(self.feature_size,
                        dtype=torch.float32,
                        device=self.linear.weight.device)
        for _ in range(5):
            try:
                coeffs, _ = torch.lstsq(
                    torch.matmul(featmat.t(), returns),
                    torch.matmul(featmat.t(), featmat) + reg_coeff * eye)
                break
            except RuntimeError:
                reg_coeff += 10
        else:
            raise RuntimeError(
                'Unable to solve the normal equations in '
                '`LinearFeatureBaseline`. The matrix X^T*X (with X the design '
                'matrix) is not full-rank, regardless of the regularization '
                '(maximum regularization: {0}).'.format(reg_coeff))
        self.linear.weight.data = coeffs.data.t()
Example #12
    def lstsq_solver(self, A, B, pt_pairs, uv_pt_pairs):

        # Determine the scalar required to make the vectors meet (per pt pair)
        Y = []
        for i in range(pt_pairs.shape[1]):

            # If there is any nan in A, then skip it
            if torch.isnan(A[i]).any():
                continue

            # If an issue is encountered while solving the linear system of equations, skip it
            try:
                xs, qrs = torch.lstsq(A[i].float(), B[i].float())

                # Selecting the first x to find the intersection
                x = xs[0] * torch.pow(qrs[0], 2)
            except RuntimeError:
                continue

            # Using the determined values, compute the intersection point y = m*x + b
            b = pt_pairs[0][i]
            m = uv_pt_pairs[0][i]
            y = x[0] * m + b

            # Store the vectors' intersection point
            Y.append(y)

        # Combining the results
        Y = torch.stack(Y)
        return Y
Example #13
 def fit_coeff(self, x, weights, y_actual):
     '''
         Use LSE to solve for coeff: y_actual = coeff * (weighted)x
               x.shape: n_cases * n_in
         weights.shape: n_cases * n_rules
         [ coeff.shape: n_rules * n_out * (n_in+1) ]
               y.shape: n_cases * n_out
     '''
     # Append 1 to each list of input vals, for the constant term:
     x_plus = torch.cat([x, torch.ones(x.shape[0], 1)], dim=1)
     # Shape of weighted_x is n_cases * n_rules * (n_in+1)
     weighted_x = torch.einsum('bp, bq -> bpq', weights, x_plus)
     #print('w,xp:', weights, x_plus)
     #print('wX:', weighted_x)
     # Can't have value 0 for weights, or LSE won't work:
     weighted_x[weighted_x == 0] = 1e-12
     # Squash x and y down to 2D matrices for gels:
     weighted_x_2d = weighted_x.view(weighted_x.shape[0], -1)
     y_actual_2d = y_actual.view(y_actual.shape[0], -1)
     #print('y_actual_2d, weighted_x_2d', y_actual_2d, weighted_x_2d)
     # Use gels to do LSE, then pick out the solution rows:
     try:
         #    coeff_2d, _ = torch.gels(y_actual_2d, weighted_x_2d)
         #except AttributeError:
         coeff_2d, _ = torch.lstsq(y_actual_2d, weighted_x_2d)
     except RuntimeError as e:
         print('Internal error in gels', e)
         print('Weights are:', weighted_x)
         raise e
     coeff_2d = coeff_2d[0:weighted_x_2d.shape[1]]
     # Reshape to 3D tensor: divide by rules, n_in+1, then swap last 2 dims
     self.coeff = coeff_2d.view(weights.shape[1], x.shape[1] + 1, -1) \
         .transpose(1, 2)
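A shape walk-through of the LSE step above, with hypothetical sizes and torch.linalg.lstsq standing in for the deprecated call (the slicing of the first n rows is then unnecessary):

import torch

n_cases, n_in, n_rules, n_out = 20, 2, 3, 1
x = torch.randn(n_cases, n_in)
weights = torch.rand(n_cases, n_rules)
y_actual = torch.randn(n_cases, n_out)

x_plus = torch.cat([x, torch.ones(n_cases, 1)], dim=1)           # n_cases x (n_in+1)
weighted_x = torch.einsum('bp, bq -> bpq', weights, x_plus)      # n_cases x n_rules x (n_in+1)
weighted_x_2d = weighted_x.view(n_cases, -1)                     # n_cases x (n_rules*(n_in+1))
coeff_2d = torch.linalg.lstsq(weighted_x_2d, y_actual).solution  # (n_rules*(n_in+1)) x n_out
coeff = coeff_2d.view(n_rules, n_in + 1, n_out).transpose(1, 2)  # n_rules x n_out x (n_in+1)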
Example #14
def _update_step(ensemble, observations, g, gamma, Cpp, Cup):
    """
    Update step of the kalman filter
    Calculates the covariances and returns new ensembles
    """
    # return ensemble + (Cup @ np.linalg.lstsq(Cpp+gamma, (observations - g).T)[0]).T
    return torch.mm(Cup, torch.lstsq((observations-g).t(), Cpp+gamma)[0]).t() + ensemble
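On newer PyTorch the commented-out NumPy line maps onto torch.linalg; since Cpp + gamma is square, torch.linalg.solve can stand in for the least-squares call. A sketch (the function name _update_step_linalg is mine, and it assumes Cpp + gamma is non-singular):

import torch

def _update_step_linalg(ensemble, observations, g, gamma, Cpp, Cup):
    """Same EnKF update as above, written with torch.linalg.solve."""
    innovations = (observations - g).t()
    return torch.mm(Cup, torch.linalg.solve(Cpp + gamma, innovations)).t() + ensemble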
Example #15
def _get_perspective_coeffs(
        startpoints: List[List[int]], endpoints: List[List[int]]
) -> List[float]:
    """Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms.

    In Perspective Transform each pixel (x, y) in the original image gets transformed as,
     (x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) )

    Args:
        startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the original image.
        endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image.

    Returns:
        octuple (a, b, c, d, e, f, g, h) for transforming each pixel.
    """
    a_matrix = torch.zeros(2 * len(startpoints), 8, dtype=torch.float)

    for i, (p1, p2) in enumerate(zip(endpoints, startpoints)):
        a_matrix[2 * i, :] = torch.tensor([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
        a_matrix[2 * i + 1, :] = torch.tensor([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])

    b_matrix = torch.tensor(startpoints, dtype=torch.float).view(8)
    res = torch.lstsq(b_matrix, a_matrix)[0]

    output: List[float] = res.squeeze(1).tolist()
    return output
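On PyTorch builds without torch.lstsq, the same coefficients can be computed with torch.linalg.lstsq; a sketch (the name _get_perspective_coeffs_linalg is mine, and the right-hand side is kept two-dimensional to avoid relying on 1-D handling):

import torch
from typing import List

def _get_perspective_coeffs_linalg(startpoints: List[List[int]], endpoints: List[List[int]]) -> List[float]:
    # Same construction as above, solved with torch.linalg.lstsq (note the (A, B) argument order).
    a_matrix = torch.zeros(2 * len(startpoints), 8, dtype=torch.float)
    for i, (p1, p2) in enumerate(zip(endpoints, startpoints)):
        a_matrix[2 * i, :] = torch.tensor([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
        a_matrix[2 * i + 1, :] = torch.tensor([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])
    b_matrix = torch.tensor(startpoints, dtype=torch.float).view(8, 1)
    res = torch.linalg.lstsq(a_matrix, b_matrix).solution
    return res.squeeze(1).tolist()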
Example #16
def least_squares_qr(A, B):
    """Faster lstsq variant that uses PyTorch native QR factorization"""
    m, n = A.shape
    _, k = B.shape
    output, _ = torch.lstsq(B, A)
    X = output[:n]
    residuals = output[n:].square().sum(0)
    return X, residuals
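A rough equivalent on current PyTorch: torch.linalg.lstsq only fills in .residuals for some drivers and full-rank inputs, so this sketch computes the residuals explicitly (the name least_squares_linalg is mine):

import torch

def least_squares_linalg(A, B):
    """Sketch of the same interface with torch.linalg.lstsq."""
    X = torch.linalg.lstsq(A, B).solution
    residuals = (A @ X - B).square().sum(0)
    return X, residuals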
Example #17
 def fit(self, X: torch.Tensor, y: torch.Tensor) -> None:
     X = X.rename(None)
     y = y.rename(None).view(-1, 1)
     assert X.shape[0] == y.shape[
         0], "Number of X and y rows don't match"
     if self.fit_intercept:
         X = torch.cat([torch.ones(X.shape[0], 1, device=self.device), X],
                       dim=1)
     # Solving X*w = y with Normal equations:
     # X^{T}*X*w = X^{T}*y
     lhs = X.T @ X
     rhs = X.T @ y
     if self.alpha == 0:
         self.w, _ = torch.lstsq(rhs, lhs)
     else:
         ridge = self.alpha * torch.eye(lhs.shape[0], device=self.device)
         self.w, _ = torch.lstsq(rhs, lhs + ridge)
Example #18
def calIntsAcc(gt_i, pred_i, data_batch=1):
    n, c, h, w = gt_i.shape
    pred_i = pred_i.view(n, c, h, w)
    ref_int = gt_i[:, :3].repeat(1, gt_i.shape[1] // 3, 1, 1)
    gt_i = gt_i / ref_int
    scale = torch.lstsq(gt_i.view(-1, 1), pred_i.view(-1, 1))
    ints_ratio = (gt_i - scale[0][0] * pred_i).abs() / (gt_i + 1e-8)
    ints_error = torch.stack(ints_ratio.split(3, 1), 1).mean(2)
    return {'ints_ratio': ints_ratio.mean().item()}, ints_error.squeeze()
Example #19
 def step(self, loss):
     grad = self.compute_grad(loss)
     H = self.compute_hessian(grad)
     # torch.lstsq(B, A) solves A @ X = B, so pass (grad, H) to solve H @ step = grad
     step, _ = torch.lstsq(grad.unsqueeze(1), H)
     step = step[:,0]
     param_steps = torch.split(step, self.sizes, 0)
     param_steps = [step.view(size) for step,size in zip(param_steps, self.shapes)]
     for p,pstep in zip(self.params, param_steps):
         p.data.add_(-self.lr, pstep)
Example #20
 def _get_nu(self, fY, hY):
     """Compute nu (ie lambda) if not provided by the problem's solver.
     That is, solve: hY^T nu = fY^T.
     """
     p = hY.size(1)
     nu = fY.new_zeros(self.b, p)
     for i in range(self.b): # loop over batch
         solution,_ = torch.lstsq(fY[i, :].unsqueeze(-1), hY[i, :, :].t())
         nu[i, :] = solution[:p, :].squeeze() # extract first p values
     return nu
Example #21
def lstsq(input, A):
    solution = []
    QR = []
    for i, a in zip(input, A):
        s, q = torch.lstsq(i, a)
        solution += [s]
        QR += [q]
    solution = torch.stack(solution)
    QR = torch.stack(QR)
    return solution, QR
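torch.linalg.lstsq accepts batched inputs, so on newer PyTorch the Python loop above can usually be collapsed into a single call; a sketch (note that, unlike torch.lstsq, no QR factors are returned):

import torch

def lstsq_batched(input, A):
    """Batched solve: input (batch, m, k), A (batch, m, n) -> solution (batch, n, k)."""
    return torch.linalg.lstsq(A, input).solution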
Example #22
def wlstsq(A, b, W=None):
    single = len(b.shape) == 1
    if single: b = b[:, None]
    if W is not None:  # Weight matrix is a diagonal matrix with sqrt of the input weights
        W = torch.sqrt(W.reshape(-1, 1))
        A, b = A * W, b * W
    x = torch.lstsq(
        b, A).solution[:A.shape[1], :]  # first n rows contains solution
    if single: x = x.squeeze(1)
    return x
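A small usage sketch with my own toy data (it needs a PyTorch version that still provides torch.lstsq): scaling both A and b by sqrt(W) is the standard reduction of weighted least squares to ordinary least squares, so a point with (almost) zero weight has (almost) no influence on the solution.

import torch

torch.manual_seed(0)
A = torch.randn(50, 3)
x_true = torch.tensor([1.0, -2.0, 0.5])
b = A @ x_true
b[0] += 5.0          # one corrupted observation ...
W = torch.ones(50)
W[0] = 1e-6          # ... given almost no weight

x = wlstsq(A, b, W)
print(x)             # ~ x_true despite the outlier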
Example #23
 def blas_lapack_ops(self):
     m = torch.randn(3, 3)
     a = torch.randn(10, 3, 4)
     b = torch.randn(10, 4, 3)
     v = torch.randn(3)
     return (
         torch.addbmm(m, a, b),
         torch.addmm(torch.randn(2, 3), torch.randn(2, 3),
                     torch.randn(3, 3)),
         torch.addmv(torch.randn(2), torch.randn(2, 3), torch.randn(3)),
         torch.addr(torch.zeros(3, 3), v, v),
         torch.baddbmm(m, a, b),
         torch.bmm(a, b),
         torch.chain_matmul(torch.randn(3, 3), torch.randn(3, 3),
                            torch.randn(3, 3)),
         # torch.cholesky(a), # deprecated
         torch.cholesky_inverse(torch.randn(3, 3)),
         torch.cholesky_solve(torch.randn(3, 3), torch.randn(3, 3)),
         torch.dot(v, v),
         torch.eig(m),
         torch.geqrf(a),
         torch.ger(v, v),
         torch.inner(m, m),
         torch.inverse(m),
         torch.det(m),
         torch.logdet(m),
         torch.slogdet(m),
         torch.lstsq(m, m),
         torch.lu(m),
         torch.lu_solve(m, *torch.lu(m)),
         torch.lu_unpack(*torch.lu(m)),
         torch.matmul(m, m),
         torch.matrix_power(m, 2),
         # torch.matrix_rank(m),
         torch.matrix_exp(m),
         torch.mm(m, m),
         torch.mv(m, v),
         # torch.orgqr(a, m),
         # torch.ormqr(a, m, v),
         torch.outer(v, v),
         torch.pinverse(m),
         # torch.qr(a),
         torch.solve(m, m),
         torch.svd(a),
         # torch.svd_lowrank(a),
         # torch.pca_lowrank(a),
         # torch.symeig(a), # deprecated
         # torch.lobpcg(a, b), # not supported
         torch.trapz(m, m),
         torch.trapezoid(m, m),
         torch.cumulative_trapezoid(m, m),
         # torch.triangular_solve(m, m),
         torch.vdot(v, v),
     )
Example #24
    def _fit_mixed(self, x, y, bool_idx):
        ''' Fits a model by only using some basis functions,
            specified by an array of 0's and 1's.
        '''
        mask = (bool_idx == 1)
        num_funcs = int(bool_idx.sum())

        if not hasattr(self, '_X'):
            self._design_matrix(x)
        if len(bool_idx) > self._nfuncs or len(bool_idx) < 1:
            return None
        assert type(x) == type(y)

        if isinstance(x, torch.Tensor):
            # torch.lstsq returns _X\y as [max(m, n), k] (with _X of shape [m, n] and y [m, k])
            # rather than [n, k]; the first n rows hold the solution and, when m > n, the
            # remaining rows carry the residual information.
            _X_mask = self._X.t()[mask].t()
            _params = torch.lstsq(y.view(-1, 1), _X_mask).solution[:num_funcs]
        elif isinstance(x, np.ndarray):
            _X_mask = self._X.T[mask].T
            sol, residuals, rank, singular_vals = np.linalg.lstsq(_X_mask,
                                                                  y.reshape(
                                                                      -1, 1),
                                                                  rcond=None)
            _params = sol
        else:
            raise ValueError(complain, type(x), type(y))

        str_func = _index_to_function(num_functions=self._nfuncs,
                                      del_nan=self._isnan)
        self._last_mixed_params = {
            'params':
            _params,
            'bool_idx':
            bool_idx,
            'str_idx':
            [str_func[i] for i in range(self._nfuncs) if bool_idx[i] == 1],
        }
        # return a summary of the fit
        preds = self._forward_mixed(x, bool_idx)
        _sse = _sum_sq_err(y, preds)

        return {
            'sse':
            _sse,
            'model_score':
            _parameter_score(self._last_mixed_params['params']),
            'params':
            self._last_mixed_params['params'],
            'bool_idx':
            bool_idx,
            'str_idx':
            [str_func[i] for i in range(self._nfuncs) if bool_idx[i] == 1],
        }
Example #25
def match_colors(im_ref, im_q, im_test, ksz, gauss_kernel):
    """ Estimates a color transformation matrix between im_ref and im_q. Applies the estimated transformation to
        im_test
    """
    gauss_kernel = gauss_kernel.to(im_ref.device)
    bi = 5

    # Apply Gaussian smoothing
    im_ref_mean = apply_kernel(im_ref, ksz, gauss_kernel)[:, :, bi:-bi,
                                                          bi:-bi].contiguous()
    im_q_mean = apply_kernel(im_q, ksz, gauss_kernel)[:, :, bi:-bi,
                                                      bi:-bi].contiguous()

    im_ref_mean_re = im_ref_mean.view(*im_ref_mean.shape[:2], -1)
    im_q_mean_re = im_q_mean.view(*im_q_mean.shape[:2], -1)

    # Estimate color transformation matrix by minimizing the least squares error
    c_mat_all = []
    for ir, iq in zip(im_ref_mean_re, im_q_mean_re):
        c = torch.lstsq(ir.t(), iq.t())
        c = c.solution[:3]
        if torch.isinf(c).sum().item() > 0 or torch.isnan(c).sum().item() > 0:
            c = torch.zeros_like(c).float()
        c_mat_all.append(c)

    c_mat = torch.stack(c_mat_all, dim=0)
    im_q_mean_conv = torch.matmul(im_q_mean_re.permute(0, 2, 1),
                                  c_mat).permute(0, 2, 1)
    im_q_mean_conv = im_q_mean_conv.view(im_q_mean.shape)

    err = ((im_q_mean_conv - im_ref_mean) * 255.0).norm(dim=1)

    thresh = 20

    # If error is larger than a threshold, ignore these pixels
    valid = err < thresh

    pad = (im_q.shape[-1] - valid.shape[-1]) // 2
    pad = [pad, pad, pad, pad]
    valid = F.pad(valid, pad)

    upsample_factor = im_test.shape[-1] / valid.shape[-1]
    valid = F.interpolate(valid.unsqueeze(1).float(),
                          scale_factor=upsample_factor,
                          mode='bilinear')
    valid = valid > 0.9

    # Apply the transformation to test image
    im_test_re = im_test.view(*im_test.shape[:2], -1)
    im_t_conv = torch.matmul(im_test_re.permute(0, 2, 1),
                             c_mat).permute(0, 2, 1)
    im_t_conv = im_t_conv.view(im_test.shape)

    return im_t_conv, valid
Example #26
    def _make_coeffs(window_length, order):

        half_length, rem = divmod(window_length, 2)
        if rem == 0:
            raise ValueError("window_length must be odd.")

        idx = torch.arange(window_length - half_length - 1, -half_length - 1, -1, dtype=torch.float, device="cpu")
        a = idx ** torch.arange(order + 1, dtype=torch.float, device="cpu").reshape(-1, 1)
        y = torch.zeros(order + 1, dtype=torch.float, device="cpu")
        y[0] = 1.0
        return torch.lstsq(y, a).solution.squeeze()
Example #27
 def fit(self, states, returns):
     features = self._features(states)
     reg = self.reg * th.eye(features.size(1))
     reg = reg.to(states.device)
     A = features.t() @ features + reg
     b = features.t() @ returns
     if hasattr(th, 'lstsq'):  # Required for torch < 1.3.0
         coeffs, _ = th.lstsq(b, A)
     else:
         coeffs, _ = th.gels(b, A)
     self.linear.weight.data = coeffs.data.t()
Example #28
    def savgol_coeffs(self,
                      window_length,
                      polyorder,
                      deriv=0,
                      delta=1.0,
                      pos=None,
                      use="conv"):
        if polyorder >= window_length:
            raise ValueError("polyorder must be less than window_length.")

        halflen, rem = divmod(window_length, 2)

        if rem == 0:
            raise ValueError("window_length must be odd.")

        if pos is None:
            pos = halflen

        if not (0 <= pos < window_length):
            raise ValueError("pos must be nonnegative and less than "
                             "window_length.")

        if use not in ['conv', 'dot']:
            raise ValueError("`use` must be 'conv' or 'dot'")

        # Form the design matrix A.  The columns of A are powers of the integers
        # from -pos to window_length - pos - 1.  The powers (i.e. rows) range
        # from 0 to polyorder.  (That is, A is a Vandermonde matrix, but not
        # necessarily square.)
        x = torch.arange(-pos, window_length - pos, dtype=torch.float32)
        if use == "conv":
            # Reverse so that result can be used in a convolution.
            # (Negative-step slicing is not supported on tensors, so use flip.)
            x = torch.flip(x, dims=(0,))

        order = torch.arange(polyorder + 1).reshape(-1, 1)
        if order.numel() == 1:
            raise NotImplementedError
            # Avoid spurious DeprecationWarning in numpy 1.8.0 for
            # ``[1] ** [[2]]``, see numpy gh-4145.
            # A = np.atleast_2d(x ** order[0, 0])
        else:
            A = x**order

        # y determines which order derivative is returned.
        y = torch.zeros(polyorder + 1)
        # The coefficient assigned to y[deriv] scales the result to take into
        # account the order of the derivative and the sample spacing.
        y[deriv] = math.factorial(deriv) / (delta**deriv)

        # Find the least-squares solution of A*c = y
        coeffs, _ = torch.lstsq(y, A)

        return coeffs
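A minimal usage sketch (assuming the method above is made available as a plain function, since it never uses self, and that torch.lstsq is still available). For deriv=0 the coefficients reproduce a constant signal exactly, so they sum to 1, and smoothing is a 1-D convolution of the signal with the returned window:

import torch
import torch.nn.functional as F

coeffs = savgol_coeffs(None, window_length=7, polyorder=2)   # shape: (7, 1)
print(coeffs.sum())                                           # ~ 1.0

signal = torch.sin(torch.linspace(0, 6.28, 100)) + 0.1 * torch.randn(100)
smoothed = F.conv1d(signal.view(1, 1, -1), coeffs.view(1, 1, 7), padding=3)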