Example #1
    def __init__(self, height, width, lr = 1, aux_loss = False, ray_tracing = False):
        super(Depth3DGridGen_with_mask, self).__init__()
        self.height, self.width = height, width
        self.aux_loss = aux_loss
        self.lr = lr
        self.ray_tracing = ray_tracing

        self.grid = np.zeros( [self.height, self.width, 3], dtype=np.float32)
        self.grid[:,:,0] = np.expand_dims(np.repeat(np.expand_dims(np.arange(-1, 1, 2.0/self.height), 0), repeats = self.width, axis = 0).T, 0)
        self.grid[:,:,1] = np.expand_dims(np.repeat(np.expand_dims(np.arange(-1, 1, 2.0/self.width), 0), repeats = self.height, axis = 0), 0)
        self.grid[:,:,2] = np.ones([self.height, self.width])
        self.grid = torch.from_numpy(self.grid.astype(np.float32))

        self.theta = self.grid[:,:,0] * np.pi/2 + np.pi/2
        self.phi = self.grid[:,:,1] * np.pi

        self.x = torch.sin(self.theta) * torch.cos(self.phi)
        self.y = torch.sin(self.theta) * torch.sin(self.phi)
        self.z = torch.cos(self.theta)

        self.grid3d = torch.from_numpy(np.zeros( [self.height, self.width, 4], dtype=np.float32))

        self.grid3d[:,:,0] = self.x
        self.grid3d[:,:,1] = self.y
        self.grid3d[:,:,2] = self.z
        self.grid3d[:,:,3] = self.grid[:,:,2]
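
A small sanity check one might run against the grid built above (a sketch, assuming the Depth3DGridGen_with_mask class is importable): since (x, y, z) are spherical coordinates, every point should lie on the unit sphere.

import torch

gen = Depth3DGridGen_with_mask(height=4, width=8)
r2 = gen.x ** 2 + gen.y ** 2 + gen.z ** 2
assert torch.allclose(r2, torch.ones_like(r2), atol=1e-5)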
Example #2
def test_inference_sgpr():
    N = 1000
    X = dist.Uniform(torch.zeros(N), torch.ones(N)*5).sample()
    y = 0.5 * torch.sin(3*X) + dist.Normal(torch.zeros(N), torch.ones(N)*0.5).sample()
    kernel = RBF(input_dim=1)
    Xu = torch.arange(0, 5.5, 0.5)

    sgpr = SparseGPRegression(X, y, kernel, Xu)
    sgpr.optimize(optim.Adam({"lr": 0.01}), num_steps=1000)

    Xnew = torch.arange(0, 5.05, 0.05)
    loc, var = sgpr(Xnew, full_cov=False)
    target = 0.5 * torch.sin(3*Xnew)

    assert_equal((loc - target).abs().mean().item(), 0, prec=0.07)
Example #3
def test_inference_whiten_vsgp():
    N = 1000
    X = dist.Uniform(torch.zeros(N), torch.ones(N)*5).sample()
    y = 0.5 * torch.sin(3*X) + dist.Normal(torch.zeros(N), torch.ones(N)*0.5).sample()
    kernel = RBF(input_dim=1)
    Xu = torch.arange(0, 5.5, 0.5)

    vsgp = VariationalSparseGP(X, y, kernel, Xu, Gaussian(), whiten=True)
    vsgp.optimize(optim.Adam({"lr": 0.01}), num_steps=1000)

    Xnew = torch.arange(0, 5.05, 0.05)
    loc, var = vsgp(Xnew, full_cov=False)
    target = 0.5 * torch.sin(3*Xnew)

    assert_equal((loc - target).abs().mean().item(), 0, prec=0.07)
Example #4
    def forward(self, depth, trans0, trans1, rotate):
        self.batchgrid3d = torch.zeros(torch.Size([depth.size(0)]) + self.grid3d.size())

        for i in range(depth.size(0)):
            self.batchgrid3d[i] = self.grid3d

        self.batchgrid3d = Variable(self.batchgrid3d)

        self.batchgrid = torch.zeros(torch.Size([depth.size(0)]) + self.grid.size())

        for i in range(depth.size(0)):
            self.batchgrid[i] = self.grid

        self.batchgrid = Variable(self.batchgrid)

        if depth.is_cuda:
            self.batchgrid = self.batchgrid.cuda()
            self.batchgrid3d = self.batchgrid3d.cuda()


        x_ = self.batchgrid3d[:,:,:,0:1] * depth + trans0.view(-1,1,1,1).repeat(1, self.height, self.width, 1)

        y_ = self.batchgrid3d[:,:,:,1:2] * depth + trans1.view(-1,1,1,1).repeat(1, self.height, self.width, 1)
        z = self.batchgrid3d[:,:,:,2:3] * depth
        #print(x.size(), y.size(), z.size())

        rotate_z = rotate.view(-1,1,1,1).repeat(1,self.height, self.width,1) * np.pi

        x = x_ * torch.cos(rotate_z) - y_ * torch.sin(rotate_z)
        y = x_ * torch.sin(rotate_z) + y_ * torch.cos(rotate_z)


        r = torch.sqrt(x**2 + y**2 + z**2) + 1e-5

        #print(r)
        theta = torch.acos(z/r)/(np.pi/2)  - 1
        #phi = torch.atan(y/x)

        if depth.is_cuda:
            phi = torch.atan(y/(x + 1e-5))  + np.pi * x.lt(0).type(torch.cuda.FloatTensor) * (y.ge(0).type(torch.cuda.FloatTensor) - y.lt(0).type(torch.cuda.FloatTensor))
        else:
            phi = torch.atan(y/(x + 1e-5))  + np.pi * x.lt(0).type(torch.FloatTensor) * (y.ge(0).type(torch.FloatTensor) - y.lt(0).type(torch.FloatTensor))


        phi = phi/np.pi

        output = torch.cat([theta,phi], 3)
        return output
Example #5
 def _setUp(self, double=False, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=device, dtype=dtype).unsqueeze(-1)
     train_y = torch.sin(train_x * (2 * math.pi)).squeeze(-1)
     train_yvar = torch.tensor(0.1 ** 2, device=device)
     noise = torch.tensor(NOISE, device=device, dtype=dtype)
     self.train_x = train_x
     self.train_y = train_y + noise
     self.train_yvar = train_yvar
     self.bounds = torch.tensor([[0.0], [1.0]], device=device, dtype=dtype)
     model_st = SingleTaskGP(self.train_x, self.train_y)
     self.model_st = model_st.to(device=device, dtype=dtype)
     self.mll_st = ExactMarginalLogLikelihood(
         self.model_st.likelihood, self.model_st
     )
     self.mll_st = fit_gpytorch_model(self.mll_st, options={"maxiter": 5})
     model_fn = FixedNoiseGP(
         self.train_x, self.train_y, self.train_yvar.expand_as(self.train_y)
     )
     self.model_fn = model_fn.to(device=device, dtype=dtype)
     self.mll_fn = ExactMarginalLogLikelihood(
         self.model_fn.likelihood, self.model_fn
     )
     self.mll_fn = fit_gpytorch_model(self.mll_fn, options={"maxiter": 5})
Example #6
    def draw(
        self, n: int = 1, out: Optional[Tensor] = None, dtype: torch.dtype = torch.float
    ) -> Optional[Tensor]:
        r"""Draw `n` qMC samples from the standard Normal.

        Args:
            n: The number of samples to draw.
            out: An optional output tensor. If provided, draws are put into this
                tensor, and the function returns None.
            dtype: The desired torch data type (ignored if `out` is provided).

        Returns:
            An `n x d` tensor of samples if `out=None`, and `None` otherwise.
        """
        # get base samples
        samples = self._sobol_engine.draw(n, dtype=dtype)
        if self._inv_transform:
            # apply inverse transform (values too close to 0/1 result in inf values)
            v = 0.5 + (1 - 1e-10) * (samples - 0.5)
            samples_tf = torch.erfinv(2 * v - 1) * math.sqrt(2)
        else:
            # apply Box-Muller transform (note: [1] indexes starting from 1)
            even = torch.arange(0, samples.shape[-1], 2)
            Rs = (-2 * torch.log(samples[:, even])).sqrt()
            thetas = 2 * math.pi * samples[:, 1 + even]
            cos = torch.cos(thetas)
            sin = torch.sin(thetas)
            samples_tf = torch.stack([Rs * cos, Rs * sin], -1).reshape(n, -1)
            # make sure we only return the number of dimension requested
            samples_tf = samples_tf[:, : self._d]
        if out is None:
            return samples_tf
        else:
            out.copy_(samples_tf)
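
For reference, a standalone sketch of the Box-Muller branch above (plain uniforms standing in for the Sobol base samples): each pair of uniforms maps to a pair of independent standard normal draws.

import math
import torch

u = torch.rand(1000, 2).clamp_min(1e-10)  # clamp away from 0 to avoid log(0)
r = (-2 * torch.log(u[:, 0])).sqrt()
theta = 2 * math.pi * u[:, 1]
z = torch.stack([r * torch.cos(theta), r * torch.sin(theta)], -1)
# z[:, 0] and z[:, 1] are approximately N(0, 1) and uncorrelated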
Example #7
def _get_random_data(n, **tkwargs):
    train_x1 = torch.linspace(0, 0.95, n + 1, **tkwargs) + 0.05 * torch.rand(
        n + 1, **tkwargs
    )
    train_x2 = torch.linspace(0, 0.95, n, **tkwargs) + 0.05 * torch.rand(n, **tkwargs)
    train_y1 = torch.sin(train_x1 * (2 * math.pi)) + 0.2 * torch.randn_like(train_x1)
    train_y2 = torch.cos(train_x2 * (2 * math.pi)) + 0.2 * torch.randn_like(train_x2)
    return train_x1.unsqueeze(-1), train_x2.unsqueeze(-1), train_y1, train_y2
Example #8
 def backward(ctx, grad_output):
     input,         = ctx.saved_tensors
     grad_input     = torch.stack((grad_output, torch.zeros_like(grad_output)), dim=len(grad_output.shape))
     phase_input    = angle(input)
     phase_input    = torch.stack((torch.cos(phase_input), torch.sin(phase_input)), dim=len(grad_output.shape))
     grad_input     = multiply_complex(phase_input, grad_input)
     
     return 0.5*grad_input
Example #9
 def _getModel(self, double=False, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=device, dtype=dtype).unsqueeze(-1)
     noise = torch.tensor(NOISE, device=device, dtype=dtype)
     train_y = torch.sin(train_x.view(-1) * (2 * math.pi)) + noise
     model = SingleTaskGP(train_x, train_y)
     mll = ExactMarginalLogLikelihood(model.likelihood, model)
     return mll.to(device=device, dtype=dtype)
Example #10
def neg_michalewicz(X: Tensor) -> Tensor:
    r"""Negative 10-dim Michalewicz test function.

    10-dim function (usually evaluated on hypercube [0, pi]^10):

        `M(x) = sum_{i=1}^10 sin(x_i) (sin(i x_i^2 / pi)^20)`

    Args:
        X: A Tensor of size `10` or `k x 10` (`k` batch evaluations).

    Returns:
        `-M(X)`, the negative value of the Michalewicz function.
    """
    batch = X.ndimension() > 1
    X = X if batch else X.unsqueeze(0)
    a = 1 + torch.arange(10, device=X.device, dtype=X.dtype)
    result = torch.sum(torch.sin(X) * torch.sin(a * X ** 2 / math.pi) ** 20, dim=-1)
    return result if batch else result.squeeze(0)
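
A quick usage check (a sketch, assuming the function above is importable): evaluate a random batch on the usual domain [0, pi]^10 and confirm the batched output shape.

import math
import torch

X = torch.rand(4, 10) * math.pi  # k=4 batch evaluations in [0, pi]^10
vals = neg_michalewicz(X)
assert vals.shape == torch.Size([4])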
Example #11
 def forward(ctx, input):
     assert input.shape[-1]==2, "Complex tensor should have real and imaginary parts."
     output         = input.clone()
     amplitude      = torch.exp(input[..., 0])
     # amplitude      = input[..., 0]
     output[..., 0] = amplitude*torch.cos(input[..., 1])
     output[..., 1] = amplitude*torch.sin(input[..., 1])
     
     ctx.save_for_backward(output)
     return output
Example #12
    def __init__(self, input_dim: int, max_len: int = 5000) -> None:
        super().__init__()

        # Compute the positional encodings once in log space.
        positional_encoding = torch.zeros(max_len, input_dim, requires_grad=False)
        position = torch.arange(0, max_len).unsqueeze(1).float()
        div_term = torch.exp(torch.arange(0, input_dim, 2).float() * -(math.log(10000.0) / input_dim))
        positional_encoding[:, 0::2] = torch.sin(position * div_term)
        positional_encoding[:, 1::2] = torch.cos(position * div_term)
        positional_encoding = positional_encoding.unsqueeze(0)
        self.register_buffer('positional_encoding', positional_encoding)
Example #13
def _get_random_data(batch_shape, num_outputs, n=10, **tkwargs):
    train_x = torch.linspace(0, 0.95, n, **tkwargs).unsqueeze(-1) + 0.05 * torch.rand(
        n, 1, **tkwargs
    ).repeat(batch_shape + torch.Size([1, 1]))
    train_y = torch.sin(train_x * (2 * math.pi)) + 0.2 * torch.randn(
        n, num_outputs, **tkwargs
    ).repeat(batch_shape + torch.Size([1, 1]))

    if num_outputs == 1:
        train_y = train_y.squeeze(-1)
    return train_x, train_y
Example #14
    def __init__(self, d_model, dropout, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = torch.nn.Dropout(p=dropout)

        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0., max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0., d_model, 2) * -(math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.register_buffer("pe", pe)
Example #15
def _get_random_mt_data(**tkwargs):
    train_x = torch.linspace(0, 0.95, 10, **tkwargs) + 0.05 * torch.rand(10, **tkwargs)
    train_y1 = torch.sin(train_x * (2 * math.pi)) + torch.randn_like(train_x) * 0.2
    train_y2 = torch.cos(train_x * (2 * math.pi)) + torch.randn_like(train_x) * 0.2
    train_i_task1 = torch.full_like(train_x, dtype=torch.long, fill_value=0)
    train_i_task2 = torch.full_like(train_x, dtype=torch.long, fill_value=1)
    full_train_x = torch.cat([train_x, train_x])
    full_train_i = torch.cat([train_i_task1, train_i_task2])
    full_train_y = torch.cat([train_y1, train_y2])
    train_X = torch.stack([full_train_x, full_train_i.type_as(full_train_x)], dim=-1)
    train_Y = full_train_y
    return train_X, train_Y
Example #16
 def __init__(self, dropout, dim, max_len=5000):
     pe = torch.zeros(max_len, dim)
     position = torch.arange(0, max_len).unsqueeze(1)
     div_term = torch.exp(torch.arange(0, dim, 2) *
                          -(math.log(10000.0) / dim))
     pe[:, 0::2] = torch.sin(position * div_term)
     pe[:, 1::2] = torch.cos(position * div_term)
     pe = pe.unsqueeze(1)
     super(PositionalEncoding, self).__init__()
     self.register_buffer('pe', pe)
     self.dropout = nn.Dropout(p=dropout)
     self.dim = dim
Example #17
def add_positional_features(tensor: torch.Tensor,
                            min_timescale: float = 1.0,
                            max_timescale: float = 1.0e4):
    # pylint: disable=line-too-long
    """
    Implements the frequency-based positional encoding described
    in `Attention is all you Need
    <https://www.semanticscholar.org/paper/Attention-Is-All-You-Need-Vaswani-Shazeer/0737da0767d77606169cbf4187b83e1ab62f6077>`_ .

    Adds sinusoids of different frequencies to a ``Tensor``. A sinusoid of a
    different frequency and phase is added to each dimension of the input ``Tensor``.
    This allows the attention heads to use absolute and relative positions.

    The number of timescales is equal to hidden_dim / 2 within the range
    (min_timescale, max_timescale). For each timescale, the two sinusoidal
    signals sin(timestep / timescale) and cos(timestep / timescale) are
    generated and concatenated along the hidden_dim dimension.

    Parameters
    ----------
    tensor : ``torch.Tensor``
        a Tensor with shape (batch_size, timesteps, hidden_dim).
    min_timescale : ``float``, optional (default = 1.0)
        The smallest timescale to use.
    max_timescale : ``float``, optional (default = 1.0e4)
        The largest timescale to use.

    Returns
    -------
    The input tensor augmented with the sinusoidal frequencies.
    """
    _, timesteps, hidden_dim = tensor.size()

    timestep_range = get_range_vector(timesteps, get_device_of(tensor)).data.float()
    # We're generating both cos and sin frequencies,
    # so half for each.
    num_timescales = hidden_dim // 2
    timescale_range = get_range_vector(num_timescales, get_device_of(tensor)).data.float()

    log_timescale_increments = math.log(float(max_timescale) / float(min_timescale)) / float(num_timescales - 1)
    inverse_timescales = min_timescale * torch.exp(timescale_range * -log_timescale_increments)

    # Broadcasted multiplication - shape (timesteps, num_timescales)
    scaled_time = timestep_range.unsqueeze(1) * inverse_timescales.unsqueeze(0)
    # shape (timesteps, 2 * num_timescales)
    sinusoids = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 1)
    if hidden_dim % 2 != 0:
        # if the number of dimensions is odd, the cos and sin
        # timescales had size (hidden_dim - 1) / 2, so we need
        # to add a row of zeros to make up the difference.
        sinusoids = torch.cat([sinusoids, sinusoids.new_zeros(timesteps, 1)], 1)
    return tensor + sinusoids.unsqueeze(0)
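
A minimal usage sketch (assumes add_positional_features and its get_range_vector/get_device_of helpers are importable): the output keeps the input shape, and with a zero input the result is exactly the sinusoid table.

import torch

batch = torch.zeros(2, 7, 16)  # (batch_size, timesteps, hidden_dim)
encoded = add_positional_features(batch)
assert encoded.shape == (2, 7, 16)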
Example #18
 def invert(self, head_coords, segment_len, mean_angle, eigenworms):
     
     angles = torch.matmul(eigenworms, self.eigen_components)
     angles += mean_angle.view(-1, 1)
     
     ske_x = torch.sin(angles).view(-1, self.n_angles, 1)
     ske_y = torch.cos(angles).view(-1, self.n_angles, 1)
     skels_n = torch.cat([ske_x, ske_y], 2)*segment_len.view(-1, 1, 1)
     
     skels_n = torch.cat([head_coords.view(-1, 1, 2),  skels_n], 1)
     skels_n = torch.cumsum(skels_n, dim=1) 
     
     return skels_n
Example #19
 def test_gpytorch_model(self):
     train_X = torch.rand(5, 1)
     train_Y = torch.sin(train_X.squeeze())
     # basic test
     model = SimpleGPyTorchModel(train_X, train_Y)
     test_X = torch.rand(2, 1)
     posterior = model.posterior(test_X)
     self.assertIsInstance(posterior, GPyTorchPosterior)
     self.assertEqual(posterior.mean.shape, torch.Size([2, 1]))
     # test observation noise
     posterior = model.posterior(test_X, observation_noise=True)
     self.assertIsInstance(posterior, GPyTorchPosterior)
     self.assertEqual(posterior.mean.shape, torch.Size([2, 1]))
Example #20
    def __init__(self, _, **kwargs):
        super(PositionalLookupTableEmbeddings, self).__init__(_, **kwargs)
        self.dropout = nn.Dropout(kwargs.get('dropout', 0.1))
        # This could get us in trouble, if in doubt, pick something big
        mxlen = kwargs.get('mxlen', 1000)
        max_timescale = kwargs.get('max_timescale', 1.0e4)

        log_timescale_increment = math.log(max_timescale) / self.dsz
        inv_timescales = torch.exp(torch.arange(0, self.dsz, 2).float() * -log_timescale_increment)

        pe = torch.zeros(mxlen, self.dsz)
        position = torch.arange(0, mxlen).float().unsqueeze(1)
        pe[:, 0::2] = torch.sin(position * inv_timescales)
        pe[:, 1::2] = torch.cos(position * inv_timescales)
        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)
Example #21
def positional_encodings_like(x, t=None):
    if t is None:
        positions = torch.arange(0, x.size(1))
        if x.is_cuda:
            positions = positions.cuda(x.get_device())
    else:
        positions = t
    encodings = torch.zeros(*x.size()[1:])
    if x.is_cuda:
        encodings = encodings.cuda(x.get_device())
    for channel in range(x.size(-1)):
        if channel % 2 == 0:
            encodings[:, channel] = torch.sin(
                positions / 10000 ** (channel / x.size(2)))
        else:
            encodings[:, channel] = torch.cos(
                positions / 10000 ** ((channel - 1) / x.size(2)))
    return Variable(encodings)
Example #22
 def _setUp(self, double=False, cuda=False, expand=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=device, dtype=dtype).unsqueeze(-1)
     train_y = torch.sin(train_x * (2 * math.pi)).squeeze(-1)
     noise = torch.tensor(NOISE, device=device, dtype=dtype)
     self.train_x = train_x
     self.train_y = train_y + noise
     if expand:
         self.train_x = self.train_x.expand(-1, 2)
         ics = torch.tensor([[0.5, 1.0]], device=device, dtype=dtype)
     else:
         ics = torch.tensor([[0.5]], device=device, dtype=dtype)
     self.initial_conditions = ics
     self.f_best = self.train_y.max().item()
     model = SingleTaskGP(self.train_x, self.train_y)
     self.model = model.to(device=device, dtype=dtype)
     self.mll = ExactMarginalLogLikelihood(self.model.likelihood, self.model)
     self.mll = fit_gpytorch_model(self.mll, options={"maxiter": 1})
Example #23
    def forward(self, X, Z=None, diag=False):
        if diag:
            variance = self.get_param("variance")
            return variance.expand(X.shape[0])

        if Z is None:
            Z = X
        X = self._slice_input(X)
        Z = self._slice_input(Z)
        if X.shape[1] != Z.shape[1]:
            raise ValueError("Inputs must have the same number of features.")

        variance = self.get_param("variance")
        lengthscale = self.get_param("lengthscale")
        period = self.get_param("period")

        d = X.unsqueeze(1) - Z.unsqueeze(0)
        scaled_sin = torch.sin(math.pi * d / period) / lengthscale
        return variance * torch.exp(-2 * (scaled_sin ** 2).sum(-1))
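
For orientation, the forward above implements the standard periodic kernel k(x, z) = variance * exp(-2 * sum_d sin(pi * d_d / period)^2 / lengthscale^2). A direct re-computation for a single pair (a sketch with made-up hyperparameters) would look like:

import math
import torch

x, z = torch.tensor([0.0]), torch.tensor([0.3])
variance, lengthscale, period = 1.5, 0.7, 2.0
d = x - z
k = variance * torch.exp(
    -2 * ((torch.sin(math.pi * d / period) / lengthscale) ** 2).sum(-1)
)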
Example #24
    def __init__(self, values, env_to_world = torch.eye(4, 4)):
        # Convert to constant texture if necessary
        if isinstance(values, torch.Tensor):
            values = pyredner.Texture(values)

        assert(values.texels.is_contiguous())
        assert(values.texels.dtype == torch.float32)
        if pyredner.get_use_gpu():
            assert(values.texels.is_cuda)
        else:
            assert(not values.texels.is_cuda)

        assert(env_to_world.dtype == torch.float32)

        # Build sampling table
        luminance = 0.212671 * values.texels[:, :, 0] + \
                    0.715160 * values.texels[:, :, 1] + \
                    0.072169 * values.texels[:, :, 2]
        # For each y, compute CDF over x
        sample_cdf_xs_ = torch.cumsum(luminance, dim = 1)
        y_weight = torch.sin(
            math.pi * (torch.arange(luminance.shape[0],
                dtype = torch.float32, device = luminance.device) + 0.5)
            / float(luminance.shape[0]))
        # Compute the marginal CDF over y
        sample_cdf_ys_ = torch.cumsum(sample_cdf_xs_[:, -1] * y_weight, dim = 0)
        pdf_norm = (luminance.shape[0] * luminance.shape[1]) / \
            (sample_cdf_ys_[-1].item() * (2 * math.pi * math.pi))
        # Normalize to [0, 1)
        sample_cdf_xs = (sample_cdf_xs_ - sample_cdf_xs_[:, 0:1]) / \
            torch.max(sample_cdf_xs_[:, (luminance.shape[1] - 1):luminance.shape[1]],
                1e-8 * torch.ones(sample_cdf_xs_.shape[0], 1, device = sample_cdf_ys_.device))
        sample_cdf_ys = (sample_cdf_ys_ - sample_cdf_ys_[0]) / \
            torch.max(sample_cdf_ys_[-1], torch.tensor([1e-8], device = sample_cdf_ys_.device))

        self.values = values
        self.env_to_world = env_to_world
        self.world_to_env = torch.inverse(env_to_world).contiguous()
        self.sample_cdf_ys = sample_cdf_ys.contiguous()
        self.sample_cdf_xs = sample_cdf_xs.contiguous()
        self.pdf_norm = pdf_norm
Example #25
def _h_eigenworms_inv_T(head_x, head_y, segment_l, 
                        mean_angle, eigenworms):
    '''
    Convert the eigenworm-transformed data into xy coordinates
    '''
    
    
    n_components = eigenworms.size(0)
    angles = torch.mm(eigenworms.view(1, -1), EIGENWORMS_COMPONENTS_T[:n_components])
    angles += mean_angle
    
    ske_x = torch.sin(angles)*segment_l
    ske_x = torch.cat([head_x.view(1,1),  ske_x], 1)
    ske_x = torch.cumsum(ske_x, dim=1) 
    
    ske_y = torch.cos(angles)*segment_l
    ske_y = torch.cat([head_y.view(1,1),  ske_y], 1)
    ske_y = torch.cumsum(ske_y, dim=1) 
    
    
    skels_n = torch.cat((ske_x.view(-1, 1), ske_y.view(-1, 1)), 1)
    
    return skels_n
Example #26
 def _get_model(self, cuda=False, dtype=torch.float):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     state_dict = {
         "mean_module.constant": torch.tensor([-0.0066]),
         "covar_module.raw_outputscale": torch.tensor(1.0143),
         "covar_module.base_kernel.raw_lengthscale": torch.tensor([[-0.99]]),
         "covar_module.base_kernel.lengthscale_prior.concentration": torch.tensor(
             3.0
         ),
         "covar_module.base_kernel.lengthscale_prior.rate": torch.tensor(6.0),
         "covar_module.outputscale_prior.concentration": torch.tensor(2.0),
         "covar_module.outputscale_prior.rate": torch.tensor(0.1500),
     }
     train_x = torch.linspace(0, 1, 10, device=device, dtype=dtype)
     train_y = torch.sin(train_x * (2 * math.pi))
     noise = torch.tensor(NEI_NOISE, device=device, dtype=dtype)
     train_y += noise
     train_yvar = torch.full_like(train_y, 0.25 ** 2)
     train_x = train_x.view(-1, 1)
     model = FixedNoiseGP(train_X=train_x, train_Y=train_y, train_Yvar=train_yvar)
     model.load_state_dict(state_dict)
     model.to(train_x)
     model.eval()
     return model
Example #27
def init_foveated_lattice(img_shape, scale, n_rings, spacing=0., std=1.,
                          n_rf=None, offset=[0.,0.], min_ecc=0.5,
                          rotate_rings=True, rotate=0., keep_all_RFs=False):
    """
    Creates a foveated lattice of kernel centers (mu) and
    standard deviations (sigma)

    Parameters
    ----------
    img_shape : tuple
        shape of image
    scale : float
        rate at which receptive field radius scales with eccentricity
    n_rings : int
        number of concentric rings in foveated array
    spacing : float
        spacing between receptive field centers (as fraction of radius)
    std : float
        standard deviation multiplier [default: 1.]
    n_rf : int
        number of RFs per ring [default: None, set to np.pi / scale]
    offset : list of floats
        (x,y) offset from center of image [default: [0.,0.]]
    min_ecc : float
        minimum eccentricity for gaussian rings [default: 0.5]
    rotate_rings : bool
        rotate receptive fields between rings [default: True]
    rotate : float
        rotation (in radians, counterclockwise) to apply to the entire array
    keep_all_RFs : bool
        if True, keep all RFs regardless of whether they are fully contained
        within the image space
        [default: False, remove RFs 1 sigma outside image space]

    Returns
    -------
    mu : torch.Tensor
        kernel x-y coordinate centers with shape (n_kernels, 2) and dtype
        torch.float32
    sigma : torch.Tensor
        kernel standard deviations with shape (n_kernels, 1)

    Examples
    --------
    # generate a "V3" lattice on a 200x200 image
    >>> img_shape = (200,200)
    >>> scale = 0.25
    >>> n_rings = 8
    >>> spacing = 0.15
    >>> min_ecc = 1.
    >>> mu, sigma = init_foveated_lattice(img_shape, scale, n_rings,
    ...                                   spacing=spacing, min_ecc=min_ecc)

    Notes
    -----
    sigma will always be >= 1. (pixel space)

    References
    ----------
    (Winawer & Horiguchi, 2015) https://archive.nyu.edu/handle/2451/33887
    """
    assert scale > 0.
    assert min_ecc > 0.

    # get number of receptive fields in ring
    if n_rf is None:
        n_rf = np.floor(np.pi/scale)
    else:
        n_rf = n_rf + 1

    # get angular positions for each receptive field
    angles = 2. * np.pi * torch.linspace(0., 1., int(n_rf))[:-1]
    x_mu = torch.cos(angles)
    y_mu = torch.sin(angles)

    # get base sigma
    base_sigma = torch.as_tensor((1. - spacing) * scale, dtype=torch.float32)

    # eccentricity factor
    eFactor = (1. + scale) / (1. - scale)

    # get rotation angle between each ring
    if rotate_rings:
        rot_angle = torch.as_tensor(np.pi / n_rf, dtype=torch.float32)
        x_mu_rot = torch.cos(rot_angle)*x_mu + torch.sin(rot_angle)*y_mu
        y_mu_rot = -torch.sin(rot_angle)*x_mu + torch.cos(rot_angle)*y_mu

    # append mu, sigma for each eccentricity
    ecc = min_ecc * eFactor
    mu = []
    sigma = []
    for n in range(n_rings):
        if rotate_rings and np.mod(n, 2):
            mu.append(torch.stack([ecc*x_mu_rot, ecc*y_mu_rot], dim=-1))
        else:
            mu.append(torch.stack([ecc*x_mu, ecc*y_mu], dim=-1))
        sigma.append(torch.mul(ecc, base_sigma).repeat(mu[-1].shape[0]))
        ecc *= eFactor
    # set mu, sigma
    mu = torch.as_tensor(torch.cat(mu, dim=0), dtype=torch.float32)
    sigma = torch.cat(sigma, 0).unsqueeze(1)
    # rotate mu
    rx = np.cos(rotate) * mu[:,0] - np.sin(rotate) * mu[:,1]
    ry = np.sin(rotate) * mu[:,0] + np.cos(rotate) * mu[:,1]
    mu = torch.stack([rx,ry], dim=-1)
    # set offset of mu
    mu = mu + torch.as_tensor(offset, dtype=mu.dtype)
    # multiply by std
    sigma = torch.mul(sigma, std)
    # check if mu + sigma (along radial direction from fovea) is in image
    r = torch.sqrt(torch.sum(torch.pow(mu, 2), 1))
    r_sigma = r - sigma.flatten()
    theta = torch.atan2(*mu.t())
    h_w = torch.mul(torch.stack([torch.sin(theta), torch.cos(theta)], 1),
                    r_sigma.reshape(-1,1))
    # remove mu, sigma outside image frame
    center = torch.as_tensor(img_shape, dtype=mu.dtype).unsqueeze(0) / 2.
    keep_idx = torch.prod(torch.lt(torch.abs(h_w), center), -1).bool()
    # add img_shape//2 to mu
    mu = torch.add(mu, center)
    if keep_all_RFs:
        return mu, sigma
    return mu[keep_idx], sigma[keep_idx]
Example #28
    def reset(self, **kwargs):
        """ 
            Function to reset the environment
        """
        self.x1_list = []
        self.y1_list = []
        self.x2_list = []
        self.y2_list = []
        self.x3_list = []
        self.y3_list = []
        self.x4_list = []
        self.y4_list = []
        self.time_step = 1
        self.true_targets_radii = torch.rand(self.num_targets) * 5.0 + 2.0
        self.true_targets_pos = (torch.rand(self.num_targets, 2) *
                                 self.len_workspace)
        self.initial_true_targets_pos = self.true_targets_pos.clone()
        self.estimated_targets_mean = self.true_targets_pos.clone()
        self.estimated_targets_var = torch.zeros(self.num_targets, 2, 2)
        for index in range(0, self.num_targets):
            self.estimated_targets_var[index] = torch.tensor([[1, 0], [0, 1]])
        self.target_motion_omegas = torch.rand(self.num_targets) * 100.0

        self.robot_movement_x = []
        self.robot_movement_y = []
        self.sensors_pos = torch.zeros(self.num_sensors, 2)
        for index in range(0, self.num_sensors):
            rand_angle = torch.rand(1) * 2 * np.pi
            self.sensors_pos[index] = self.true_targets_pos.mean(0) + (
                torch.sqrt(torch.rand(1) + 0.5) * (self.len_workspace / 2) *
                (torch.tensor([torch.cos(rand_angle),
                               torch.sin(rand_angle)])))
            if (self.sensors_pos[index, 0] >= self.len_workspace):
                self.sensors_pos[index, 0] -= (self.sensors_pos[index, 0] -
                                               self.len_workspace + 1)
            if (self.sensors_pos[index, 0] <= 0):
                self.sensors_pos[index, 0] = (-self.sensors_pos[index, 0] + 1)
            if (self.sensors_pos[index, 1] >= self.len_workspace):
                self.sensors_pos[index, 1] -= (self.sensors_pos[index, 1] -
                                               self.len_workspace + 1)
            if (self.sensors_pos[index, 1] <= 0):
                self.sensors_pos[index, 1] = (-self.sensors_pos[index, 1] + 1)

        self.robot_movement_x.append(float(self.sensors_pos[0, 0]))
        self.robot_movement_y.append(float(self.sensors_pos[0, 1]))
        self.x1_list.append(float(self.true_targets_pos[0, 0]))
        self.y1_list.append(float(self.true_targets_pos[0, 1]))
        self.x2_list.append(float(self.true_targets_pos[1, 0]))
        self.y2_list.append(float(self.true_targets_pos[1, 1]))
        self.x3_list.append(float(self.true_targets_pos[2, 0]))
        self.y3_list.append(float(self.true_targets_pos[2, 1]))
        self.x4_list.append(float(self.true_targets_pos[3, 0]))
        self.y4_list.append(float(self.true_targets_pos[3, 1]))

        self.heatmap = torch.zeros(self.len_workspace, self.len_workspace)
        for index in range(0, self.num_targets):
            x = np.linspace(0, self.len_workspace, self.len_workspace)
            y = np.linspace(0, self.len_workspace, self.len_workspace)
            X, Y = np.meshgrid(x, y)
            pos = np.empty(X.shape + (2, ))
            pos[:, :, 0] = X
            pos[:, :, 1] = Y
            rv = multivariate_normal(self.estimated_targets_mean[index],
                                     self.estimated_targets_var[index])
            self.heatmap += rv.pdf(pos)
        true_obs = self.heatmap.flatten()

        self.state = torch.cat(
            (self.sensors_pos[0], true_obs.float()))
        self.observation_space = spaces.Box(-np.inf,
                                            np.inf,
                                            shape=self.state.shape,
                                            dtype='float32')
        return self.state, self.sensors_pos, self.estimated_targets_mean, self.true_targets_radii, self.target_motion_omegas
Example #29
def ciede2000_diff(lab1, lab2, device):
    '''
    CIEDE2000 metric to calculate the color distance map for a batch of
    image tensors defined in CIELAB space.
    '''

    lab1 = lab1.to(device)
    lab2 = lab2.to(device)

    L1 = lab1[:, 0, :, :]
    A1 = lab1[:, 1, :, :]
    B1 = lab1[:, 2, :, :]
    L2 = lab2[:, 0, :, :]
    A2 = lab2[:, 1, :, :]
    B2 = lab2[:, 2, :, :]
    kL = 1
    kC = 1
    kH = 1

    mask_value_0_input1 = ((A1 == 0) * (B1 == 0)).float()
    mask_value_0_input2 = ((A2 == 0) * (B2 == 0)).float()
    mask_value_0_input1_no = 1 - mask_value_0_input1
    mask_value_0_input2_no = 1 - mask_value_0_input2
    B1 = B1 + 0.0001 * mask_value_0_input1
    B2 = B2 + 0.0001 * mask_value_0_input2

    C1 = torch.sqrt((A1**2.) + (B1**2.))
    C2 = torch.sqrt((A2**2.) + (B2**2.))

    aC1C2 = (C1 + C2) / 2.
    G = 0.5 * (1. - torch.sqrt((aC1C2**7.) / ((aC1C2**7.) + (25**7.))))
    a1P = (1. + G) * A1
    a2P = (1. + G) * A2
    c1P = torch.sqrt((a1P**2.) + (B1**2.))
    c2P = torch.sqrt((a2P**2.) + (B2**2.))

    h1P = hpf_diff(B1, a1P)
    h2P = hpf_diff(B2, a2P)
    h1P = h1P * mask_value_0_input1_no
    h2P = h2P * mask_value_0_input2_no

    dLP = L2 - L1
    dCP = c2P - c1P
    dhP = dhpf_diff(C1, C2, h1P, h2P)
    dHP = 2. * torch.sqrt(c1P * c2P) * torch.sin(radians(dhP) / 2.)
    mask_0_no = 1 - torch.max(mask_value_0_input1, mask_value_0_input2)
    dHP = dHP * mask_0_no

    aL = (L1 + L2) / 2.
    aCP = (c1P + c2P) / 2.
    aHP = ahpf_diff(C1, C2, h1P, h2P)
    T = 1. - 0.17 * torch.cos(radians(aHP - 39)) + 0.24 * torch.cos(
        radians(2. * aHP)) + 0.32 * torch.cos(
            radians(3. * aHP + 6.)) - 0.2 * torch.cos(radians(4. * aHP - 63.))
    dRO = 30. * torch.exp(-1. * (((aHP - 275.) / 25.)**2.))
    rC = torch.sqrt((aCP**7.) / ((aCP**7.) + (25.**7.)))
    sL = 1. + ((0.015 * ((aL - 50.)**2.)) / torch.sqrt(20. + ((aL - 50.)**2.)))

    sC = 1. + 0.045 * aCP
    sH = 1. + 0.015 * aCP * T
    rT = -2. * rC * torch.sin(radians(2. * dRO))

    #     res_square=((dLP / (sL * kL)) ** 2.) + ((dCP / (sC * kC)) ** 2.) + ((dHP / (sH * kH)) ** 2.) + rT * (dCP / (sC * kC)) * (dHP / (sH * kH))

    res_square = ((dLP /
                   (sL * kL))**2.) + ((dCP / (sC * kC))**2.) * mask_0_no + (
                       (dHP / (sH * kH))**2.) * mask_0_no + rT * (
                           dCP / (sC * kC)) * (dHP / (sH * kH)) * mask_0_no
    mask_0 = (res_square <= 0).float()
    mask_0_no = 1 - mask_0
    res_square = res_square + 0.0001 * mask_0
    res = torch.sqrt(res_square)
    res = res * mask_0_no

    return res
Example #30
 def mse(u, t):
     true_u = torch.sin(t)
     return torch.mean((u - true_u) ** 2)
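
A one-line check of the helper above (a sketch; assumes torch and math are imported): the exact solution u = sin(t) yields zero error.

import math
import torch

t = torch.linspace(0, 2 * math.pi, 50)
print(mse(torch.sin(t), t))  # tensor(0.)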
Example #31
    def __init__(self,
                 char_embedding_dim: int,
                 out_dim: int,
                 image_feature_dim: int = 512,
                 nheaders: int = 8,
                 nlayers: int = 6,
                 feedforward_dim: int = 2048,
                 dropout: float = 0.1,
                 max_len: int = 100,
                 image_encoder: str = 'resnet50',
                 roi_pooling_mode: str = 'roi_align',
                 roi_pooling_size: Tuple[int, int] = (7, 7)):
        '''
        Convert image segments and text segments to node embeddings.
        :param char_embedding_dim:
        :param out_dim:
        :param image_feature_dim:
        :param nheaders:
        :param nlayers:
        :param feedforward_dim:
        :param dropout:
        :param max_len:
        :param image_encoder:
        :param roi_pooling_mode:
        :param roi_pooling_size:
        '''
        super().__init__()

        self.dropout = dropout
        assert roi_pooling_mode in [
            'roi_align', 'roi_pool'
        ], 'roi pooling mode: {} is not supported.'.format(roi_pooling_mode)
        self.roi_pooling_mode = roi_pooling_mode
        assert roi_pooling_size and len(
            roi_pooling_size) == 2, 'roi_pooling_size is not set properly.'
        self.roi_pooling_size = tuple(roi_pooling_size)  # (h, w)

        transformer_encoder_layer = nn.TransformerEncoderLayer(
            d_model=char_embedding_dim,
            nhead=nheaders,
            dim_feedforward=feedforward_dim,
            dropout=dropout)
        self.transformer_encoder = nn.TransformerEncoder(
            transformer_encoder_layer, num_layers=nlayers)

        if image_encoder == 'resnet18':
            self.cnn = resnet.resnet18(output_channels=out_dim)
        elif image_encoder == 'resnet34':
            self.cnn = resnet.resnet34(output_channels=out_dim)
        elif image_encoder == 'resnet50':
            self.cnn = resnet.resnet50(output_channels=out_dim)
        elif image_encoder == 'resnet101':
            self.cnn = resnet.resnet101(output_channels=out_dim)
        elif image_encoder == 'resnet152':
            self.cnn = resnet.resnet152(output_channels=out_dim)
        else:
            raise NotImplementedError()

        self.conv = nn.Conv2d(image_feature_dim, out_dim,
                              self.roi_pooling_size)
        self.bn = nn.BatchNorm2d(out_dim)

        self.projection = nn.Linear(2 * out_dim, out_dim)
        self.norm = nn.LayerNorm(out_dim)

        # Compute the positional encodings once in log space.
        position_embedding = torch.zeros(max_len, char_embedding_dim)
        position = torch.arange(0, max_len).unsqueeze(1).float()
        div_term = torch.exp(
            torch.arange(0, char_embedding_dim, 2).float() *
            -(math.log(10000.0) / char_embedding_dim))
        position_embedding[:, 0::2] = torch.sin(position * div_term)
        position_embedding[:, 1::2] = torch.cos(position * div_term)
        position_embedding = position_embedding.unsqueeze(0).unsqueeze(
            0)  # 1, 1, max_len, char_embedding_dim
        self.register_buffer('position_embedding', position_embedding)

        self.pe_droput = nn.Dropout(self.dropout)
Example #32
from neural_decomposition import ND
from math import pi
import torch
import matplotlib.pyplot as plt

t = torch.linspace(0,  2 * pi, 40)
x = torch.sin(t)

nd = ND(5).init(x)

y = []

for i in range(40):
    nd.fit(x, lr=0.002)
    y += [nd(t).detach()]

for yi in y:
    plt.plot(yi)

plt.plot(x, color="black", linestyle="dashed")
plt.show()
Example #33
    def forward(self, y, x):
        # Calculate rho_0
        with torch.no_grad():
            # Dimensions: [Batch, Time, Embedding]
            sin_phi_D = x[:, :self.N - 1, 1]
            cos_phi_D = x[:, :self.N - 1, 3]
            # exp_phi_D = cos_phi_D + 1j * sin_phi_D
            rho_0_real = torch.full((x.shape[0], 2, 2), 0.5)
            rho_0_imag = torch.zeros((x.shape[0], 2, 2))
            rho_0_real[:, 1, 0] *= cos_phi_D[:, 0]
            rho_0_real[:, 0, 1] *= cos_phi_D[:, 0]
            rho_0_imag[:, 1, 0] = 0.5 * sin_phi_D[:, 0]
            rho_0_imag[:, 0, 1] = -0.5 * sin_phi_D[:, 0]

        # Calculate unitary evolution operators

        # No grad for drive side
        with torch.no_grad():
            # H_D: [batch, time, 2x2 matrix]
            sin_theta_D = x[:, :self.N - 1, 0]

            # H_D = torch.zeros((x.shape[0], self.N - 1, 2, 2), dtype=torch.cdouble)

            # H_D[:, :, 1, 0] = exp_phi_D * sin_theta_D / 2
            # H_D[:, :, 0, 1] = torch.conj(exp_phi_D) * sin_theta_D / 2

            H_D_real = torch.zeros((x.shape[0], self.N - 1, 2, 2))
            H_D_imag = torch.zeros((x.shape[0], self.N - 1, 2, 2))

            H_D_real[:, :, 1, 0] = 0.5 * sin_theta_D * cos_phi_D
            H_D_real[:, :, 0, 1] = 0.5 * sin_theta_D * cos_phi_D

            H_D_imag[:, :, 1, 0] = 0.5 * sin_theta_D * sin_phi_D
            H_D_imag[:, :, 0, 1] = -0.5 * sin_theta_D * sin_phi_D


        # H_T: [batch, time, 2x2]
        # H_T = torch.zeros((y.shape[0], self.N, 2, 2), dtype=torch.cdouble)
        theta_T = torch.atan2(y[:, :, 0], y[:, :, 2])
        phi_T = torch.atan2(y[:, :, 1], y[:, :, 3])

        # H_T[:, :, 1, 0] = (torch.cos(phi_T) + 1j * torch.sin(phi_T)) * torch.sin(theta_T) / 2
        # H_T[:, :, 0, 1] = (torch.cos(phi_T) - 1j * torch.sin(phi_T)) * torch.sin(theta_T) / 2

        H_T_real = torch.zeros((y.shape[0], self.N, 2, 2))
        H_T_imag = torch.zeros((y.shape[0], self.N, 2, 2))

        H_T_real[:, :, 1, 0] = torch.cos(phi_T) * torch.sin(theta_T) / 2
        H_T_real[:, :, 0, 1] = torch.cos(phi_T) * torch.sin(theta_T) / 2

        H_T_imag[:, :, 1, 0] = torch.sin(phi_T) * torch.sin(theta_T) / 2
        H_T_imag[:, :, 0, 1] = -1 * torch.sin(phi_T) * torch.sin(theta_T) / 2


        # Absolute value of alpha = delta + tau
        # shape: [batch, N-1]
        abs_alpha = torch.sqrt(
            torch.pow(torch.sin(theta_T[:, :self.N - 1]), 2)
            + torch.pow(sin_theta_D[:, :self.N - 1], 2)
            + 2 * torch.sin(theta_T[:, :self.N - 1]) * sin_theta_D[:, :self.N - 1]
            * (torch.cos(phi_T[:, :self.N - 1]) * cos_phi_D[:, :self.N - 1]
               + torch.sin(phi_T[:, :self.N - 1]) * sin_phi_D[:, :self.N - 1])
        ) / 2

        alpha_real = 0.5 * (sin_theta_D[:, :self.N - 1] * cos_phi_D[:, :self.N - 1] + torch.sin(theta_T[:, :self.N - 1]) * torch.cos(phi_T[:, :self.N - 1]))
        alpha_imag = 0.5 * (sin_theta_D[:, :self.N - 1] * sin_phi_D[:, :self.N - 1] + torch.sin(theta_T[:, :self.N - 1]) * torch.sin(phi_T[:, :self.N - 1]))

        # Unitary evolution operator
        # Shape: [batch, N-1, 2, 2]

        U_real = torch.zeros((x.shape[0], self.N - 1, 2, 2))
        U_imag = torch.zeros((x.shape[0], self.N - 1, 2, 2))

        # Helpers
        c = torch.cos(abs_alpha * self.dt)
        s = torch.div(torch.sin(abs_alpha * self.dt), abs_alpha)

        U_real[:, :, 0, 0] = c
        U_real[:, :, 1, 1] = c
        U_real[:, :, 0, 1] = torch.mul(-1 * alpha_imag, s)
        U_real[:, :, 1, 0] = torch.mul(alpha_imag, s)

        U_imag[:, :, 0, 1] = torch.mul(-1 * alpha_real, s)
        U_imag[:, :, 1, 0] = torch.mul(-1 * alpha_real, s)


        # U_1 = matexp(H_D[:, 0] + H_T[:, 0], self.dt)
        # U_2 = matexp(H_D[:, 1] + H_T[:, 1], self.dt)


        # helper_real, helper_imag = real_matmul(U_real[:, 0], U_imag[:, 0], rho_0_real, rho_0_imag)
        # rho_1_real, rho_1_imag = real_matmul(helper_real, helper_imag, torch.transpose(U_real[:, 0], 1, 2), -1 * torch.transpose(U_imag[:, 0], 1, 2))
        #
        # A_1_real, A_1_imag = real_matmul(rho_1_real, rho_1_imag, (H_T_real[:, 1] - H_T_real[:, 0]), (H_T_imag[:, 1] - H_T_imag[:, 0]))
        # W_1 = A_1_real[:, 0, 0] + A_1_real[:, 1, 1]
        #
        # help2_real, help2_imag = real_matmul(U_real[:, 1], U_imag[:, 1], rho_1_real, rho_1_imag)
        # rho_2_real, rho_2_imag = real_matmul(help2_real, help2_imag, torch.transpose(U_real[:, 1], 1, 2), torch.transpose(-1 * U_imag[:, 1], 1, 2))
        #
        # A_2_real, A_2_imag = real_matmul(rho_2_real, rho_2_imag, H_T_real[:, 2] - H_T_real[:, 1], H_T_imag[:, 2] - H_T_imag[:, 1])
        # W_2 = A_2_real[:, 0, 0] + A_2_real[:, 1, 1]

        W = torch.zeros((x.shape[0]))

        rho_real = rho_0_real
        rho_imag = rho_0_imag

        for i in range(self.N - 1):
            helper_real, helper_imag = real_matmul(U_real[:, i], U_imag[:, i], rho_real, rho_imag)
            rho_real, rho_imag = real_matmul(helper_real, helper_imag, torch.transpose(U_real[:, i], 1, 2), -1 * torch.transpose(U_imag[:, i], 1, 2))
            A_real, A_imag = real_matmul(rho_real, rho_imag, (H_T_real[:, i+1] - H_T_real[:, i]), (H_T_imag[:, i+1] - H_T_imag[:, i]))
            W += A_real[:, 0, 0] + A_real[:, 1, 1]


        return torch.mean(W)
Example #34
File: core.py Project: Snowgun/Ants
    def control(self, arena, dt):
     
            # food gathering
            foodat=arena.get_food(self.f_p)*(~(self.food_carry > 0))
            self.food_carry+=foodat
      #      self.phis += ( np.pi + t.atan2(self.ys,self.xs) - self.phis)*(foodat > 0)
          #  arena.update_food(-foodat,self.f_p)

            # food drop-off
            based = arena.get_base(self.f_p)
            self.food_accum += (based*self.food_carry).sum("EX")
            self.food_carry *= ~(based > 0)
            fooded = (self.food_carry> 0)
            unfooded = ~fooded
      
            # pheromones
            phero = self.phero_dens*t.stack((fooded.rename(None), unfooded.rename(None)), -1).refine_names("B", "EX", "ST").align_as(self.phero_dens)
            arena.update_pheros(phero.sum("ST"), self.f_p)

            xleft,yleft = self.xs + self.lookforward*self.v*t.cos(self.phis+0.5),self.ys + self.lookforward*self.v*t.sin(self.phis+0.5)
            xright,yright = self.xs + self.lookforward*self.v*t.cos(self.phis-0.5),self.ys + self.lookforward*self.v*t.sin(self.phis-0.5)
            f_l, f_r = arena.get_index(arena.colomap, xleft, yleft), arena.get_index(arena.colomap, xright, yright)
            phero_left, phero_right = arena.get_pheros(f_l), arena.get_pheros(f_r)
            dpher = phero_left-phero_right
            self.phis+= (( dpher[:,:,0])*unfooded +(dpher[:,:,1])*fooded)*self.turn_pher

            # random turning, plus turning towards the base when carrying food
            turn_prob = self.turn_prob*(t.exp(-(phero_left + phero_right)[:,:,0]*unfooded -(phero_left + phero_right)[:,:,1]*fooded))#+ fooded)
            turn = t.distributions.bernoulli.Bernoulli(turn_prob.rename(None)*dt).sample()
            amount = t.randn_like(turn)#*(unfooded) + ( np.pi + t.atan2(self.ys,self.xs) - self.phis)*fooded
            self.phis += 1.0*turn*amount

            # obstacle avoidance
            xleft,yleft=self.xs + self.lookforward*self.v*t.cos(self.phis+0.5),self.ys + self.lookforward*self.v*t.sin(self.phis+0.5)
            xright,yright=self.xs + self.lookforward*self.v*t.cos(self.phis-0.5),self.ys + self.lookforward*self.v*t.sin(self.phis-0.5)
            f_l, f_r = arena.get_index(arena.colomap, xleft, yleft), arena.get_index(arena.colomap, xright, yright)
            obs_left, obs_right = arena.get_obst(f_l), arena.get_obst(f_r)
            self.phis += -obs_left*self.turn_obst*dt + obs_right*self.turn_obst*dt + obs_left*obs_right*np.pi
Example #35
"""
@time   : 2019-06-19
"""
import torch
import numpy as np

np_data = np.arange(6).reshape((2, 3))
torch_data = torch.from_numpy(np_data)  # numpy -> torch tensor
tensor2array = torch_data.numpy()  # torch tensor -> numpy
print(torch_data)
print(tensor2array)
print("*" * 100)

# tensors of various types
data = [-1, -2, 1, 2]
tensor = torch.FloatTensor(data)  # float32
print(np.abs(data))
print(torch.abs(tensor))
print(np.sin(data))
print(torch.sin(tensor))
print(np.mean(data))
print(torch.mean(tensor))
print("*" * 100)

# matrix multiplication
data = [[1, 2], [3, 4]]
tensor = torch.FloatTensor(data)
print(np.matmul(data, data))
print(torch.mm(tensor, tensor))
print("*" * 100)
Example #36
File: core.py Project: Snowgun/Ants
    def control(self, arena, dt):
     
            # food gathering
            foodat=arena.get_food(self.f_p)*(~(self.food_carry > 0))
            self.food_carry+=foodat
      #      self.phis += ( np.pi + t.atan2(self.ys,self.xs) - self.phis)*(foodat > 0)
          #  arena.update_food(-foodat,self.f_p)

            # food drop-off
            based = arena.get_base(self.f_p)
            self.food_accum += (based*self.food_carry).sum("EX")
            self.food_carry *= ~(based > 0)
            fooded = (self.food_carry> 0)
            unfooded = ~fooded
      
            # pheromones
           
            xleft,yleft = self.xs + self.lookforward*self.v*t.cos(self.phis+0.5),self.ys + self.lookforward*self.v*t.sin(self.phis+0.5)
            xright,yright = self.xs + self.lookforward*self.v*t.cos(self.phis-0.5),self.ys + self.lookforward*self.v*t.sin(self.phis-0.5)
            f_l, f_r = arena.get_index(arena.colomap, xleft, yleft), arena.get_index(arena.colomap, xright, yright)
            phero_left, phero_right = arena.get_pheros(f_l), arena.get_pheros(f_r)
            in_ = t.cat((phero_left, phero_right, self.food_carry.align_as(phero_left), t.randn_like(self.food_carry).align_as(phero_left)), -1).rename("B", "EX", "IN")
  
            out_ =self.brain.process(in_)
            self.phis += t.clamp(out_[:, :, 0], -0.5, 0.5)*dt
            arena.update_pheros(t.clamp(out_[:,:, 1:].rename("B", "EX", "PH"), 0, 0.1)*dt, self.f_p)

            # obstacle avoidance
            xleft,yleft=self.xs + self.lookforward*self.v*t.cos(self.phis+0.5),self.ys + self.lookforward*self.v*t.sin(self.phis+0.5)
            xright,yright=self.xs + self.lookforward*self.v*t.cos(self.phis-0.5),self.ys + self.lookforward*self.v*t.sin(self.phis-0.5)
            f_l, f_r = arena.get_index(arena.colomap, xleft, yleft), arena.get_index(arena.colomap, xright, yright)
            obs_left, obs_right = arena.get_obst(f_l), arena.get_obst(f_r)
            self.phis += -obs_left*self.turn_obst*dt + obs_right*self.turn_obst*dt + obs_left*obs_right*np.pi
Example #37
def main():
    summary = SummaryWriter('./log')
    # os.system('tensorboard --logdir=log')
    #
    # set Hyper parameter
    # json_path = os.path.join(args.model_dir)
    # params = train_utils.Params(json_path)

    # data loader
    train_dataset = AudioDataset(data_type='train')
    # modify:num_workers=4
    train_data_loader = DataLoader(dataset=train_dataset,
                                   batch_size=args.batch_size,
                                   collate_fn=train_dataset.collate,
                                   shuffle=True,
                                   num_workers=6)
    test_dataset = AudioDataset(data_type='valid')
    test_data_loader = DataLoader(dataset=test_dataset,
                                  batch_size=args.batch_size,
                                  collate_fn=test_dataset.collate,
                                  shuffle=False,
                                  num_workers=6)

    # # data loader
    # train_dataset = AudioDataset(data_type='test')
    # # modify:num_workers=4
    # train_data_loader = DataLoader(dataset=train_dataset, batch_size=args.batch_size, collate_fn=train_dataset.collate,
    #                                shuffle=True, num_workers=0)
    # test_dataset = AudioDataset(data_type='test')
    # test_data_loader = DataLoader(dataset=test_dataset, batch_size=args.batch_size, collate_fn=test_dataset.collate,
    #                               shuffle=False, num_workers=0)
    # model select
    print('Model initializing\n')
    net = torch.nn.DataParallel(
        AttentionModel(257,
                       hidden_size=args.hidden_size,
                       dropout_p=args.dropout_p,
                       use_attn=args.attn_use,
                       stacked_encoder=args.stacked_encoder,
                       attn_len=args.attn_len))
    # net = AttentionModel(257, 112, dropout_p = args.dropout_p, use_attn = arg0s.attn_use)
    net = net.cuda()
    print(net)

    optimizer = optim.Adam(net.parameters(), lr=args.learning_rate)

    scheduler = ExponentialLR(optimizer, 0.5)

    # checkpoint load

    print('Trying Checkpoint Load\n')
    # ckpt_dir = 'ckpt_dir_stoi'
    ckpt_dir = 'ckpt_dir'
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)

    best_PESQ = 0.
    best_STOI = 0.
    best_loss = 200000.
    ckpt_path = os.path.join(ckpt_dir, args.ck_name)
    if os.path.exists(ckpt_path):
        ckpt = torch.load(ckpt_path)
        try:
            net.load_state_dict(ckpt['model'])
            optimizer.load_state_dict(ckpt['optimizer'])
            best_loss = ckpt['best_loss']

            print('checkpoint is loaded !')
            print('current best loss : %.4f' % best_loss)
        except RuntimeError as e:
            print('wrong checkpoint\n')
    else:
        print('checkpoint not exist!')
        print('current best loss : %.4f' % best_loss)

    print('Training Start!')
    # train
    iteration = 0
    train_losses = []
    test_losses = []
    for epoch in range(args.num_epochs):
        train_bar = tqdm(train_data_loader)
        # train_bar = train_data_loader
        n = 0
        avg_loss = 0
        avg_pesq = 0
        avg_stoi = 0
        net.train()
        for input in train_bar:
            iteration += 1
            # load data
            train_mixed, train_clean, seq_len = map(lambda x: x.cuda(), input)

            mixed = stft(train_mixed)
            cleaned = stft(train_clean)
            mixed = mixed.transpose(1, 2)
            cleaned = cleaned.transpose(1, 2)
            real, imag = mixed[..., 0], mixed[..., 1]
            clean_real, clean_imag = cleaned[..., 0], cleaned[..., 1]
            mag = torch.sqrt(real**2 + imag**2)
            clean_mag = torch.sqrt(clean_real**2 + clean_imag**2)
            phase = torch.atan2(imag, real)

            # feed data
            out_mag, attn_weight = net(mag)
            out_real = out_mag * torch.cos(phase)
            out_imag = out_mag * torch.sin(phase)
            out_real, out_imag = torch.squeeze(out_real,
                                               1), torch.squeeze(out_imag, 1)
            out_real = out_real.transpose(1, 2)
            out_imag = out_imag.transpose(1, 2)

            out_audio = istft(out_real, out_imag, train_mixed.size(1))
            out_audio = torch.squeeze(out_audio, dim=1)
            for i, l in enumerate(seq_len):
                out_audio[i, l:] = 0

            loss = 0
            PESQ = 0
            STOI = 0
            origin_PESQ = 0
            origin_STOI = 0

            loss = F.mse_loss(out_mag, clean_mag)
            if torch.any(torch.isnan(loss)):
                torch.save(
                    {
                        'clean_mag': clean_mag,
                        'out_mag': out_mag,
                        'mag': mag
                    }, 'nan_mag')
                raise ValueError('loss is NaN')
            avg_loss += loss.item()
            n += 1
            # gradient optimizer
            optimizer.zero_grad()

            # backpropagate loss

            loss.backward()

            # update weight
            optimizer.step()

        avg_loss /= n
        avg_pesq /= n
        avg_stoi /= n
        print('result:')
        print(
            '[epoch: {}, iteration: {}] avg_loss : {:.4f} avg_pesq : {:.4f} avg_stoi : {:.4f} '
            .format(epoch, iteration, avg_loss, avg_pesq, avg_stoi))

        summary.add_scalar('Train Loss', avg_loss, iteration)

        train_losses.append(avg_loss)
        if (len(train_losses) > 2) and (train_losses[-2] < avg_loss):
            print("Learning rate Decay")
            scheduler.step()

        # test phase
        n = 0
        avg_test_loss = 0
        avg_test_pesq = 0
        avg_test_stoi = 0
        test_bar = tqdm(test_data_loader)

        net.eval()
        with torch.no_grad():
            for input in test_bar:
                test_mixed, test_clean, seq_len = map(lambda x: x.cuda(),
                                                      input)
                mixed = stft(test_mixed)
                cleaned = stft(test_clean)
                mixed = mixed.transpose(1, 2)
                cleaned = cleaned.transpose(1, 2)
                real, imag = mixed[..., 0], mixed[..., 1]
                clean_real, clean_imag = cleaned[..., 0], cleaned[..., 1]
                mag = torch.sqrt(real**2 + imag**2)
                clean_mag = torch.sqrt(clean_real**2 + clean_imag**2)
                phase = torch.atan2(imag, real)

                logits_mag, logits_attn_weight = net(mag)
                logits_real = logits_mag * torch.cos(phase)
                logits_imag = logits_mag * torch.sin(phase)
                logits_real, logits_imag = torch.squeeze(logits_real,
                                                         1), torch.squeeze(
                                                             logits_imag, 1)
                logits_real = logits_real.transpose(1, 2)
                logits_imag = logits_imag.transpose(1, 2)

                logits_audio = istft(logits_real, logits_imag,
                                     test_mixed.size(1))
                logits_audio = torch.squeeze(logits_audio, dim=1)
                for i, l in enumerate(seq_len):
                    logits_audio[i, l:] = 0

                test_PESQ = 0
                test_STOI = 0

                test_loss = F.mse_loss(logits_mag, clean_mag)

                avg_test_loss += test_loss.item()
                n += 1

            avg_test_loss /= n
            avg_test_pesq /= n
            avg_test_stoi /= n

            test_losses.append(avg_test_loss)
            summary.add_scalar('Test Loss', avg_test_loss, iteration)

            print(
                '[epoch: {}, iteration: {}] test loss : {:.4f} avg_test_pesq : {:.4f} avg_test_stoi : {:.4f}'
                .format(epoch, iteration, avg_test_loss, avg_test_pesq,
                        avg_test_stoi))
            if avg_test_loss < best_loss:
                best_PESQ = test_PESQ
                best_STOI = test_STOI
                best_loss = avg_test_loss
                # Note: optimizer also has states ! don't forget to save them as well.
                ckpt = {
                    'model': net.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'best_loss': best_loss
                }
                torch.save(ckpt, ckpt_path)
                print('checkpoint is saved !')
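A minimal resume sketch for the checkpoint saved above (assuming the same `net` and `optimizer` objects are constructed first; not part of the original script):

ckpt = torch.load(ckpt_path)
net.load_state_dict(ckpt['model'])
optimizer.load_state_dict(ckpt['optimizer'])  # optimizer state matters for Adam-style methods
best_loss = ckpt['best_loss']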
def second_box_decode(box_encodings,
                      anchors,
                      encode_angle_to_vector=False,
                      smooth_dim=False):
    """box decode for VoxelNet in lidar
    Args:
        boxes ([N, 7] Tensor): normal boxes: x, y, z, w, l, h, r
        anchors ([N, 7] Tensor): anchors
    """
    # xa, ya, za, wa, la, ha, ra = torch.split(anchors, 1, dim=-1)
    # use select instead of split for onnx conversion
    batch_size = box_encodings.size()[0]
    xa = anchors.select(2, 0)
    xa = xa.view(batch_size, -1, 1)
    ya = anchors.select(2, 1)
    ya = ya.view(batch_size, -1, 1)
    za = anchors.select(2, 2)
    za = za.view(batch_size, -1, 1)
    wa = anchors.select(2, 3)
    wa = wa.view(batch_size, -1, 1)
    la = anchors.select(2, 4)
    la = la.view(batch_size, -1, 1)
    ha = anchors.select(2, 5)
    ha = ha.view(batch_size, -1, 1)
    ra = anchors.select(2, 6)
    ra = ra.view(batch_size, -1, 1)

    if encode_angle_to_vector:
        xt, yt, zt, wt, lt, ht, rtx, rty = torch.split(box_encodings,
                                                       1,
                                                       dim=-1)
        print("Split is not for onnx conversion. \
        You have to replace 'split' with 'select' operation")
    else:
        # xt, yt, zt, wt, lt, ht, rt = torch.split(box_encodings, 1, dim=-1)
        # use select instead of split for onnx conversion
        xt = box_encodings.select(2, 0)
        xt = xt.view(batch_size, -1, 1)
        yt = box_encodings.select(2, 1)
        yt = yt.view(batch_size, -1, 1)
        zt = box_encodings.select(2, 2)
        zt = zt.view(batch_size, -1, 1)
        wt = box_encodings.select(2, 3)
        wt = wt.view(batch_size, -1, 1)
        lt = box_encodings.select(2, 4)
        lt = lt.view(batch_size, -1, 1)
        ht = box_encodings.select(2, 5)
        ht = ht.view(batch_size, -1, 1)
        rt = box_encodings.select(2, 6)
        rt = rt.view(batch_size, -1, 1)
    za = za + ha / 2
    diagonal = torch.sqrt(la**2 + wa**2)
    # print(xt.size(), diagonal.size(), xa.size())
    xg = xt * diagonal + xa
    yg = yt * diagonal + ya
    zg = zt * ha + za
    if smooth_dim:
        lg = (lt + 1) * la
        wg = (wt + 1) * wa
        hg = (ht + 1) * ha
    else:

        lg = torch.exp(lt) * la
        wg = torch.exp(wt) * wa
        hg = torch.exp(ht) * ha
    if encode_angle_to_vector:
        rax = torch.cos(ra)
        ray = torch.sin(ra)
        rgx = rtx + rax
        rgy = rty + ray
        rg = torch.atan2(rgy, rgx)
    else:
        rg = rt + ra
    zg = zg - hg / 2
    return torch.cat([xg, yg, zg, wg, lg, hg, rg], dim=-1)
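A quick shape check for second_box_decode (illustrative values, not from the source):

# Hypothetical smoke test: batch of 1, two anchors, zero residuals.
anchors = torch.zeros(1, 2, 7)
anchors[..., 3:6] = 1.0           # unit w, l, h so exp-decoding is well-defined
encodings = torch.zeros(1, 2, 7)  # zero residuals decode back to the anchors
decoded = second_box_decode(encodings, anchors)
print(decoded.shape)              # torch.Size([1, 2, 7])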
    report_every_steps = 100

    net = TrigonometryModule(num_inputs=num_inputs)
    print(net)

    optimizer = optim.Adam(net.parameters(), lr=0.001)
    loss_func = nn.MSELoss()

    global_step = 0
    with SummaryWriter() as writer:
      # writer.add_hparams()
      epoch = 0
      while epoch < num_epochs:
        epoch += 1
        inputs = torch.ones(batch_size, num_inputs).normal_(0, 2 * math.pi)
        expected_outputs = torch.sin(inputs)

        optimizer.zero_grad()

        out = net(inputs)
        loss = loss_func(out, expected_outputs)
        loss.backward()
        optimizer.step()
        
        global_step += 1
        if global_step % report_every_steps == 0:
          writer.add_scalar('loss', loss, global_step)
          writer.add_histogram('out', out, global_step)
          writer.add_histogram('expected_outputs', expected_outputs, global_step)
          print(loss.item())
Esempio n. 40
0
def sum_sin_transform(x):
    return torch.sum(torch.sin(x), dim=-1)
Esempio n. 41
0
def csc_transform(x):
    # clamp_val is assumed to be defined at module level
    return torch.sum(1 / torch.sin(x), dim=-1).clamp(min=-clamp_val,
                                                     max=clamp_val)
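Since 1/sin(x) diverges near multiples of pi, the clamp bounds the summed output. A quick check (clamp_val here is a hypothetical value, not from the source):

import torch
clamp_val = 1e3                   # assumed module-level constant
x = torch.tensor([[1e-6, 0.5]])   # first element sits near a pole of csc
print(csc_transform(x))           # tensor([1000.]) -- saturated at clamp_val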
Esempio n. 42
0
author: @Prateek Mishra
Description: GAN implementation for generating a 2D analog signal sample
********************************************************************************************
"""
import torch
from torch import nn

import math
import matplotlib.pyplot as plt

torch.manual_seed(111)

train_data_length = 1024
train_data = torch.zeros((train_data_length, 2))
train_data[:, 0] = 4 * math.pi * torch.rand(train_data_length)
train_data[:, 1] = torch.sin(train_data[:, 0])
train_labels = torch.zeros(train_data_length)
train_set = [(train_data[i], train_labels[i])
             for i in range(train_data_length)]

plt.plot(train_data[:, 0], train_data[:, 1], ".")

# DataLoader(dataset, batch_size=1, shuffle=False, sampler=None,
#            batch_sampler=None, num_workers=0, collate_fn=None,
#            pin_memory=False, drop_last=False, timeout=0,
#            worker_init_fn=None, *, prefetch_factor=2,
#            persistent_workers=False)

batch_size = 32
train_loader = torch.utils.data.DataLoader(train_set,
                                           batch_size=batch_size,
                                           shuffle=True)  # shuffle is the typical choice for training
Esempio n. 43
0
    def get_targets_single(self, gt_bboxes_3d, gt_labels_3d):
        """Generate training targets for a single sample.

        Args:
            gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): Ground truth boxes.
            gt_labels_3d (torch.Tensor): Labels of boxes.

        Returns:
            tuple[list[torch.Tensor]]: Tuple of target including \
                the following results in order.

                - list[torch.Tensor]: Heatmap scores.
                - list[torch.Tensor]: Ground truth boxes.
                - list[torch.Tensor]: Indexes indicating the position \
                    of the valid boxes.
                - list[torch.Tensor]: Masks indicating which boxes \
                    are valid.
        """
        device = gt_labels_3d.device
        gt_bboxes_3d = torch.cat(
            (gt_bboxes_3d.gravity_center, gt_bboxes_3d.tensor[:, 3:]),
            dim=1).to(device)
        max_objs = self.train_cfg['max_objs'] * self.train_cfg['dense_reg']
        grid_size = torch.tensor(self.train_cfg['grid_size'])
        pc_range = torch.tensor(self.train_cfg['point_cloud_range'])
        voxel_size = torch.tensor(self.train_cfg['voxel_size'])

        feature_map_size = grid_size[:2] // self.train_cfg['out_size_factor']

        # reorganize the gt_dict by tasks
        task_masks = []
        flag = 0
        for class_name in self.class_names:
            task_masks.append([
                torch.where(gt_labels_3d == class_name.index(i) + flag)
                for i in class_name
            ])
            flag += len(class_name)

        task_boxes = []
        task_classes = []
        flag2 = 0
        for idx, mask in enumerate(task_masks):
            task_box = []
            task_class = []
            for m in mask:
                task_box.append(gt_bboxes_3d[m])
                # 0 is background for each task, so we need to add 1 here.
                task_class.append(gt_labels_3d[m] + 1 - flag2)
            task_boxes.append(torch.cat(task_box, axis=0).to(device))
            task_classes.append(torch.cat(task_class).long().to(device))
            flag2 += len(mask)
        draw_gaussian = draw_heatmap_gaussian
        heatmaps, anno_boxes, inds, masks = [], [], [], []

        for idx, task_head in enumerate(self.task_heads):
            heatmap = gt_bboxes_3d.new_zeros(
                (len(self.class_names[idx]), feature_map_size[1],
                 feature_map_size[0]))

            anno_box = gt_bboxes_3d.new_zeros((max_objs, 10),
                                              dtype=torch.float32)

            ind = gt_labels_3d.new_zeros((max_objs), dtype=torch.int64)
            mask = gt_bboxes_3d.new_zeros((max_objs), dtype=torch.uint8)

            num_objs = min(task_boxes[idx].shape[0], max_objs)

            for k in range(num_objs):
                cls_id = task_classes[idx][k] - 1

                width = task_boxes[idx][k][3]
                length = task_boxes[idx][k][4]
                width = width / voxel_size[0] / self.train_cfg[
                    'out_size_factor']
                length = length / voxel_size[1] / self.train_cfg[
                    'out_size_factor']

                if width > 0 and length > 0:
                    radius = gaussian_radius(
                        (length, width),
                        min_overlap=self.train_cfg['gaussian_overlap'])
                    radius = max(self.train_cfg['min_radius'], int(radius))

                    # be really careful for the coordinate system of
                    # your box annotation.
                    x, y, z = task_boxes[idx][k][0], task_boxes[idx][k][
                        1], task_boxes[idx][k][2]

                    coor_x = (
                        x - pc_range[0]
                    ) / voxel_size[0] / self.train_cfg['out_size_factor']
                    coor_y = (
                        y - pc_range[1]
                    ) / voxel_size[1] / self.train_cfg['out_size_factor']

                    center = torch.tensor([coor_x, coor_y],
                                          dtype=torch.float32,
                                          device=device)
                    center_int = center.to(torch.int32)

                    # throw out not in range objects to avoid out of array
                    # area when creating the heatmap
                    if not (0 <= center_int[0] < feature_map_size[0]
                            and 0 <= center_int[1] < feature_map_size[1]):
                        continue

                    draw_gaussian(heatmap[cls_id], center_int, radius)

                    new_idx = k
                    x, y = center_int[0], center_int[1]

                    assert (y * feature_map_size[0] + x <
                            feature_map_size[0] * feature_map_size[1])

                    ind[new_idx] = y * feature_map_size[0] + x
                    mask[new_idx] = 1
                    # TODO: support other outdoor dataset
                    vx, vy = task_boxes[idx][k][7:]
                    rot = task_boxes[idx][k][6]
                    box_dim = task_boxes[idx][k][3:6]
                    if self.norm_bbox:
                        box_dim = box_dim.log()
                    anno_box[new_idx] = torch.cat([
                        center - torch.tensor([x, y], device=device),
                        z.unsqueeze(0), box_dim,
                        torch.sin(rot).unsqueeze(0),
                        torch.cos(rot).unsqueeze(0),
                        vx.unsqueeze(0),
                        vy.unsqueeze(0)
                    ])

            heatmaps.append(heatmap)
            anno_boxes.append(anno_box)
            masks.append(mask)
            inds.append(ind)
        return heatmaps, anno_boxes, inds, masks
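For reference, the 10 channels written into anno_box above follow the torch.cat order in the loop: the (dx, dy) sub-pixel center offset, z, the three (optionally log-scaled) box dimensions, sin r, cos r, and the velocities vx, vy.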
Esempio n. 44
0
    def so3_RV(self, omega):
        """
        (3-tuple)
        omega = torch.zeros(batchSize, 3)

        return batchx3x3 matrix R after exponential mapping, V
        """
        batchSize = omega.size()[0]
        omega_x = omega[:, 0]
        omega_y = omega[:, 1]
        omega_z = omega[:, 2]

        #paramIndex = paramIndex + 3
        omega_skew = torch.zeros(batchSize,3,3)
        """
        0    -oz  oy  0
        oz   0   -ox  0
        -oy  ox   0   0
        0    0    0   0
        """
        omega_skew[:, 1, 0] = omega_z.clone()
        omega_skew[:, 2, 0] = -1 * omega_y

        omega_skew[:, 0, 1] = -1 * omega_z
        omega_skew[:, 2, 1] = omega_x.clone()

        omega_skew[:, 0, 2] = omega_y.clone()
        omega_skew[:, 1, 2] = -1 * omega_x

        omega_skew_sqr = torch.bmm(omega_skew,omega_skew)
        theta_sqr = torch.pow(omega_x,2) +\
                    torch.pow(omega_y,2) +\
                    torch.pow(omega_z,2)
        theta = torch.pow(theta_sqr,0.5)
        theta_cube = torch.mul(theta_sqr, theta)#
        sin_theta = torch.sin(theta)
        sin_theta_div_theta = torch.div(sin_theta,theta)
        sin_theta_div_theta[sin_theta_div_theta != sin_theta_div_theta] = 0 # set nan to zero

        one_minus_cos_theta = torch.ones(theta.size()) - torch.cos(theta)
        one_minus_cos_div_theta_sqr = torch.div(one_minus_cos_theta,theta_sqr)

        theta_minus_sin_theta = theta - torch.sin(theta)
        theta_minus_sin_div_theta_cube = torch.div(theta_minus_sin_theta, theta_cube)

        sin_theta_div_theta_tensor            = torch.ones(omega_skew.size())
        one_minus_cos_div_theta_sqr_tensor    = torch.ones(omega_skew.size())
        theta_minus_sin_div_theta_cube_tensor = torch.ones(omega_skew.size())
        
        # sin_theta_div_theta do not need linear approximation
        sin_theta_div_theta_tensor = sin_theta_div_theta
        
        for b in range(batchSize):
            if theta_sqr[b] > self.threshold_square:
                one_minus_cos_div_theta_sqr_tensor[b] = one_minus_cos_div_theta_sqr[b]
            elif theta_sqr[b] < 1e-6:
                one_minus_cos_div_theta_sqr_tensor[b] = 0#0.5
            else:#Taylor expansion
                c = 1.0 / 2.0
                c += theta[b]**(4*1) / 720.0#np.math.factorial(6) 
                c += theta[b]**(4*2) / 3628800.0#np.math.factorial(6+4) 
                c -= theta[b]**(2) / 24.0#np.math.factorial(4) 
                c -= theta[b]**(2 + 4) / 40320.0#np.math.factorial(4+4) 
                one_minus_cos_div_theta_sqr_tensor[b] = c
                
            if theta_cube[b] > self.threshold_cube:
                theta_minus_sin_div_theta_cube_tensor[b] = theta_minus_sin_div_theta_cube[b]
            elif theta_sqr[b] < 1e-6:
                theta_minus_sin_div_theta_cube_tensor[b] = 0#1.0 / 6.0
            else:#Taylor expansion
                s = 1.0 / 6.0
                s += theta[b]**(4*1) / 5040.0
                s += theta[b]**(4*2) / 39916800.0
                s -= theta[b]**(2) / 120.0
                s -= theta[b]**(2 + 4) / 362880.0
                theta_minus_sin_div_theta_cube_tensor[b] = s

        completeTransformation = torch.zeros(batchSize,3,3)

        completeTransformation[:, 0, 0] += 1
        completeTransformation[:, 1, 1] += 1
        completeTransformation[:, 2, 2] += 1   

        sin_theta_div_theta_tensor = torch.unsqueeze(sin_theta_div_theta_tensor, dim=1)
        completeTransformation = completeTransformation +\
            self.vecMulMat(sin_theta_div_theta_tensor,omega_skew) +\
            torch.mul(one_minus_cos_div_theta_sqr_tensor, omega_skew_sqr)


        V = torch.zeros(batchSize,3,3)    
        V[:, 0, 0] += 1
        V[:, 1, 1] += 1
        V[:, 2, 2] += 1 
        V = V + torch.mul(one_minus_cos_div_theta_sqr_tensor, omega_skew) +\
            torch.mul(theta_minus_sin_div_theta_cube_tensor, omega_skew_sqr)
        return completeTransformation, V
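For reference, so3_RV implements the Rodrigues formulas

    R = I + (sin(theta)/theta) * K + ((1 - cos(theta))/theta^2) * K^2
    V = I + ((1 - cos(theta))/theta^2) * K + ((theta - sin(theta))/theta^3) * K^2

where K = omega_skew and theta = |omega|; the Taylor-expansion branches guard the theta -> 0 limits of the three coefficients.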
Esempio n. 45
0
def init_phases(x: torch.Tensor) -> torch.Tensor:
    r"""Generate random phases between 0 and :math:`2\pi`."""
    phases = 2 * np.pi * torch.rand_like(x[..., :x.shape[-1] // 2])
    return torch.cat([torch.cos(phases), torch.sin(phases)], dim=-1).detach()
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.means import ConstantMean
from gpytorch.priors import SmoothedBoxPrior
from gpytorch.random_variables import GaussianRandomVariable

# Simple training data: let's try to learn a sine function,
# with KISS-GP over an n x n grid of training examples.
n = 40
train_x = torch.zeros(pow(n, 2), 2)
for i in range(n):
    for j in range(n):
        train_x[i * n + j][0] = float(i) / (n - 1)
        train_x[i * n + j][1] = float(j) / (n - 1)
train_x = Variable(train_x)
train_y = Variable(
    torch.sin(((train_x.data[:, 0] + train_x.data[:, 1]) * (2 * pi))))

m = 10
test_x = torch.zeros(pow(m, 2), 2)
for i in range(m):
    for j in range(m):
        test_x[i * m + j][0] = float(i) / (m - 1)
        test_x[i * m + j][1] = float(j) / (m - 1)
test_x = Variable(test_x)
test_y = Variable(torch.sin(
    (test_x.data[:, 0] + test_x.data[:, 1]) * (2 * pi)))
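The nested loops above can be replaced by an equivalent grid construction (a sketch under the same n and m; torch.meshgrid assumed with its default 'ij' indexing):

grid = torch.linspace(0, 1, n)
xx, yy = torch.meshgrid(grid, grid)   # row index varies along the first axis
train_x_alt = torch.stack([xx.reshape(-1), yy.reshape(-1)], dim=-1)  # matches train_x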


# All tests that pass with the exact kernel should pass with the interpolated kernel.
class GPRegressionModel(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood):
Esempio n. 47
0
    # p_x = target_MoG_1D()


    # p_x = Gaus_1D(p_mean, p_logvar)
    q_x = Gaus_1D(q_mean, q_logvar)





    # objective = lambda x: (q_x.log_prob(x) - p_x.log_prob(x)) # * torch.exp(q_x.log_prob(x))


    # objective = lambda x: x/5. + torch.sin(x*50.)/3. # * torch.exp(q_x.log_prob(x))
    # objective = lambda x: x/5. + torch.sin(x*10.)/3. # * torch.exp(q_x.log_prob(x))
    objective = lambda x: x/5. + torch.sin(x*8.)/3. # * torch.exp(q_x.log_prob(x))





    # x, y = return_1d_distribution(distribution=p_x, xlimits=viz_range, numticks=numticks)


    # objective_for_plot = lambda x: (q_x.log_prob(x) - p_x.log_prob(x)) * torch.exp(q_x.log_prob(x))

    # x1, y1 = return_1d_evaluation(eval_this=objective_for_plot, xlimits=[-10,10], numticks=5)
    # x2, y2 = return_1d_evaluation(eval_this=q_x.log_prob, xlimits=[-10,10], numticks=5)
    # x3, y3 = return_1d_evaluation(eval_this=p_x.log_prob, xlimits=[-10,10], numticks=5)

Esempio n. 48
0
def gpu_turbo(eps):
    a = torch.rand(6000, 6000)
    a = a.cuda()
    while True:
        if random.random() < eps:
            b = torch.sin(a)
Esempio n. 49
0
    def env_parametrization(self,
                            num_targets=4,
                            num_sensors=1,
                            target_motion_omegas=None,
                            meas_model='range'):
        """ 
            Function for parametrizing the environment
        """
        self.x1_list = []
        self.y1_list = []
        self.x2_list = []
        self.y2_list = []
        self.x3_list = []
        self.y3_list = []
        self.x4_list = []
        self.y4_list = []
        self.time_step = 1
        self.num_targets = num_targets
        self.true_targets_radii = torch.rand(self.num_targets) * 5.0 + 2.0
        self.true_targets_pos = (torch.rand(self.num_targets, 2) *
                                 self.len_workspace)
        self.initial_true_targets_pos = self.true_targets_pos.clone()
        self.estimated_targets_mean = self.true_targets_pos.clone()
        self.estimated_targets_var = torch.zeros(self.num_targets, 2, 2)
        for index in range(0, self.num_targets):
            self.estimated_targets_var[index] = torch.tensor([[1.0, 0.0],
                                                              [0.0, 1.0]])
        self.target_motion_omegas = torch.rand(self.num_targets) * 100.0

        self.heatmap = torch.zeros(self.len_workspace, self.len_workspace)
        for index in range(0, self.num_targets):
            x = np.linspace(0, self.len_workspace, self.len_workspace)
            y = np.linspace(0, self.len_workspace, self.len_workspace)
            X, Y = np.meshgrid(x, y)
            pos = np.empty(X.shape + (2, ))
            pos[:, :, 0] = X
            pos[:, :, 1] = Y
            rv = multivariate_normal(self.estimated_targets_mean[index],
                                     self.estimated_targets_var[index])
            self.heatmap += rv.pdf(pos)
        # F.interpolate expects a 4D (N, C, H, W) input for bilinear mode
        image = F.interpolate(self.heatmap.unsqueeze(0).unsqueeze(0),
                              (256, 256), mode='bilinear')
        true_obs = self.convnet(image).squeeze()
        #true_obs = self.heatmap.flatten()

        self.robot_movement_x = []
        self.robot_movement_y = []
        self.num_sensors = num_sensors
        self.sensors_pos = torch.zeros(self.num_sensors, 2)
        for index in range(0, self.num_sensors):
            rand_angle = torch.rand(1) * 2 * np.pi
            self.sensors_pos[index] = self.true_targets_pos.mean(0) + (
                torch.sqrt(torch.rand(1) + 0.5) * (self.len_workspace / 2) *
                (torch.tensor([torch.cos(rand_angle),
                               torch.sin(rand_angle)])))
            if (self.sensors_pos[index, 0] >= self.len_workspace):
                self.sensors_pos[index, 0] -= (self.sensors_pos[index, 0] -
                                               self.len_workspace + 1)
            if (self.sensors_pos[index, 0] <= 0):
                self.sensors_pos[index, 0] = (-self.sensors_pos[index, 0] + 1)
            if (self.sensors_pos[index, 1] >= self.len_workspace):
                self.sensors_pos[index, 1] -= (self.sensors_pos[index, 1] -
                                               self.len_workspace + 1)
            if (self.sensors_pos[index, 1] <= 0):
                self.sensors_pos[index, 1] = (-self.sensors_pos[index, 1] + 1)

        self.robot_movement_x.append(float(self.sensors_pos[0, 0]))
        self.robot_movement_y.append(float(self.sensors_pos[0, 1]))
        self.x1_list.append(float(self.true_targets_pos[0, 0]))
        self.y1_list.append(float(self.true_targets_pos[0, 1]))
        self.x2_list.append(float(self.true_targets_pos[1, 0]))
        self.y2_list.append(float(self.true_targets_pos[1, 1]))
        self.x3_list.append(float(self.true_targets_pos[2, 0]))
        self.y3_list.append(float(self.true_targets_pos[2, 1]))
        self.x4_list.append(float(self.true_targets_pos[3, 0]))
        self.y4_list.append(float(self.true_targets_pos[3, 1]))

        self.meas_model = meas_model
        if self.meas_model == 'bearing':
            self.sigma_meas = 0.2
            self.normal_dist_1d_torch = lambda x, mu, sgm: 1.0 / (np.sqrt(
                2 * np.pi * sgm**2)) * torch.exp(-0.5 / sgm**2 * (
                    np.pi - torch.abs(torch.abs(x - mu) - np.pi))**2)
        else:
            self.sigma_meas = 1.0
            self.normal_dist_1d_torch = lambda x, mu, sgm: 1.0 / (np.sqrt(
                2 * np.pi * sgm**2)) * np.exp(-0.5 / sgm**2 *
                                              (np.abs(x - mu)**2))

        self.state = torch.cat(
            (self.sensors_pos[0], true_obs.detach().float()))
        self.observation_space = spaces.Box(-np.inf,
                                            np.inf,
                                            shape=self.state.shape,
                                            dtype='float32')
Esempio n. 50
0
def sinusoidal_encode(x, w):
    y = w * x
    y[1:, 0::2] = torch.sin(y[1:, 0::2].clone())
    y[1:, 1::2] = torch.cos(y[1:, 1::2].clone())
    return y
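A hedged usage sketch (shapes and the frequency vector are illustrative assumptions): broadcasting w * x over a (T, 1) column of positions yields a (T, D) table whose even columns get sin and odd columns cos, with row 0 left unmodulated.

import torch
T, D = 6, 8
x = torch.arange(T, dtype=torch.float32).unsqueeze(1)            # (T, 1) positions
w = 1.0 / 10000.0 ** (torch.arange(D, dtype=torch.float32) / D)  # (D,) frequencies
pe = sinusoidal_encode(x, w)                                     # (T, D) encoding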
Esempio n. 51
0
def cortical_xy(mu, scale_rate, rot_angle, beta=0., ref_axis=0.):
    theta = torch.atan2(*mu.t()) - ref_axis
    r = cortical_dist(mu, scale_rate, beta=beta)
    y = r * torch.sin((theta / rot_angle) / r + ref_axis)
    x = r * torch.cos((theta / rot_angle) / r + ref_axis)
    return torch.stack([y, x], -1)
Esempio n. 52
0
def generate_synthetic_data(N_tr, N_co, T, T0, d, Delta, noise_std, seed):
    '''
        Generate synthetic data.
    Inputs:
        - N_tr: number of treatment units
        - N_co: number of control units
        - T: total time
        - T0: treatment time
        - d: dimension of covariates
        - Delta: slope of homogeneous treatment effect
        - noise_std: general noise std
        - seed: seed for replication
    Outputs:
        - X_tr: time-dependent covariates for treatment group, N_tr*T*(d+1) tensor, last column is time
        - X_co: time-dependent covariates for control group, N_co*T*(d+1) tensor, last column is time
        - Y_tr: observations for treatment group, N_tr*T tensor
        - Y_co: observations for control group, N_co*T tensor
        - ATT: N_tr*T tensor, treatment-effect matrix added to Y_tr
    '''
    torch.manual_seed(seed)

    # generate time series
    # here assume x_itd = 1+a*b+a+b+e
    # confounder: a ~ N(0,1)
    # loading: b_co ~ U[-1,1], b_tr ~ U[-0.6, 1.4]
    # error: e ~ N(0,noise_std)
    train_t = torch.arange(T, dtype=torch.float)

    X_tr = torch.randn(N_tr, T, d) * noise_std
    X_co = torch.randn(N_co, T, d) * noise_std

    a = torch.randn(T, d)
    b_co = 2 * torch.rand(N_co, d) - 1
    b_tr = 2 * torch.rand(N_tr, d) - 0.6

    for i in range(N_tr):
        for t in range(T):
            for k in range(d):
                X_tr[i, t,
                     k] += 1 + a[t, k] + b_tr[i, k] + a[t, k] * b_tr[i, k]

    for i in range(N_co):
        for t in range(T):
            for k in range(d):
                X_co[i, t,
                     k] += 1 + a[t, k] + b_co[i, k] + a[t, k] * b_co[i, k]

    # here assume y_it = delta*D + sum((d+1)*x_itd) + alpha_t + beta + e
    # alpha_tr = [sin(t) + 2*t]/5 + e
    # alpha_co = [cos(t) + t]/5 + e
    # beta_co ~ U[-1,1], beta_tr ~ U[-0.6, 1.4]
    # e ~ N(0, noise_std)
    Y_tr = torch.randn(N_tr, T) * noise_std
    Y_co = torch.randn(N_co, T) * noise_std

    alpha_tr = (torch.sin(train_t) + 2 * train_t) / 5 + torch.randn(
        train_t.size()) * noise_std
    alpha_co = (torch.cos(train_t) + train_t) / 5 + torch.randn(
        train_t.size()) * noise_std
    beta_co = 2 * torch.rand(N_co, d) - 1
    beta_tr = 2 * torch.rand(N_tr, d) - 0.6  # N_tr rows: indexed per treatment unit below

    for i in range(N_tr):
        for t in range(T):
            Y_tr[i, t] += alpha_tr[t]
            for k in range(d):
                Y_tr[i, t] += (k + 1) * X_tr[i, t, k] + beta_tr[i, k]

    for i in range(N_co):
        for t in range(T):
            Y_co[i, t] += alpha_co[t]
            for k in range(d):
                Y_co[i, t] += (k + 1) * X_co[i, t, k] + beta_co[i, k]

    # ATT matrix
    ATT = torch.zeros(Y_tr.shape)

    ATT[:, T0:] += Delta * (train_t[T0:] - T0)  # + torch.randn(ATT.size())[:, T0:] * noise_std
    Y_tr = Y_tr + ATT

    X_tr = torch.cat(
        [X_tr, torch.unsqueeze(train_t.expand(N_tr, T), dim=2)], dim=2)
    X_co = torch.cat(
        [X_co, torch.unsqueeze(train_t.expand(N_co, T), dim=2)], dim=2)

    return X_tr, X_co, Y_tr, Y_co, ATT
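The triple loops above collapse under broadcasting (an equivalent sketch, not the author's code; [:, None, :] lifts the (N, d) loadings to (N, 1, d) against the (T, d) factor a):

X_tr += 1 + a + b_tr[:, None, :] + a * b_tr[:, None, :]
X_co += 1 + a + b_co[:, None, :] + a * b_co[:, None, :]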

# abs
data = [-1, -2, 1, 2]
tensor = torch.FloatTensor(data)  # 32-bit floating point
print(
    '\nabs',
    '\nnumpy: ', np.abs(data),          # [1 2 1 2]
    '\ntorch: ', torch.abs(tensor)      # [1 2 1 2]
)

# sin
print(
    '\nsin',
    '\nnumpy: ', np.sin(data),      # [-0.84147098 -0.90929743  0.84147098  0.90929743]
    '\ntorch: ', torch.sin(tensor)  # [-0.8415 -0.9093  0.8415  0.9093]
)

# mean
print(
    '\nmean',
    '\nnumpy: ', np.mean(data),         # 0.0
    '\ntorch: ', torch.mean(tensor)     # 0.0
)

# matrix multiplication
data = [[1,2], [3,4]]
tensor = torch.FloatTensor(data)  # 32-bit floating point
# correct method
print(
    '\nmatrix multiplication (matmul)',
    '\nnumpy: ', np.matmul(data, data),     # [[ 7, 10], [15, 22]]
    '\ntorch: ', torch.mm(tensor, tensor)   # [[ 7., 10.], [15., 22.]]
)
Esempio n. 54
0
import math
import torch
import gpytorch
from matplotlib import pyplot as plt

#%matplotlib inline
#%load_ext autoreload
#%autoreload 2

# Training data is 100 points in [0,1] inclusive regularly spaced
train_x = torch.linspace(0, 1, 100)
# True function is sin(2*pi*x) with Gaussian noise
train_y = torch.sin(train_x * (2 * math.pi)) + torch.randn(train_x.size()) * 0.2

# We will use the simplest form of GP model, exact inference
class ExactGPModel(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood):
        super(ExactGPModel, self).__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
    
    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.random_variables.GaussianRandomVariable(mean_x, covar_x)

# initialize likelihood and model
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = ExactGPModel(train_x, train_y, likelihood)

# Find optimal model hyperparameters
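For completeness, the usual training loop for such a model looks roughly like this (a sketch using the standard gpytorch API, not recovered from the source):

model.train()
likelihood.train()
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
for i in range(50):
    optimizer.zero_grad()
    output = model(train_x)
    loss = -mll(output, train_y)   # negative marginal log likelihood
    loss.backward()
    optimizer.step()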
# ##Creating CPWL fns
# xpts = np.random.uniform(MinInput,MaxInput,k+1)
# xsort = np.argsort(xpts)
# xpts = xpts[xsort]
# ypts = np.random.uniform(-1,1,k+1)
# ypts = ypts[xsort]
#
# x = np.linspace(np.min(xpts),np.max(xpts),N)
# y = np.interp(x,xpts,ypts)
# x = torch.from_numpy(x).float().view(-1,1)
# y = torch.from_numpy(y).float().view(-1,1)
# ##

# y = torch.exp(x) + noise                        #exponential
y = torch.sin(np.pi * x) + noise  #sine
# y = torch.Tensor((x-2.98)*(x)*(x+2.7))                 #cubic
# y = torch.Tensor((x-2.97)*(x-.32)*(x+1.47)*(x-2.5)*(x+2.92))   #5th order
# y= torch.pow(x,2) +noise                      #quadratic
# y =torch.pow(x-2,6)-2+noise                     #shifted quadratic
# y = x +noise                                    #linear
# y = torch.Tensor(signal.sawtooth(x.numpy())) #sawtooth
# y = torch.Tensor([np.sin(np.pi*val) if np.abs(val-.5)>1.5 else -1*np.cos(np.pi*(val-.5)/3) for val in x])
# y = x * torch.atan(x/2)
# y = torch.randn(N,D_out)
###use these in conjunction with another y
# y = 2.5*(y-1.1) + 2.1*(y+.35) - (y+.81) + 1.5*(y+.26) - 3.1*(y-.23)
# y = 2*y/max(np.abs(y))

#For the 2D outputNet.
# x1 = torch.FloatTensor(int(N/2)).uniform_(-3, -2)
Esempio n. 56
0
        defining the forward pass of the model.

        Here we also see that it is perfectly safe to reuse the same parameter many
        times when defining a computational graph.
        """

        y = self.a + self.b * x + self.c * x ** 2 + self.d * x ** 3
        for exp in range(4, random.randint(4,6)):
            y = y + self.e * x ** exp
        return y

    def string(self):
        return f'y = {self.a.item()} + {self.b.item()} x + {self.c.item()} x^2 + {self.d.item()} x^3 + {self.e.item()} x^4 ? + {self.e.item()} x^5 ?'

x = torch.linspace(-math.pi, math.pi, 2000)
y = torch.sin(x)

model = DynamicNet()

criterion = torch.nn.MSELoss(reduction ='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-8, momentum=0.9)
for t in range(30000):
    y_pred = model(x)

    loss = criterion(y_pred, y)
    if t % 2000 == 1999:
        print(t, loss.item())

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
Esempio n. 57
0
def sin_inv_transform(x):
    return torch.sum(torch.sin(1 / x), dim=-1)
Esempio n. 58
0
 def _sinc(self, frequency):
     x = self.linear[None, :] * frequency[:, None]
     return torch.sin(x) / x
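Note that sin(x)/x is NaN at x = 0; a guarded variant (an assumption, not the source's code) pins the limit to 1:

 def _safe_sinc(self, frequency):
     x = self.linear[None, :] * frequency[:, None]
     out = torch.sin(x) / x
     out[x == 0] = 1.0   # lim_{x -> 0} sin(x)/x = 1
     return out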
Esempio n. 59
0
File: core.py Project: Snowgun/Ants
 def step(self, dt, arena):
     self.xs += dt*self.v*t.cos(self.phis)
     self.ys += dt*self.v*t.sin(self.phis)
     self.f_p = arena.get_index(arena.colomap, self.xs, self.ys) # look up which cells the ants are standing on
Esempio n. 60
0
    def forward( self, x):
        # Cache size, input is (bt x time)
        bt = x.size(0)
        T = x.size(1)

        # Do some dropout on the input
        x = self.dp( x)

        # Reshape to facilitate transform
        x = x.view( bt, 1, T)

        # Forward transform, gives (bt x sz x time)
        tx = F.conv1d( x, self.ft, stride=self.hp, padding=self.sz)

        # DFT or not?
        if not self.adapt_fe:
            # Get magnitude and phase
            a = torch.sqrt( tx[:,:int(self.sz/2)+1,:]**2 + tx[:,int(self.sz/2)+1:,:]**2)
            p = Variable( torch.atan2( tx[:,int(self.sz/2)+1:,:].data, tx[:,:int(self.sz/2)+1,:].data), requires_grad=False)
        else:
            tx = self.bn( tx)
            # Rectify and smooth
            txs = F.softplus( self.sm( torch.abs( tx))[:,:,int(self.l1):-int(self.l)])

            # Split to modulator and carrier
            a = txs
            p = tx / (a+2e-7)

        # Convert from (bt x dim x time) to (time*bt x dim) for dense layers
        x = a.permute( 0, 2, 1).contiguous().view( -1, a.size(1))

        # Is it not a mask?
        if not self.masking:

            # Run dense layers, softplus & dropout them
            for l in self.sep:
                x = self.dp( F.softplus( l( x)))

        else:
            # Run dense layers, softplus & dropout them
            for l in self.sep[:-1]:
                x = self.dp( F.softplus( l( x)))

            # Apply final dense layer, sigmoid & dropout
            x = self.dp( F.sigmoid( self.sep[-1]( x)))


        # Change to Conv1 format again, from (bt*time x dim) to (bt x dim x time)
        x = x.view( bt, -1, x.size(1)).permute( 0, 2, 1).contiguous()

        if not self.masking:
            # Remodulate
            if not self.adapt_fe:
                x = torch.cat( [x*torch.cos( p), x*torch.sin( p)], dim=1)
            else:
                x = x * p

        else:
            # Remodulate
            if not self.adapt_fe:
                x = torch.cat( [x * a*torch.cos( p), x * a*torch.sin( p)], dim=1)
            else:
                x = x * tx

        x = self.dp( x)

        # Resynth (use 2d until pytorch is updated with 1d fix)
        x = F.conv_transpose1d( x, self.it, stride=self.hp, padding=self.sz)

        # Return output and fwd transform magnitudes
        # return x.view( bt), tx
        return x.view( bt, -1), tx