Example #1
def apply_constraints(
    obj: Tensor,
    constraints: List[Callable[[Tensor], Tensor]],
    samples: Tensor,
    infeasible_cost: float,
    eta: float = 1e-3,
) -> Tensor:
    r"""Apply constraints using an infeasible_cost `M` for negative objectives.

    This allows feasibility-weighting an objective for the case where the
    objective can be negative by using the following strategy:
    (1) add `M` to make obj nonnegative
    (2) apply constraints using the sigmoid approximation
    (3) shift by `-M`

    Args:
        obj: A `n_samples x b x q` Tensor of objective values.
        constraints: A list of callables, each mapping a Tensor of size `b x q x m`
            to a Tensor of size `b x q`, where negative values imply feasibility.
            This callable must support broadcasting. Only relevant for multi-
            output models (`m` > 1).
        samples: A `b x q x m` Tensor of samples drawn from the posterior.
        infeasible_cost: The infeasible cost `M` that is added to the objective
            before the feasibility weighting and subtracted afterwards.
        eta: The temperature parameter of the sigmoid function.

    Returns:
        A `n_samples x b x q`-dim tensor of feasibility-weighted objectives.
    """
    # obj has dimensions n_samples x b x q
    obj = obj.add(infeasible_cost)  # now it is nonnegative
    obj = apply_constraints_nonnegative_soft(obj=obj,
                                             constraints=constraints,
                                             samples=samples,
                                             eta=eta)
    return obj.add(-infeasible_cost)
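Below is a minimal, self-contained sketch of the shift / weight / unshift strategy described in the docstring, with illustrative shapes and an inline sigmoid weight standing in for the apply_constraints_nonnegative_soft helper:

import torch

n_samples, b, q, m = 8, 2, 3, 2
obj = torch.randn(n_samples, b, q)           # objective values, possibly negative
samples = torch.randn(n_samples, b, q, m)    # posterior samples
constraints = [lambda Y: Y[..., 1]]          # negative values imply feasibility
M, eta = 10.0, 1e-3

weighted = obj + M                                          # (1) shift by M so the objective is nonnegative
for c in constraints:
    weighted = weighted * torch.sigmoid(-c(samples) / eta)  # (2) soft feasibility weight
weighted = weighted - M                                     # (3) shift back by -M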
Example #2
def _kernel_distance(squared_distances: torch.Tensor,
                     eps: float = 1e-8) -> torch.Tensor:
    r"""Compute the TPS kernel distance function: :math:`r^2 log(r)`, where `r` is the euclidean distance.
    Since :math:`\log(r) = 1/2 \log(r^2)`, this function takes the squared distance matrix and calculates
    :math:`0.5 r^2 log(r^2)`."""
    # r^2 * log(r) = 1/2 * r^2 * log(r^2)
    return 0.5 * squared_distances * squared_distances.add(eps).log()
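A short usage sketch (assumed point set): pairwise squared Euclidean distances from torch.cdist are fed straight into the kernel above.

import torch

points = torch.randn(5, 2)
squared_distances = torch.cdist(points, points).pow(2)
K = _kernel_distance(squared_distances)   # 0.5 * r^2 * log(r^2 + eps), shape (5, 5)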
Example #3
def is_div(input: Tensor, target: Tensor) -> Tensor:
    r"""The `Itakura–Saito divergence
    <https://en.wikipedia.org/wiki/Itakura%E2%80%93Saito_distance>`__, which equal to β-divergence loss when β = 0.

    .. math::
        \ell(x, y) = \sum_{n = 0}^{N - 1} \frac{x_n}{y_n} - \log\left(\frac{x_n}{y_n}\right) - 1

    Args:
        input (Tensor): tensor of arbitrary shape
        target (Tensor): tensor of the same shape as input

    Returns:
        Tensor: single element tensor
    """
    div = target.add(eps) / input.add(eps)
    return div.sum() - div.log().sum() - target.numel()
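A short usage sketch; `eps` is a module-level constant in the original source, so an assumed small value is defined here.

import torch

eps = 1e-8  # assumed value for the module-level constant used by is_div

x = torch.rand(4) + 0.1   # input
y = torch.rand(4) + 0.1   # target
loss = is_div(x, y)       # 0-dim tensor; approximately 0 when input == target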
Example #4
def _sp_double_backward_update(pos_out: Tensor,
                               neg_out: Tensor,
                               param: Parameter,
                               gamma: float,
                               l1_reg: float,
                               l2_reg: float,
                               pos: Tensor = None):
    param.grad = None
    # first backward
    neg_out.backward()
    neg = param.grad.relu_().add_(eps)

    if pos is None:
        param.grad = None
        pos_out.backward()
        pos = param.grad.relu_().add_(eps)

    if l1_reg > 0:
        pos.add_(l1_reg)
    if l2_reg > 0:
        pos = pos.add(param.data, alpha=l2_reg)
    multiplier = neg.div_(pos)
    if gamma != 1:
        multiplier.pow_(gamma)
    param.data.mul_(multiplier)
Example #5
def generate_disc_set(nb, from_=0, to_=1, one_hot_encoding=False, label_1_in_center=False):
    """
    Generate a 2D toy data set: `nb` points sampled uniformly in [from_, to_]^2, labelled
    by whether they fall inside a disc of radius sqrt(1 / (2 * pi)).
    :param nb: number of points to generate
    :param from_: lower bound of the uniform sampling range
    :param to_: upper bound of the uniform sampling range
    :param one_hot_encoding: if True, return the targets one-hot encoded over the two classes
    :param label_1_in_center: True if the disc is centered at (0.5, 0.5), as in the new
        version of the pdf (ee559-miniprojects.pdf); otherwise it is centered at the origin
    :return: the (nb, 2) input tensor and the corresponding targets
    """
    try:
        input_ = Tensor(nb, 2).uniform_(from_, to_)
        if label_1_in_center:
            target = input_.add(-.5).pow(2).sum(1).sub(1 / (2 * pi)).sign().add(1).div(2).long()
        else:
            target = input_.pow(2).sum(1).sub(1 / (2 * pi)).sign().add(1).div(2).long()
        if one_hot_encoding:
            new_target = zeros_like(input_)
            new_target[:,1][target > 0] = 1 ## inside the circle
            new_target[:,0][target < 1] = 1 ## outside the circle
            return input_, new_target
        return input_, target
    except RuntimeError:
        print(f"error in generate_disc_set(): from_ value {from_} should be lower than to_ value {to_}.")
        exit()
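A usage sketch, assuming the module-level imports the snippet relies on (Tensor and zeros_like from torch, pi from math):

train_input, train_target = generate_disc_set(1000)                        # (1000, 2) inputs, (1000,) labels
test_input, test_target = generate_disc_set(1000, one_hot_encoding=True)   # (1000, 2) one-hot targets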
Example #6
    def forward(self, boxes: Tensor, deltas: Tensor) -> Tensor:
        # calculate input boxes width/height/center
        widths = boxes[:, :, 2] - boxes[:, :, 0]
        heights = boxes[:, :, 3] - boxes[:, :, 1]
        center_x = boxes[:, :, 0] + 0.5 * widths
        center_y = boxes[:, :, 1] + 0.5 * heights

        # unapply mean/variance normalization on deltas
        if self.std is not None:
            deltas = deltas.mul(self.std)
        if self.mean is not None:
            deltas = deltas.add(self.mean)
        dx, dy, dw, dh = [deltas[:, :, i] for i in range(4)]

        # unapply log on dh, dw
        if self.log_length:
            dw, dh = [torch.exp(x) for x in (dw, dh)]

        pred_center_x = center_x + dx * widths
        pred_center_y = center_y + dy * heights

        pred_w = dw * widths
        pred_h = dh * heights

        pred_boxes_x1 = pred_center_x - 0.5 * pred_w
        pred_boxes_y1 = pred_center_y - 0.5 * pred_h
        pred_boxes_x2 = pred_center_x + 0.5 * pred_w
        pred_boxes_y2 = pred_center_y + 0.5 * pred_h

        pred_boxes = torch.stack(
            [pred_boxes_x1, pred_boxes_y1, pred_boxes_x2, pred_boxes_y2],
            dim=-1)
        return pred_boxes
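A standalone numeric check of the decoding math above for the case std=None, mean=None, log_length=True; the box and deltas are purely illustrative.

import torch

boxes = torch.tensor([[[0., 0., 10., 10.]]])       # one batch, one box: (x1, y1, x2, y2)
deltas = torch.tensor([[[0.1, 0.0, 0.0, 0.0]]])    # shift the center by 0.1 * width

widths = boxes[..., 2] - boxes[..., 0]              # 10
center_x = boxes[..., 0] + 0.5 * widths             # 5
pred_center_x = center_x + deltas[..., 0] * widths  # 5 + 0.1 * 10 = 6
pred_w = torch.exp(deltas[..., 2]) * widths         # exp(0) * 10 = 10
assert torch.allclose(pred_center_x, torch.tensor(6.0))
assert torch.allclose(pred_w, torch.tensor(10.0))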
Example #7
def add_nnz_(src: SparseTensor, other: torch.Tensor,
             layout: Optional[str] = None) -> SparseTensor:
    value = src.storage.value()
    if value is not None:
        value = value.add_(other.to(value.dtype))
    else:
        value = other.add(1)
    return src.set_value_(value, layout=layout)
Example #8
def kl_div(input: Tensor, target: Tensor) -> Tensor:
    r"""The generalized `Kullback-Leibler divergence Loss
    <https://en.wikipedia.org/wiki/Kullback-Leibler_divergence>`__, which equal to β-divergence loss when β = 1.

    The loss can be described as:

    .. math::
        \ell(x, y) = \sum_{n = 0}^{N - 1} x_n \log\left(\frac{x_n}{y_n}\right) - x_n + y_n

    Args:
        input (Tensor): tensor of arbitrary shape
        target (Tensor): tensor of the same shape as input

    Returns:
        Tensor: single element tensor
    """
    return target.reshape(-1) @ (target.add(eps).log() - input.add(eps).log()
                                 ).reshape(-1) - target.sum() + input.sum()
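A quick consistency check with an assumed `eps`: for normalized tensors the `- target.sum() + input.sum()` terms cancel, so the generalized form should match torch.nn.functional.kl_div.

import torch
import torch.nn.functional as F

eps = 1e-8  # assumed value for the module-level constant used by kl_div

x = torch.softmax(torch.randn(10), dim=0)  # normalized input
y = torch.softmax(torch.randn(10), dim=0)  # normalized target
assert torch.allclose(kl_div(x, y), F.kl_div(x.log(), y, reduction='sum'), atol=1e-4)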
Example #9
    def add_gaussian_noise(self, sgram: torch.Tensor):
        # note: torch.rand_like draws uniform noise in [0, 1); torch.randn_like would be
        # needed for noise that is actually Gaussian, as the method name suggests
        noise = torch.rand_like(sgram) * (self.hparams.noise_variance**(0.5))
        noise = noise.mul(
            torch.exp(
                torch.tensor(self.weight_decay_steps /
                             self.hparams.noise_decay_C)))
        if self.training:
            self.weight_decay_steps += 1
        return sgram.add(noise)
Example #10
    def output_symbolic_execution(self, out: Tensor):
        output_dequant_symbolic_kwargs = self.symbolic_kwargs['output_dequant_symbolic_kwargs']
        output_quant_symbolic_kwargs = self.symbolic_kwargs['output_quant_symbolic_kwargs']
        bias = self.symbolic_kwargs['bias']
        if output_dequant_symbolic_kwargs is not None:
            out = DequantizeLinearFn.apply(out, *output_dequant_symbolic_kwargs.values())
        if bias is not None:
            out = out.add(bias)
        if output_quant_symbolic_kwargs is not None:
            out = QuantizeLinearFn.apply(out, *output_quant_symbolic_kwargs.values())
        return out
Example #11
def _double_backward_update(V: Tensor,
                            WH: Tensor,
                            param: Parameter,
                            beta: float,
                            gamma: float,
                            l1_reg: float,
                            l2_reg: float,
                            pos: Tensor = None):
    param.grad = None
    if beta == 2:
        output_neg = V
        output_pos = WH
    elif beta == 1:
        output_neg = V / WH.add(eps)
        output_pos = None
    elif beta == 0:
        WH_eps = WH.add(eps)
        output_pos = WH_eps.reciprocal_()
        output_neg = output_pos.square().mul_(V)
    else:
        WH_eps = WH.add(eps)
        output_neg = WH_eps.pow(beta - 2).mul_(V)
        output_pos = WH_eps.pow_(beta - 1)

    # first backward
    WH.backward(output_neg, retain_graph=pos is None)
    neg = param.grad.relu_().add_(eps)

    if pos is None:
        param.grad = None
        WH.backward(output_pos)
        pos = param.grad.relu_().add_(eps)

    if l1_reg > 0:
        pos.add_(l1_reg)
    if l2_reg > 0:
        pos = pos.add(param.data, alpha=l2_reg)
    multiplier = neg.div_(pos)
    if gamma != 1:
        multiplier.pow_(gamma)
    param.data.mul_(multiplier)
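A hypothetical driver for the update above: a single multiplicative step on H for V ≈ W @ H under the Euclidean loss (beta=2), with `eps` again assumed to be a small module-level constant.

import torch
from torch.nn import Parameter

eps = 1e-8
V = torch.rand(6, 4)              # data to factorize
W = torch.rand(6, 3)              # fixed factor
H = Parameter(torch.rand(3, 4))   # factor updated in place

WH = W @ H                        # reconstruction, differentiable w.r.t. H
_double_backward_update(V, WH, H, beta=2, gamma=1.0, l1_reg=0.0, l2_reg=0.0)
# H.data is scaled by roughly (W^T V) / (W^T W H), the classic NMF
# multiplicative update, up to the eps stabilizers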
Example #12
def _double_backward_update(V: Tensor,
                            WH: Tensor,
                            param: Parameter,
                            beta: float,
                            gamma: float,
                            l1_reg: float,
                            l2_reg: float,
                            pos: Tensor = None):
    param.grad = None
    if beta == 2:
        output_neg = V
        output_pos = WH
    elif beta == 1:
        output_neg = V / WH.add(eps)
        output_pos = None
    elif beta == 0:
        output_neg = V / (WH * WH).add(eps)
        output_pos = 1 / WH.add(eps)
    else:
        output_neg = WH.pow(beta - 2) * V
        output_pos = WH.pow(beta - 1)
    # first backward
    WH.backward(output_neg, retain_graph=pos is None)
    neg = torch.clone(param.grad).relu_().add_(eps)

    if pos is None:
        param.grad.zero_()
        WH.backward(output_pos)
        pos = torch.clone(param.grad).relu_().add_(eps)

    if l1_reg > 0:
        pos.add_(l1_reg)
    if l2_reg > 0:
        pos = pos.add(param.data, alpha=l2_reg)
    multiplier = neg / pos
    if gamma != 1:
        multiplier.pow_(gamma)
    param.data.mul_(multiplier)
Example #13
    def forward(self, in_spatial: th.Tensor, in_temporal: tp.Optional[th.Tensor] = None) -> th.Tensor:
        _out = in_spatial

        if in_temporal is not None:
            _out = in_spatial.add(in_temporal)  # noqa

        _out = self.conv1(_out)
        _out = self.bn1(_out)
        _out = self.relu(_out)

        _out = self.conv2(_out)
        _out = self.bn2(_out)

        _out = _out.add(in_spatial)  # noqa
        _out = self.relu(_out)

        return _out
Example #14
    def __call__(self, param: torch.nn.Parameter, grad: torch.Tensor,
                 group: Dict[str, Any]):
        """
        ### Perform weight decay and return the gradient
        """

        # If we are doing the decay on the parameter directly
        if self.weight_decouple:
            # If the weight decay coefficient is absolute
            if self.absolute:
                param.data.mul_(1.0 - group['weight_decay'])
            # Otherwise,
            else:
                param.data.mul_(1.0 - group['lr'] * group['weight_decay'])
            # Return the unmodified gradient
            return grad
        else:
            if group['weight_decay'] != 0:
                # Add the weight decay to the gradient and return the modified gradient
                return grad.add(param.data, alpha=group['weight_decay'])
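A minimal numeric sketch with assumed values, contrasting the two branches above: decoupled decay shrinks the parameter directly and returns the gradient untouched, while the classic L2 form folds the decay into the gradient.

import torch

lr, weight_decay = 0.1, 0.01
param = torch.tensor([1.0, -2.0])
grad = torch.tensor([0.5, 0.5])

decoupled_param = param * (1.0 - lr * weight_decay)   # weight_decouple, non-absolute branch
coupled_grad = grad.add(param, alpha=weight_decay)    # classic branch: grad + weight_decay * param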
Example #15
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # gate the input with a hard-sigmoid-style factor: clamp(x + 0.5, 0, 1) * x
        return x.add(0.5).clamp_(min=0, max=1).mul_(x)
Example #16
    def _eq_logqw(self, logvar: torch.Tensor):
        # E_q[log q(w)] of a diagonal Gaussian, i.e. its negative entropy
        logqw = logvar.add(math.log(2. * math.pi)).add(1.).mul(-0.5)
        return torch.sum(logqw)
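A quick sanity check with assumed log-variances that the sum above equals E_q[log q(w)], i.e. the negative entropy of a diagonal Gaussian:

import math
import torch

logvar = torch.log(torch.tensor([0.5, 2.0]))
manual = logvar.add(math.log(2. * math.pi)).add(1.).mul(-0.5).sum()
reference = -torch.distributions.Normal(0.0, logvar.mul(0.5).exp()).entropy().sum()
assert torch.allclose(manual, reference)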
Example #17
def cross_entropy(predicted: torch.Tensor, true: torch.Tensor):
    return predicted.mul(true.add(1e-20).log().neg()).mean(dim=(-1, ))
Example #18
def entropy(probability: torch.Tensor):
    return probability.mul(probability.add(1e-20).log().neg()).sum(dim=(-1, ))
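A one-line sanity check: the entropy of a uniform distribution over n outcomes should come out as log(n).

import torch

p = torch.full((4,), 0.25)
assert torch.allclose(entropy(p), torch.log(torch.tensor(4.0)))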
Example #19
    def _normalize_01(self, tensor: torch.Tensor):
        # per-sample, per-channel min-max scaling to [0, 1] (assumes an N x C x H x W layout)
        _min = tensor.min(2, True)[0].min(3, True)[0]
        _max = tensor.max(2, True)[0].max(3, True)[0]
        return tensor.add(-_min).div(_max - _min + 1e-6)
Example #20
    def forward(self, x: Tensor) -> Tensor:
        # x + lr_mult * bias; the old `add(value, other)` overload is deprecated, so the
        # scale goes through the `alpha` keyword instead
        return x.add(self.bias[:, None, None], alpha=self.lr_mult)
Example #21
    def step(self, actions: torch.Tensor, last_p: torch.Tensor) -> (torch.Tensor, torch.Tensor, torch.Tensor, dict, torch.Tensor):
        if actions.dtype not in (torch.short, torch.int, torch.long):
            raise TypeError('actions Tensor must be an integer type i.e. '
                            '{torch.ShortTensor, torch.IntTensor, torch.LongTensor}')

        if actions.shape[0] != self.num_envs:
            raise RuntimeError('Must have the same number of actions as environments.')

        reward = torch.ones((self.num_envs,)).float().to(self.device).requires_grad_(False)*-1
        done = torch.zeros((self.num_envs,)).byte().to(self.device).requires_grad_(False)
        info = dict()

        t0 = time()
        snake_sizes = self.envs[:, BODY_CHANNEL:BODY_CHANNEL + 1, :].view(self.num_envs, -1).max(dim=1)[0]

        orientations = determine_orientations(self.envs)
        if self.verbose > 0:
            print(f'\nOrientations: {time()-t0}s')

        t0 = time()
        # Check if any snakes are trying to move backwards and change
        # their direction/action to just continue forward
        # The test for this is if their orientation number {0, 1, 2, 3}
        # is the same as their action
        mask = orientations == actions
        actions.add_((mask * 2).long()).fmod_(4)

        # Create head position deltas
        head_deltas = F.conv2d(head(self.envs), ORIENTATION_FILTERS.to(self.device), padding=1)
        # Select the head position delta corresponding to the correct action
        actions_onehot = torch.FloatTensor(self.num_envs, 4).to(self.device)
        actions_onehot.zero_()
        actions_onehot.scatter_(1, actions.unsqueeze(-1), 1)
        head_deltas = torch.einsum('bchw,bc->bhw', [head_deltas, actions_onehot]).unsqueeze(1)

        # Move head position by applying delta
        head_coord = (head(self.envs)==1).nonzero()[:,2:]
        food_coord = (food(self.envs)==1).nonzero()[:,2:]
        old_dist = (head_coord-food_coord).abs().sum(dim=-1)
        # print("old_dist \n")
        # print(old_dist)
        self.envs[:, HEAD_CHANNEL:HEAD_CHANNEL + 1, :, :].add_(head_deltas).round_()
        head_coord = (head(self.envs)==1).nonzero()[:,2:]
        food_coord = (food(self.envs)==1).nonzero()[:,2:]
        new_dist = (head_coord - food_coord).abs().sum(dim=-1)
        # print("new_dist \n")
        # print(new_dist)
        moving_away = (old_dist <= new_dist).float()
        # print("moving_away \n")
        # print(moving_away)
        reward.sub_(moving_away).div_(snake_sizes.float())
        if self.verbose:
            print(f'Head movement: {time() - t0}s')

        ### updating reward based on time out
        modified_l = 0.7*snake_sizes
        modified_l.add_(10)
        last_p = last_p.to(self.device)
        timeout = (last_p > modified_l).float()
        reward.sub_(timeout).div_(snake_sizes)
        new_p = last_p.add(1)

        ################
        # Apply update #
        ################

        t0 = time()
        head_food_overlap = (head(self.envs) * food(self.envs)).view(self.num_envs, -1).sum(dim=-1)

        # Decay the body sizes by 1, hence moving the body, apply ReLu to keep above 0
        # Only do this for environments which haven't just eaten food
        body_decay_env_indices = ~head_food_overlap.byte()
        self.envs[body_decay_env_indices, BODY_CHANNEL:BODY_CHANNEL + 1, :, :] -= 1
        self.envs[body_decay_env_indices, BODY_CHANNEL:BODY_CHANNEL + 1, :, :] = \
            self.envs[body_decay_env_indices, BODY_CHANNEL:BODY_CHANNEL + 1, :, :].relu()

        # Check for hitting self
        self_collision = (head(self.envs) * body(self.envs)).view(self.num_envs, -1).sum(dim=-1) > EPS
        reward -= self_collision.float()*100
        # reward.sub_((head(self.envs) * body(self.envs)).view(self.num_envs, -1).sum(dim=-1).float()*100)
        info.update({'self_collision': self_collision})
        done = done | self_collision

        # Create a new head position in the body channel
        # Make this head +1 greater if the snake has just eaten food
        self.envs[:, BODY_CHANNEL:BODY_CHANNEL + 1, :, :] += \
            head(self.envs) * (
                snake_sizes[:, None, None, None].expand((self.num_envs, 1, self.size, self.size)) +
                head_food_overlap[:, None, None, None].expand((self.num_envs, 1, self.size, self.size))
            )

        if self.verbose:
            print(f'Body movement: {time()-t0}')

        t0 = time()
        # Remove food and give reward
        # `food_removal` is 0 except where a snake head is at the same location as food where it is -1
        food_removal = head(self.envs) * food(self.envs) * -1
        # print("Food Reward")
        # print(food_removal.view(self.num_envs, -1).sum(dim=-1).float()*10)
        # print("Reward Before")
        # print(reward)
        reward.sub_(food_removal.view(self.num_envs, -1).sum(dim=-1).float()*200)
        # print("Reward After")
        # print(reward)
        new_p = new_p*(food_removal.view(self.num_envs, -1).sum(dim=-1).add(1))
        self.envs[:, FOOD_CHANNEL:FOOD_CHANNEL + 1, :, :] += food_removal
        if self.verbose:
            print(f'Food removal: {time() - t0}s')

        # Add new food if necessary.
        if food_removal.sum() < 0:
            t0 = time()
            food_addition_env_indices = (food_removal * -1).view(self.num_envs, -1).sum(dim=-1).byte()
            add_food_envs = self.envs[food_addition_env_indices, :, :, :]
            food_addition = self._get_food_addition(add_food_envs)
            self.envs[food_addition_env_indices, FOOD_CHANNEL:FOOD_CHANNEL+1, :, :] += food_addition
            if self.verbose:
                print(f'Food addition ({food_addition_env_indices.sum().item()} envs): {time() - t0}s')

        t0 = time()
        # Check for boundary, Done by performing a convolution with no padding
        # If the head is at the edge then it will be cut off and the sum of the head
        # channel will be 0
        edge_collision = F.conv2d(
            head(self.envs),
            NO_CHANGE_FILTER.to(self.device),
        ).view(self.num_envs, -1).sum(dim=-1) < EPS
        # print("Edge collision")
        # print(edge_collision)
        # print("Reward before")
        # print(reward)
        reward -= edge_collision.float()*100
        # print("Reward after")
        # print(reward)
        done = done | edge_collision
        info.update({'edge_collision': edge_collision})
        if self.verbose:
            print(f'Edge collision ({edge_collision.sum().item()} envs): {time() - t0}s')
        half_tensor = torch.ones(self.num_envs).to(self.device)*0.5
        check_done = (done.float() < half_tensor).float()
        new_p = new_p*check_done

        # Apply rounding to stop numerical errors accumulating
        self.envs.round_()

        self.done = done

        return self._observe(self.observation_mode), reward.unsqueeze(-1), done.unsqueeze(-1), info, new_p
Example #22
    def forward(self, x: Tensor, style: Tensor) -> Tensor:
        x = self.conv2d_forward(x, style, self.weight * self.weight_mult)
        if self.bias is not None:
            # x + lr_mult * bias, via the non-deprecated `alpha` keyword form of add
            x = x.add(self.bias[:, None, None], alpha=self.lr_mult)
        return x