Code example #1
    def forward(self, x, batch=None):
        device = x.device
        if batch is None:
            x = x - x.mean()
            out = x / (x.std(unbiased=False) + self.eps)
        else:
            batch_size = int(batch.max()) + 1
            batch_idx = [batch == i for i in range(batch_size)]
            norm = torch.tensor([i.sum() for i in batch_idx],
                                dtype=x.dtype).clamp_(min=1).to(device)
            norm = norm.mul_(x.size(-1)).view(-1, 1)
            tmp_list = [x[i] for i in batch_idx]
            mean = torch.concat([i.sum(0).unsqueeze(0) for i in tmp_list],
                                dim=0).sum(dim=-1, keepdim=True).to(device)
            mean = mean / norm
            x = x - mean.index_select(0, batch.long())
            var = torch.concat([(i * i).sum(0).unsqueeze(0) for i in tmp_list],
                               dim=0).sum(dim=-1, keepdim=True).to(device)
            var = var / norm
            out = x / (var + self.eps).sqrt().index_select(0, batch.long())

        if self.weight is not None and self.bias is not None:
            out = out * self.weight + self.bias

        return out
Code example #2
File: lstm.py Project: diku-dk/futhark-ad
  def vjp(self, input_, target, runs):
    def vjp_(self, input_, target):
      self.zero_grad()
      x = input_ if self.input_ is None else self.input_
      y = target if self.target is None else self.target
      h = c = None

      # get predictions (forward pass)
      y_hat, h, c = self(x, h, c, runs=None)
  
      self.loss = torch.mean((y_hat - y)**2)
      # backprop
      self.loss.backward(gradient=torch.tensor(1.0))

    start  = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    for i in range(runs):
       vjp_(self, input_, target)
    torch.cuda.synchronize()
    end.record()
    torch.cuda.synchronize()

    d = {n: p.grad for n, p in self.named_parameters()}
    self.grads = {  'weight_ih_l0': torch.concat([torch.transpose(g, 0, 1) for g in [d['W_i'], d['W_f'], d['W_j'], d['W_o']]])
                    , 'weight_hh_l0': torch.concat([torch.transpose(g, 0, 1) for g in [d['U_i'], d['U_f'], d['U_j'], d['U_o']]])
                    , 'bias_ih_l0'  : torch.concat([d['b_ii'], d['b_if'], d['b_ij'], d['b_io']])
                    , 'bias_hh_l0'  : torch.concat([d['b_hi'], d['b_hf'], d['b_hj'], d['b_ho']])
                    , 'weight'      : torch.transpose(d['W_y'], 0, 1)
                    , 'bias'        : d['b_y']
                   }
    return start.elapsed_time(end) / runs
Code example #3
    def learn(self):
        for _ in range(self.nb_gradient_steps):
            # gradient_step() function in RL5 notebook
            if len(self.replay_buffer) > self.batch_size:
                # NEW: samples drawn from the buffer contain goals
                states, actions, rewards, new_states, dones, goals = self.replay_buffer.sample(
                    self.batch_size)

                # NEW: concatenate states and goals before feeding them to the model
                goal_conditioned_states = torch.concat((states, goals), dim=-1)
                goal_conditioned_new_states = torch.concat((new_states, goals),
                                                           dim=-1)

                q_prime = self.target_model(goal_conditioned_new_states).max(
                    1)[0].detach()
                update = rewards + self.gamma * (1 - dones) * q_prime
                q_s_a = self.model(goal_conditioned_states).gather(
                    1,
                    actions.to(torch.long).unsqueeze(1))
                loss = self.criterion(q_s_a, update.unsqueeze(1))
                self.model.learn(loss)

        # update target network if needed
        if self.time_step_id % self.update_target_freq == 0:
            self.target_model.load_state_dict(self.model.state_dict())
Code example #4
def ae_fit_transform(x, enc_dim, batch_size=256, max_epochs=3000, patience=250, num_workers=0):
    # Create train datasets and data loaders
    train_data = TensorDataset(torch.tensor(x), torch.tensor(x))
    train_loader = DataLoader(train_data, shuffle=True, batch_size=batch_size, num_workers=num_workers)

    # Instantiate model
    model = LitAutoEncoder(input_dim=x.shape[1], enc_dim=enc_dim)

    # Callbacks
    early_stop_callback = EarlyStopping(monitor="train_loss", patience=patience, mode="min")

    # Train model
    trainer = pl.Trainer(max_epochs=max_epochs, callbacks=[early_stop_callback], devices="auto", accelerator="auto",
                         logger=True, enable_checkpointing=False)
    trainer.fit(model, train_loader)

    # Transform data
    model.mode = "encode"
    test_enc_data = TensorDataset(torch.tensor(x))
    test_enc_loader = torch.utils.data.DataLoader(test_enc_data, shuffle=False, batch_size=batch_size, num_workers=num_workers)
    x_enc = trainer.predict(model, dataloaders=test_enc_loader)
    x_enc = torch.concat(x_enc, dim=0).detach().cpu().numpy()

    # Inverse transform
    model.mode = "decode"
    test_dec_data = TensorDataset(torch.tensor(x_enc))
    test_dec_loader = torch.utils.data.DataLoader(test_dec_data, shuffle=False, batch_size=batch_size, num_workers=num_workers)
    x_dec = trainer.predict(model, dataloaders=test_dec_loader)
    x_dec = torch.concat(x_dec, dim=0).detach().cpu().numpy()

    return x_enc, x_dec
Code example #5
    def learn(self):
        if len(self.replay_buffer) > self.batch_size:
            states, actions, rewards, new_states, dones = self.replay_buffer.sample(
                self.batch_size)

            with torch.no_grad():
                target_actions = self.target_actor.forward(new_states)
                critic_value_ = self.target_critic.forward(
                    torch.concat((new_states, target_actions), dim=-1))
            critic_value = self.critic.forward(
                torch.concat((states, actions), dim=-1))
            target = torch.addcmul(rewards, 1 - dones,
                                   critic_value_.squeeze(),
                                   value=self.gamma).view(self.batch_size, 1)
            self.critic.optimizer.zero_grad()
            critic_loss = torch.nn.functional.mse_loss(critic_value, target)
            critic_loss.backward()
            self.critic.optimizer.step()

            self.actor.optimizer.zero_grad()
            actions = self.actor.forward(states)
            actor_loss = -self.critic.forward(
                torch.concat((states, actions), dim=-1))
            actor_loss = torch.mean(actor_loss)
            actor_loss.backward()
            self.actor.optimizer.step()

            self.target_critic.converge_to(self.critic, tau=self.tau)
            self.target_actor.converge_to(self.actor, tau=self.tau)
Code example #6
def get_environment(n_atoms, grid=None):

    if n_atoms == 1:
        neighborhood_idx = -torch.ones((1, 1), dtype=torch.float32)
        offsets = torch.zeros((n_atoms, 1, 3), dtype=torch.float32)
    else:
        neighborhood_idx = torch.arange(
            n_atoms, dtype=torch.float32).unsqueeze(0).repeat(n_atoms, 1)
        neighborhood_idx = neighborhood_idx[
            ~torch.eye(n_atoms, dtype=torch.long).byte()].view(
                n_atoms, n_atoms - 1).long()

        if grid is not None:
            n_grid = grid.shape[0]
            neighborhood_idx = torch.concat(
                [neighborhood_idx, -torch.ones((n_atoms, 1))], 1)
            grid_nbh = torch.tile(
                torch.arange(n_atoms, dtype=torch.float32).unsqueeze(0),
                (n_grid, 1))
            neighborhood_idx = torch.concat([neighborhood_idx, grid_nbh], 0)

        offsets = torch.zeros(
            (neighborhood_idx.shape[0], neighborhood_idx.shape[1], 3),
            dtype=torch.float32)
    return neighborhood_idx, offsets
Code example #7
File: maddpg.py Project: goodbyeearth/mpnn
def q_train(make_obs_ph_n,
            act_space_n,
            q_index,
            q_func,
            optimizer,
            grad_norm_clipping=None,
            local_q_func=False,
            scope="trainer",
            reuse=None,
            num_units=64):
    with tf.variable_scope(scope, reuse=reuse):
        # create distributions
        act_pdtype_n = [make_pdtype(act_space) for act_space in act_space_n]

        # set up placeholders
        obs_ph_n = make_obs_ph_n
        act_ph_n = [
            act_pdtype_n[i].sample_placeholder([None], name="action" + str(i))
            for i in range(len(act_space_n))
        ]
        target_ph = tf.placeholder(tf.float32, [None], name="target")

        q_input = tf.concat(obs_ph_n + act_ph_n, 1)
        if local_q_func:
            q_input = tf.concat([obs_ph_n[q_index], act_ph_n[q_index]], 1)
        q = q_func(q_input, 1, scope="q_func", num_units=num_units)[:, 0]
        q_func_vars = U.scope_vars(U.absolute_scope_name("q_func"))

        q_loss = tf.reduce_mean(tf.square(q - target_ph))

        # viscosity solution to Bellman differential equation in place of an initial condition
        q_reg = tf.reduce_mean(tf.square(q))
        loss = q_loss  #+ 1e-3 * q_reg

        optimize_expr = U.minimize_and_clip(optimizer, loss, q_func_vars,
                                            grad_norm_clipping)

        # Create callable functions
        train = U.function(inputs=obs_ph_n + act_ph_n + [target_ph],
                           outputs=loss,
                           updates=[optimize_expr])
        q_values = U.function(obs_ph_n + act_ph_n, q)

        # target network
        target_q = q_func(q_input,
                          1,
                          scope="target_q_func",
                          num_units=num_units)[:, 0]
        target_q_func_vars = U.scope_vars(
            U.absolute_scope_name("target_q_func"))
        update_target_q = make_update_exp(q_func_vars, target_q_func_vars)

        target_q_values = U.function(obs_ph_n + act_ph_n, target_q)

        return train, update_target_q, {
            'q_values': q_values,
            'target_q_values': target_q_values
        }
Code example #8
 def forward(self, input1, input2):
     point_idx_in = input1.get_spatial_locations()[:, -1]
     max_idx = torch.max(point_idx_in).max().item()
     assert max_idx % self.frames
     batch_size = 1 + max_idx
     tmp_f = torch.concat([input1.features, input2.features])
     tmp_c = input2.get_spatial_locations()
     tmp_c[:, -1] = tmp_c[:, -1] + batch_size
     tmp_c = torch.concat([input1.get_spatial_locations(), tmp_c])
     return self.input([tmp_c, tmp_f])
Code example #9
File: deduce.py Project: quantapix/qnarre
 def logits(self, x, i=None):
     y = x
     a = self.adapt_ws[i or 0]
     if a is not None:
         y = torch.einsum("bih,ph->bip", y, a)
     t = self.table_ws[i or 0]
     b = self.table_bs[i or 0]
     if i == 0:
         t = torch.concat([t, self.clust_w], 0)
         b = torch.concat([b, self.clust_b], 0)
     y = torch.einsum("bie,ne->bin", y, t) + b
     return y
Code example #10
def create_mask(qlen, mlen, dtype=torch.float32, same_length=False):
    """Creates attention mask when single-side context allowed only."""
    attn_mask = torch.ones([qlen, qlen], dtype=dtype)
    mask_u = torch.triu(attn_mask)  # upper triangle, including the diagonal
    mask_dia = torch.tril(torch.triu(attn_mask))  # diagonal only
    attn_mask_pad = torch.zeros([qlen, mlen], dtype=dtype)
    ret = torch.concat([attn_mask_pad, mask_u - mask_dia], 1)
    if same_length:
        mask_l = torch.tril(attn_mask)  # lower triangle, including the diagonal
        ret = torch.concat([ret[:, :qlen] + mask_l - mask_dia, ret[:, qlen:]],
                           1)

    return ret
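
For illustration, a small hedged check of the mask layout produced by the torch.triu/torch.tril port above; entries equal to 1 mark positions a query may not attend to.

import torch

mask = create_mask(qlen=3, mlen=2)
print(mask)
# expected layout (3 x 5): the first mlen columns are 0 (memory is visible),
# the right qlen x qlen block is strictly upper-triangular:
# tensor([[0., 0., 0., 1., 1.],
#         [0., 0., 0., 0., 1.],
#         [0., 0., 0., 0., 0.]])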
Code example #11
    def forward(self, x):
        enc1 = self.enc1(x)
        enc2 = self.enc2(enc1)
        enc3 = self.enc3(enc2)
        enc4 = self.enc4(enc3)
        enc5 = self.enc5(enc4)

        dec5 = self.dec5(enc5)
        dec4 = self.dec4(torch.concat((enc4, dec5), 1))
        dec3 = self.dec3(torch.concat((enc3, dec4), 1))
        dec2 = self.dec2(torch.concat((enc2, dec3), 1))
        dec1 = self.dec1(torch.concat((enc1, dec2), 1))

        return self.final(dec1)
Code example #12
    def forward_bboxes(self,
                       bboxes: torch.FloatTensor,
                       image_tensor=None,
                       width_height=None) -> torch.FloatTensor:
        """Applies a transformation on Image coordinate defined bounding boxes.

        Bounding Boxes are encoded as [Left, Top, Right, Bottom]

        Args:
            bboxes (torch.FloatTensor) : A tensor with bboxes for a sample [N x 4] or a batch [S x N x 4]
            image_tensor (torch.FloatTensor): A valid batch image tensor [S x C x H x W] or sample image tensor
                [C x H x W]. In both cases it is only used to normalise bbox coordinates and can be omitted if width_height
                is specified.
            width_height (int, int ): Values used to normalise bbox coordinates to [-1,1] and back, should be omitted if
                image tensor is passed

        Returns: a tensor with the bounding boxes enclosing the transformed boxes.
        """
        if len(bboxes.size()) == 2:
            bboxes = bboxes.unsqueeze(dim=0)
        if image_tensor is not None:
            width = image_tensor.size(-1)
            height = image_tensor.size(-2)
        else:
            width, height = width_height
        normalise_bboxes = torch.tensor(
            [[width * .5, height * .5, width * .5, height * .5]])
        normalised_bboxes = bboxes / normalise_bboxes - 1
        bboxes_left = normalised_bboxes[:, 0].unsqueeze(dim=0).unsqueeze(dim=0)
        bboxes_top = normalised_bboxes[:, 1].unsqueeze(dim=0).unsqueeze(dim=0)
        bboxes_right = normalised_bboxes[:,
                                         2].unsqueeze(dim=0).unsqueeze(dim=0)
        bboxes_bottom = normalised_bboxes[:,
                                          3].unsqueeze(dim=0).unsqueeze(dim=0)
        bboxes_x = torch.concat(
            [bboxes_left, bboxes_right, bboxes_right, bboxes_left], dim=1)
        bboxes_y = torch.concat(
            [bboxes_top, bboxes_top, bboxes_bottom, bboxes_bottom], dim=1)

        pointcloud = (bboxes_x, bboxes_y)
        pointcloud = self.forward_pointcloud(pointcloud)
        pointcloud = (torch.clamp(pointcloud[0], -1, 1),
                      torch.clamp(pointcloud[1], -1, 1))

        left = ((pointcloud[0].min(dim=1).values + 1) * .5 * width).view(-1, 1)
        right = ((pointcloud[0].max(dim=1).values + 1) * .5 * width).view(-1, 1)
        top = ((pointcloud[1].min(dim=1).values + 1) * .5 * height).view(-1, 1)
        bottom = ((pointcloud[1].max(dim=1).values + 1) * .5 * height).view(-1, 1)
        result_bboxes = torch.concat([left, top, right, bottom], dim=1)
        return result_bboxes
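
As a quick illustration of the [-1, 1] normalisation described in the docstring (the sizes and box below are hypothetical):

import torch

width, height = 200, 100
bbox = torch.tensor([[50., 25., 150., 75.]])  # [Left, Top, Right, Bottom]
normalise = torch.tensor([[width * .5, height * .5, width * .5, height * .5]])
print(bbox / normalise - 1)  # tensor([[-0.5000, -0.5000,  0.5000,  0.5000]])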
Code example #13
def load_torchscript_model(dummy_input):
    loaded_model = torch.jit.load(TS_MODEL_PATH)
    loaded_model.eval()

    dummy_input_tensor = torch.concat(dummy_input)  # dtype = torch.int64
    all_encoder_layers, pooled_output = loaded_model(dummy_input_tensor)
    print(all_encoder_layers)  # dtype = torch.float32
Code example #14
File: oracle.py Project: tinagu945/CausalPhys
    def simulate(self, control_idx, batch_size):
        inputs = np.random.uniform(low=self.low,
                                   high=self.high,
                                   size=(1, self.num_nodes, 1, 1))
        # Input vars are constant over the trajectory and the same for all datapoints in one batch.
        # The controlled var is constant over the trajectory but differs between datapoints in one batch.
        inputs = np.tile(inputs,
                         (batch_size, 1, self.trajectory_len, 1))

        control = np.random.uniform(low=self.control_low,
                                    high=self.control_high,
                                    size=(batch_size, 1, 1, 1))
        control = np.tile(control, (1, 1, self.trajectory_len, 1))

        inputs[:, control_idx:control_idx + 1, :, :] = control
        inputs = torch.from_numpy(inputs)  # self.func and the concat below are assumed to work on tensors
        targets = self.func(inputs)
        data = torch.concat((inputs, targets), dim=-1)

        outputs = torch.zeros(
            (data.size(0), data.size(1), data.size(2), 1 + data.size(1)))
        outputs[:, :, :, 0] = data
        # Add one hot encoding
        for i in range(1, outputs.size(-1)):
            outputs[:, :, :, i] = 1

        return outputs
Code example #15
File: tensor_ops.py Project: malfet/pytorch
 def tensor_indexing_ops(self):
     x = torch.randn(2, 4)
     y = torch.randn(2, 4, 2)
     t = torch.tensor([[0, 0], [1, 0]])
     mask = x.ge(0.5)
     i = [0, 1]
     return (
         torch.cat((x, x, x), 0),
         torch.concat((x, x, x), 0),
         torch.conj(x),
         torch.chunk(x, 2),
         torch.dsplit(y, i),
         torch.column_stack((x, x)),
         torch.dstack((x, x)),
         torch.gather(x, 0, t),
         torch.hsplit(x, i),
         torch.hstack((x, x)),
         torch.index_select(x, 0, torch.tensor([0, 1])),
         torch.masked_select(x, mask),
         torch.movedim(x, 1, 0),
         torch.moveaxis(x, 1, 0),
         torch.narrow(x, 0, 0, 2),
         torch.nonzero(x),
         torch.permute(x, (0, 1)),
         torch.reshape(x, (-1, )),
     )
Code example #16
    def score(self, target_h, source_hs, source_mask):
        # target_h: N, target_dim
        # source_hs: seq_len, N, source_dim
        # source_mask: seq_len, N

        target_h = target_h.unsqueeze(0)  # 1, N, target_dim
        target_size = (source_hs.size(0), -1, -1)  # seq_len, N, target_dim

        if self.score_method == "dot":
            att = torch.mul(target_h.expand(target_size),
                            source_hs)  # seq_len, N, target_dim
            att = torch.sum(att, dim=2)  # seq_len, N

        elif self.score_method == "general":
            att = self.atten(source_hs)  # seq_len, N, target_dim
            att = torch.mul(target_h.expand(target_size),
                            att)  # seq_len, N, target_dim
            att = torch.sum(att, dim=2)  # seq_len, N

        elif self.score_method == "concat":
            target_h = target_h.expand(target_size)  # seq_len, N, target_dim
            concat = torch.concat((source_hs, target_h),
                                  dim=2)  # seq_len, N, source_dim + target_dim
            att = self.atten(concat)  # seq_len, N, concat_hidden_dim
            v = self.v.unsqueeze(0).unsqueeze(0)  # 1, 1, concat_hidden_dim
            att = torch.mul(v.expand_as(att),
                            att)  # seq_len, N, concat_hidden_dim
            att = torch.sum(att, dim=2)  # seq_len, N

        else:
            raise RuntimeError("Unsupported score_method")

        return softmax_mask(att, source_mask, dim=0)  # seq_len, N
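
For reference, a hedged shape sketch of the "dot" scoring branch above, using hypothetical sizes:

import torch

seq_len, N, dim = 7, 2, 16
target_h = torch.randn(N, dim).unsqueeze(0)   # 1, N, dim
source_hs = torch.randn(seq_len, N, dim)      # seq_len, N, dim
att = torch.mul(target_h.expand(seq_len, -1, -1), source_hs).sum(dim=2)
print(att.shape)                              # torch.Size([7, 2])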
Code example #17
    def predict_from_theta(self, theta, C):
        """
        Predict labels from a distribution over topics
        """
        if self.n_covariates > 0:
            if self.covar_emb_dim > 0:
                c_emb = self.covar_emb_layer(C)
            elif self.covar_emb_dim < 0:
                c_emb = C
            else:
                c_emb = C

        Y_recon = None
        if self.n_labels > 0:
            if self.n_covariates > 0:
                classifier_input = torch.concat([theta, c_emb], dim=1)
            else:
                classifier_input = theta
            if self.classifier_layers == 0:
                decoded_y = self.classifier_layer_0(classifier_input)
            elif self.classifier_layers == 1:
                cls0 = self.classifier_layer_0(classifier_input)
                cls0_sp = F.softplus(cls0)
                decoded_y = self.classifier_layer_1(cls0_sp)
            else:
                cls0 = self.classifier_layer_0(classifier_input)
                cls0_sp = F.softplus(cls0)
                cls1 = self.classifier_layer_1(cls0_sp)
                cls1_sp = F.softplus(cls1)
                decoded_y = self.classifier_layer_2(cls1_sp)
            Y_recon = F.softmax(decoded_y, dim=1)

        return Y_recon
Code example #18
def test_MultivariateNormalDistributionLoss(center, transformation):
    normalizer = TorchNormalizer(center=center, transformation=transformation)

    mean = torch.tensor([1.0, 1.0])
    std = torch.tensor([0.2, 0.1])
    cov_factor = torch.tensor([[0.0], [0.0]])
    n = 1000000

    loss = MultivariateNormalDistributionLoss()
    target = loss.distribution_class(loc=mean,
                                     cov_diag=std**2,
                                     cov_factor=cov_factor).sample((n, ))
    target = normalizer.inverse_preprocess(target)
    target = target[:, 0]
    normalized_target = normalizer.fit_transform(target).view(1, -1)
    target_scale = normalizer.get_parameters().unsqueeze(0)
    scale = torch.ones_like(normalized_target) * normalized_target.std()
    parameters = torch.concat(
        [
            normalized_target[..., None], scale[..., None],
            torch.zeros((1, normalized_target.size(1), loss.rank))
        ],
        dim=-1,
    )

    rescaled_parameters = loss.rescale_parameters(parameters,
                                                  target_scale=target_scale,
                                                  encoder=normalizer)
    samples = loss.sample(rescaled_parameters, 1)
    assert torch.isclose(target.mean(), samples.mean(), atol=3.0, rtol=0.5)
    if center:  # if not centered, softplus distorts std too much for testing
        assert torch.isclose(target.std(), samples.std(), atol=0.1, rtol=0.7)
Code example #19
def merge_dict(*dicts):
    """ combine list of dicts, add new keys or concat on same keys 
        assume dict only union keys only contain only 
        int/float/list/np.array/torch.tensor/dict
    """
    res = {}
    for d in dicts:
        union_d = {k:v for k, v in d.items() if k in res}
        exclusion_d = {k:v for k, v in d.items() if k not in res}
        res.update(exclusion_d)
        for k, v in union_d.items():
            if isinstance(v, (int, float)):
                res[k] += v
            elif isinstance(v, (list, tuple)):
                res[k] += v 
            elif isinstance(v, np.ndarray):
                if len(v.shape) == 0 or v.shape[0] == 1:
                    # if scalar or global to the batch, interpret as addition
                    res[k] += v
                else:
                    # otherwise interpret as concatenation along the 1st dim
                    res[k] = np.concatenate([res[k], v], 0)
            elif isinstance(v, torch.Tensor):
                if len(v.shape) == 0 or v.shape[0] == 1:
                    res[k] += v
                else:
                    res[k] = torch.concat([res[k], v], 0)
            elif isinstance(v, dict):
                res[k] = merge_dict(res[k], v)
    return res
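
A minimal usage sketch of merge_dict under the assumptions in the docstring (values are hypothetical):

import torch

a = {"count": 1, "ids": [1, 2], "feats": torch.zeros(2, 3)}
b = {"count": 2, "ids": [3], "feats": torch.ones(4, 3)}
merged = merge_dict(a, b)
# merged["count"] == 3, merged["ids"] == [1, 2, 3],
# merged["feats"].shape == torch.Size([6, 3])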
Code example #20
def message_passing(nodes,
                    edges,
                    edge_features,
                    message_fn,
                    edge_keep_prob=1.0):
    """
    Pass messages between nodes and sum the incoming messages at each node.
    Implements equations 1 and 2 in the paper, i.e. m_j^t = \sum_{i \in N(j)} f(h_i^{t-1}, h_j^{t-1})

    :param nodes: (n_nodes, n_features) tensor of node hidden states.
    :param edges: (n_edges, 2) tensor of indices (i, j) indicating an edge from nodes[i] to nodes[j].
    :param edge_features: features for each edge. Set to zero if the edges don't have features.
    :param message_fn: message function, will be called with input of shape (n_edges, 2*n_features + edge_features). The output shape is (n_edges, n_outputs), where you decide the size of n_outputs
    :param edge_keep_prob: The probability by which edges are kept. Basically dropout for edges. Not used in the paper.
    :return: (n_nodes, n_output) Sum of messages arriving at each node.
    """
    print(f"Nodes: {nodes.size()}")
    print(f"Edges: {edges.size()}")
    print(f"E-features: {edge_features}")

    n_nodes = nodes.size(0)
    n_features = nodes.size(1)
    n_edges = edges.size(0)

    # gather the (sender, receiver) node states for every edge: (n_edges, 2 * n_features)
    message_inputs = nodes[edges].view(-1, 2 * n_features)
    reshaped = torch.concat((message_inputs, edge_features), dim=1)
    messages = message_fn(reshaped)
    messages = F.dropout(messages, 1 - edge_keep_prob)

    n_output = messages.size(1)

    # sum the messages arriving at each node j = edges[:, 1]
    updates = torch.zeros(n_nodes, n_output, dtype=messages.dtype, device=messages.device)
    updates = updates.index_add(0, edges[:, 1], messages)
    return updates
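
A hedged usage sketch of message_passing on a toy graph; the sizes and the linear message function are illustrative, not from the original project:

import torch
import torch.nn as nn

nodes = torch.randn(3, 4)               # 3 nodes, 4 features each
edges = torch.tensor([[0, 2], [1, 2]])  # two edges: 0 -> 2 and 1 -> 2
edge_features = torch.zeros(2, 1)       # dummy edge features
message_fn = nn.Linear(2 * 4 + 1, 8)    # maps concatenated inputs to 8-dim messages
out = message_passing(nodes, edges, edge_features, message_fn)
print(out.shape)                        # torch.Size([3, 8])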
Code example #21
    def forward_single(self, x, idx, eval=False, upsampled_size=None):
        ins_feat = x
        cate_feat = x
        # ins branch
        # concat coord
        x_range = torch.linspace(-1, 1, ins_feat.shape[-1], device=ins_feat.device)
        y_range = torch.linspace(-1, 1, ins_feat.shape[-2], device=ins_feat.device)
        y, x = torch.meshgrid(y_range, x_range)
        y = y.expand([ins_feat.shape[0], 1, -1, -1])
        x = x.expand([ins_feat.shape[0], 1, -1, -1])
        coord_feat = torch.concat([x, y], 1)
        ins_feat = torch.cat([ins_feat, coord_feat], 1)

        for i, ins_layer in enumerate(self.ins_convs):
            ins_feat = ins_layer(ins_feat)
        ins_feat = F.interpolate(ins_feat, scale_factor=2., mode='bilinear')
        ins_pred = self.solo_ins_list[idx](ins_feat)

        # cate branch
        for i, cate_layer in enumerate(self.cate_convs):
            if i == self.cate_down_pos: # what's this? NOTE NOTE
                seg_num_grid = self.seg_num_grids[idx]
                cate_feat = F.interpolate(cate_feat, size=seg_num_grid, mode='bilinear')
            cate_feat = cate_layer(cate_feat)
        
        cate_pred = self.solo_cate(cate_feat)
        if eval:
            ins_pred = F.interpolate(ins_pred.sigmoid(), size=upsampled_size, mode='bilinear')
            cate_pred = point_nms(cate_pred.sigmoid(), kernel=2).permute(0, 2, 3, 1)    # what's this
        return ins_pred, cate_pred
Code example #22
def latent_decoder_fn(y, n_z, is_training=True):
    """The latent decoder function, modelling p(z | y).

    Args:
    y: Categorical cluster variable, `Tensor` of size `[B, n_y]`.
    n_z: int, number of dims of z.
    is_training: Boolean, whether to build the training graph or an evaluation
      graph.

    Returns:
    The Gaussian distribution `p(z | y)`.
    """
    del is_training  # Unused for now.

    if len(y.shape) != 2:
        raise NotImplementedError('The latent decoder function expects `y` to be '
                                  '2D, but its shape was %s instead.' %
                                  str(y.shape))

    lin_mu = nn.Linear(y.shape[1], n_z)
    lin_sigma = nn.Linear(y.shape[1], n_z)

    mu = lin_mu(y)
    sigma = lin_sigma(y)

    logits = torch.concat([mu, sigma], axis=1)

    return curl_utils.generate_gaussian(
        logits=logits, sigma_nonlin='softplus', sigma_param='var')
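
To make the parameterisation concrete, a small hedged sketch of the mu/sigma concatenation (sizes are hypothetical; curl_utils.generate_gaussian itself is not reproduced here):

import torch
import torch.nn as nn

y = torch.randn(8, 10)  # hypothetical batch of 8 cluster variables, n_y = 10
lin_mu, lin_sigma = nn.Linear(10, 4), nn.Linear(10, 4)  # n_z = 4
logits = torch.concat([lin_mu(y), lin_sigma(y)], dim=1)
print(logits.shape)  # torch.Size([8, 8]) -> mu and sigma stacked along the feature axis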
Code example #23
 def sample(self, x, y):
     """
     :param x:
     :param y:
     :return: res
              preds
              acc
     """
     embedding_out = self.embedding_layer(x)
     encoder_outputs = self.encoder(embedding_out)
     # prepend the begin-of-sequence token
     b = torch.ones(x.size(0), 1, dtype=x.dtype, device=x.device) * (-2)
     x = torch.concat((b, x), 1)
     embed_out = self.embedding_layer(x)
     res = []
     preds = []
     h = None
     for i in range(self.multi_num):
         out, h = self.decoder(embed_out[:, i], h, encoder_outputs)
         step_out = self.output_layer(out).squeeze()
         res.append(step_out)
         preds.append(torch.softmax(step_out, -1))
     res = torch.stack(res)
     preds = torch.stack(preds)
     acc = self.compute_accuary(preds, y)
     return res, preds, acc
Code example #24
 def extract(data_loader, features=None, labels=None):
     self.feature_net.eval()
     for i, inputs in enumerate(data_loader):
         if self.task_type == 'classification':
             inputs, label = split_data(inputs)
         elif self.task_type == 'reid':
             inputs, pids, cams = split_data(inputs)
         outputs = self.feature_net(inputs)
         if features is None:
             features = outputs
         else:
             features = torch.concat((features, outputs))
         if labels is None:
             labels = label
         else:
             labels = torch.concat((labels, label))
     return features, labels
Code example #25
    def forward(self, x1, x2):
        '''
        1. stack sentence vector similarity

        2. for layer_size
            abcnn1
            convolution
            stack sentence vector similarity
            W-ap for next loop x1, x2

        3. concatenate similarity list
            size (batch_size, layer_size + 1)

        4. Linear layer
            size (batch_size, 1)

        Parameters
        ----------
        x1, x2 : 4-D torch Tensor
            size (batch_size, 1, sentence_length, emb_dim)

        Returns
        -------
        output : 2-D torch Tensor
            size (batch_size, 1)
        '''
        # sim = []
        # sim.append(self.distance(self.ap[0](x1), self.ap[0](x2)))

        representation_x1 = self.ap[0](x1)
        representation_x2 = self.ap[0](x2)

        for i in range(self.layer_size):
            x1, x2 = self.abcnn1[i](x1, x2)
            x1 = self.conv[i](x1)
            x2 = self.conv[i](x2)
            # sim.append(self.distance(self.ap[i + 1](x1), self.ap[i + 1](x2)))
            representation_x1 = torch.concat(
                (representation_x1, self.ap[i + 1](x1)), 0)
            representation_x2 = torch.concat(
                (representation_x2, self.ap[i + 1](x2)), 0)
            x1, x2 = self.abcnn2[i](x1, x2)

        # sim_fc = torch.cat(sim, dim=1)
        # output = self.fc(sim_fc)
        return representation_x1, representation_x2
Code example #26
def fftshift(im, axis=0, name="fftshift"):
    """Perform fft shift.
    This function assumes that the axis to perform fftshift is divisible by 2.
    """
    split0, split1 = torch.split(im, 2, axis=axis)
    output = torch.concat((split1, split0), axis=axis)

    return output
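
As a hedged cross-check, for even-sized axes the result should match rolling by half the axis length:

import torch

im = torch.arange(8.).reshape(2, 4)
print(fftshift(im, axis=1))
print(torch.roll(im, shifts=im.shape[1] // 2, dims=1))  # same values for even-sized axes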
Code example #27
 def forward(self, pos_seq, nbatch=None):
     sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
     pos_embed = torch.concat(
         [sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
     if nbatch is None:
         return pos_embed[:, None, :]
     else:
         return pos_embed[:, None, :].expand(-1, nbatch, -1)
Code example #28
File: nn.py Project: sulcata/spark-rl
 def forward(self, input, sample):
     feature = self.feature(input)
     embedding = self.embedding(sample)
     if self.reduction == 'residual':
         return feature * (1 + embedding)
     if self.reduction == 'concat':
         return torch.concat((feature, embedding), dim=-1)
     return feature * embedding
Code example #29
 def tensor_indexing_ops(self):
     x = torch.randn(2, 4)
     y = torch.randn(4, 4)
     t = torch.tensor([[0, 0], [1, 0]])
     mask = x.ge(0.5)
     i = [0, 1]
     return len((
         torch.cat((x, x, x), 0),
         torch.concat((x, x, x), 0),
         torch.conj(x),
         torch.chunk(x, 2),
         torch.dsplit(torch.randn(2, 2, 4), i),
         torch.column_stack((x, x)),
         torch.dstack((x, x)),
         torch.gather(x, 0, t),
         torch.hsplit(x, i),
         torch.hstack((x, x)),
         torch.index_select(x, 0, torch.tensor([0, 1])),
         # x.index(t),  # Tensor.index is not a public op; skipped
         torch.masked_select(x, mask),
         torch.movedim(x, 1, 0),
         torch.moveaxis(x, 1, 0),
         torch.narrow(x, 0, 0, 2),
         torch.nonzero(x),
         torch.permute(x, (0, 1)),
         torch.reshape(x, (-1, )),
         torch.row_stack((x, x)),
         torch.select(x, 0, 0),
         torch.scatter(x, 0, t, x),
         x.scatter(0, t, x.clone()),
         torch.diagonal_scatter(y, torch.ones(4)),
         torch.select_scatter(y, torch.ones(4), 0, 0),
         torch.slice_scatter(x, x),
         torch.scatter_add(x, 0, t, x),
         x.scatter_(0, t, y),
         x.scatter_add_(0, t, y),
         # torch.scatter_reduce(x, 0, t, reduce="sum"),
         torch.split(x, 1),
         torch.squeeze(x, 0),
         torch.stack([x, x]),
         torch.swapaxes(x, 0, 1),
         torch.swapdims(x, 0, 1),
         torch.t(x),
         torch.take(x, t),
         torch.take_along_dim(x, torch.argmax(x)),
         torch.tensor_split(x, 1),
         torch.tensor_split(x, [0, 1]),
         torch.tile(x, (2, 2)),
         torch.transpose(x, 0, 1),
         torch.unbind(x),
         torch.unsqueeze(x, -1),
         torch.vsplit(x, i),
         torch.vstack((x, x)),
         torch.where(x),
         torch.where(t > 0, t, 0),
         torch.where(t > 0, t, t),
     ))
Code example #30
 def rescale_parameters(self, parameters: torch.Tensor,
                        target_scale: torch.Tensor,
                        encoder: BaseEstimator) -> torch.Tensor:
     self._transformation = encoder.transformation
     return torch.concat([
         parameters,
         target_scale.unsqueeze(1).expand(-1, parameters.size(1), -1)
     ],
                         dim=-1)