Example #1
def LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
    """
    LSTM.weight_ih_l[k] = (4*hidden_size, input_size)
    将三个gate的ih和输入的ih上下拼接合成一个矩阵weight_ih_l[k],
    所以为什么是4倍hidden_size拼接顺序无所谓
    同理 LSTM.weight_hh_l[k] = (4*hidden_size, hidden_size),也是四个hh矩阵拼接合成一个矩阵
    weight_ih =
    [ih_in
     ih_forget
     ih_cell
     ih_out
    ]
    """
    hx, cx = hidden

    gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)

    # Split gates along dim 1 into four chunks, one per gate. The order of the
    # chunks does not matter by itself: each is just a slice of the stacked
    # parameter matrix's output, and the activation applied next is what makes
    # it a specific gate. Each chunk has shape (batch, hidden_size).
    ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)

    ingate = F.sigmoid(ingate)
    forgetgate = F.sigmoid(forgetgate)
    cellgate = F.tanh(cellgate)
    outgate = F.sigmoid(outgate)

    cy = (forgetgate * cx) + (ingate * cellgate)
    hy = outgate * F.tanh(cy)

    return hy, cy
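The following is a minimal driver for the cell above, not part of the original snippet; it assumes PyTorch with torch.nn.functional imported as F in the same module, and builds random stacked weights with exactly the (4*hidden_size, input_size) and (4*hidden_size, hidden_size) layout described in the docstring.

import torch
import torch.nn.functional as F

batch, input_size, hidden_size = 2, 8, 16

# Four gate weight blocks stacked along dim 0, as described above.
w_ih = torch.randn(4 * hidden_size, input_size)
w_hh = torch.randn(4 * hidden_size, hidden_size)
b_ih = torch.zeros(4 * hidden_size)
b_hh = torch.zeros(4 * hidden_size)

x = torch.randn(batch, input_size)
h0 = torch.zeros(batch, hidden_size)
c0 = torch.zeros(batch, hidden_size)

h1, c1 = LSTMCell(x, (h0, c0), w_ih, w_hh, b_ih, b_hh)
print(h1.shape, c1.shape)  # torch.Size([2, 16]) torch.Size([2, 16])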
Example #2
    def forward(self, output, target):
        # output : nB x nA*(4+1+num_classes) x nH x nW
        t0 = time.time()
        nB, _, nH, nW = output.size()
        nA = self.num_anchors
        nC = self.num_classes

        output = output.view(nB, nA, (5 + nC), nH, nW)
        coordinate_x = F.sigmoid(
            output.index_select(2, Variable(torch.cuda.LongTensor([0]))).view(
                nB, nA, nH, nW))
        coordinate_y = F.sigmoid(
            output.index_select(2, Variable(torch.cuda.LongTensor([1]))).view(
                nB, nA, nH, nW))
        coordinate_w = output.index_select(
            2, Variable(torch.cuda.LongTensor([2]))).view(nB, nA, nH, nW)
        coordinate_h = output.index_select(
            2, Variable(torch.cuda.LongTensor([3]))).view(nB, nA, nH, nW)
        confidence = F.sigmoid(
            output.index_select(2, Variable(torch.cuda.LongTensor([4]))).view(
                nB, nA, nH, nW))
        cls = output.index_select(
            2,
            Variable(torch.linspace(5, 5 + nC - 1, nC).long().cuda()))
        cls = cls.view(nB * nA, nC, nH * nW).transpose(1, 2).contiguous().view(
            nB * nA * nH * nW, nC)
        t1 = time.time()

        pred_boxes = torch.cuda.FloatTensor(4, nB * nA * nH * nW)
        grid_x = torch.linspace(0, nW - 1,
                                nW).repeat(nB * nA, 1,
                                           1).view(nB * nA * nH * nW).cuda()
        grid_y = torch.linspace(0, nH - 1, nH).repeat(nW, 1).t().repeat(
            nB * nA, 1, 1).view(nB * nA * nH * nW).cuda()
        anchor_w = torch.Tensor(self.anchors).view(
            nA, self.anchor_step).index_select(1,
                                               torch.LongTensor([0])).cuda()
        anchor_h = torch.Tensor(self.anchors).view(
            nA, self.anchor_step).index_select(1,
                                               torch.LongTensor([1])).cuda()
        anchor_w = anchor_w.repeat(nB,
                                   1).repeat(1, 1,
                                             nH * nW).view(nB * nA * nH * nW)
        anchor_h = anchor_h.repeat(nB,
                                   1).repeat(1, 1,
                                             nH * nW).view(nB * nA * nH * nW)
        pred_boxes[0] = coordinate_x.data + grid_x
        pred_boxes[1] = coordinate_y.data + grid_y
        pred_boxes[2] = torch.exp(coordinate_w.data) * anchor_w
        pred_boxes[3] = torch.exp(coordinate_h.data) * anchor_h
        pred_boxes = convert2cpu(
            pred_boxes.transpose(0, 1).contiguous().view(-1, 4))
        t2 = time.time()

        nGT, nCorrect, coord_mask, conf_mask, cls_mask, tx, ty, tw, th, tconf, tcls = build_targets(
            pred_boxes, target.data, self.anchors, nA, nC, nH, nW,
            self.noobject_scale, self.object_scale, self.thresh, self.seen)
Example #3
 def residue_forward(self, input, conv_sigmoid, conv_tanh, skip_scale,
                     residue_scale):
     output = input
     output_sigmoid, output_tanh = conv_sigmoid(output), conv_tanh(output)
     output = F.sigmoid(output_sigmoid) * F.tanh(output_tanh)
     skip = skip_scale(output)
     output = residue_scale(output)
     output = output + input[:, :, -output.size(2):]
     return output, skip
Example #4
    def forward(self, x):

        #x = self.embedding(x)

        # x = self.one_hot_to_emb(x)

        #import pdb; pdb.set_trace()

        #x = x.permute(0,2,1)

        #x_conv_list = [F.relu(conv1d(x)) for conv1d in self.conv1_list]

        #x_pool_list = [F.max_pool1d(x_conv, kernel_size=x_conv.shape[2])
        #    for x_conv in x_conv_list]

        #x_fc = torch.cat([x_pool.squeeze(dim=2) for x_pool in x_pool_list], dim=1)

        #logits = self.fc(self.dropout(x_fc))

        #if self.num_classes == 1:
        #    logits = logits.squeeze()
        #x = self.conv1(x)
        #x = F.leaky_relu(x, negative_slope=self.lrelu_neg_slope)

        #x = self.conv2(x)
        #x = self.batch_norm2(x)
        #x = F.leaky_relu(x, negative_slope=self.lrelu_neg_slope)

        #x = self.conv3(x)
        #x = self.batch_norm3(x)
        #x = F.leaky_relu(x, negative_slope=self.lrelu_neg_slope)

        #x = torch.flatten(x)

        x = x.unsqueeze(3)

        # Encoding
        x = self.conv1(x)
        x = self.relu(x)
        x = self.pool(x)

        x = self.conv2(x)
        x = self.relu(x)
        x = self.pool(x)

        # Decoding
        x = self.t_conv1(x)
        x = self.relu(x)
        x = self.t_conv2(x)
        #x = F.relu(x)

        x = F.sigmoid(x)

        return x
Example #5
    def forward(self, x):
        x = F.leaky_relu(self.conv1(x), 0.2)
        x = F.leaky_relu(self.bn1(self.conv2(x)), 0.2)
        x = F.leaky_relu(self.bn2(self.conv3(x)), 0.2)
        x = F.leaky_relu(self.bn3(self.conv4(x)), 0.2)
        x = F.leaky_relu(self.bn4(self.conv5(x)), 0.2)
        x = F.leaky_relu(self.bn5(self.conv6(x)), 0.2)
        x = F.leaky_relu(self.bn6(self.conv7(x)), 0.2)
        x = F.leaky_relu(self.bn7(self.conv8(x)), 0.2)
        x = F.leaky_relu(self.conv9(self.flatten(x)))
        x = F.sigmoid(self.conv10(x))

        return x
Example #6
 def forward(self, x):
     if self.upsample:
         new_features = []
         for layer in self.layers:
             out = layer(x)
             x = torch.cat([x, out], 1)
             new_features.append(out)
         out = torch.cat(new_features, 1)
         fm_size = out.size()[2]
         scale_weight = F.avg_pool2d(out, fm_size)
         scale_weight = F.relu(self.SE_upsample1(scale_weight))
         scale_weight = F.sigmoid(self.SE_upsample2(scale_weight))
         out = out * scale_weight.expand_as(out)
         return out
     else:
         for layer in self.layers:
             out = layer(x)
             x = torch.cat([x, out], 1)  # 1 = channel axis
         fm_size = x.size()[2]
         scale_weight = F.avg_pool2d(x, fm_size)
         scale_weight = F.relu(self.SE1(scale_weight))
         scale_weight = F.sigmoid(self.SE2(scale_weight))
         x = x * scale_weight.expand_as(x)
         return x
Example #7
 def dec_act(self, inputs):
     if self.args.dec_act == 'tanh':
         return F.tanh(inputs)
     elif self.args.dec_act == 'elu':
         return F.elu(inputs)
     elif self.args.dec_act == 'relu':
         return F.relu(inputs)
     elif self.args.dec_act == 'selu':
         return F.selu(inputs)
     elif self.args.dec_act == 'sigmoid':
         return F.sigmoid(inputs)
     elif self.args.dec_act == 'linear':
         return inputs
     else:
         return F.elu(inputs)
Example #8
    def choose_action(self, state):  #here state is simply the current f_map
        state_tensor = torch.tensor([state]).to(self.ActorCritic.device)

        (mu, sigma), _ = self.ActorCritic.forward(state_tensor)

        actions = np.zeros((self.cell_nb, self.cell_nb))
        log_probs = np.zeros_like(actions)
        for ir, (mu_r, sig_r) in enumerate(zip(mu, sigma)):
            for ic, (mu_c, sig_c) in enumerate(zip(mu_r, sig_r)):
                #mu_c and sig_c are the mu and sigma parameter for the gaussian distribution of the current cell
                sig_c = torch.exp(sig_c)
                dist = torch.distributions.Normal(mu_c, sig_c)
                action = dist.sample()
                log_prob = dist.log_prob(action).to(self.ActorCritic.device)
                # bound the normalized transmit power between 0 and 1
                actions[ir, ic] = F.sigmoid(action).item()
                # stored for later, to calculate the actor loss
                log_probs[ir, ic] = log_prob
        self.log_probs = log_probs
        return actions
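The per-cell loop above is easier to see without the surrounding ActorCritic class. Below is a minimal, class-free sketch of the same idea; the 3x3 grid and the zero-initialized mu / log_sigma are placeholders rather than values from the original code. Each cell's action is sampled from a Gaussian, its log-probability is kept for the actor loss, and the raw sample is squashed with a sigmoid so the normalized transmit power stays in (0, 1).

import torch

# Placeholder per-cell policy parameters (stand-ins for the mu, sigma
# returned by self.ActorCritic.forward above).
mu = torch.zeros(3, 3)
log_sigma = torch.zeros(3, 3)

dist = torch.distributions.Normal(mu, torch.exp(log_sigma))
raw_actions = dist.sample()              # unbounded Gaussian samples, one per cell
log_probs = dist.log_prob(raw_actions)   # kept for the actor loss
actions = torch.sigmoid(raw_actions)     # bound each action to (0, 1)

print(actions)
print(log_probs)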
Example #9
def softwhere(input, x=0.0, y=1.0, width=1.0):
    """Soft version of `torch.where(input > 0, x, y)`

    The operation is defined as:

    .. math::

        p = sigmoid(input / width)
        out = p * x + (1 - p) * y

    Args:
        - input : Tensor
        - x : float | Tensor
        - y : float | Tensor
        - width : float | Tensor

    Returns:
        Tensor: the soft blend ``p * x + (1 - p) * y``

    Shape:
        - input : :math:`(*)`
    """
    p = fn.sigmoid(input / width)
    return p * x + (1 - p) * y
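A small usage sketch, assuming the definition above sits in a module where fn is torch.nn.functional: as width shrinks, softwhere approaches the hard torch.where(input > 0, x, y) it is meant to soften.

import torch
import torch.nn.functional as fn  # the alias used inside softwhere

inp = torch.tensor([-2.0, -0.1, 0.1, 2.0])

soft = softwhere(inp, x=0.0, y=1.0, width=0.01)
hard = torch.where(inp > 0, torch.tensor(0.0), torch.tensor(1.0))

print(soft)  # approximately [1., 1., 0., 0.]
print(hard)  # exactly       [1., 1., 0., 0.]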
Example #10
def bernoulli(mean, temp):
    # Two independent uniform samples, turned into Gumbel noise below.
    u1 = Variable(mean.data.new(mean.shape).uniform_())
    u2 = Variable(mean.data.new(mean.shape).uniform_())
    g1 = -torch.log(1e-10 - torch.log(1e-10 + u1))
    g2 = -torch.log(1e-10 - torch.log(1e-10 + u2))
    # Gumbel-sigmoid (binary Concrete) relaxation of a Bernoulli(mean) sample.
    return F.sigmoid((g1 + torch.log(1e-10 + mean) - g2 - torch.log(1e-10 + 1 - mean)) / temp)
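A short sketch of how this relaxed Bernoulli sampler can be driven, assuming the function above lives in a module that imports torch, F (torch.nn.functional) and Variable (torch.autograd): a low temperature pushes samples toward hard 0/1 while gradients still flow back into mean.

import torch

mean = torch.full((5,), 0.3, requires_grad=True)

hard_ish = bernoulli(mean, temp=0.1)   # nearly binary samples
soft_ish = bernoulli(mean, temp=5.0)   # samples smeared toward 0.5
print(hard_ish)
print(soft_ish)

hard_ish.sum().backward()              # differentiable with respect to mean
print(mean.grad)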
Example #11
 def forward(self, usr_emb):
     y = F.sigmoid(self.layer1(usr_emb))
     return y
Example #12
def eval_metrics(output, target):
    output = (F.sigmoid(output) > 0.5).float()
    return torch.norm(output - target)
Example #13
 def forward(self, x):
     x = F.elu(self.map1(x))
     x = F.elu(self.map2(x))
     return F.sigmoid(self.map3(x))
Example #14
def eval_metrics(output, target):
    output = (F.sigmoid(output) > 0.5).cpu().data.numpy()
    target = target.cpu().data.numpy()
    return np.linalg.norm(output - target)