def decode(self, z):
    """Map a latent vector ``z`` back to the output space.

    Args:
        z: Latent tensor; ``self.dec1``'s output must have 256 elements
            per sample so it can be reshaped to (N, 256, 1, 1) —
            TODO confirm against the encoder side.

    Returns:
        Tensor of shape (N, 256, 1, 1) (dec2 output shape) with values
        squashed to (0, 1) by a sigmoid.
    """
    out = self.dec1(z)
    # Reshape to (N, 256, 1, 1) before the second decoder stage.
    out = self.dec2(out.view(-1, 256, 1, 1))
    # F.sigmoid is deprecated; Tensor.sigmoid() is the supported spelling.
    return out.sigmoid()
def forward(self, x):
    """Forward feature map of a single scale level.

    Runs the RPN head (depthwise conv -> ReLU -> pointwise/linear conv ->
    ReLU), derives a spatial attention map (SAM) from the RPN features,
    and reweights the input feature map with it.

    Args:
        x: Input feature map for one pyramid level.

    Returns:
        Tuple of (rpn_cls_score, rpn_bbox_pred, attention-weighted x).
    """
    rpn_out = self.rpn_conv_dw(x)
    rpn_out = F.relu(rpn_out, inplace=True)
    rpn_out = self.rpn_conv_linear(rpn_out)
    rpn_out = F.relu(rpn_out, inplace=True)
    # Spatial attention map in (0, 1).
    # F.sigmoid is deprecated; Tensor.sigmoid() is equivalent.
    sam = self.bn(self.sam(rpn_out)).sigmoid()
    x = x * sam
    rpn_cls_score = self.rpn_cls(rpn_out)
    rpn_bbox_pred = self.rpn_reg(rpn_out)
    return rpn_cls_score, rpn_bbox_pred, x
def forward(self, input):
    """Fuse RPN and CEM features via a spatial attention map.

    Args:
        input: Pair of one-element sequences where ``input[0][0]`` is the
            RPN feature map and ``input[1][0]`` is the CEM feature map.

    Returns:
        One-element tuple holding the CEM features reweighted by the
        sigmoid attention map computed from the RPN features.
    """
    rpn = input[0][0]
    cem = input[1][0]
    sam = self.conv1(rpn)
    sam = self.bn(sam)
    # F.sigmoid is deprecated; Tensor.sigmoid() is equivalent.
    sam = sam.sigmoid()
    out = cem * sam
    return (out,)
def forward(self, input):
    """Apply per-level spatial attention from RPN features to CEM features.

    Args:
        input: Pair ``(rpn, cem)`` of equal-length sequences of per-level
            feature maps; levels are matched positionally.

    Returns:
        Tuple of CEM feature maps, each reweighted by the sigmoid
        attention map derived from the matching RPN level.
    """
    rpn = input[0]
    cem = input[1]
    out = []
    for lvl_feat, lvl_rpn in zip(cem, rpn):
        # F.sigmoid is deprecated; Tensor.sigmoid() is equivalent.
        sam = self.bn(self.conv1(lvl_rpn)).sigmoid()
        out.append(lvl_feat * sam)
    return tuple(out)
def interpolate_batch(frames, factor):
    """Synthesize ``factor - 1`` intermediate frames between consecutive frames.

    Super SloMo-style interpolation: the ``flow`` network predicts
    bidirectional optical flow for each consecutive frame pair, intermediate
    flows are linearly approximated from it, and the ``interp`` network
    refines them and produces a visibility map used to blend the two
    backward-warped frames.

    NOTE(review): relies on module-level ``device``, ``flow``, ``interp``
    and ``back_warp`` being initialized before the call.

    Args:
        frames: Sequence of frame tensors; consecutive pairs are
            interpolated. Must contain at least two frames.
        factor: Temporal upsampling factor; ``factor - 1`` frames are
            generated between each pair.

    Returns:
        List of ``factor - 1`` batched tensors, one per intermediate time
        step, each stacking the in-between frame for every input pair.
    """
    frame0 = torch.stack(frames[:-1])
    frame1 = torch.stack(frames[1:])
    i0 = frame0.to(device)
    i1 = frame1.to(device)

    # Bidirectional flow between each consecutive pair.
    ix = torch.cat([i0, i1], dim=1)
    flow_out = flow(ix)
    f01 = flow_out[:, :2, :, :]  # flow frame0 -> frame1
    f10 = flow_out[:, 2:, :, :]  # flow frame1 -> frame0

    frame_buffer = []
    for i in range(1, factor):
        t = i / factor

        # Linear approximation of intermediate flows F_t->0 and F_t->1.
        temp = -t * (1 - t)
        co_eff = [temp, t * t, (1 - t) * (1 - t), temp]
        ft0 = co_eff[0] * f01 + co_eff[1] * f10
        ft1 = co_eff[2] * f01 + co_eff[3] * f10

        gi0ft0 = back_warp(i0, ft0)
        gi1ft1 = back_warp(i1, ft1)

        # Refinement network: flow residuals + soft visibility map.
        iy = torch.cat((i0, i1, f01, f10, ft1, ft0, gi1ft1, gi0ft0), dim=1)
        io = interp(iy)

        ft0f = io[:, :2, :, :] + ft0
        ft1f = io[:, 2:4, :, :] + ft1
        # F.sigmoid is deprecated; torch.sigmoid is the supported form.
        vt0 = torch.sigmoid(io[:, 4:5, :, :])
        vt1 = 1 - vt0

        gi0ft0f = back_warp(i0, ft0f)
        gi1ft1f = back_warp(i1, ft1f)

        # Time-weighted, visibility-weighted blend of both warped frames.
        co_eff = [1 - t, t]
        ft_p = (co_eff[0] * vt0 * gi0ft0f + co_eff[1] * vt1 * gi1ft1f) / \
               (co_eff[0] * vt0 + co_eff[1] * vt1)
        frame_buffer.append(ft_p)

    return frame_buffer
def forward(self, x1, x2, z=None): conv1_1 = self.conv1(x1) conv1_2 = self.conv1(x2) conv2_1 = self.conv2(conv1_1) conv2_2 = self.conv2(conv1_2) conv3_1 = self.conv3(conv2_1) conv3_2 = self.conv3(conv2_2) conv4_1 = self.conv4(conv3_1) conv4_2 = self.conv4(conv3_2) conv5_1 = self.conv5(conv4_1) conv5_2 = self.conv5(conv4_2) # center_1 = self.center(self.pool(conv5_1)) # center_2 = self.center(self.pool(conv5_2)) center_1 = self.center(conv5_1) center_2 = self.center(conv5_2) feature_1 = self.global_avg_pool(center_1) feature_2 = self.global_avg_pool(center_2) feature_12 = torch.cat((feature_1, feature_2), dim=1) # pdb.set_trace() attention_12 = F.sigmoid( self.mlp2(F.tanh(self.mlp1(feature_12.view(-1, 512))))).view( -1, 256, 1, 1) center_1 = center_1 * attention_12 center_2 = center_2 * attention_12 dec5_1 = self.dec5(torch.cat([center_1, conv5_1], 1)) dec5_2 = self.dec5(torch.cat([center_2, conv5_2], 1)) dec4_1 = self.dec4(torch.cat([dec5_1, conv4_1], 1)) dec4_2 = self.dec4(torch.cat([dec5_2, conv4_2], 1)) dec3_1 = self.dec3(torch.cat([dec4_1, conv3_1], 1)) dec3_2 = self.dec3(torch.cat([dec4_2, conv3_2], 1)) dec2_1 = self.dec2(torch.cat([dec3_1, conv2_1], 1)) dec2_2 = self.dec2(torch.cat([dec3_2, conv2_2], 1)) dec1_1 = self.dec1(dec2_1) dec1_2 = self.dec1(dec2_2) dec0_1 = self.dec0(dec1_1) dec0_2 = self.dec0(dec1_2) return self.final(dec0_1), self.final(dec0_2)
def decoder(self, z):
    """Decode a latent vector into an image-like tensor in (0, 1).

    Pipeline: linear layer -> reshape to (N, 256, 1, 1) -> three conv
    stages with ELU activations interleaved with two upsampling steps ->
    final conv followed by a sigmoid.

    Args:
        z: Latent tensor accepted by ``self.d1``; ``d1``'s output must
            have 256 elements per sample for the reshape.

    Returns:
        Tensor of sigmoid outputs in (0, 1).
    """
    h = F.elu(self.d1(z))
    h = h.view(h.size(0), 256, 1, 1)
    h = F.elu(self.conv1(h))
    h = self.upsample(h)
    h = F.elu(self.conv2(h))
    h = self.upsample(h)
    h = F.elu(self.conv3(h))
    h = self.conv4(h)
    # F.sigmoid is deprecated; Tensor.sigmoid() is equivalent.
    return h.sigmoid()
avg_train_loss.append(np.mean(train_epoch_loss)) self.writer.add_scalar('loss/training', avg_train_loss[-1], epoch+1) logger.info('==> Validation ..') # validation with torch.no_grad(): # turn off gradient calc model.eval() # evaluation mode y_true, y_pred = [], [] for step, (images, labels) in enumerate(progress_bar(validloader)): # one-hot encode labels labels = labels.unsqueeze(-1) targets = torch.zeros(labels.size(0), model.n_classes).scatter_(1, labels, 1.) inputs, targets = images.to(self.device), targets.to(self.device) outputs = model(inputs) loss = criterion(outputs, targets) preds = F.sigmoid(outputs) # get the sigmoid from raw logits # pred = torch.argmax(output, dim=1) y_true.extend(targets.cpu().detach().numpy().tolist()) y_pred.extend(preds.cpu().detach().numpy().tolist()) valid_loss += loss.item() valid_epoch_loss.append(loss.item()) # self.writer.add_scalar('Valid loss', loss.item(), step + 1) # ===================log======================== if (step + 1) % 50 == 0: logger.info( "step: [{}/{}], epochs: [{}/{}], validation loss: {:.4f}".format( step + 1, len(validloader), epoch + 1, self.epochs, valid_loss/50,
def forward(self, inputs):
    """Run the backbone and squash its logits to (0, 1).

    Args:
        inputs: Input tensor accepted by ``self.arch``.

    Returns:
        Element-wise sigmoid of the backbone output.
    """
    x = self.arch(inputs)
    # F.sigmoid is deprecated; Tensor.sigmoid() is equivalent.
    return x.sigmoid()
def forward(self, x):
    """Swish/SiLU activation: ``x * sigmoid(x)``.

    Args:
        x: Input tensor.

    Returns:
        Tensor of the same shape with the activation applied element-wise.
    """
    # F.sigmoid is deprecated; Tensor.sigmoid() is equivalent.
    return x * x.sigmoid()