def forward(self, kernel, search):
    """Run the stacked depth-wise correlation pipeline.

    Up to three stages are executed, controlled by ``cfg.TRAIN.STACK``.
    Each stage crops the 7x7 centre (indices 4:11) of its kernel feature,
    correlates it with a padded search feature and appends the stage
    head's output to the result list.
    """
    outputs = []

    # --- stage 1: crop first, then project the template branch ---
    z1 = self.z_conv1(kernel[:, :, 4:11, 4:11])
    x1 = F.pad(self.x_conv1(search), (2, 2, 2, 2))
    corr1 = xcorr_depthwise(x1, z1)
    outputs.append(self.head1(corr1))
    if cfg.TRAIN.STACK == 1:
        return outputs

    # --- stage 2: project the full kernel, then crop and re-project ---
    z2_full = self.z_conv2(kernel)
    z2 = self.z_conv22(z2_full[:, :, 4:11, 4:11])
    x2 = F.pad(self.x_conv2(corr1), (2, 2, 2, 2))
    corr2 = xcorr_depthwise(x2, z2)
    outputs.append(self.head2(corr2))
    if cfg.TRAIN.STACK == 2:
        return outputs

    # --- stage 3: built on the uncropped stage-2 kernel feature ---
    z3 = self.z_conv32(self.z_conv3(z2_full)[:, :, 4:11, 4:11])
    x3 = F.pad(self.x_conv3(corr2), (2, 2, 2, 2))
    outputs.append(self.head3(xcorr_depthwise(x3, z3)))
    return outputs
def forward(self, kernel, search):
    """Depth-wise correlation head using the SK adjust convolutions."""
    z = self.conv_kernel_SK(kernel)
    x = self.conv_search_SK(search)
    return self.head(xcorr_depthwise(x, z))
def forward(self, kernel, search):
    """Project both branches, correlate them depth-wise and apply the head.

    The head layers are always 1x1 convolutions (per the original note).
    """
    z = self.conv_kernel(kernel)
    x = self.conv_search(search)
    return self.head(xcorr_depthwise(x, z))
def forward(self, kernel, search):
    """Standard depth-wise correlation head: project, correlate, predict."""
    z = self.conv_kernel(kernel)
    x = self.conv_search(search)
    response = xcorr_depthwise(x, z)
    return self.head(response)
def forward(self, kernel, search):
    """Correlation head with optional DSiam feature transformations."""
    z = self.conv_kernel(kernel)
    x = self.conv_search(search)
    if self.TRANS:
        # DSiam: adapt each branch with its learned transform before
        # correlation (V/lr_v for the template, U/lr_u for the search).
        z = Trans(z, self.V, self.lr_v)
        x = Trans(x, self.U, self.lr_u)
    return self.head(xcorr_depthwise(x, z))
def forward(self, kernel, search, frame_id=None, layer_id=None, branch=None):
    """Depth-wise correlation head.

    ``frame_id``, ``layer_id`` and ``branch`` are accepted for interface
    compatibility with callers but are not used by this implementation.
    """
    z = self.conv_kernel(kernel)
    x = self.conv_search(search)
    return self.head(xcorr_depthwise(x, z))
def forward(self, kernel, search):
    """Correlation head that lazily snapshots its initial last-layer params.

    On the first call (when ``self.last_weights`` is still None) the
    current ``last_weights0`` / ``last_bias0`` tensors are cached.
    """
    z = self.conv_kernel(kernel)
    x = self.conv_search(search)
    response = self.head(xcorr_depthwise(x, z))
    if self.last_weights is None:
        # One-time cache of the initial head parameters.
        self.last_weights = self.last_weights0.data
        self.last_bias = self.last_bias0.data
    return response
def forward(self, kernel, search):
    """Correlation head with optional centerness / glide-vertex outputs.

    NOTE(review): if both flags are set, only the centerness pair is
    returned — the first matching branch wins (matches original order).
    """
    z = self.conv_kernel(kernel)
    x = self.conv_search(search)
    feat = xcorr_depthwise(x, z)
    out = self.head(feat)
    if self.center_ness:
        return out, self.get_center_ness(feat)
    if self.glide_vertex:
        return out, self.get_glide_vertex(feat), self.get_overlap_rate(feat)
    return out
def forward(self, kernel, search):
    """Multi-level attention maps from depth-wise correlation.

    ``kernel`` and ``search`` are lists of per-level features; each level
    is projected by its own conv, correlated, and passed through its
    attention module (one map of shape [B, 1, H, W] per level — per the
    original inline note).

    Bug fix: the original comprehensions bound their loop variables as
    ``self.conv_kernel_`` / ``self.conv_search_``, silently creating and
    mutating instance attributes on every call (also breaking
    re-entrancy). Plain local names are used instead; results are
    identical.
    """
    kernel = [conv(k) for conv, k in zip(self.conv_kernel, kernel)]
    search = [conv(x) for conv, x in zip(self.conv_search, search)]
    # Depth-wise correlation per level.
    feature = [xcorr_depthwise(x, z) for x, z in zip(search, kernel)]
    return [att(f) for att, f in zip(self.att_mods, feature)]
def forward(self, kernel, search):
    """Correlation head with an optional fused raw-search branch.

    ``self.fused`` selects how the raw (projected) search feature is
    combined with the correlation response: 'con' concatenates channels,
    'mod' matrix-multiplies, 'avg' adds; 'none' skips fusion entirely.
    """
    z = self.conv_kernel(kernel)
    x = self.conv_search(search)
    feat = xcorr_depthwise(x, z)
    if self.fused != 'none':
        raw = self.conv_raw(x)
        if self.fused == 'con':
            feat = torch.cat((feat, raw), dim=1)
        elif self.fused == 'mod':
            feat = torch.matmul(feat, raw)
        elif self.fused == 'avg':
            feat = feat + raw
        else:
            raise NotImplementedError()
    return self.head(feat)
def forward(self, kernel, search):
    """Multi-level offset maps from depth-wise correlation.

    ``kernel`` and ``search`` are lists of per-level features; each level
    is projected, correlated, and passed through its offset head (one
    map of shape [B, 2, H, W] per level — per the original inline note).

    Bug fix: the original comprehensions bound their loop variables as
    ``self.conv_kernel_`` / ``self.conv_search_``, silently creating and
    mutating instance attributes on every call. Plain local names are
    used instead; results are identical.
    """
    kernel = [conv(k) for conv, k in zip(self.conv_kernel, kernel)]
    search = [conv(x) for conv, x in zip(self.conv_search, search)]
    feature = [xcorr_depthwise(x, z) for x, z in zip(search, kernel)]
    return [off(f) for off, f in zip(self.offs, feature)]
def track(self, x):
    """Inference step: score search image ``x`` against the stored template.

    Assumes ``self.zf`` was set by a prior template() call — the usual
    Siamese-tracker protocol.
    """
    xf = self.backbone(x)
    if cfg.ADJUST.ADJUST:
        xf = self.neck(xf)
    if cfg.FCOS.TYPE == 'CARHead':
        # CARHead consumes a pre-correlated feature map.
        cls, cen, loc = self.fcos_head(xcorr_depthwise(xf, self.zf))
    else:
        cls, cen, loc = self.fcos_head(self.zf, xf)
    return {
        'cls': cls,
        'cen': cen,
        'loc': loc,
    }
def forward(self, data):
    """Training step: compute cls/loc/cen losses for one batch.

    Only used in training. ``data`` supplies 'template', 'search',
    'label_cls' and 'bbox' tensors (moved to GPU here).
    """
    template = data['template'].cuda()
    search = data['search'].cuda()
    label_cls = data['label_cls'].cuda()  # per-location class labels
    label_loc = data['bbox'].cuda()       # ground-truth boxes

    # Shared-backbone features for both branches.
    zf = self.backbone(template)
    xf = self.backbone(search)
    if cfg.ADJUST.ADJUST:
        zf = self.neck(zf)
        xf = self.neck(xf)

    if cfg.FCOS.TYPE == 'CARHead':
        cls, cen, loc = self.fcos_head(xcorr_depthwise(xf, zf))
    else:
        cls, cen, loc = self.fcos_head(zf, xf)

    # Map each output pixel to image coordinates (stride 8).
    locations = compute_locations(cls, 8)
    cls = self.log_softmax(cls)
    cls_loss, loc_loss, cen_loss = self.loss_evaluator(
        locations, cls, loc, cen, label_cls, label_loc)

    total = (cfg.TRAIN.CLS_WEIGHT * cls_loss
             + cfg.TRAIN.LOC_WEIGHT * loc_loss
             + cfg.TRAIN.CEN_WEIGHT * cen_loss)
    return {
        'total_loss': total,
        'cls_loss': cls_loss,
        'loc_loss': loc_loss,
        'cen_loss': cen_loss,
    }
def forward(self, template: Tensor, search: Tensor) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
    """Correlate ``search`` with ``template``; also return the score's
    position embedding."""
    response = xcorr_depthwise(search, template)
    embedding = self.position_embedding(response)
    return response, embedding
def forward(self, kernel_t, kernel_l, kernel_b, kernel_r, search):
    """Corner-style head: per-level top-left / bottom-right heatmaps and offsets.

    Each of the four side kernels (top/left/bottom/right) is projected
    per level, correlated depth-wise with the search features, then
    top+left and bottom+right responses are merged by the corner modules
    before the heatmap and offset heads.

    Returns (tl_heats, br_heats, tl_offs, br_offs), each a list with one
    tensor per level.

    Bug fixes versus the original:
    - the comprehensions bound their loop variables as
      ``self.conv_kernel_t_`` etc., silently creating/mutating instance
      attributes on every call; plain locals are used instead.
    - the correlation loop variables shadowed the ``kernel_t``/... 
      parameters; renamed for clarity.
    - removed a large block of commented-out visualization code.
    Behavior is unchanged.
    """
    zt = [conv(k) for conv, k in zip(self.conv_kernel_t, kernel_t)]
    zl = [conv(k) for conv, k in zip(self.conv_kernel_l, kernel_l)]
    zb = [conv(k) for conv, k in zip(self.conv_kernel_b, kernel_b)]
    zr = [conv(k) for conv, k in zip(self.conv_kernel_r, kernel_r)]

    # Depth-wise correlation per side, per level.
    feat_t = [xcorr_depthwise(x, z) for x, z in zip(search, zt)]
    feat_l = [xcorr_depthwise(x, z) for x, z in zip(search, zl)]
    feat_b = [xcorr_depthwise(x, z) for x, z in zip(search, zb)]
    feat_r = [xcorr_depthwise(x, z) for x, z in zip(search, zr)]

    # Merge top+left and bottom+right into corner features.
    tl_modules = [mod(t, l) for mod, t, l in zip(self.tl_modules, feat_t, feat_l)]
    br_modules = [mod(b, r) for mod, b, r in zip(self.br_modules, feat_b, feat_r)]

    tl_heats = [heat(m) for heat, m in zip(self.tl_heats, tl_modules)]
    br_heats = [heat(m) for heat, m in zip(self.br_heats, br_modules)]
    tl_offs = [off(m) for off, m in zip(self.tl_offs, tl_modules)]
    br_offs = [off(m) for off, m in zip(self.br_offs, br_modules)]
    return tl_heats, br_heats, tl_offs, br_offs