def test(model, validloader, criterion_label, use_gpu, test_batch):
    """Evaluate a stateful (RNN-style) model on the validation set.

    Runs the model over ``validloader`` carrying hidden state across
    batches, collects the per-batch label loss, and returns the
    root-mean-square of those losses.

    :param model: network exposing ``init_hidden(batch)`` and returning
        ``(output_thetas, h1, h2, h3)`` from its forward pass
    :param validloader: iterable of ``(batch_data, labels)`` pairs
    :param criterion_label: loss applied to ``(output_thetas, labels)``
    :param use_gpu: move batches to CUDA when True
    :param test_batch: batch size used to initialize the hidden state
    :return: RMS of the per-batch label losses
    """
    batch_time = AverageMeter()
    model.eval()
    hidden01, hidden02, hidden03 = model.init_hidden(test_batch)
    test_loss_list = []
    with torch.no_grad():  # no backprop during evaluation
        for batch_idx, (batch_data, labels) in enumerate(validloader):
            if use_gpu:
                batch_data, labels = batch_data.cuda(), labels.cuda()
            end = time.time()
            output_thetas, hidden1, hidden2, hidden3 = model(
                batch_data, hidden01, hidden02, hidden03)
            # Carry the hidden state forward into the next batch.
            hidden01, hidden02, hidden03 = hidden1, hidden2, hidden3
            theta_loss = criterion_label(output_thetas, labels)
            test_loss_list.append(theta_loss)
            batch_time.update(time.time() - end)
    # Root-mean-square of the per-batch losses (typo `squre` fixed).
    square = [i**2 for i in test_loss_list]
    loss = (sum(square) / len(square))**0.5
    # BUGFIX: report the batch size this function was actually called with
    # (`test_batch` parameter) instead of the module-global `args.test_batch`,
    # which may differ from what the caller passed in.
    print("==> BatchTime(s)/BatchSize(json): {:.3f}/{}".format(
        batch_time.avg, test_batch))
    print("Results ----------")
    print("label_loss: {:.3}".format(loss))
    print("------------------")
    return loss
def _train_epoch(self, epoch):
    """Train the encoder/decoder pair for one epoch.

    The objective is a convex combination of a classification loss (CE on
    the encoder logits) and a reconstruction loss (MSE on the decoder
    output), weighted by ``self.gamma``. Epoch averages are written to
    TensorBoard and printed.

    :param epoch: current epoch index, used as the TensorBoard global step
    """
    self.encoder.train()
    self.decoder.train()
    # Running meters over the epoch.
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses_clsf = AverageMeter()
    losses_recons = AverageMeter()
    accs = AverageMeter()
    end = time.time()
    for batch_idx, (inputs, targets) in enumerate(self.train_loader):
        data_time.update(time.time() - end)
        if USE_GPU:
            inputs = inputs.cuda()
            targets = targets.cuda()
        bs = inputs.size(0)
        # === Forward
        # Encoder returns logits plus intermediate feature maps (`fms`);
        # the decoder reconstructs from the deepest map fms[3].
        outputs, fms = self.encoder(inputs, return_fm=True)
        img_recons = self.decoder(fms[3],
                                  scale_factor=self.decoder_scale_factor,
                                  out_size=self.decoder_output_size)
        if self.dsae:
            # Deeply-supervised AE variant: reconstruct an intermediate
            # feature map rather than the raw input image.
            loss_recons = self.criterion_mse(img_recons, fms[2])  # reconstruction loss
        else:
            loss_recons = self.criterion_mse(img_recons, inputs)
        losses_recons.update(loss_recons.item(), bs)
        loss_clsf = self.criterion_ce(outputs, targets)  # classification loss
        _, preds = outputs.max(dim=1)
        acc = preds.eq(targets).sum().float() / bs
        accs.update(acc.item(), bs)
        losses_clsf.update(loss_clsf.item(), bs)
        # === Backward
        # gamma balances classification vs. reconstruction.
        loss_all = self.gamma * loss_clsf + (1 - self.gamma) * loss_recons
        self.optimizer.zero_grad()
        loss_all.backward()
        # Gradient clipping to stabilize training.
        nn.utils.clip_grad_norm_(self.trainable_params, max_norm=5., norm_type=2)
        self.optimizer.step()
        # print(batch_idx, '; loss:', loss.item())
        batch_time.update(time.time() - end)
        end = time.time()
    # Release CUDA memory
    # NOTE(review): placement reconstructed from a collapsed source line —
    # empty_cache/scheduler.step are assumed to be per-epoch; confirm.
    torch.cuda.empty_cache()
    self.scheduler.step()
    acc_avg = accs.avg
    loss_c_avg = losses_clsf.avg
    loss_r_avg = losses_recons.avg
    self.writer.add_scalar('Loss/train/Classification', loss_c_avg, global_step=epoch)
    self.writer.add_scalar('Loss/train/Reconstruction', loss_r_avg, global_step=epoch)
    print(
        '-Train- Epoch: {}, Lr: {:.5f}, Time: {:.1f}s, Data: {:.1f}s, '
        'Loss(C|R): {:.4f} | {:.4f}, Acc: {:.2%}'.format(
            epoch, self.lr, batch_time.sum, data_time.sum, loss_c_avg,
            loss_r_avg, acc_avg))
def train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu):
    """Train for one epoch with weighted cross-entropy + hard-triplet losses.

    :param epoch: current epoch index (0-based; printed 1-based)
    :param model: returns ``(outputs, features)``; either may be a tuple/list
        of per-branch values handled via ``DeepSupervision``
    :param criterion_xent: classification (cross-entropy) criterion
    :param criterion_htri: hard-triplet metric criterion on features
    :param optimizer: optimizer stepped once per batch
    :param trainloader: yields ``(imgs, pids, _, _)``
    :param use_gpu: move batches to CUDA when True
    """
    xent_losses = AverageMeter()
    htri_losses = AverageMeter()
    accs = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    model.train()
    for p in model.parameters():
        p.requires_grad = True  # open all layers
    end = time.time()
    for batch_idx, (imgs, pids, _, _) in enumerate(trainloader):
        data_time.update(time.time() - end)
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()
        outputs, features = model(imgs)
        # Multi-branch outputs/features are reduced with DeepSupervision.
        if isinstance(outputs, (tuple, list)):
            xent_loss = DeepSupervision(criterion_xent, outputs, pids)
        else:
            xent_loss = criterion_xent(outputs, pids)
        if isinstance(features, (tuple, list)):
            htri_loss = DeepSupervision(criterion_htri, features, pids)
        else:
            htri_loss = criterion_htri(features, pids)
        # Loss weights come from the module-global `args`.
        loss = args.lambda_xent * xent_loss + args.lambda_htri * htri_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update(time.time() - end)
        xent_losses.update(xent_loss.item(), pids.size(0))
        htri_losses.update(htri_loss.item(), pids.size(0))
        # NOTE(review): accuracy() is fed `outputs` directly — presumably it
        # handles the tuple case itself; verify if multi-branch models are used.
        accs.update(accuracy(outputs, pids)[0])
        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}/{1}][{2}/{3}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Xent {xent.val:.4f} ({xent.avg:.4f})\t'
                  'Htri {htri.val:.4f} ({htri.avg:.4f})\t'
                  'Acc {acc.val:.2f} ({acc.avg:.2f})\t'.format(
                      epoch + 1, args.max_epoch, batch_idx + 1,
                      len(trainloader), batch_time=batch_time,
                      data_time=data_time, xent=xent_losses,
                      htri=htri_losses, acc=accs))
        end = time.time()
def test_vehicleid_formal(model, probeloader, galleryloader, train_query_loader,
                          train_gallery_loader, use_gpu, test_batch, loss_type,
                          euclidean_distance_loss, epoch, use_metric_cuhk03=False,
                          ranks=[1, 5, 10, 20], return_distmat=False):
    """Extract probe features and report CMC/mAP via ``cmc_vehicleid``.

    Only ``probeloader`` is consumed; the gallery/train loaders and the
    loss-related parameters are accepted for interface parity with the
    sibling ``test*`` functions but are unused here.

    :return: ``(rank-1 CMC, mAP)``
    """
    batch_time = AverageMeter()
    model.eval()
    with torch.no_grad():
        pf, p_pids, p_paths = [], [], []
        for batch_idx, (imgs, pids, _, _, _, paths) in enumerate(probeloader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            pf.append(features)
            p_pids.extend(pids)
            p_paths.extend(paths)
        pf = torch.cat(pf, 0)
        p_pids = np.asarray(p_pids)
        p_paths = np.asarray(p_paths)
        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(pf.size(0), pf.size(1)))
        print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
            batch_time.avg, test_batch))
        # result = {'query_f': qf.numpy(),
        #           'query_cam': q_camids, 'query_label': q_pids, 'quim_path': q_paths,
        #           'gallery_f': gf.numpy(),
        #           'gallery_cam': g_camids, 'gallery_label': g_pids, 'gaim_path': g_paths}
        # scipy.io.savemat(os.path.join(args.save_dir, 'features_' + str(60) + '.mat'), result)
        # dist_mat_dict = {'dist_mat': distmat}
        # scipy.io.savemat(os.path.join(args.save_dir, 'features_' + str(60) + '_dist.mat'), dist_mat_dict)
        print("Start computing CMC and mAP")
        start_time = time.time()
        # VehicleID protocol: repeated random gallery sampling inside the helper.
        cmc, mAP = cmc_vehicleid(pf.numpy(), p_pids, repeat=10, topk=50)
        elapsed = round(time.time() - start_time)
        elapsed = str(datetime.timedelta(seconds=elapsed))
        print("Evaluate test data time (h:m:s): {}.".format(elapsed))
        print("Test data results ----------")
        print("Epoch {} temAP: {:.2%}".format(epoch, mAP))
        print("CMC curve")
        for r in ranks:
            print("Epoch {} teRank-{:<3}: {:.2%}".format(epoch, r, cmc[r - 1]))
        print("------------------")
        return cmc[0], mAP
def test_PECvid(model, W, queryloader, galleryloader, train_query_loader,
                train_gallery_loader, test_batch, loss_type,
                euclidean_distance_loss, epoch, use_metric_cuhk03=False,
                ranks=[1, 5, 10, 20], return_distmat=False):
    """Sweep a fusion weight ``w`` over pre-computed query/gallery features.

    NOTE(review): feature extraction is commented out; features are loaded
    from './STN_qf.pkl' / './STN_gf.pkl' on disk, so `model` is only used
    for `.eval()`. The loaders are still iterated to collect pids/dirIDs in
    the same order as the saved features — this silently assumes the saved
    tensors match the current loader ordering; confirm before reuse.

    :return: ``(rank-1 CMC, mAP)`` from the LAST sweep iteration only
    """
    batch_time = AverageMeter()
    model.eval()
    qf, q_pids, q_dirIDs, q_paths = [], [], [], []
    for batch_idx, (imgs, _, pids, q_dirID) in enumerate(queryloader):
        imgs = imgs.cuda()
        end = time.time()
        # features= model(imgs)
        batch_time.update(time.time() - end)
        # features = features.data.cpu()
        # qf.append(features)
        q_pids.extend(pids)
        q_dirIDs.extend(q_dirID)
    # qf = torch.cat(qf, 0)
    # torch.save(qf,'./vggqf')
    qf = torch.load('./STN_qf.pkl')
    q_pids = np.asarray(q_pids)
    q_dirIDs = np.asarray(q_dirIDs)
    # q_paths = np.asarray(q_paths)
    print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))
    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, test_batch))
    gf, g_pids, g_dirIDs, g_paths = [], [], [], []
    for batch_idx, (imgs, _, pids, g_dirID) in enumerate(galleryloader):
        imgs = imgs.cuda()
        end = time.time()
        # features= model(imgs)
        batch_time.update(time.time() - end)
        # features = features.data.cpu()
        # gf.append(features)
        g_pids.extend(pids)
        g_dirIDs.extend(g_dirID)
    # gf = torch.cat(gf, 0)
    # torch.save(gf, './vgggf')
    gf = torch.load('./STN_gf.pkl')
    g_pids = np.asarray(g_pids)
    g_dirIDs = np.asarray(g_dirIDs)
    start_time = time.time()
    # cmc, mAP = cmc_common_oneshot_v2(qf.numpy(),part_qf, q_pids, gf.numpy(),part_gf, g_pids, repeat=1, topk=50)
    # NOTE(review): grid search over the fusion weight w = 1.000 .. 1.995 in
    # steps of 0.005 (i is shifted by +200 before dividing). Only the result
    # of the final w is returned; the `W` parameter is never used — this
    # looks like leftover experiment code, confirm intent.
    for i in range(200):
        i = i + 200
        w = i / 200
        print(str(w))
        cmc, mAP = cmc_common_oneshot_v2(w, qf.cpu(), q_dirIDs, q_pids,
                                         gf.cpu(), g_dirIDs, g_pids,
                                         repeat=1, topk=50)
    # cmc, mAP = cmc_common_oneshot_v2(0.86, qf.cpu(), q_dirIDs, q_pids, gf.cpu(), g_dirIDs, g_pids, repeat=1, topk=50)
    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    return cmc[0], mAP
def train(epoch, model, criterion_label, optimizer, trainloader, use_gpu):
    """Train a stateful (RNN-style) model for one epoch.

    Hidden state is carried across batches but detached via ``.data`` so
    gradients do not flow between batches (truncated BPTT).

    :param epoch: current epoch index (0-based; printed 1-based)
    :param model: exposes ``init_hidden(batch)``; forward returns
        ``(output_thetas, h1, h2, h3, l2_loss)``
    :param criterion_label: loss on ``(output_thetas, labels)``
    """
    losses = AverageMeter()
    batch_time = AverageMeter()
    # NOTE(review): data_time is printed below but never updated (the update
    # call is commented out) — it will always show 0.
    data_time = AverageMeter()
    model.train()
    hidden01, hidden02, hidden03 = model.init_hidden(1)
    end = time.time()
    trainloader_len = len(trainloader)
    for batch_idx, (batch_data, labels) in enumerate(trainloader):
        # data_time.update(time.time() - end)
        # Swap to (seq, batch, ...) layout expected by the model.
        batch_data = batch_data.transpose(0, 1)
        labels = labels.transpose(0, 1)
        if use_gpu:
            batch_data, labels = batch_data.cuda(), labels.cuda()
        # print("batch_data size is {}".format(batch_data.shape))  # [1,64,7]
        # print("label size is {}".format(labels.shape))  # [1,64]
        output_thetas, hidden1, hidden2, hidden3, l2_loss = model(batch_data, hidden01, hidden02, hidden03)
        theta_loss = criterion_label(output_thetas, labels)
        # Model supplies its own L2 regularization term.
        loss = theta_loss + l2_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Detach hidden state so backprop is truncated at batch boundaries.
        hidden01, hidden02, hidden03 = hidden1.data, hidden2.data, hidden3.data
        batch_time.update(time.time() - end)
        losses.update(loss.item())
        if (batch_idx + 1) % args.print_freq == 0:
            print(
                'Epoch: [{0}][{1}/{2}]\t'
                'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                    epoch + 1, batch_idx + 1, trainloader_len,
                    batch_time=batch_time, data_time=data_time, loss=losses))
        end = time.time()
def _eval_session(self, session, use_centroid=False):
    """Evaluate all sessions seen so far and print a class-weighted accuracy.

    :param session: index of the latest incremental session; sessions
        0..session are each evaluated with ``_eval_epoch``
    :param use_centroid: refresh and use memory centroids for prediction
    """
    assert len(self.eval_dataloaders) == session + 1
    if use_centroid:
        self.memory.upd_centroids()
    accuracies = []
    for sess in range(session + 1):
        eval_loader_sess = self.eval_dataloaders[sess]
        acc_sess = self._eval_epoch(epoch=None, eval_loader=eval_loader_sess, use_centroid=use_centroid)
        accuracies.append(acc_sess)
    # Weight each session's accuracy by its number of classes.
    acc_sum = AverageMeter()
    for sess in range(session + 1):
        acc = accuracies[sess]
        if sess == 0:
            # NOTE(review): base-session class count is hard-coded; should
            # presumably come from self.args.n_class — confirm and fix.
            n_cls = 60  # self.args.n_class
        else:
            n_cls = self.args.n_novel
        acc_sum.update(acc, n_cls)
    print('Session {} Evaluation. Overall Acc.: {}'.format(session, acc_sum.avg))
def evaluate(model, testloader, use_gpu, test_batch, test_mask):
    """Run inference over ``testloader`` and re-expand predictions by a mask.

    ``test_mask`` marks which positions in the full sequence had valid
    inputs; masked-out gaps are filled by averaging the neighbouring
    predictions (or 0 once predictions are exhausted).

    :param test_mask: boolean-like sequence; truthy entries consume the
        next prediction, falsy entries are interpolated
    :return: list of predictions aligned with ``test_mask``
    """
    batch_time = AverageMeter()
    model.eval()
    hidden01, hidden02, hidden03 = model.init_hidden(test_batch)
    test_theta = []
    theta_temp = []
    with torch.no_grad():  # no backprop during inference
        for batch_idx, batch_data in enumerate(testloader):
            if use_gpu:
                batch_data = batch_data.cuda()
            end = time.time()
            output_thetas, hidden1, hidden2, hidden3 = model(
                batch_data, hidden01, hidden02, hidden03)
            # Carry hidden state across batches.
            hidden01, hidden02, hidden03 = hidden1, hidden2, hidden3
            batch_time.update(time.time() - end)
            theta_temp.append(output_thetas.squeeze().item())
    # Re-align predictions with the full (masked) timeline.
    temp_index = 0
    theta_len = len(theta_temp)
    for i, _ in enumerate(test_mask):
        if _:
            test_theta.append(theta_temp[temp_index])
            temp_index += 1
        else:
            # test_theta.append(0)
            if temp_index != theta_len:
                # NOTE(review): if the very first mask entry is falsy,
                # temp_index-1 == -1 wraps to the LAST prediction — likely
                # unintended; confirm masks always start truthy.
                temp1 = theta_temp[temp_index - 1]
                temp2 = theta_temp[temp_index]
                temp = (temp1 + temp2) / 2
                test_theta.append(temp)
            else:
                # No later prediction to interpolate with; pad with 0.
                test_theta.append(0)
            continue
    return test_theta
def train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu):
    """Train for one epoch with cross-entropy and/or hard-triplet losses.

    When ``args.htri_only`` is set, only the triplet loss on features is
    used; otherwise the sum of cross-entropy (on outputs) and triplet (on
    features) is optimized. Tuple-valued outputs/features are reduced via
    ``DeepSupervision``.
    """
    loss_meter = AverageMeter()
    batch_timer = AverageMeter()
    data_timer = AverageMeter()
    model.train()

    tick = time.time()
    for step, (imgs, pids, _) in enumerate(trainloader):
        data_timer.update(time.time() - tick)
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()
        outputs, features = model(imgs)

        def _loss_for(criterion, preds):
            # Multi-head predictions are summed by DeepSupervision.
            if isinstance(preds, tuple):
                return DeepSupervision(criterion, preds, pids)
            return criterion(preds, pids)

        if args.htri_only:
            loss = _loss_for(criterion_htri, features)
        else:
            loss = _loss_for(criterion_xent, outputs) + _loss_for(criterion_htri, features)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_timer.update(time.time() - tick)
        loss_meter.update(loss.item(), pids.size(0))
        if (step + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1, step + 1, len(trainloader),
                      batch_time=batch_timer, data_time=data_timer,
                      loss=loss_meter))
        tick = time.time()
def train(epoch, model, criterion, optimizer, trainloader, use_gpu, freeze_bn=False):
    """Train ``model`` for one epoch with a single criterion.

    :param freeze_bn: keep BatchNorm layers in eval mode during training
        (also honored when ``args.freeze_bn`` is set globally)
    """
    loss_meter = AverageMeter()
    batch_timer = AverageMeter()
    data_timer = AverageMeter()
    model.train()
    # Optionally freeze BatchNorm statistics while the rest trains.
    if freeze_bn or args.freeze_bn:
        model.apply(set_bn_to_eval)

    tick = time.time()
    for step, (imgs, pids, _) in enumerate(trainloader):
        data_timer.update(time.time() - tick)
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()
        outputs = model(imgs)
        # Tuple-valued outputs (deep supervision) are summed by the helper.
        loss = (DeepSupervision(criterion, outputs, pids)
                if isinstance(outputs, tuple)
                else criterion(outputs, pids))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_timer.update(time.time() - tick)
        loss_meter.update(loss.item(), pids.size(0))
        if (step + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1, step + 1, len(trainloader),
                      batch_time=batch_timer, data_time=data_timer,
                      loss=loss_meter))
        tick = time.time()
def train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu):
    """Train for one epoch with cross-entropy and/or hard-triplet losses.

    When ``args.htri_only`` is set only the triplet loss on features is
    used; otherwise cross-entropy (outputs) + triplet (features) is
    optimized. Tuple-valued predictions use ``DeepSupervision``.
    """
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    model.train()
    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        data_time.update(time.time() - end)
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()
        outputs, features = model(imgs)
        if args.htri_only:
            # Metric loss only — classification head is ignored.
            if isinstance(features, tuple):
                loss = DeepSupervision(criterion_htri, features, pids)
            else:
                loss = criterion_htri(features, pids)
        else:
            if isinstance(outputs, tuple):
                xent_loss = DeepSupervision(criterion_xent, outputs, pids)
            else:
                xent_loss = criterion_xent(outputs, pids)
            if isinstance(features, tuple):
                htri_loss = DeepSupervision(criterion_htri, features, pids)
            else:
                htri_loss = criterion_htri(features, pids)
            loss = xent_loss + htri_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update(time.time() - end)
        losses.update(loss.item(), pids.size(0))
        if (batch_idx+1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch+1, batch_idx+1, len(trainloader),
                      batch_time=batch_time, data_time=data_time,
                      loss=losses))
        end = time.time()
def train(self, fixbase=False):
    """Train the model for one epoch over ``self.trainloader``.

    :param fixbase: Is this a fixbase epoch? (NOTE(review): currently
        unused in the body — kept for interface compatibility.)
    :return: average loss over the epoch (``losses.avg``)
    """
    losses = AverageMeter()
    accs = AverageMeter()
    accs_atts = AverageMeter()
    self.model.train()
    for batch_idx, (imgs, labels, _) in enumerate(self.trainloader):
        if self.use_gpu:
            imgs, labels = imgs.cuda(), labels.cuda()
        outputs = self.model(imgs)
        loss = self.criterion(outputs, labels)
        self.optimizer.zero_grad()
        loss.backward()
        # Clip gradients to stabilize training.
        nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=10.0)
        self.optimizer.step()
        losses.update(loss.item(), labels.size(0))
        # Per-sample and per-attribute accuracy from the criterion's logits.
        acc, acc_atts = accuracy(self.criterion.logits(outputs), labels)
        accs.update(acc)
        accs_atts.update(acc_atts)
        if (batch_idx + 1) % args.print_freq == 0:
            # BUGFIX: the identical print statement was duplicated
            # back-to-back in the original; emit it once per interval.
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Loss {loss.avg:.4f}'.format(self.epoch + 1, batch_idx + 1,
                                               len(self.trainloader),
                                               loss=losses))
    return losses.avg
def _eval_epoch(self, epoch):
    """Evaluate encoder/decoder on ``self.eval_loader`` for one epoch.

    Computes classification loss/accuracy on encoder logits and
    reconstruction loss on the decoder output, logs the averages to
    TensorBoard, and returns the average accuracy.

    :param epoch: epoch index used as the TensorBoard global step
    :return: average accuracy over the evaluation set
    """
    self.encoder.eval()
    self.decoder.eval()
    accs = AverageMeter()
    losses_clsf = AverageMeter()
    losses_recons = AverageMeter()
    # BUGFIX: evaluation does not backprop — run under no_grad so autograd
    # graphs are not built and retained for every batch.
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(self.eval_loader):
            # print('inputs:', inputs.shape)
            bs = inputs.size(0)
            if USE_GPU:
                inputs = inputs.cuda()
                targets = targets.cuda()
            outputs, fms = self.encoder(inputs, return_fm=True)
            # print('fm4:', fms[3].shape, '; fm3:', fms[2].shape)
            loss_clsf = self.criterion_ce(outputs, targets)
            _, preds = outputs.max(dim=1)
            acc = preds.eq(targets).sum().float() / bs
            accs.update(acc.item(), bs)
            losses_clsf.update(loss_clsf.item(), bs)
            img_recons = self.decoder(fms[3],
                                      scale_factor=self.decoder_scale_factor,
                                      out_size=self.decoder_output_size)
            # print('img recon shape:', img_recons.shape)
            if self.dsae:
                loss_recons = self.criterion_mse(img_recons, fms[2])  # reconstruction loss
            else:
                loss_recons = self.criterion_mse(img_recons, inputs)  # reconstruction loss
            # BUGFIX: record the scalar, not the tensor (was
            # `losses_recons.update(loss_recons, bs)`), consistent with the
            # other meters and the training loop.
            losses_recons.update(loss_recons.item(), bs)
    acc_avg = accs.avg
    loss_c_avg = losses_clsf.avg
    loss_r_avg = losses_recons.avg
    self.writer.add_scalar('Loss/eval/Classification', loss_c_avg, global_step=epoch)
    self.writer.add_scalar('Loss/eval/Reconstruction', loss_r_avg, global_step=epoch)
    self.writer.add_scalar('Accuracy/eval', acc_avg, global_step=epoch)
    print('-Eval- Epoch: {}, Loss(C|R): {:.4f} | {:.4f}, Accuracy: {:.2%}'.format(
        epoch, loss_c_avg, loss_r_avg, acc_avg))
    return acc_avg
def _eval_epoch(self, epoch, eval_loader, use_centroid=False):
    """Evaluate encoder/memory/decoder on a given loader.

    Predictions and the classification loss come from the memory module;
    reconstruction loss from the decoder. TensorBoard logging is skipped
    when ``epoch`` is None (e.g. when called per-session).

    :param epoch: global step for TensorBoard, or None to skip logging
    :param eval_loader: loader yielding ``(inputs, targets)``
    :param use_centroid: make the memory module predict via centroids
    :return: average accuracy
    """
    self.encoder.eval()
    self.memory.eval()
    self.decoder.eval()
    accs = AverageMeter()
    losses_clsf = AverageMeter()
    losses_recons = AverageMeter()
    for batch_idx, (inputs, targets) in enumerate(eval_loader):
        if USE_GPU:
            inputs = inputs.cuda()
            targets = targets.cuda()
        outputs, fms = self.encoder(inputs, return_fm=True)
        # print('outputs:', outputs.shape)
        # Decoder reconstructs from the deepest feature map fms[3].
        img_recons = self.decoder(fms[3],
                                  scale_factor=self.decoder_scale_factor,
                                  out_size=self.decoder_output_size)
        if self.dsae:
            loss_recons = self.mse_loss(img_recons, fms[2])  # reconstruction loss
        else:
            loss_recons = self.mse_loss(img_recons, inputs)
        # Memory module yields both predictions and its own loss.
        preds, loss_memory = self.memory(outputs, targets, use_centroid=use_centroid)
        losses_clsf.update(loss_memory.item(), targets.size(0))
        losses_recons.update(loss_recons.item(), targets.size(0))
        acc = preds.eq(targets).sum().float() / targets.size(0)
        accs.update(acc.item(), targets.size(0))
    acc_avg = accs.avg
    loss_c_avg = losses_clsf.avg
    loss_r_avg = losses_recons.avg
    if epoch is not None:
        self.writer.add_scalar('Loss/eval/Classification', loss_c_avg, global_step=epoch)
        self.writer.add_scalar('Loss/eval/Reconstruction', loss_r_avg, global_step=epoch)
        self.writer.add_scalar('Accuracy/eval', acc_avg, global_step=epoch)
    print('-Eval- Epoch: {}, Loss(C|R): {:.4f} | {:.4f}, Accuracy: {:.2%}'.format(
        epoch, loss_c_avg, loss_r_avg, acc_avg))
    return acc_avg
def train(epoch, model, criterion, optimizer, trainloader, use_gpu, freeze_bn=False):
    """Train ``model`` for one epoch with a single criterion.

    :param freeze_bn: keep BatchNorm layers in eval mode during training
        (also honored when ``args.freeze_bn`` is set globally)
    """
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    model.train()
    # Optionally freeze BatchNorm running statistics.
    if freeze_bn or args.freeze_bn:
        model.apply(set_bn_to_eval)
    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        data_time.update(time.time() - end)
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()
        outputs = model(imgs)
        # Tuple-valued outputs (deep supervision) are summed by the helper.
        if isinstance(outputs, tuple):
            loss = DeepSupervision(criterion, outputs, pids)
        else:
            loss = criterion(outputs, pids)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update(time.time() - end)
        losses.update(loss.item(), pids.size(0))
        if (batch_idx+1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch+1, batch_idx+1, len(trainloader),
                      batch_time=batch_time, data_time=data_time,
                      loss=losses))
        end = time.time()
def test(model, queryloader, galleryloader, train_query_loader,
         train_gallery_loader, use_gpu, test_batch, loss_type,
         euclidean_distance_loss, epoch, use_metric_cuhk03=False,
         ranks=[1, 5, 10, 20], return_distmat=False):
    """Evaluate re-ID performance on train and test query/gallery splits.

    Extracts features for the train query/gallery loaders, computes a
    distance matrix (squared Euclidean or negative cosine depending on
    ``loss_type``), reports CMC/mAP, then repeats the same procedure for
    the test query/gallery loaders.

    :param loss_type: selects the distance; must be in
        ``euclidean_distance_loss`` or equal ``'angle'``
    :param return_distmat: return the test distance matrix instead of metrics
    :return: ``(rank-1 CMC, mAP)`` on the test split, or ``distmat``
    """
    batch_time = AverageMeter()
    model.eval()
    # ---- Train split: feature extraction ----
    with torch.no_grad():
        tqf, tq_pids, tq_camids = [], [], []
        for batch_idx, (imgs, pids, _, _, camids, _) in enumerate(train_query_loader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            tqf.append(features)
            tq_pids.extend(pids)
            tq_camids.extend(camids)
        tqf = torch.cat(tqf, 0)
        tq_pids = np.asarray(tq_pids)
        tq_camids = np.asarray(tq_camids)
        print(
            "Extracted features for train_query set, obtained {}-by-{} matrix".
            format(tqf.size(0), tqf.size(1)))
        print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
            batch_time.avg, test_batch))
        tgf, tg_pids, tg_camids = [], [], []
        for batch_idx, (imgs, pids, _, _, camids, _) in enumerate(train_gallery_loader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            tgf.append(features)
            tg_pids.extend(pids)
            tg_camids.extend(camids)
        tgf = torch.cat(tgf, 0)
        tg_pids = np.asarray(tg_pids)
        tg_camids = np.asarray(tg_camids)
        print(
            "Extracted features for train_gallery set, obtained {}-by-{} matrix"
            .format(tgf.size(0), tgf.size(1)))
        print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
            batch_time.avg, test_batch))
    print("Start compute distmat.")
    if loss_type in euclidean_distance_loss:
        # Squared Euclidean: ||q||^2 + ||g||^2 - 2 q.g
        m, n = tqf.size(0), tgf.size(0)
        distmat = torch.pow(tqf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
            torch.pow(tgf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        # NOTE(review): positional (beta, alpha, mat1, mat2) addmm_ signature
        # is deprecated in newer PyTorch — keyword beta=1, alpha=-2 preferred.
        distmat.addmm_(1, -2, tqf, tgf.t())
        distmat = distmat.numpy()
    elif loss_type == 'angle':
        # Negative cosine similarity as a distance.
        tvec_dot = torch.matmul(tqf, tgf.t())
        tqf_len = tqf.norm(dim=1, keepdim=True)
        tgf_len = tgf.norm(dim=1, keepdim=True)
        tvec_len = torch.matmul(tqf_len, tgf_len.t()) + 1e-5
        distmat = -torch.div(tvec_dot, tvec_len).numpy()
    else:
        raise KeyError("Unsupported loss: {}".format(loss_type))
    print("Compute distmat done.")
    print("distmat shape:", distmat.shape)
    print("Start computing CMC and mAP")
    start_time = time.time()
    cmc, mAP = evaluate(distmat, tq_pids, tg_pids, tq_camids, tg_camids,
                        use_metric_cuhk03=use_metric_cuhk03, use_cython=False)
    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Evaluate train data time (h:m:s): {}.".format(elapsed))
    print("Train data results ----------")
    print("Epoch {} trmAP: {:.2%}".format(epoch, mAP))
    print("CMC curve")
    for r in ranks:
        print("Epoch {} trRank-{:<3}: {:.2%}".format(epoch, r, cmc[r - 1]))
    print("------------------")
    # ---- Test split: feature extraction ----
    with torch.no_grad():
        qf, q_pids, q_camids, q_paths = [], [], [], []
        for batch_idx, (imgs, pids, _, _, camids, paths) in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
            q_paths.extend(paths)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        q_paths = np.asarray(q_paths)
        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))
        print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
            batch_time.avg, test_batch))
        gf, g_pids, g_camids, g_paths = [], [], [], []
        for batch_idx, (imgs, pids, _, _, camids, paths) in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
            g_paths.extend(paths)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        g_paths = np.asarray(g_paths)
        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))
        print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
            batch_time.avg, test_batch))
    print("Start compute distmat.")
    if loss_type in euclidean_distance_loss:
        m, n = qf.size(0), gf.size(0)
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
            torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        distmat.addmm_(1, -2, qf, gf.t())
        distmat = distmat.numpy()
    elif loss_type == 'angle':
        vec_dot = torch.matmul(qf, gf.t())
        qf_len = qf.norm(dim=1, keepdim=True)
        gf_len = gf.norm(dim=1, keepdim=True)
        vec_len = torch.matmul(qf_len, gf_len.t()) + 1e-5
        distmat = -torch.div(vec_dot, vec_len).numpy()
    else:
        raise KeyError("Unsupported loss: {}".format(loss_type))
    print("Compute distmat done.")
    print("distmat shape:", distmat.shape)
    # result = {'query_f': qf.numpy(),
    #           'query_cam': q_camids, 'query_label': q_pids, 'quim_path': q_paths,
    #           'gallery_f': gf.numpy(),
    #           'gallery_cam': g_camids, 'gallery_label': g_pids, 'gaim_path': g_paths}
    # scipy.io.savemat(os.path.join(args.save_dir, 'features_' + str(60) + '.mat'), result)
    # dist_mat_dict = {'dist_mat': distmat}
    # scipy.io.savemat(os.path.join(args.save_dir, 'features_' + str(60) + '_dist.mat'), dist_mat_dict)
    print("Start computing CMC and mAP")
    start_time = time.time()
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids,
                        use_metric_cuhk03=use_metric_cuhk03, use_cython=False)
    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Evaluate test data time (h:m:s): {}.".format(elapsed))
    print("Test data results ----------")
    print("Epoch {} temAP: {:.2%}".format(epoch, mAP))
    print("CMC curve")
    for r in ranks:
        print("Epoch {} teRank-{:<3}: {:.2%}".format(epoch, r, cmc[r - 1]))
    print("------------------")
    if return_distmat:
        return distmat
    return cmc[0], mAP
def train(self, epoch, max_epoch, writer, print_freq=10, fixbase_epoch=0, open_layers=None):
    """Train ``self.model`` for one epoch, with optional layer freezing.

    During the first ``fixbase_epoch`` epochs only ``open_layers`` are
    trained; afterwards all layers are opened. Progress (with an ETA
    estimate) is printed every ``print_freq`` batches and scalars are
    written to ``writer``.

    :param epoch: current epoch index (0-based)
    :param max_epoch: total number of epochs, used for the ETA estimate
    :param writer: TensorBoard SummaryWriter or None
    """
    losses = AverageMeter()
    accs = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    self.model.train()
    if (epoch + 1) <= fixbase_epoch and open_layers is not None:
        print('* Only train {} (epoch: {}/{})'.format(
            open_layers, epoch + 1, fixbase_epoch))
        open_specified_layers(self.model, open_layers)
    else:
        open_all_layers(self.model)
    num_batches = len(self.train_loader)
    end = time.time()
    for batch_idx, data in enumerate(self.train_loader):
        data_time.update(time.time() - end)
        imgs, pids = self._parse_data_for_train(data)
        if self.use_gpu:
            imgs = imgs.cuda()
            pids = pids.cuda()
        self.optimizer.zero_grad()
        outputs = self.model(imgs)
        loss = self._compute_loss(self.criterion, outputs, pids)
        loss.backward()
        self.optimizer.step()
        batch_time.update(time.time() - end)
        losses.update(loss.item(), pids.size(0))
        accs.update(accuracy(outputs, pids)[0].item())
        if (batch_idx + 1) % print_freq == 0:
            # estimate remaining time
            eta_seconds = batch_time.avg * (num_batches - (batch_idx + 1) +
                                            (max_epoch - (epoch + 1)) * num_batches)
            eta_str = str(datetime.timedelta(seconds=int(eta_seconds)))
            print('Epoch: [{0}/{1}][{2}/{3}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Acc {acc.val:.2f} ({acc.avg:.2f})\t'
                  'Lr {lr:.6f}\t'
                  'eta {eta}'.format(
                      epoch + 1, max_epoch, batch_idx + 1, num_batches,
                      batch_time=batch_time, data_time=data_time,
                      loss=losses, acc=accs,
                      lr=self.optimizer.param_groups[0]['lr'], eta=eta_str))
        if writer is not None:
            n_iter = epoch * num_batches + batch_idx
            writer.add_scalar('Train/Time', batch_time.avg, n_iter)
            writer.add_scalar('Train/Data', data_time.avg, n_iter)
            writer.add_scalar('Train/Loss', losses.avg, n_iter)
            writer.add_scalar('Train/Acc', accs.avg, n_iter)
            writer.add_scalar('Train/Lr', self.optimizer.param_groups[0]['lr'], n_iter)
        end = time.time()
    # Per-epoch LR schedule update.
    if self.scheduler is not None:
        self.scheduler.step()
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):
    """Standard re-ID evaluation: extract features, compute Euclidean
    distances, and report CMC/mAP.

    :param queryloader: yields ``(imgs, pids, camids)``
    :param galleryloader: yields ``(imgs, pids, camids)``
    :param ranks: CMC ranks to print
    :return: rank-1 CMC
    """
    batch_time = AverageMeter()
    model.eval()
    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))
        gf, g_pids, g_camids = [], [], []
        # NOTE(review): this `end` is immediately overwritten inside the loop
        # below — it is dead code.
        end = time.time()
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))
    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch))
    # Squared Euclidean distance matrix: ||q||^2 + ||g||^2 - 2 q.g
    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
        torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(1, -2, qf, gf.t())
    distmat = distmat.numpy()
    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)
    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r-1]))
    print("------------------")
    return cmc[0]
def testCATfeature(model1, model2, queryloader, galleryloader, test_batch,
                   loss_type, euclidean_distance_loss, epoch,
                   use_metric_cuhk03=False, ranks=[1, 5, 10, 20],
                   return_distmat=False):
    """Evaluate with features concatenated from two models.

    Each loader yields two image views; ``model1`` consumes the first and
    ``model2`` the second, and their feature vectors are concatenated
    before distance computation.

    :param loss_type: selects the distance; must be in
        ``euclidean_distance_loss`` or equal ``'angle'``
    :param return_distmat: return the distance matrix instead of metrics
    :return: ``(rank-1 CMC, mAP)`` or ``distmat``
    """
    batch_time = AverageMeter()
    model1.eval()
    model2.eval()
    with torch.no_grad():
        tqf, tq_pids, tq_camids = [], [], []
        for batch_idx, (imgs1, imgs2, _, pids, camids) in enumerate(queryloader):
            # NOTE(review): Variable() is a no-op legacy wrapper in modern
            # PyTorch; plain .cuda() (as in the gallery loop) suffices.
            imgs1 = Variable(imgs1.cuda())
            imgs2 = Variable(imgs2.cuda())
            end = time.time()
            features1 = model1(imgs1)
            features2 = model2(imgs2)
            # Concatenate the two models' embeddings along the feature dim.
            features = torch.cat((features1, features2), 1)
            #features = features1
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            tqf.append(features)
            tq_pids.extend(pids)
            tq_camids.extend(camids)
        tqf = torch.cat(tqf, 0)
        tq_pids = np.asarray(tq_pids)
        tq_camids = np.asarray(tq_camids)
        print(
            "Extracted features for train_query set, obtained {}-by-{} matrix".
            format(tqf.size(0), tqf.size(1)))
        print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
            batch_time.avg, test_batch))
        tgf, tg_pids, tg_camids = [], [], []
        for batch_idx, (imgs1, imgs2, _, pids, camids) in enumerate(galleryloader):
            imgs1 = imgs1.cuda()
            imgs2 = imgs2.cuda()
            end = time.time()
            features1 = model1(imgs1)
            features2 = model2(imgs2)
            features = torch.cat((features1, features2), 1)
            #features=features1
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            tgf.append(features)
            tg_pids.extend(pids)
            tg_camids.extend(camids)
        tgf = torch.cat(tgf, 0)
        tg_pids = np.asarray(tg_pids)
        tg_camids = np.asarray(tg_camids)
        print(
            "Extracted features for train_gallery set, obtained {}-by-{} matrix"
            .format(tgf.size(0), tgf.size(1)))
        print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
            batch_time.avg, test_batch))
    print("Start compute distmat.")
    if loss_type in euclidean_distance_loss:
        # Squared Euclidean: ||q||^2 + ||g||^2 - 2 q.g
        m, n = tqf.size(0), tgf.size(0)
        distmat = torch.pow(tqf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
            torch.pow(tgf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        distmat.addmm_(1, -2, tqf, tgf.t())
        distmat = distmat.numpy()
    elif loss_type == 'angle':
        # Negative cosine similarity as a distance.
        tvec_dot = torch.matmul(tqf, tgf.t())
        tqf_len = tqf.norm(dim=1, keepdim=True)
        tgf_len = tgf.norm(dim=1, keepdim=True)
        tvec_len = torch.matmul(tqf_len, tgf_len.t()) + 1e-5
        distmat = -torch.div(tvec_dot, tvec_len).numpy()
    else:
        raise KeyError("Unsupported loss: {}".format(loss_type))
    print("Compute distmat done.")
    print("distmat shape:", distmat.shape)
    print("Start computing CMC and mAP")
    start_time = time.time()
    cmc, mAP = evaluate(distmat, tq_pids, tg_pids, tq_camids, tg_camids,
                        use_metric_cuhk03=use_metric_cuhk03, use_cython=False)
    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Evaluate train data time (h:m:s): {}.".format(elapsed))
    print("Train data results ----------")
    print("Epoch {} trmAP: {:.2%}".format(epoch, mAP))
    print("CMC curve")
    for r in ranks:
        print("Epoch {} trRank-{:<3}: {:.2%}".format(epoch, r, cmc[r - 1]))
    print("------------------")
    if return_distmat:
        return distmat
    return cmc[0], mAP
def train(epoch, model, criterion_xent, criterion_htri, criterion_mask,
          criterion_split, criterion_cluster, optimizer, trainloader, use_gpu):
    """Run one training epoch combining five objectives.

    Losses: cross-entropy (xent) + hard-triplet (htri), plus mask, split,
    appearance and distance losses, weighted by args.alpha0..alpha3.
    Prints a one-line summary of all running averages at the end.
    """
    # Running averages for per-epoch logging.
    xent_losses = AverageMeter()
    htri_losses = AverageMeter()
    mask_losses = AverageMeter()
    split_losses = AverageMeter()
    appearance_losses = AverageMeter()
    distance_losses = AverageMeter()
    accs = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()
    end = time.time()
    for batch_idx, (imgs, fore_masks, pids, _, _, split_param2, split_param3,
                    _) in enumerate(trainloader):
        # Split params index part boundaries; cast for CE-style criteria.
        split_param2 = split_param2.long()
        split_param3 = split_param3.long()
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()
            fore_masks = fore_masks.cuda()
            split_param2 = split_param2.cuda()  #[bs, 4]
            split_param3 = split_param3.cuda()  #[bs, 4]

        # measure data loading time
        data_time.update(time.time() - end)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward; model returns classification logits, embedding features,
        # two foreground-attention maps, two part logits and two
        # (appearance, distance) pair lists — one per branch.
        outputs, features, a_fore, part_logit, distance_list = model(imgs)

        # combine hard triplet loss with cross entropy loss
        xent_loss = criterion_xent(outputs, pids)
        htri_loss = criterion_htri(features, pids)
        loss = xent_loss + htri_loss

        # Each auxiliary loss averages the two branches' terms.
        mask_loss1 = criterion_mask(a_fore[0], fore_masks)
        mask_loss2 = criterion_mask(a_fore[1], fore_masks)
        mask_loss = (mask_loss1 + mask_loss2) * 0.5

        split_loss1 = criterion_split(part_logit[0], split_param2)
        split_loss2 = criterion_split(part_logit[1], split_param3)
        split_loss = (split_loss1 + split_loss2) * 0.5

        appearance_loss1 = criterion_cluster(distance_list[0][0])
        appearance_loss2 = criterion_cluster(distance_list[1][0])
        appearance_loss = (appearance_loss1 + appearance_loss2) * 0.5

        distance_loss1 = criterion_cluster(distance_list[0][1])
        distance_loss2 = criterion_cluster(distance_list[1][1])
        distance_loss = (distance_loss1 + distance_loss2) * 0.5

        # Weighted sum; alpha0..alpha3 come from the module-level `args`.
        total_loss = loss + args.alpha0 * mask_loss + args.alpha1 * split_loss + \
            args.alpha2 * appearance_loss + args.alpha3 * distance_loss

        # backward + optimize
        total_loss.backward()
        optimizer.step()

        # statistics
        _, preds = torch.max(outputs.data, 1)
        accs.update(
            torch.sum(preds == pids.data).float() / pids.size(0),
            pids.size(0))
        xent_losses.update(xent_loss.item(), pids.size(0))
        htri_losses.update(htri_loss.item(), pids.size(0))
        mask_losses.update(mask_loss.item(), pids.size(0))
        split_losses.update(split_loss.item(), pids.size(0))
        appearance_losses.update(appearance_loss.item(), pids.size(0))
        distance_losses.update(distance_loss.item(), pids.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

    print('Epoch{0} '
          'Time:{batch_time.sum:.1f}s '
          'Data:{data_time.sum:.1f}s '
          'xentLoss:{xent_loss.avg:.4f} '
          'triLoss:{tri_loss.avg:.4f} '
          'MaskLoss:{mask_loss.avg:.4f} '
          'SplitLoss:{split_loss.avg:.4f} '
          'AppearanceLoss:{appearance_loss.avg:.4f} '
          'DistanceLoss:{distance_loss.avg:.4f} '
          'Acc:{acc.avg:.2%} '.format(epoch + 1,
                                      batch_time=batch_time,
                                      data_time=data_time,
                                      xent_loss=xent_losses,
                                      tri_loss=htri_losses,
                                      mask_loss=mask_losses,
                                      split_loss=split_losses,
                                      appearance_loss=appearance_losses,
                                      distance_loss=distance_losses,
                                      acc=accs))
def _train_epoch(self, epoch, lr, train_loader, memory_replay=None, use_reparam=False):
    """Train encoder/memory/decoder for one epoch, optionally replaying
    synthesized samples for previously seen classes (incremental learning).

    Args:
        epoch: epoch index, used for TensorBoard logging and the summary line.
        lr: current learning rate (logging only).
        train_loader: yields (inputs, targets); a 5-D inputs tensor is
            treated as a single episode of batch size 1 and squeezed.
        memory_replay: optional (m_inputs, m_targets) pair of stored class
            keys used to synthesize replay batches; None disables replay.
        use_reparam: if True, synthesize replay features by Gaussian
            re-parameterization instead of random convex combinations.
    """
    self.encoder.train()
    self.memory.train()
    self.decoder.train()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses_clsf = AverageMeter()
    losses_recons = AverageMeter()
    accs = AverageMeter()
    end = time.time()
    for batch_idx, (inputs, targets) in enumerate(train_loader):
        # print('targets:', len(targets), targets[targets > 54])
        data_time.update(time.time() - end)
        if USE_GPU:
            inputs = inputs.cuda()
            targets = targets.cuda()
        bs = inputs.shape[0]
        if len(inputs.shape) == 5:  # episode, batch size = 1
            inputs = inputs.squeeze(0)  # [25, 3, 84, 84]
            targets = targets.squeeze(0)
            bs = inputs.shape[0]
        # === Encoder & Decoder forward
        outputs, fms = self.encoder(inputs, return_fm=True)
        img_recons = self.decoder(fms[3],
                                  scale_factor=self.decoder_scale_factor,
                                  out_size=self.decoder_output_size)
        if self.dsae:
            # Deep stacked AE: reconstruct an intermediate feature map.
            loss_recons = self.mse_loss(img_recons, fms[2])  # reconstruction loss
        else:
            loss_recons = self.mse_loss(img_recons, inputs)
        # === MemoryK forward
        # loss_e = torch.tensor(0.)
        preds, loss_memory = self.memory(outputs, targets)
        acc = preds.eq(targets).sum().float() / bs
        accs.update(acc.item(), bs)
        # Joint objective: gamma balances memory/classification vs recon.
        loss_all = self.gamma * loss_memory + (1 - self.gamma) * loss_recons
        self.optimizer_all.zero_grad()
        loss_all.backward()
        self.optimizer_all.step()
        if batch_idx % 90 == 0:
            print(batch_idx, '; memory loss:', loss_memory.item(),
                  '; decoder loss:', loss_recons.item())
        losses_clsf.update(loss_memory.item(), bs)
        losses_recons.update(loss_recons.item(), bs)
        batch_time.update(time.time() - end)
        end = time.time()

    # memory_replay = None
    if memory_replay is not None:
        # Build a synthetic replay set under no_grad (feature synthesis
        # only); the training pass on it runs with grad below.
        with torch.no_grad():
            m_inputs, m_targets = memory_replay  # [nonempty, key_dim]
            # m_inputs = m_inputs.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, 3, 3)  # [nonempty, key_dim, 3, 3]
            m_inputs_aug = []
            m_targets_aug = []
            # 60 base classes + n_novel per completed incremental session.
            n_classes = 60 + self.args.n_novel * (self.cur_session - 1)
            rand_C_classes = np.random.choice(n_classes, self.args.n_novel,
                                              replace=False)
            for v in rand_C_classes:
                # rand_C_classes: # range(n_classes) for all classes
                m_inputs_v = m_inputs[torch.eq(m_targets, v)]
                # print('m_inputs_v:', m_inputs_v.shape)
                if use_reparam:  # re-parameterize
                    m_mean_v = m_inputs_v.mean(dim=0)
                    m_std_v = m_inputs_v.std(dim=0)
                    for i in range(self.args.n_shot * 2):
                        v_aug = torch.normal(mean=m_mean_v, std=m_std_v)
                        # print('v_aug:', v_aug.shape)
                        m_inputs_aug.append(v_aug)
                        m_targets_aug.append(v)
                else:  # random sample: convex combination of 3 stored keys
                    n_v = m_inputs_v.size(0)
                    if n_v == 0:
                        continue
                    for i in range(self.args.n_shot):
                        rand_idxs = np.random.choice(n_v, 3, replace=True)
                        # L1-normalized random weights -> convex combination.
                        rand_w = F.normalize(torch.rand([3]), p=1, dim=0)
                        v_aug = (rand_w[0] * m_inputs_v[rand_idxs[0]] +
                                 rand_w[1] * m_inputs_v[rand_idxs[1]] +
                                 rand_w[2] * m_inputs_v[rand_idxs[2]])
                        m_inputs_aug.append(v_aug)
                        m_targets_aug.append(v)
            m_inputs = torch.stack(m_inputs_aug, dim=0)
            # Tile key vectors into a [N, key_dim, 3, 3] map for the decoder.
            m_inputs = m_inputs.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, 3, 3)
            if self.need_norm:
                m_inputs = F.normalize(m_inputs, p=2, dim=1)
            m_targets = torch.tensor(m_targets_aug, dtype=torch.long)
            # Shuffle
            sfl_idxs = torch.randperm(m_inputs.size(0))
            m_inputs = m_inputs[sfl_idxs]
            m_targets = m_targets[sfl_idxs]
            print('Memory replay size:', m_inputs.size(0))
            # Decode stored keys back into image space for replay training.
            m_inputs = self.decoder(m_inputs,
                                    scale_factor=self.decoder_scale_factor,
                                    out_size=self.decoder_output_size)
            batch_size = 128
            n_sample = m_targets.size(0)
            n_batch = math.ceil(n_sample / batch_size)
            inputs = m_inputs.chunk(chunks=n_batch, dim=0)
            targets = m_targets.chunk(chunks=n_batch, dim=0)
            print('After chunk, inputs:', inputs[0].shape,
                  '; targets:', targets[0].shape)
            m_train_loader = list(zip(inputs, targets))
        # Replay training pass (outside no_grad — backward() needs graphs).
        # NOTE(review): replay batches do not update `accs`/meters, so the
        # printed epoch summary reflects the main loader only.
        for batch_idx, (inputs, targets) in enumerate(m_train_loader):
            data_time.update(time.time() - end)
            if USE_GPU:
                inputs = inputs.cuda()
                targets = targets.cuda()
            # === Encoder & Decoder forward
            outputs = self.encoder(inputs, return_fm=False, feed_fm=self.dsae)
            img_recons = self.decoder(outputs,
                                      scale_factor=self.decoder_scale_factor,
                                      out_size=self.decoder_output_size)
            loss_recons = self.mse_loss(img_recons, inputs)
            # === MemoryK forward (memory slots frozen during replay)
            preds, loss_memory = self.memory(outputs, targets, upd_memory=False)
            loss_all = self.gamma * loss_memory + (1 - self.gamma) * loss_recons
            self.optimizer_all.zero_grad()
            loss_all.backward()
            self.optimizer_all.step()

    acc_avg = accs.avg
    loss_c_avg = losses_clsf.avg
    loss_r_avg = losses_recons.avg
    self.writer.add_scalar('Loss/train/Classification', loss_c_avg,
                           global_step=epoch)
    self.writer.add_scalar('Loss/train/Reconstruction', loss_r_avg,
                           global_step=epoch)
    print(
        '-Train- Epoch: {}, Lr: {:.6f}, Time: {:.1f}s, Data: {:.1f}s, '
        'Loss(C|R): {:.4f} | {:.4f}, Acc: {:.2%}'.format(
            epoch, lr, batch_time.sum, data_time.sum, loss_c_avg,
            loss_r_avg, acc_avg))
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20], return_distmat=False):
    """Extract query/gallery features, compute the euclidean distance
    matrix and report CMC/mAP.

    Args:
        model: feature extractor (forced to eval mode).
        queryloader, galleryloader: yield (imgs, pids, camids) batches.
        use_gpu: move images to CUDA before the forward pass.
        ranks: CMC ranks to print.
        return_distmat: if True return the distance matrix, else (cmc, mAP).

    Returns:
        ``distmat`` (numpy) if ``return_distmat`` else ``(cmc, mAP)``.
    """
    batch_time = AverageMeter()
    model.eval()

    with torch.no_grad():
        query_process = ShowProcess(len(queryloader), "Done")
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            query_process.show_process()
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))

        gallery_process = ShowProcess(len(galleryloader), "Done")
        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            gallery_process.show_process()
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch))

    # Squared euclidean distance: ||q||^2 + ||g||^2 - 2 * q.g
    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # Fix: positional addmm_(1, -2, a, b) is deprecated and removed in
    # torch >= 1.5; beta/alpha must be keywords.
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    # NOTE(review): `eval` shadows the builtin here — presumably a
    # project-level evaluation helper imported elsewhere in this file;
    # confirm and consider renaming it.
    cmc, mAP = eval(distmat, q_pids, g_pids, q_camids, g_camids)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    if return_distmat:
        return distmat
    return cmc, mAP
def main():
    """Train a re-ID model on the VeRi-span (noise) dataset with GMS-based
    positive/negative mining, evaluating every other epoch.

    All configuration is hard-coded below (paths, hyper-parameters).
    """
    #GENERAL
    torch.cuda.empty_cache()
    root = "/home/kuru/Desktop/veri-gms-master_noise/"
    train_dir = '/home/kuru/Desktop/veri-gms-master_noise/VeRispan/image_train/'
    source = {'verispan'}
    target = {'verispan'}
    workers = 4
    height = 280
    width = 280
    train_size = 32
    train_sampler = 'RandomSampler'
    #AUGMENTATION
    random_erase = True
    jitter = True
    aug = True
    #OPTIMIZATION
    opt = 'adam'
    lr = 0.0003
    weight_decay = 5e-4
    momentum = 0.9
    sgd_damp = 0.0
    nesterov = True
    warmup_factor = 0.01
    warmup_method = 'linear'
    #HYPERPARAMETER
    max_epoch = 80
    start = 0
    train_batch_size = 8
    test_batch_size = 100
    #SCHEDULER
    lr_scheduler = 'multi_step'
    stepsize = [30, 60]
    gamma = 0.1
    #LOSS
    margin = 0.3
    num_instances = 4
    lambda_tri = 1
    #MODEL
    #arch = 'resnet101'
    arch = 'resnet101_ibn_a'
    no_pretrained = False
    #TEST SETTINGS
    load_weights = '/home/kuru/Desktop/veri-gms-master/IBN-Net_pytorch0.4.1/resnet101_ibn_a.pth'
    #load_weights = None
    start_eval = 0
    eval_freq = -1
    #MISC
    use_gpu = True
    print_freq = 10
    seed = 1
    resume = ''
    save_dir = '/home/kuru/Desktop/veri-gms-master_noise/spanningtree_verinoise_101_stride2/'
    gpu_id = 0, 1
    vis_rank = True
    query_remove = True
    evaluate = False

    dataset_kwargs = {
        'source_names': source,
        'target_names': target,
        'root': root,
        'height': height,
        'width': width,
        'train_batch_size': train_batch_size,
        'test_batch_size': test_batch_size,
        'train_sampler': train_sampler,
        'random_erase': random_erase,
        'color_jitter': jitter,
        'color_aug': aug
    }
    transform_kwargs = {
        'height': height,
        'width': width,
        'random_erase': random_erase,
        'color_jitter': jitter,
        'color_aug': aug
    }
    optimizer_kwargs = {
        'optim': opt,
        'lr': lr,
        'weight_decay': weight_decay,
        'momentum': momentum,
        'sgd_dampening': sgd_damp,
        'sgd_nesterov': nesterov
    }
    lr_scheduler_kwargs = {
        'lr_scheduler': lr_scheduler,
        'stepsize': stepsize,
        'gamma': gamma
    }

    use_gpu = torch.cuda.is_available()
    log_name = 'log_test.txt' if evaluate else 'log_train.txt'
    sys.stdout = Logger(osp.join(save_dir, log_name))
    print('Currently using GPU ', gpu_id)
    cudnn.benchmark = True

    print('Initializing image data manager')
    dataset = init_imgreid_dataset(
        root='/home/kuru/Desktop/veri-gms-master_noise/', name='verispan')
    train = []
    num_train_pids = 0
    num_train_cams = 0
    print(len(dataset.train))

    # Re-key each training sample by its (relative) path and GMS folder id.
    for img_path, pid, camid, subid, countid in dataset.train:
        path = img_path[56 + 6:90 + 6]
        folder = path[1:4]
        pid += num_train_pids
        train.append((path, folder, pid, camid, subid, countid))
    num_train_pids += dataset.num_train_pids
    num_train_cams += dataset.num_train_cams

    # Map GMS folder name -> pid (last sample of each folder wins).
    pid = 0
    pidx = {}
    for img_path, pid, camid, subid, countid in dataset.train:
        path = img_path[56 + 6:90 + 6]
        folder = path[1:4]
        pidx[folder] = pid
        pid += 1

    # Build a globally unique id (`newid`) per image: per-pid sub ids are
    # offset by the running maximum over all previous pids.
    sub = []
    final = 0
    newids = []
    print(train[0:2])
    train2 = {}
    for k in range(0, 770):
        for img_path, pid, camid, subid, countid in dataset.train:
            if k == pid:
                newid = final + subid
                sub.append(newid)
                newids.append(newid)
                train2[img_path] = newid
        final = max(sub)
    print(len(newids), final)

    train3 = []
    for img_path, pid, camid, subid, countid in dataset.train:
        path = img_path[56:90 + 6]
        folder = path[1:4]
        newid = train2[img_path]
        train3.append((path, folder, pid, camid, subid, countid, newid))
    train = train3

    # Load every per-class GMS match matrix pickle into `pkl`.
    path = '/home/kuru/Desktop/adhi/veri-final-draft-master_noise/gmsNoise776/'
    pkl = {}
    entries = os.listdir(path)
    for name in entries:
        if name == 'featureMatrix.pkl':
            s = name[0:13]
        else:
            s = name[0:3]
        # Fix: the original did `f = open(...); pkl[s] = pickle.load(f);
        # f.close` — `f.close` without parentheses never closes the file,
        # leaking one descriptor per pickle. Use a context manager.
        with open(path + name, 'rb') as f:
            pkl[s] = pickle.load(f)

    # NOTE(review): `b` and `c` are loaded but never used below — presumably
    # kept as a sanity check that the pickles exist; confirm.
    with open('cids.pkl', 'rb') as handle:
        b = pickle.load(handle)
    with open('index.pkl', 'rb') as handle:
        c = pickle.load(handle)

    transform_t = train_transforms(**transform_kwargs)
    data_tfr = vdspan(
        pkl_file='index_veryspan_noise.pkl',
        dataset=train,
        root_dir='/home/kuru/Desktop/veri-gms-master_noise/VeRispan/image_train/',
        transform=transform_t)
    print("lllllllllllllllllllllllllllllllllllllllllllline 433")

    # Keep only samples whose class has more than 4 images.
    df2 = []
    data_tfr_old = data_tfr
    for (img, label, index, pid, cid, subid, countid, newid) in data_tfr:
        if countid > 4:
            df2.append((img, label, index, pid, cid, subid, countid, newid))
    print("filtered final trainset length", len(df2))
    data_tfr = df2

    trainloader = DataLoader(data_tfr,
                             sampler=None,
                             batch_size=train_batch_size,
                             shuffle=True,
                             num_workers=workers,
                             pin_memory=True,
                             drop_last=True)
    # Smoke-test the loader with a single batch.
    for batch_idx, (img, label, index, pid, cid, subid, countid,
                    newid) in enumerate(trainloader):
        print("trainloader", batch_idx, (label))
        break

    print('Initializing test data manager')
    dm = ImageDataManager(use_gpu, **dataset_kwargs)
    testloader_dict = dm.return_dataloaders()

    print('Initializing model: {}'.format(arch))
    model = models.init_model(name=arch,
                              num_classes=num_train_pids,
                              loss={'xent', 'htri'},
                              pretrained=not no_pretrained,
                              last_stride=2)
    print('Model size: {:.3f} M'.format(count_num_param(model)))

    if load_weights is not None:
        print("weights loaded")
        load_pretrained_weights(model, load_weights)

    print(torch.cuda.device_count())
    model = nn.DataParallel(model).cuda() if use_gpu else model
    optimizer = init_optimizer(model, **optimizer_kwargs)
    scheduler = init_lr_scheduler(optimizer, **lr_scheduler_kwargs)

    criterion_xent = CrossEntropyLoss(num_classes=num_train_pids,
                                      use_gpu=use_gpu,
                                      label_smooth=True)
    criterion_htri = TripletLoss(margin=margin)
    ranking_loss = nn.MarginRankingLoss(margin=margin)

    if evaluate:
        print('Evaluate only')
        for name in target:
            print('Evaluating {} ...'.format(name))
            queryloader = testloader_dict[name]['query']
            galleryloader = testloader_dict[name]['gallery']
            _, distmat = test(model, queryloader, galleryloader,
                              train_batch_size, use_gpu, return_distmat=True)
            if vis_rank:
                visualize_ranked_results(
                    distmat,
                    dm.return_testdataset_by_name(name),
                    save_dir=osp.join(save_dir, 'ranked_results', name),
                    topk=20)
        return

    time_start = time.time()
    ranklogger = RankLogger(source, target)
    print('=> Start training')

    data_index = search(pkl)
    print(len(data_index))

    for epoch in range(start, max_epoch):
        losses = AverageMeter()
        htri_losses = AverageMeter()
        accs = AverageMeter()
        batch_time = AverageMeter()
        xent_losses = AverageMeter()

        model.train()
        for p in model.parameters():
            p.requires_grad = True  # open all layers

        end = time.time()
        for batch_idx, (img, label, index, pid, cid, subid, countid,
                        newid) in enumerate(trainloader):
            # Build a triplet batch: [anchors | GMS positives | negatives].
            trainX, trainY = torch.zeros(
                (train_batch_size * 3, 3, height, width),
                dtype=torch.float32), torch.zeros((train_batch_size * 3),
                                                  dtype=torch.int64)
            for i in range(train_batch_size):
                labelx = label[i]
                indexx = index[i]
                cidx = pid[i]
                if indexx > len(pkl[labelx]) - 1:
                    indexx = len(pkl[labelx]) - 1

                # Positive: image with the smallest non-zero GMS distance.
                a = pkl[labelx][indexx]
                minpos = np.argmin(ma.masked_where(a == 0, a))
                pos_dic = data_tfr_old[data_index[cidx][1] + minpos]

                # Negative: random different class that exists on disk.
                # Fix: original used `is not int(labelx)` (identity test on
                # ints — unreliable) and `... is True`; use != and the bare
                # boolean instead.
                neg_label = int(labelx)
                while True:
                    neg_label = random.choice(range(1, 770))
                    if neg_label != int(labelx) and os.path.isdir(
                            os.path.join(
                                '/home/kuru/Desktop/adiusb/veri-split/train',
                                strint(neg_label))):
                        break
                negative_label = strint(neg_label)
                neg_cid = pidx[negative_label]
                neg_index = random.choice(range(0, len(pkl[negative_label])))
                neg_dic = data_tfr_old[data_index[neg_cid][1] + neg_index]

                trainX[i] = img[i]
                trainX[i + train_batch_size] = pos_dic[0]
                trainX[i + (train_batch_size * 2)] = neg_dic[0]
                trainY[i] = cidx
                trainY[i + train_batch_size] = pos_dic[3]
                trainY[i + (train_batch_size * 2)] = neg_dic[3]

            trainX = trainX.cuda()
            trainY = trainY.cuda()
            outputs, features = model(trainX)
            # Cross-entropy on anchors only; triplet on the full batch.
            xent_loss = criterion_xent(outputs[0:train_batch_size],
                                       trainY[0:train_batch_size])
            htri_loss = criterion_htri(features, trainY)
            loss = htri_loss + xent_loss

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            batch_time.update(time.time() - end)
            losses.update(loss.item(), trainY.size(0))
            htri_losses.update(htri_loss.item(), trainY.size(0))
            xent_losses.update(xent_loss.item(), trainY.size(0))
            accs.update(
                accuracy(outputs[0:train_batch_size],
                         trainY[0:train_batch_size])[0])

            if (batch_idx) % 50 == 0:
                print('Train ', end=" ")
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'TriLoss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'XLoss {xloss.val:.4f} ({xloss.avg:.4f})\t'
                      'OveralLoss {oloss.val:.4f} ({oloss.avg:.4f})\t'
                      'Acc {acc.val:.2f} ({acc.avg:.2f})\t'
                      'lr {lrrr} \t'.format(
                          epoch + 1,
                          batch_idx + 1,
                          len(trainloader),
                          batch_time=batch_time,
                          loss=htri_losses,
                          xloss=xent_losses,
                          oloss=losses,
                          acc=accs,
                          # Fix: `lrrr` was referenced but never defined
                          # (NameError on the first logged batch); read the
                          # live LR from the optimizer instead.
                          lrrr=optimizer.param_groups[0]['lr'],
                      ))
            end = time.time()

        scheduler.step()

        print('=> Test')
        for name in target:
            print('Evaluating {} ...'.format(name))
            queryloader = testloader_dict[name]['query']
            galleryloader = testloader_dict[name]['gallery']
            rank1, distmat = test(model, queryloader, galleryloader,
                                  test_batch_size, use_gpu)
            ranklogger.write(name, epoch + 1, rank1)
            rank2, distmat2 = test_rerank(model, queryloader, galleryloader,
                                          test_batch_size, use_gpu)
            ranklogger.write(name, epoch + 1, rank2)

        #if (epoch + 1) == max_epoch:
        if (epoch + 1) % 2 == 0:
            print('=> Test')
            for name in target:
                print('Evaluating {} ...'.format(name))
                queryloader = testloader_dict[name]['query']
                galleryloader = testloader_dict[name]['gallery']
                rank1, distmat = test(model, queryloader, galleryloader,
                                      test_batch_size, use_gpu)
                ranklogger.write(name, epoch + 1, rank1)
                # if vis_rank:
                #     visualize_ranked_results(
                #         distmat, dm.return_testdataset_by_name(name),
                #         save_dir=osp.join(save_dir, 'ranked_results', name),
                #         topk=20)
            save_checkpoint({
                'state_dict': model.state_dict(),
                'rank1': rank1,
                'epoch': epoch + 1,
                'arch': arch,
                'optimizer': optimizer.state_dict(),
            }, save_dir)
def run(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20], return_distmat=False):
    """Extract features for both sets and return the raw distance data.

    Unlike ``test``, this does not compute CMC/mAP; it returns everything
    the caller needs to do so: ``(distmat, q_pids, g_pids, q_camids,
    g_camids)``. ``ranks``/``return_distmat`` are kept for interface
    compatibility but unused.
    """
    batch_time = AverageMeter()
    model.eval()

    def _extract(loader):
        # Shared feature-extraction loop for query and gallery sets.
        feats, all_pids, all_camids = [], [], []
        for _batch_idx, (imgs, pids, camids, _) in enumerate(loader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            feats.append(features.data.cpu())
            all_pids.extend(pids)
            all_camids.extend(camids)
        return torch.cat(feats, 0), np.asarray(all_pids), np.asarray(all_camids)

    with torch.no_grad():
        qf, q_pids, q_camids = _extract(queryloader)
        print('Extracted features for query set, obtained {}-by-{} matrix'.
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = _extract(galleryloader)
        print('Extracted features for gallery set, obtained {}-by-{} matrix'.
              format(gf.size(0), gf.size(1)))

    print('=> BatchTime(s)/BatchSize(img): {:.3f}/{}'.format(
        batch_time.avg, args.test_batch_size))

    # Squared euclidean distance: ||q||^2 + ||g||^2 - 2 * q.g
    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
        torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # Fix: positional addmm_(1, -2, a, b) is deprecated and removed in
    # torch >= 1.5; beta/alpha must be keywords.
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    return distmat, q_pids, g_pids, q_camids, g_camids
def main():
    """Train a re-ID model on VeRi with GMS-based mining and mixed-precision
    training via apex ``amp``; evaluates and checkpoints at the final epoch.

    All configuration is hard-coded below (paths, hyper-parameters).
    """
    #GENERAL
    root = "/home/kuru/Desktop/veri-gms-master/"
    train_dir = '/home/kuru/Desktop/veri-gms-master/VeRispan/image_train/'
    source = {'veri'}
    target = {'veri'}
    workers = 2
    height = 320
    width = 320
    train_sampler = 'RandomSampler'
    #AUGMENTATION
    random_erase = True
    jitter = True
    aug = True
    #OPTIMIZATION
    opt = 'adam'
    lr = 0.0003
    weight_decay = 5e-4
    momentum = 0.9
    sgd_damp = 0.0
    nesterov = True
    warmup_factor = 0.01
    warmup_method = 'linear'
    #HYPERPARAMETER
    max_epoch = 80
    start = 0
    train_batch_size = 16
    test_batch_size = 50
    #SCHEDULER
    lr_scheduler = 'multi_step'
    stepsize = [30, 60]
    gamma = 0.1
    #LOSS
    margin = 0.3
    num_instances = 6
    lambda_tri = 1
    #MODEL
    arch = 'resnet101_ibn_a'
    no_pretrained = False
    #TEST SETTINGS
    load_weights = '/home/kuru/Desktop/veri-gms-master/IBN-Net_pytorch0.4.1/resnet101_ibn_a.pth'
    #load_weights = None
    start_eval = 0
    eval_freq = -1
    #MISC
    use_gpu = True
    use_amp = True
    print_freq = 50
    seed = 1
    resume = ''
    save_dir = '/home/kuru/Desktop/veri-gms-master/logapex/'
    gpu_id = 0
    vis_rank = True
    query_remove = True
    evaluate = False

    dataset_kwargs = {
        'source_names': source,
        'target_names': target,
        'root': root,
        'height': height,
        'width': width,
        'train_batch_size': train_batch_size,
        'test_batch_size': test_batch_size,
        'train_sampler': train_sampler,
        'random_erase': random_erase,
        'color_jitter': jitter,
        'color_aug': aug
    }
    transform_kwargs = {
        'height': height,
        'width': width,
        'random_erase': random_erase,
        'color_jitter': jitter,
        'color_aug': aug
    }
    optimizer_kwargs = {
        'optim': opt,
        'lr': lr,
        'weight_decay': weight_decay,
        'momentum': momentum,
        'sgd_dampening': sgd_damp,
        'sgd_nesterov': nesterov
    }
    lr_scheduler_kwargs = {
        'lr_scheduler': lr_scheduler,
        'stepsize': stepsize,
        'gamma': gamma
    }

    use_gpu = torch.cuda.is_available()
    log_name = 'log_test.txt' if evaluate else 'log_train.txt'
    sys.stdout = Logger(osp.join(save_dir, log_name))
    print('Currently using GPU ', gpu_id)
    cudnn.benchmark = True

    print('Initializing image data manager')
    dataset = init_imgreid_dataset(
        root='/home/kuru/Desktop/veri-gms-master/', name='veri')
    train = []
    num_train_pids = 0
    num_train_cams = 0

    # Re-key each training sample by its (relative) path and GMS folder id.
    for img_path, pid, camid in dataset.train:
        path = img_path[52:77]
        folder = path[1:4]
        pid += num_train_pids
        camid += num_train_cams
        train.append((path, folder, pid, camid))
    num_train_pids += dataset.num_train_pids
    num_train_cams += dataset.num_train_cams

    # Map GMS folder name -> pid (last sample of each folder wins).
    pid = 0
    pidx = {}
    for img_path, pid, camid in dataset.train:
        path = img_path[52:77]
        folder = path[1:4]
        pidx[folder] = pid
        pid += 1

    # Load every per-class GMS match matrix pickle into `pkl`.
    path = '/home/kuru/Desktop/veri-gms-master/gms/'
    pkl = {}
    entries = os.listdir(path)
    for name in entries:
        if name == 'featureMatrix.pkl':
            s = name[0:13]
        else:
            s = name[0:3]
        # Fix: the original did `f = open(...); ...; f.close` — without
        # parentheses the file is never closed (one fd leaked per pickle).
        with open(path + name, 'rb') as f:
            pkl[s] = pickle.load(f)

    transform_t = train_transforms(**transform_kwargs)
    data_tfr = vd(
        pkl_file='index.pkl',
        dataset=train,
        root_dir='/home/kuru/Desktop/veri-gms-master/VeRi/image_train/',
        transform=transform_t)
    trainloader = DataLoader(data_tfr,
                             sampler=None,
                             batch_size=train_batch_size,
                             shuffle=True,
                             num_workers=workers,
                             pin_memory=False,
                             drop_last=True)

    print('Initializing test data manager')
    dm = ImageDataManager(use_gpu, **dataset_kwargs)
    testloader_dict = dm.return_dataloaders()

    print('Initializing model: {}'.format(arch))
    model = models.init_model(name=arch,
                              num_classes=num_train_pids,
                              loss={'xent', 'htri'},
                              last_stride=1,
                              pretrained=not no_pretrained,
                              use_gpu=use_gpu)
    print('Model size: {:.3f} M'.format(count_num_param(model)))

    if load_weights is not None:
        print("weights loaded")
        load_pretrained_weights(model, load_weights)

    # amp.initialize must see the bare (non-DataParallel) CUDA model.
    model = (model).cuda() if use_gpu else model
    optimizer = init_optimizer(model, **optimizer_kwargs)
    model, optimizer = amp.initialize(model,
                                      optimizer,
                                      opt_level="O2",
                                      keep_batchnorm_fp32=True,
                                      loss_scale="dynamic")
    model = nn.DataParallel(model).cuda() if use_gpu else model
    scheduler = init_lr_scheduler(optimizer, **lr_scheduler_kwargs)

    criterion_xent = CrossEntropyLoss(num_classes=num_train_pids,
                                      use_gpu=use_gpu,
                                      label_smooth=True)
    criterion_htri = TripletLoss(margin=margin)
    ranking_loss = nn.MarginRankingLoss(margin=margin)

    if evaluate:
        print('Evaluate only')
        for name in target:
            print('Evaluating {} ...'.format(name))
            queryloader = testloader_dict[name]['query']
            galleryloader = testloader_dict[name]['gallery']
            _, distmat = test(model, queryloader, galleryloader,
                              train_batch_size, use_gpu, return_distmat=True)
            if vis_rank:
                visualize_ranked_results(
                    distmat,
                    dm.return_testdataset_by_name(name),
                    save_dir=osp.join(save_dir, 'ranked_results', name),
                    topk=20)
        return

    time_start = time.time()
    ranklogger = RankLogger(source, target)
    print('=> Start training')

    data_index = search(pkl)
    for epoch in range(start, max_epoch):
        losses = AverageMeter()
        htri_losses = AverageMeter()
        accs = AverageMeter()
        batch_time = AverageMeter()

        model.train()
        for p in model.parameters():
            p.requires_grad = True  # open all layers

        end = time.time()
        for batch_idx, (img, label, index, pid, cid) in enumerate(trainloader):
            # Build a triplet batch: [anchors | GMS positives | negatives].
            trainX, trainY = torch.zeros(
                (train_batch_size * 3, 3, height, width),
                dtype=torch.float32), torch.zeros((train_batch_size * 3),
                                                  dtype=torch.int64)
            for i in range(train_batch_size):
                labelx = str(label[i])
                indexx = int(index[i])
                cidx = int(pid[i])
                if indexx > len(pkl[labelx]) - 1:
                    indexx = len(pkl[labelx]) - 1

                # NOTE(review): despite the name `minpos`, this takes the
                # argmax of the masked GMS row (the sibling script uses
                # argmin) — confirm which extreme is the intended positive.
                a = pkl[labelx][indexx]
                minpos = np.argmax(ma.masked_where(a == 0, a))
                pos_dic = data_tfr[data_index[cidx][1] + minpos]

                # Negative: random different class that exists on disk.
                # Fix: original used `is not int(labelx)` (identity test on
                # ints — unreliable) and `... is True`; use != and the bare
                # boolean instead.
                neg_label = int(labelx)
                while True:
                    neg_label = random.choice(range(1, 770))
                    if neg_label != int(labelx) and os.path.isdir(
                            os.path.join(
                                '/home/kuru/Desktop/adiusb/veri-split/train',
                                strint(neg_label))):
                        break
                negative_label = strint(neg_label)
                neg_cid = pidx[negative_label]
                neg_index = random.choice(range(0, len(pkl[negative_label])))
                neg_dic = data_tfr[data_index[neg_cid][1] + neg_index]

                trainX[i] = img[i]
                trainX[i + train_batch_size] = pos_dic[0]
                trainX[i + (train_batch_size * 2)] = neg_dic[0]
                trainY[i] = cidx
                trainY[i + train_batch_size] = pos_dic[3]
                trainY[i + (train_batch_size * 2)] = neg_dic[3]

            trainX = trainX.cuda()
            trainY = trainY.cuda()
            outputs, features = model(trainX)
            # Cross-entropy on anchors only; triplet on the full batch.
            xent_loss = criterion_xent(outputs[0:train_batch_size],
                                       trainY[0:train_batch_size])
            htri_loss = criterion_htri(features, trainY)
            loss = htri_loss + xent_loss

            optimizer.zero_grad()
            if use_amp:
                # Scale the loss for mixed-precision backward.
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            optimizer.step()

            batch_time.update(time.time() - end)
            losses.update(loss.item(), trainY.size(0))
            htri_losses.update(htri_loss.item(), trainY.size(0))
            accs.update(
                accuracy(outputs[0:train_batch_size],
                         trainY[0:train_batch_size])[0])

            if (batch_idx) % print_freq == 0:
                print('Train ', end=" ")
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Acc {acc.val:.2f} ({acc.avg:.2f})\t'.format(
                          epoch + 1,
                          batch_idx + 1,
                          len(trainloader),
                          batch_time=batch_time,
                          loss=htri_losses,
                          acc=accs))
            end = time.time()

        scheduler.step()

        print('=> Test')
        for name in target:
            print('Evaluating {} ...'.format(name))
            queryloader = testloader_dict[name]['query']
            galleryloader = testloader_dict[name]['gallery']
            rank1, distmat = test(model, queryloader, galleryloader,
                                  test_batch_size, use_gpu)
            ranklogger.write(name, epoch + 1, rank1)
            rank2, distmat2 = test_rerank(model, queryloader, galleryloader,
                                          test_batch_size, use_gpu)
            ranklogger.write(name, epoch + 1, rank2)
            del queryloader
            del galleryloader
            # Fix: the original also did `del distmat` here, which made the
            # final-epoch `visualize_ranked_results(distmat, ...)` below
            # raise NameError; keep distmat alive and just release cache.
            torch.cuda.empty_cache()

        if (epoch + 1) == max_epoch:
            #if (epoch + 1) % 10 == 0:
            print('=> Test')
            save_checkpoint(
                {
                    'state_dict': model.state_dict(),
                    'rank1': rank1,
                    'epoch': epoch + 1,
                    'arch': arch,
                    'optimizer': optimizer.state_dict(),
                }, save_dir)
            if vis_rank:
                visualize_ranked_results(
                    distmat,
                    dm.return_testdataset_by_name(name),
                    save_dir=osp.join(save_dir, 'ranked_results', name),
                    topk=20)
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):
    """Extract query/gallery features, compute the euclidean distance
    matrix and report CMC/mAP; returns the rank-1 score.

    Args:
        model: feature extractor (forced to eval mode).
        queryloader, galleryloader: yield (imgs, pids, camids) batches.
        use_gpu: move images to CUDA before the forward pass.
        ranks: CMC ranks to print.

    Returns:
        cmc[0] — the rank-1 matching rate.
    """
    batch_time = AverageMeter()
    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))

    # Squared euclidean distance: ||q||^2 + ||g||^2 - 2 * q.g
    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # Fix: positional addmm_(1, -2, a, b) is deprecated and removed in
    # torch >= 1.5; beta/alpha must be keywords.
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        dataset_type=args.dataset)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    return cmc[0]
def main():
    """End-to-end VeRi re-id training driver.

    Builds the spanning-tree ("span") variant of the VeRi dataset, mines
    GMS-based positive/negative samples per anchor, trains an IBN-ResNet50
    with cross-entropy + triplet loss, and periodically evaluates / saves
    checkpoints.  All paths and hyper-parameters are hard-coded below.
    """
    # GENERAL
    torch.cuda.empty_cache()
    root = "/home/kuru/Desktop/veri-gms-master/"
    train_dir = '/home/kuru/Desktop/veri-gms-master/VeRispan/image_train/'
    source = {'verispan'}
    target = {'verispan'}
    workers = 4
    height = 320
    width = 320
    train_sampler = 'RandomSampler'

    # AUGMENTATION
    random_erase = True
    jitter = True
    aug = True

    # OPTIMIZATION
    opt = 'adam'
    lr = 0.001
    weight_decay = 5e-4
    momentum = 0.9
    sgd_damp = 0.0
    nesterov = True
    warmup_factor = 0.01
    warmup_method = 'linear'
    STEPS = (30, 60)
    GAMMA = 0.1
    WARMUP_FACTOR = 0.01
    WARMUP_EPOCHS = 10
    WARMUP_METHOD = 'linear'

    # HYPERPARAMETERS
    max_epoch = 80
    start = 0
    train_batch_size = 16
    test_batch_size = 50

    # SCHEDULER
    lr_scheduler = 'multi_step'
    stepsize = [30, 60]
    gamma = 0.1

    # LOSS
    margin = 0.3
    num_instances = 4
    lambda_tri = 1

    # MODEL
    arch = 'resnet50_ibn_a'
    no_pretrained = False

    # TEST SETTINGS
    load_weights = '/home/kuru/Desktop/veri-gms-master/IBN-Net_pytorch0.4.1/resnet50_ibn_a.pth'
    start_eval = 0
    eval_freq = -1

    # Center loss (computed for monitoring; see NOTE in the training loop).
    num_classes = 776
    feat_dim = 2048
    CENTER_LR = 0.5
    CENTER_LOSS_WEIGHT = 0.0005
    center_criterion = CenterLoss(num_classes=num_classes,
                                  feat_dim=feat_dim,
                                  use_gpu=True)
    optimizer_center = torch.optim.SGD(center_criterion.parameters(),
                                       lr=CENTER_LR)

    # MISC
    use_gpu = True
    print_freq = 10
    seed = 1
    resume = ''
    save_dir = '/home/kuru/Desktop/veri-gms-master_noise/spanningtree_veri_pure/'
    gpu_id = 0, 1
    vis_rank = True
    query_remove = True
    evaluate = False

    dataset_kwargs = {
        'source_names': source,
        'target_names': target,
        'root': root,
        'height': height,
        'width': width,
        'train_batch_size': train_batch_size,
        'test_batch_size': test_batch_size,
        'train_sampler': train_sampler,
        'random_erase': random_erase,
        'color_jitter': jitter,
        'color_aug': aug
    }
    transform_kwargs = {
        'height': height,
        'width': width,
        'random_erase': random_erase,
        'color_jitter': jitter,
        'color_aug': aug
    }
    optimizer_kwargs = {
        'optim': opt,
        'lr': lr,
        'weight_decay': weight_decay,
        'momentum': momentum,
        'sgd_dampening': sgd_damp,
        'sgd_nesterov': nesterov
    }
    lr_scheduler_kwargs = {
        'lr_scheduler': lr_scheduler,
        'stepsize': stepsize,
        'gamma': gamma
    }

    use_gpu = torch.cuda.is_available()
    log_name = 'log_test.txt' if evaluate else 'log_train.txt'
    sys.stdout = Logger(osp.join(save_dir, log_name))
    print('Currently using GPU ', gpu_id)
    cudnn.benchmark = True

    print('Initializing image data manager')
    dataset = init_imgreid_dataset(
        root='/home/kuru/Desktop/veri-gms-master/', name='verispan')

    # Re-key each training record as (relative path, folder id, pid, camid,
    # subid, countid).  img_path[56:96] strips the absolute prefix;
    # path[1:4] is the 3-char identity folder name.
    train = []
    num_train_pids = 0
    num_train_cams = 0
    print(len(dataset.train))
    for img_path, pid, camid, subid, countid in dataset.train:
        path = img_path[56:90 + 6]
        folder = path[1:4]
        pid += num_train_pids
        camid += num_train_cams
        train.append((path, folder, pid, camid, subid, countid))
    num_train_pids += dataset.num_train_pids
    num_train_cams += dataset.num_train_cams

    # folder name -> pid lookup
    pid = 0
    pidx = {}
    for img_path, pid, camid, subid, countid in dataset.train:
        path = img_path[56:90 + 6]
        folder = path[1:4]
        pidx[folder] = pid
        pid += 1

    # Assign each image a globally unique "newid" by offsetting subid with
    # the running maximum over previously processed pids.
    sub = []
    final = 0
    newids = []
    print(train[0:2])
    train2 = {}
    for k in range(0, 770):
        for img_path, pid, camid, subid, countid in dataset.train:
            if k == pid:
                newid = final + subid
                sub.append(newid)
                newids.append(newid)
                train2[img_path] = newid
        final = max(sub)
    print(len(newids), final)

    train3 = []
    for img_path, pid, camid, subid, countid in dataset.train:
        path = img_path[56:90 + 6]
        folder = path[1:4]
        newid = train2[img_path]
        train3.append((path, folder, pid, camid, subid, countid, newid))
    train = train3

    # Load per-identity GMS match matrices; keys are the 3-char folder names
    # (or the 13-char 'featureMatrix' stem).
    path = '/home/kuru/Desktop/veri-gms-master/gms/'
    pkl = {}
    entries = os.listdir(path)
    for name in entries:
        if name == 'featureMatrix.pkl':
            s = name[0:13]
        else:
            s = name[0:3]
        # FIX: the original did `f = open(...)` then `f.close` (no call),
        # leaking the file handle; a context manager closes it reliably.
        with open(path + name, 'rb') as f:
            pkl[s] = pickle.load(f)

    print('=> pickle indexing')
    data_index = search(pkl)
    print(len(data_index))

    transform_t = train_transforms(**transform_kwargs)
    data_tfr = vdspan(
        pkl_file='index_veryspan.pkl',
        dataset=train,
        root_dir='/home/kuru/Desktop/veri-gms-master/VeRispan/image_train/',
        transform=transform_t)
    print("lllllllllllllllllllllllllllllllllllllllllllline 433")

    # Keep only anchors with more than 4 samples (countid > 4); the full
    # dataset is retained for positive/negative lookups during training.
    df2 = []
    data_tfr_old = data_tfr
    for (img, label, index, pid, cid, subid, countid, newid) in data_tfr:
        if countid > 4:
            df2.append((img, label, index, pid, cid, subid, countid, newid))
    print("filtered final trainset length", len(df2))
    data_tfr = df2

    trainloader = DataLoader(data_tfr,
                             sampler=None,
                             batch_size=train_batch_size,
                             shuffle=True,
                             num_workers=workers,
                             pin_memory=True,
                             drop_last=True)
    for batch_idx, (img, label, index, pid, cid, subid, countid,
                    newid) in enumerate(trainloader):
        print("trainloader", batch_idx, (label))
        break

    print('Initializing test data manager')
    dm = ImageDataManager(use_gpu, **dataset_kwargs)
    testloader_dict = dm.return_dataloaders()

    print('Initializing model: {}'.format(arch))
    model = models.init_model(name=arch,
                              num_classes=num_train_pids,
                              loss={'xent', 'htri'},
                              pretrained=not no_pretrained,
                              last_stride=2)
    print('Model size: {:.3f} M'.format(count_num_param(model)))

    if load_weights is not None:
        print("weights loaded")
        load_pretrained_weights(model, load_weights)

    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    print(torch.cuda.device_count())
    model = nn.DataParallel(model).cuda() if use_gpu else model

    optimizer = init_optimizer(model, **optimizer_kwargs)
    scheduler = init_lr_scheduler(optimizer, **lr_scheduler_kwargs)

    criterion_xent = CrossEntropyLoss(num_classes=num_train_pids,
                                      use_gpu=use_gpu,
                                      label_smooth=True)
    criterion_htri = TripletLoss(margin=margin)
    ranking_loss = nn.MarginRankingLoss(margin=margin)

    if evaluate:
        print('Evaluate only')
        for name in target:
            print('Evaluating {} ...'.format(name))
            queryloader = testloader_dict[name]['query']
            galleryloader = testloader_dict[name]['gallery']
            _, distmat = test(model,
                              queryloader,
                              galleryloader,
                              train_batch_size,
                              use_gpu,
                              return_distmat=True)
            if vis_rank:
                visualize_ranked_results(distmat,
                                         dm.return_testdataset_by_name(name),
                                         save_dir=osp.join(
                                             save_dir, 'ranked_results', name),
                                         topk=20)
        return

    time_start = time.time()
    ranklogger = RankLogger(source, target)
    print('=> Start training')

    for epoch in range(start, max_epoch):
        print(epoch, scheduler.get_lr()[0])
        losses = AverageMeter()
        htri_losses = AverageMeter()
        accs = AverageMeter()
        batch_time = AverageMeter()
        xent_losses = AverageMeter()
        model.train()
        for p in model.parameters():
            p.requires_grad = True  # open all layers
        end = time.time()

        for batch_idx, (img, label, index, pid, cid, subid, countid,
                        newid) in enumerate(trainloader):
            # Build a 3x batch: [anchors | GMS positives | random negatives]
            trainX = torch.zeros((train_batch_size * 3, 3, height, width),
                                 dtype=torch.float32)
            trainY = torch.zeros((train_batch_size * 3), dtype=torch.int64)
            for i in range(train_batch_size):
                if (countid[i] > 4):
                    labelx = label[i]
                    indexx = index[i]
                    cidx = pid[i]
                    if indexx > len(pkl[labelx]) - 1:
                        indexx = len(pkl[labelx]) - 1
                    # Hardest positive = smallest non-zero GMS match count.
                    a = pkl[labelx][indexx]
                    minpos = np.argmin(ma.masked_where(a == 0, a))
                    pos_dic = data_tfr_old[data_index[cidx][1] + minpos]

                    # Random negative from a different identity whose folder
                    # exists on disk.
                    neg_label = int(labelx)
                    while True:
                        neg_label = random.choice(range(1, 770))
                        # FIX: original used `is not int(labelx)` — identity
                        # comparison on ints is only reliable inside CPython's
                        # small-int cache; compare by value instead.
                        if neg_label != int(labelx) and os.path.isdir(
                                os.path.join(
                                    '/home/kuru/Desktop/veri-gms-master_noise/veriNoise_train_spanning_folder',
                                    strint(neg_label))):
                            break
                    negative_label = strint(neg_label)
                    neg_cid = pidx[negative_label]
                    neg_index = random.choice(
                        range(0, len(pkl[negative_label])))
                    neg_dic = data_tfr_old[data_index[neg_cid][1] + neg_index]

                    trainX[i] = img[i]
                    trainX[i + train_batch_size] = pos_dic[0]
                    trainX[i + (train_batch_size * 2)] = neg_dic[0]
                    trainY[i] = cidx
                    trainY[i + train_batch_size] = pos_dic[3]
                    trainY[i + (train_batch_size * 2)] = neg_dic[3]

            trainX = trainX.cuda()
            trainY = trainY.cuda()
            outputs, features = model(trainX)
            # Cross-entropy only on the anchor slice; triplet on all three.
            xent_loss = criterion_xent(outputs[0:train_batch_size],
                                       trainY[0:train_batch_size])
            htri_loss = criterion_htri(features, trainY)
            centerloss = CENTER_LOSS_WEIGHT * center_criterion(
                features, trainY)
            # NOTE: center loss is computed but deliberately excluded from the
            # objective — the original overwrote the sum with this expression.
            loss = htri_loss + xent_loss

            optimizer.zero_grad()
            optimizer_center.zero_grad()
            loss.backward()
            optimizer.step()

            for param_group in optimizer.param_groups:
                lrrr = str(param_group['lr'])

            batch_time.update(time.time() - end)
            losses.update(loss.item(), trainY.size(0))
            htri_losses.update(htri_loss.item(), trainY.size(0))
            xent_losses.update(xent_loss.item(), trainY.size(0))
            accs.update(
                accuracy(outputs[0:train_batch_size],
                         trainY[0:train_batch_size])[0])

            if (batch_idx) % 50 == 0:
                print('Train ', end=" ")
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'TriLoss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'XLoss {xloss.val:.4f} ({xloss.avg:.4f})\t'
                      'OveralLoss {oloss.val:.4f} ({oloss.avg:.4f})\t'
                      'Acc {acc.val:.2f} ({acc.avg:.2f})\t'
                      'lr {lrrr} \t'.format(
                          epoch + 1,
                          batch_idx + 1,
                          len(trainloader),
                          batch_time=batch_time,
                          loss=htri_losses,
                          xloss=xent_losses,
                          oloss=losses,
                          acc=accs,
                          lrrr=lrrr,
                      ))
            end = time.time()

        scheduler.step()
        print('=> Test')
        save_checkpoint(
            {
                'state_dict': model.state_dict(),
                'epoch': epoch + 1,
                'arch': arch,
                'optimizer': optimizer.state_dict(),
            }, save_dir)
        GPUtil.showUtilization()
        print(torch.cuda.memory_allocated(), torch.cuda.memory_cached())

        for name in target:
            print('Evaluating {} ...'.format(name))
            queryloader = testloader_dict[name]['query']
            galleryloader = testloader_dict[name]['gallery']
            rank1, distmat = test(model, queryloader, galleryloader,
                                  test_batch_size, use_gpu)
            ranklogger.write(name, epoch + 1, rank1)
            rank2, distmat2 = test_rerank(model, queryloader, galleryloader,
                                          test_batch_size, use_gpu)
            ranklogger.write(name, epoch + 1, rank2)
            del queryloader
            del galleryloader
            del distmat
        print(torch.cuda.memory_allocated(), torch.cuda.memory_cached())
        torch.cuda.empty_cache()

        if (epoch + 1) == max_epoch:
            print('=> Test')
            save_checkpoint(
                {
                    'state_dict': model.state_dict(),
                    'rank1': rank1,
                    'epoch': epoch + 1,
                    'arch': arch,
                    'optimizer': optimizer.state_dict(),
                }, save_dir)
            for name in target:
                print('Evaluating {} ...'.format(name))
                queryloader = testloader_dict[name]['query']
                galleryloader = testloader_dict[name]['gallery']
                rank1, distmat = test(model, queryloader, galleryloader,
                                      test_batch_size, use_gpu)
                ranklogger.write(name, epoch + 1, rank1)
                if vis_rank:
                    visualize_ranked_results(
                        distmat,
                        dm.return_testdataset_by_name(name),
                        save_dir=osp.join(save_dir, 'ranked_results', name),
                        topk=20)
def train(epoch,
          model,
          criterion,
          optimizer,
          trainloader,
          loss_type,
          print_freq,
          freeze_bn=False):
    """Run one training epoch.

    Depending on ``loss_type`` the criterion is applied either per-head
    cross-entropy ('xent') or as a combined xent+triplet objective; any
    other value raises ``KeyError``.  ``freeze_bn`` keeps batch-norm layers
    in eval mode for the whole epoch.
    """
    loss_meter = AverageMeter()
    fwd_timer = AverageMeter()
    load_timer = AverageMeter()

    tick = time.time()
    for step, batch in enumerate(trainloader):
        load_timer.update(time.time() - tick)

        imgs, pids, group_labels, _, _, _ = batch
        imgs, pids, group_labels = imgs.cuda(), pids.cuda(), group_labels.cuda(
        )

        model.train()
        if freeze_bn:
            model.apply(set_bn_to_eval)  # keep BN statistics frozen

        outputs = model(imgs)

        if loss_type == 'xent':
            # Average the cross-entropy over every classifier head,
            # then scale by 2.0 (original weighting kept as-is).
            per_head = [criterion(head_out, pids) for head_out in outputs]
            loss = 2.0 * (sum(per_head) / len(per_head))
        elif loss_type in ('xent_triplet', 'xent_tripletv2',
                           'xent_triplet_sqrt', 'xent_triplet_squa'):
            loss = criterion(outputs[0], outputs[1], pids, group_labels)
        else:
            raise KeyError("Unsupported loss: {}".format(loss_type))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        fwd_timer.update(time.time() - tick)
        loss_meter.update(loss.item(), pids.size(0))

        if (step + 1) % print_freq == 0:
            print(
                'Epoch: [{0}][{1}/{2}]\t'
                'Train time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t'
                'Load data time {data_time.val:.4f}s ({data_time.avg:.4f}s)\t'
                'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                    epoch + 1,
                    step + 1,
                    len(trainloader),
                    batch_time=fwd_timer,
                    data_time=load_timer,
                    loss=loss_meter))
        tick = time.time()
def train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu): xent_losses = AverageMeter( ) #AverageMeter():Computes and stores the average and current value htri_losses = AverageMeter() # .update(curr_value) accs = AverageMeter() batch_time = AverageMeter() data_time = AverageMeter() model.train() #设置为训练模式 for p in model.parameters(): p.requires_grad = True # open all layers 打开自动求导 end = time.time() for batch_idx, (imgs, pids, _, _) in enumerate(trainloader): #加载一个batch的训练图像 data_time.update(time.time() - end) if use_gpu: imgs, pids = imgs.cuda(), pids.cuda() # 前向计算 outputs, features = model( imgs ) #imgs -> y,v (https://github.com/KevinQian97/ELECTRICITY-MTMC/blob/ce5f173aabdc9ae6733ca36d1fdcfc53fa3d3d6e/identifier/models/resnet.py#L221) # 计算2种loss if isinstance(outputs, (tuple, list)): xent_loss = DeepSupervision(criterion_xent, outputs, pids) else: xent_loss = criterion_xent(outputs, pids) if isinstance(features, (tuple, list)): htri_loss = DeepSupervision(criterion_htri, features, pids) else: htri_loss = criterion_htri(features, pids) #综合loss=2种loss求和 loss = args.lambda_xent * xent_loss + args.lambda_htri * htri_loss optimizer.zero_grad( ) #对于每个batch,梯度(loss关于weight的导数)置零,目的是清空上一步的残余更新参数值 loss.backward() #反向传播 optimizer.step() #更新参数 batch_time.update(time.time() - end) xent_losses.update(xent_loss.item(), pids.size(0)) #记录 htri_losses.update(htri_loss.item(), pids.size(0)) accs.update(accuracy(outputs, pids)[0]) #隔一段时间,打印训练状态 if (batch_idx + 1) % args.print_freq == 0: print('Epoch: [{0}][{1}/{2}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Data {data_time.val:.4f} ({data_time.avg:.4f})\t' 'Xent {xent.val:.4f} ({xent.avg:.4f})\t' 'Htri {htri.val:.4f} ({htri.avg:.4f})\t' 'Acc {acc.val:.2f} ({acc.avg:.2f})\t'.format( epoch + 1, batch_idx + 1, len(trainloader), batch_time=batch_time, data_time=data_time, xent=xent_losses, htri=htri_losses, acc=accs)) end = time.time()
def test(model,
         queryloader,
         galleryloader,
         pool,
         use_gpu,
         ranks=[1, 5, 10, 20]):
    """Evaluate a video re-id model: per-tracklet frames are flattened for
    the forward pass, then frame features are pooled into one descriptor.

    Args:
        model: feature extractor; called on ``(b*s, c, h, w)`` batches.
        queryloader / galleryloader: yield ``(imgs, pids, camids)`` where
            ``imgs`` is ``(b, s, c, h, w)`` (s = sequence length).
        pool (str): 'avg' for temporal mean pooling, anything else for max.
        use_gpu (bool): move batches to CUDA.
        ranks (list[int]): CMC ranks to print.

    Returns:
        float: rank-1 CMC score.
    """
    batch_time = AverageMeter()
    model.eval()

    with torch.no_grad():
        # ---- query features ----
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()
            b, s, c, h, w = imgs.size()
            imgs = imgs.view(b * s, c, h, w)  # flatten frames for the model
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.view(b, s, -1)
            # Temporal pooling over the s frames of each tracklet.
            if pool == 'avg':
                features = torch.mean(features, 1)
            else:
                features, _ = torch.max(features, 1)
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        # ---- gallery features ----
        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()
            b, s, c, h, w = imgs.size()
            imgs = imgs.view(b * s, c, h, w)
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.view(b, s, -1)
            if pool == 'avg':
                features = torch.mean(features, 1)
            else:
                features, _ = torch.max(features, 1)
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch * args.seq_len))

    # Squared euclidean distance: ||q||^2 + ||g||^2 - 2 * q . g
    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
        torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # FIX: legacy addmm_(beta, alpha, m1, m2) overload removed in PyTorch;
    # use keyword arguments.
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")
    return cmc[0]
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):
    """Evaluate with horizontal-flip test-time augmentation: features of the
    original and flipped image are summed before matching.

    Distance is selected by ``args.distance``: squared euclidean, otherwise
    negative cosine similarity on L2-normalized features.

    Returns:
        float: rank-1 CMC score.
    """
    since = time.time()
    batch_time = AverageMeter()
    model.eval()

    with torch.no_grad():
        # ---- query features (original + flipped, summed) ----
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            end = time.time()
            if use_gpu:
                imgs = imgs.cuda()
            n, c, h, w = imgs.size()
            features = torch.FloatTensor(n, model.module.feat_dim).zero_()
            for i in range(2):
                if (i == 1):
                    imgs = fliplr(imgs, use_gpu)  # second pass: flipped
                f = model(imgs)
                f = f.data.cpu()
                features = features + f
            batch_time.update(time.time() - end)
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        # ---- gallery features (original + flipped, summed) ----
        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            end = time.time()
            if use_gpu:
                imgs = imgs.cuda()
            n, c, h, w = imgs.size()
            features = torch.FloatTensor(n, model.module.feat_dim).zero_()
            for i in range(2):
                if (i == 1):
                    imgs = fliplr(imgs, use_gpu)
                f = model(imgs)
                f = f.data.cpu()
                features = features + f
            batch_time.update(time.time() - end)
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))

    m, n = qf.size(0), gf.size(0)
    if args.distance == 'euclidean':
        # Squared euclidean: ||q||^2 + ||g||^2 - 2 * q . g
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
            torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        # FIX: legacy addmm_(beta, alpha, m1, m2) overload removed in
        # PyTorch; use keyword arguments.
        distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    else:
        # Negative cosine similarity on L2-normalized features.
        q_norm = torch.norm(qf, p=2, dim=1, keepdim=True)
        g_norm = torch.norm(gf, p=2, dim=1, keepdim=True)
        qf = qf.div(q_norm.expand_as(qf))
        gf = gf.div(g_norm.expand_as(gf))
        distmat = -torch.mm(qf, gf.t())
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")
    return cmc[0]
def test(model,
         queryloader,
         galleryloader,
         batch_size,
         use_gpu,
         ranks=[1, 5, 10],
         return_distmat=False):
    """Evaluate a re-id model and optionally return the distance matrix.

    Loaders yield ``(imgs, pids, camids, _)`` batches.  CMC / mAP are
    computed by :func:`evaluate` (called with max_rank=10 here).

    Returns:
        tuple: ``(rank1, distmat)`` — rank-1 CMC score and the numpy
        query-by-gallery squared-euclidean distance matrix.
    """
    batch_time = AverageMeter()
    model.eval()

    with torch.no_grad():
        # ---- query features ----
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids, _) in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        print('Extracted features for query set, obtained {}-by-{} matrix'.
              format(qf.size(0), qf.size(1)))

        # ---- gallery features ----
        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids, _) in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        print('Extracted features for gallery set, obtained {}-by-{} matrix'.
              format(gf.size(0), gf.size(1)))

    print('=> BatchTime(s)/BatchSize(img): {:.3f}/{}'.format(
        batch_time.avg, batch_size))

    # Squared euclidean distance: ||q||^2 + ||g||^2 - 2 * q . g
    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
        torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # FIX: legacy addmm_(beta, alpha, m1, m2) overload removed in PyTorch;
    # use keyword arguments.
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    print('Computing CMC and mAP')
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, 10)

    print('Results ----------')
    print('mAP: {:.1%}'.format(mAP))
    print('CMC curve')
    for r in ranks:
        print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))
    print('------------------')
    return cmc[0], distmat
queryloader = testloader_dict['query'] galleryloader = testloader_dict['test'] # distmat, q_pids, g_pids, q_camids, g_camids = run(model, queryloader, galleryloader, use_gpu, return_distmat=True) batch_time = AverageMeter() model.eval() with torch.no_grad(): gf, g_pids, g_camids = [], [], [] for batch_idx, (imgs, pids, camids, _) in enumerate(tqdm(galleryloader)): if use_gpu: imgs = imgs.cuda() end = time.time() features = model(imgs) batch_time.update(time.time() - end) features = features.data.cpu() gf.append(features) g_pids.extend(pids) g_camids.extend(camids) gf = torch.cat(gf, 0) g_pids = np.asarray(g_pids) g_camids = np.asarray(g_camids) print('Extracted features for gallery set, obtained {}-by-{} matrix'. format(gf.size(0), gf.size(1))) qf, q_pids, q_camids = [], [], [] for batch_idx, (imgs, pids, camids, _) in enumerate(tqdm(queryloader)): if use_gpu: imgs = imgs.cuda() end = time.time()