def test2(clothes_type, net1_path, net2_path):
    """Run two-model ensemble inference for one clothes category.

    Loads two kres152 snapshots, averages their keypoint-coordinate outputs
    over the test set, maps each prediction back to original-image pixel
    coordinates, and stores the results in the module-level ``predicts`` dict
    as ``predicts[image_id][point_name] = "x_y_1"``.

    Parameters
    ----------
    clothes_type : str
        Category key into ``FashionData.class_points`` / ``FashionData.mean_std``.
    net1_path, net2_path : str
        Paths to the two saved state dicts to ensemble.
    """
    global predicts
    test_sel = 'all'
    num_keypoints = len(FashionData.class_points[clothes_type])

    # Per-category normalization statistics.
    normalize = transforms.Normalize(
        mean=FashionData.mean_std[clothes_type]['mean'],
        std=FashionData.mean_std[clothes_type]['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    testSet = FashionData.fashionSet(
        selected=test_sel, classname=clothes_type,
        root='/home/wuxiaodong/fa/test/Images/', train='test',
        transform=transform)
    # shuffle=False is required: batch_idx * batch_size + i is used below to
    # map a sample back to testSet.selected_imgs.
    testloader = torch.utils.data.DataLoader(
        testSet, batch_size=40, shuffle=False, num_workers=2)

    net1 = kres152.get_kres152(
        state_dict=torch.load(net1_path, map_location=lambda storage, loc: storage),
        num_keypoints=num_keypoints)
    net2 = kres152.get_kres152(
        state_dict=torch.load(net2_path, map_location=lambda storage, loc: storage),
        num_keypoints=num_keypoints)
    net1.cuda()
    net2.cuda()
    net1.eval()
    net2.eval()

    for batch_idx, (data, label) in enumerate(testloader):
        batch_num = label.size()[0]          # actual batch size (last batch may be short)
        batch_size = testloader.batch_size   # nominal batch size, for global indexing
        data = Variable(data.cuda(), volatile=True)
        # Only the first 2*num_keypoints channels are (x, y) coordinates.
        output1 = net1(data)[:, :2 * num_keypoints]
        output2 = net2(data)[:, :2 * num_keypoints]
        # BUG FIX: the ensemble must AVERAGE the two predictions.  The
        # original code summed them, which doubles every centred coordinate
        # (see the +0.5 remapping below) and pushes most points out of range,
        # so the centre fallback fired almost every time.
        output = (output1 + output2) / 2
        output = output.cpu().data

        for i in range(batch_num):
            image_id = 'Images/{}/{}'.format(
                clothes_type, testSet.selected_imgs[batch_idx * batch_size + i])
            predict = {}
            for j in range(num_keypoints):
                point_name = FashionData.class_points[clothes_type][j]
                pre_new_x = output[i, 2 * j]
                pre_new_y = output[i, 2 * j + 1]
                # The label row carries the preprocessing geometry:
                # (resized edge, scale factor, left pad, top pad).
                ori_resize = label[i, 0]
                ori_scale = label[i, 1]
                ori_left_pad = label[i, 2]
                ori_top_pad = label[i, 3]
                # Map the network's centred coordinate (roughly [-0.5, 0.5])
                # back to the original, unpadded, unscaled image.
                ori_x = int(((pre_new_x + 0.5) * ori_resize - ori_left_pad) / ori_scale)
                ori_y = int(((pre_new_y + 0.5) * ori_resize - ori_top_pad) / ori_scale)
                # NOTE(review): reconstructs width/height from a single pad
                # value — assumes padding was applied on one side only;
                # confirm against FashionData preprocessing.
                width = int((ori_resize - ori_left_pad) / ori_scale)
                height = int((ori_resize - ori_top_pad) / ori_scale)
                # Out-of-range predictions fall back to the image centre.
                if ori_x < 0 or ori_x > width:
                    ori_x = int(width / 2)
                if ori_y < 0 or ori_y > height:
                    ori_y = int(height / 2)
                predict[point_name] = '{}_{}_{}'.format(ori_x, ori_y, 1)
            predicts[image_id] = predict
# --- stage-2 training-script setup ---------------------------------------
# NOTE(review): this file is whitespace-mangled (many statements collapsed
# onto one physical line).  Reformatted for readability; code tokens are
# unchanged.
import torchvision.transforms as transforms
from torch.autograd import Variable
import kres152
import FashionData
# NOTE(review): num_keypoints, trainSet and valSet used below presumably
# come from this star-import — confirm against train.py.
from train import *

# losswei[1] must be 0
losswei = [1, 0]    # [coordinate-loss weight, visibility-loss weight]
viswei = [0, 0, 0]  # per-visibility-class weights (disabled at this stage)
LR = 0.01
postfix = 'stage2_dense161_'

# Both ensemble members start from the SAME stage-1 snapshot (net1_path is
# deliberately loaded twice).
net1_path = "/home/wuxiaodong/fa/models/resnet152/snapshots/dense161__epoch280.pth"
net1 = kres152.get_kres152(state_dict=torch.load(net1_path), num_keypoints=num_keypoints)
net1.cuda()
net2 = kres152.get_kres152(state_dict=torch.load(net1_path), num_keypoints=num_keypoints)
net2.cuda()

criterion = kres152.KEYPointLoss(weight=losswei, vis_weight=viswei, num_keypoints=num_keypoints).cuda()

trainloader = torch.utils.data.DataLoader(trainSet, batch_size=20, shuffle=True, num_workers=2)
# NOTE(review): this call is cut off here; its remaining arguments
# (shuffle=False, num_workers=2) appear elsewhere in the mangled file.
valloader = torch.utils.data.DataLoader(valSet,
# NOTE(review): mangled fragment — the first part is the tail of a
# validation routine whose `def` header is outside this chunk; the second
# part is the start of the training entry point, whose epoch loop is also
# cut off.  Reformatted; code tokens unchanged.
net.eval()
for batch_idx, (data, label) in enumerate(valloader):
    data = Variable(data.cuda(), volatile=True)
    label = Variable(label.cuda())
    output = net(data)
    # loss = criterion(output, label)
    # Only the coordinate channels are scored during validation.
    loss = criterion(output[:, :2 * num_keypoints], label[:, :2 * num_keypoints])
    loss_sum += loss.data[0]
    # Free per-batch tensors eagerly to keep GPU memory bounded.
    del loss, output, label
# NOTE(review): divides by the last batch index, not the batch count —
# possible off-by-one (batch_idx + 1); not fixed here as the enclosing
# function is outside this chunk.
print('test loss = {}'.format(loss_sum / batch_idx))


if __name__ == '__main__':
    net = kres152.get_kres152(
        num_keypoints=len(FashionData.class_points[clothes_type]))
    net.cuda()
    criterion = kres152.KEYPointLoss(
        weight=losswei, vis_weight=viswei,
        num_keypoints=len(FashionData.class_points[clothes_type])).cuda()
    for epoch in range(300):
        # Staged schedule: enable the visibility loss after a warm-up, then
        # decay its weight and the learning rate again at epoch 100.
        if epoch == 10:
            losswei = [1, 0.01]
            viswei = [10, 10, 1]
            LR = LR * 0.1
        if epoch == 100:
            losswei = [1, 0.001]
            viswei = [10, 10, 1]
            LR = LR * 0.1
# NOTE(review): mangled fragment — the first tokens close a DataLoader call
# started elsewhere in the file; `def train2():` at the bottom is cut off
# mid-statement.  Reformatted; code tokens unchanged.
shuffle=False, num_workers=2)

# Rolling training/validation statistics.
state = {}
state['loss_window'] = [0] * 200   # sliding window of recent batch losses
state['total_batch_num'] = 0
state['epoch'] = 0
state['loss_avg'] = 0.0
state['ne'] = 0.0                  # presumably a normalized-error metric — confirm
state['test_loss'] = 0.0

# Resume both ensemble members from their epoch-60 stage-2 snapshots.
# map_location keeps the load on CPU; .cuda() then moves each net to GPU.
net1_path = "/home/wuxiaodong/fa/models/trousers/snapshots/trousers_e2estage2_net1_epoch60.pth"
net2_path = "/home/wuxiaodong/fa/models/trousers/snapshots/trousers_e2estage2_net2_epoch60.pth"
net1 = kres152.get_kres152(state_dict=torch.load(
    net1_path, map_location=lambda storage, loc: storage), num_keypoints=num_keypoints)
net2 = kres152.get_kres152(state_dict=torch.load(
    net2_path, map_location=lambda storage, loc: storage), num_keypoints=num_keypoints)
net1.cuda()
net2.cuda()


def train2():
    # Joint training step for both ensemble members.  The SGD per-parameter
    # group list is cut off below — the remainder of this definition lies
    # outside this chunk.
    net1.train()
    net2.train()
    optimizer = torch.optim.SGD([
        {
            'params': net2.parameters(),
            'lr': LR