def main():
    """Evaluate the CSRNet counter + Residual regressor pipeline on the test set.

    Loads both networks from fixed checkpoint paths, primes the counter with
    the support images stored in the residual checkpoint, then runs
    ``validate`` over the image list from ``args.test_json``.
    """
    global args
    global support_imgs, support_gt, support_imgs_fea
    args = parser.parse_args()
    with open(args.test_json, 'r') as outfile:
        val_list = json.load(outfile)

    residual = Residual(k_nn=args.n).cuda()
    counter = CSRNet().cuda()

    # Load counter params.
    checkpoint = torch.load('./saved_models/countercheckpoint.pth.tar')
    counter.load_state_dict(checkpoint['state_dict_model'])

    # Load residual regressor params; this checkpoint also carries the
    # support set (images + ground truth) used by the regressor.
    checkpoint = torch.load('./saved_models/residualcheckpoint.pth.tar')
    residual.load_state_dict(checkpoint['state_dict_res'])
    support_imgs = checkpoint['support_imgs'].cuda()
    support_gt = checkpoint['support_gt'].cuda()

    # Fix: this is inference-only — put both nets in eval mode and run the
    # support forward pass without autograd (the original built a needless
    # graph and kept it alive through the cached features).
    counter.eval()
    residual.eval()
    with torch.no_grad():
        counter(support_imgs)
    support_imgs_fea = counter.features

    app, res, final = validate(val_list, counter, residual)
def count(img):
    """Return the predicted head count for a PIL image.

    Loads CSRNet weights from ``C["pth"]`` on every call (fine for one-off
    use; cache the model if calling in a loop), normalizes the image with
    the configured mean/std, and sums the predicted density map.
    """
    model = CSRNet()
    if C["cuda"]:
        model = model.cuda()
        pth = torch.load(C["pth"])
    else:
        model = model.cpu()
        pth = torch.load(C["pth"], map_location="cpu")
    model.load_state_dict(pth["state_dict"])
    model.eval()  # fix: inference mode (original left the net in train mode)

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=C["mean"], std=C["std"])
    ])
    img = transform(img.convert("RGB"))
    img = img.cuda() if C["cuda"] else img.cpu()

    # Fix: no_grad avoids building an autograd graph during inference.
    with torch.no_grad():
        output = model(img.unsqueeze(0)).detach().cpu()
    # Density map: sum over all pixels gives the count estimate.
    dmap = np.asarray(output.reshape(output.shape[2], output.shape[3]))
    return int(np.sum(dmap))
def main():
    """Train CSRNet on the train/val image lists given as JSON path files."""
    global args, best_prec1
    best_prec1 = 1e6  # best (lowest) validation MAE seen so far
    args = parser.parse_args()

    # Hyper-parameters stored on the args namespace.
    args.original_lr = 1e-7
    args.lr = 1e-7
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.epochs = 400
    args.steps = [-1, 1, 100, 150]
    args.scales = [1, 1, 1, 1]
    args.workers = 4
    args.seed = int(time.time())  # fix: torch manual_seed requires an int
    args.print_freq = 30

    # The JSON files hold lists of absolute image paths.
    with open(args.train_json, 'r') as outfile:
        train_list = json.load(outfile)
    with open(args.test_json, 'r') as outfile:
        val_list = json.load(outfile)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    torch.cuda.manual_seed(args.seed)

    model = CSRNet().cuda()
    # Fix: size_average=False is deprecated; reduction='sum' is the
    # documented equivalent (sum of squared errors over the density map).
    criterion = nn.MSELoss(reduction='sum').cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    # Optionally resume from a pretrained checkpoint.
    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(train_list, model, criterion, optimizer, epoch)
        prec1 = validate(val_list, model, criterion)

        # Track the best (lowest) MAE and checkpoint every epoch.
        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        print(' * best MAE {mae:.3f} '.format(mae=best_prec1))
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.pre,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer': optimizer.state_dict(),
        }, is_best, args.task)
def load_model(weight):
    """Build a CSRNet on the GPU, load the given weights, return it in eval mode."""
    print('Loading model.....')
    net = CSRNet().cuda()
    state = torch.load(weight)
    net.load_state_dict(state['state_dict'])
    net.eval()
    print('Loaded.')
    return net
def main():
    """Run CSRNet on a single test image; save prediction/GT/original figures.

    Writes predicted_dt.png, original_dt.png, original_image.png and a
    results.txt with the predicted and ground-truth counts.
    """
    global args
    print(args.path_testing_image)
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

    # NOTE(review): img_paths is collected but never used below — kept for
    # compatibility, but the glob only makes sense if path_testing_image is
    # a directory, while the code below treats it as a single file.
    img_paths = []
    for img_path in glob.glob(os.path.join(args.path_testing_image, '*.png')):
        img_paths.append(img_path)

    model = CSRNet().cuda()
    # Load the trained weights.
    checkpoint = torch.load(args.best_model_csrnet_path)
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()  # fix: inference mode

    img = transform(Image.open(args.path_testing_image).convert('RGB')).cuda()
    with torch.no_grad():  # fix: no autograd graph needed at test time
        output = model(img.unsqueeze(0))
    predicted_count = int(output.detach().cpu().sum().numpy())
    print("Predicted Count : ", predicted_count)

    temp = np.asarray(output.detach().cpu().reshape(
        output.detach().cpu().shape[2],
        output.detach().cpu().shape[3]))
    plt.imshow(temp, cmap=c.jet)
    plt.axis('off')
    plt.savefig("predicted_dt.png", bbox_inches='tight')

    # Fix: open the HDF5 ground truth read-only and close it deterministically
    # (the original relied on the default mode and leaked the handle).
    with h5py.File(args.path_testing_image.replace('.png', '.h5'), 'r') as gt:
        temp_1 = np.asarray(gt['density'])
    original_count = int(np.sum(temp_1)) + 1
    plt.imshow(temp_1, cmap=c.jet)
    print(" Original Count : ", original_count)
    plt.axis('off')
    plt.savefig("original_dt.png", bbox_inches='tight')

    print("Original Image")
    plt.imshow(plt.imread(args.path_testing_image))
    plt.axis('off')
    plt.savefig("original_image.png", bbox_inches='tight')

    # Fix: context manager guarantees the results file is flushed and closed.
    with open('results.txt', 'w') as f:
        f.write("Predicted Count : " + str(predicted_count) +
                " Original Count : " + str(original_count))
class PeopleCounter:
    """Wraps a pretrained CSRNet for repeated crowd-count inference.

    The checkpoint is loaded once in the constructor; ``countPeople`` can
    then be called per PIL image.
    """

    def __init__(self, checkpoint_path='0model_best.pth.tar'):
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
        self.model = CSRNet().cuda()
        self.checkpoint = torch.load(checkpoint_path)
        self.model.load_state_dict(self.checkpoint['state_dict'])
        # Fix: the model is only ever used for inference; eval mode was
        # never set in the original.
        self.model.eval()

    def countPeople(self, img):
        """Return the integer head count predicted for *img* (a PIL Image)."""
        tensor = self.transform(img.convert('RGB')).cuda()
        # Fix: no_grad avoids accumulating an autograd graph on every call.
        with torch.no_grad():
            output = self.model(tensor.unsqueeze(0))
        return int(output.detach().cpu().sum().numpy())
def prediction(path):
    """Predict the crowd count for the image at *path*.

    Returns a tuple ``(pred, fake_pred1, fake_pred2)``: the real CSRNet
    prediction plus two randomly perturbed values (note fake_pred2 can go
    negative for small predictions — preserved from the original).
    """
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    img = transform(Image.open(path).convert('RGB')).cuda()

    model_best = CSRNet().cuda()
    checkpoint = torch.load('model_best.pth.tar')
    model_best.load_state_dict(checkpoint['state_dict'])
    model_best.eval()  # fix: inference mode

    with torch.no_grad():  # fix: no gradient bookkeeping at test time
        output = model_best(img.unsqueeze(0))
    pred = int(output.detach().cpu().sum().numpy())
    fake_pred1 = random.randint(0, 100) + pred
    fake_pred2 = pred - random.randint(0, 50)
    return pred, fake_pred1, fake_pred2
def main():
    """Train CSRNet on the fish dataset (comma-separated path list files)."""
    global args, best_prec1
    best_prec1 = 1e6  # best (lowest) validation MAE so far
    args = parser.parse_args()

    args.original_lr = 1e-5
    args.lr = 1e-5
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.epochs = 100
    args.steps = [-1, 1, 100, 150]
    args.scales = [1, 1, 1, 1]
    args.workers = 4
    args.seed = int(time.time())  # fix: torch manual_seed requires an int
    args.print_freq = 30

    # Path lists are plain comma-separated text files.
    with open(args.fish_train, 'r') as outfile:
        train_list = outfile.read().split(',')
    with open(args.fish_val, 'r') as outfile:
        val_list = outfile.read().split(',')

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    torch.cuda.manual_seed(args.seed)

    model = CSRNet().cuda()
    # Fix: size_average=False is deprecated; reduction='sum' is equivalent.
    criterion = nn.MSELoss(reduction='sum').cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    # Optionally resume from a checkpoint.
    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(train_list, model, criterion, optimizer, epoch)
        prec1 = validate(val_list, model, criterion)

        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        print(' * best MAE {mae:.3f} '.format(mae=best_prec1))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.pre,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.task)
def get_model():
    """Return a GPU CSRNet loaded with the '0model_best.pth.tar' weights."""
    net = CSRNet().cuda()
    state = torch.load('0model_best.pth.tar')
    net.load_state_dict(state['state_dict'])
    return net
def main():
    """Train CSRNet (cse547_CSRNet_original_A config) with per-epoch log file."""
    global args, best_prec1
    best_prec1 = 1e6  # best (lowest) validation MAE so far
    args = parser.parse_args()

    args.original_lr = 1e-7
    args.lr = 1e-7
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.epochs = 100
    args.steps = [-1, 1, 100, 150]  # adjust learning rate
    args.scales = [1, 1, 1, 1]  # adjust learning rate
    args.workers = 4
    args.seed = int(time.time())  # fix: torch manual_seed requires an int
    args.print_freq = 30
    args.arch = 'cse547_CSRNet_original_A'

    with open(args.train_json, 'r') as outfile:
        train_list = json.load(outfile)
    with open(args.test_json, 'r') as outfile:
        val_list = json.load(outfile)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # Seed the GPU RNG so device-side random generation is reproducible.
    torch.cuda.manual_seed(args.seed)

    model = CSRNet().cuda()
    # Fix: size_average=False is deprecated; reduction='sum' is equivalent.
    criterion = nn.MSELoss(reduction='sum').cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    # Optionally resume from a checkpoint.
    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(train_list, model, criterion, optimizer, epoch)
        prec1 = validate(val_list, model, criterion)

        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        # Append the best-MAE line to the per-run log file.
        line = ' * best MAE {mae:.3f} '.format(mae=best_prec1)
        with open('logs/{}_{}.log'.format(time_stp, args.arch), 'a+') as flog:
            print(line)
            flog.write('{}\n'.format(line))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.pre,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.task)
def main():
    """Train CSRNet on ShanghaiTech part A with train/val lists derived from disk."""
    global args, best_prec1
    args = make_meow_args()
    best_prec1 = 1e6  # best (lowest) validation MAE so far

    args.original_lr = 1e-7
    args.lr = 1e-7
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.epochs = 400
    args.steps = [-1, 1, 100, 150]
    args.scales = [1, 1, 1, 1]
    args.workers = 4
    args.seed = int(time.time())  # fix: torch manual_seed requires an int
    args.print_freq = 30

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    torch.cuda.manual_seed(args.seed)

    # Train/val split is built directly from the dataset directory.
    DATA_PATH = "/data/cv_data/shanghaitech-with-people-density-map/ShanghaiTech/part_A/train_data"
    train_list, val_list = get_train_val_list(DATA_PATH)

    model = CSRNet().cuda()
    # Fix: size_average=False is deprecated; reduction='sum' is equivalent.
    criterion = nn.MSELoss(reduction='sum').cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    # Optionally resume from a checkpoint.
    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(train_list, model, criterion, optimizer, epoch)
        prec1 = validate(val_list, model, criterion)

        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        print(' * best MAE {mae:.3f} '.format(mae=best_prec1))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.pre,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.task)
def run_test(model_path, test):
    """Evaluate a CSRNet checkpoint on the ShanghaiTech part A or B test set.

    Args:
        model_path: path to the checkpoint file to evaluate.
        test: 'B' selects part B's test images, anything else part A.

    Returns:
        dict with keys 'mae', 'mse', 'psnr', 'ssim', 'swd', 'model_name'
        (all metrics averaged over the test images).
    """
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    root = os.path.dirname(os.path.abspath(__file__))
    # Image directories for the ShanghaiTech splits.
    part_A_train = os.path.join(root, 'part_A_final/train_data', 'images')
    part_A_test = os.path.join(root, 'part_A_final/test_data', 'images')
    part_B_train = os.path.join(root, 'part_B_final/train_data', 'images')
    part_B_test = os.path.join(root, 'part_B_final/test_data', 'images')
    path_sets = [part_B_test] if test == 'B' else [part_A_test]
    model_name = model_path

    img_paths = []
    for path in path_sets:
        for img_path in glob.glob(os.path.join(path, '*.jpg')):
            img_paths.append(img_path)

    model = CSRNet().cuda()
    checkpoint = torch.load(model_name)
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()  # fix: inference mode (original evaluated in train mode)

    mae = 0
    mse = 0
    ssim = 0
    psnr = 0
    swd_result = 0
    for i in range(len(img_paths)):
        plane_image = Image.open(img_paths[i]).convert('RGB')
        img = transform(plane_image).cuda()
        # Fix: close each HDF5 ground-truth file (the original leaked one
        # open handle per test image).
        with h5py.File(
                img_paths[i].replace('.jpg', '.h5').replace('images', 'ground'),
                'r') as gt_file:
            groundtruth = np.asarray(gt_file['density'])
        # Downsample the GT by 8x with an 8x8 sum filter so it matches the
        # spatial resolution of the model output.
        sum_convovled_kernel = np.ones((8, 8))
        target = sg.convolve2d(groundtruth,
                               sum_convovled_kernel[::-1, ::-1],
                               mode='valid')[::8, ::8]

        with torch.no_grad():  # fix: no autograd graph during evaluation
            output_turch = model(img.unsqueeze(0))
        output = np.array(output_turch.data.cpu()[0, 0, :, :])

        cur_mae = abs(output.sum() - np.sum(groundtruth))
        cur_mse = np.square(output.sum() - np.sum(groundtruth))
        mae += cur_mae
        mse += cur_mse
        cur_psnr = compare_psnr(target, output, data_range=1.0)
        psnr += cur_psnr
        cur_ssim = compare_ssim(target, output)
        ssim += cur_ssim

        # Sliced Wasserstein distance between GT and prediction maps.
        target_turch = torch.from_numpy(target)
        target_turch = target_turch.type(
            torch.FloatTensor).unsqueeze(0).unsqueeze(0).cuda()
        target_turch = Variable(target_turch)
        cur_swd = swd.swd(
            target_turch,
            output_turch).detach().float().unsqueeze(0).data[0].float().item()
        swd_result += cur_swd

    print('---------total results --------')
    print('the model: ', model_path)
    print('\nthe mae')
    print(mae / len(img_paths))
    print('the psnr')
    print(psnr / len(img_paths))
    print('the swd result')
    print(swd_result / len(img_paths))

    final_result = {}
    final_result['mae'] = mae / len(img_paths)
    final_result['mse'] = mse / len(img_paths)
    final_result['psnr'] = psnr / len(img_paths)
    final_result['ssim'] = ssim / len(img_paths)
    final_result['swd'] = swd_result / len(img_paths)
    final_result['model_name'] = model_path.split('/')[-1].split('.')[0]
    return final_result
def run_test(model_path, test):
    # Evaluate a CSRNet checkpoint on ShanghaiTech part A (default) or
    # part B (test == 'B'), bucketing per-image metrics by ground-truth
    # crowd size and saving per-bucket bar charts under results_evel/<name>/.
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    test = test
    # Bucket labels and count thresholds depend on the dataset (part B has
    # much sparser crowds than part A).
    very_small = '<10' if test == 'B' else '<200'
    small = '10-50' if test == 'B' else '200-500'
    medium = '50-100' if test == 'B' else '500-1000'
    large = '100-200' if test == 'B' else '1000-1500'
    very_large = '200<' if test == 'B' else '1000<'
    very_small_criterion = 10 if test == 'B' else 200
    small_criterion = 50 if test == 'B' else 500
    medium_criterion = 100 if test == 'B' else 1000
    large_criterion = 200 if test == 'B' else 1500
    # Per-bucket accumulators: MAE, MSE, SSIM, PSNR, GT counts and SWD.
    result_dict_mea = {
        very_small: [],
        small: [],
        medium: [],
        large: [],
        very_large: []
    }
    result_dict_msa = {
        very_small: [],
        small: [],
        medium: [],
        large: [],
        very_large: []
    }
    result_dict_ssim = {
        very_small: [],
        small: [],
        medium: [],
        large: [],
        very_large: []
    }
    result_dict_psnr = {
        very_small: [],
        small: [],
        medium: [],
        large: [],
        very_large: []
    }
    result_dict_gt = {
        very_small: [],
        small: [],
        medium: [],
        large: [],
        very_large: []
    }
    result_dict_swd = {
        very_small: [],
        small: [],
        medium: [],
        large: [],
        very_large: []
    }
    root = os.path.dirname(os.path.abspath(__file__))
    # now generate the ShanghaiA's ground truth
    part_A_train = os.path.join(root, 'part_A_final/train_data', 'images')
    part_A_test = os.path.join(root, 'part_A_final/test_data', 'images')
    part_B_train = os.path.join(root, 'part_B_final/train_data', 'images')
    part_B_test = os.path.join(root, 'part_B_final/test_data', 'images')
    path_sets = [part_B_test] if test == 'B' else [part_A_test]
    model_name = model_path
    print(model_name)
    print('test set: ', path_sets)
    img_paths = []
    for path in path_sets:
        for img_path in glob.glob(os.path.join(path, '*.jpg')):
            img_paths.append(img_path)
    model = CSRNet()
    model = model.cuda()
    checkpoint = torch.load(model_name)
    model.load_state_dict(checkpoint['state_dict'])
    print(checkpoint['epoch'])
    # Global accumulators over all test images.
    # NOTE(review): psnr_resize / ssim_resize are never updated in the loop,
    # so the "resize" totals printed at the end are always 0 — confirm intent.
    mae = 0
    mse = 0
    ssim = 0
    psnr = 0
    psnr_resize = 0
    ssim_resize = 0
    swd_result = 0
    swd_list = []
    image_list = []
    gt_list = []
    predict_list = []
    ssim_list = []
    mae_list = []
    psnr_list = []
    gt_number_list = []
    predict_number_list = []
    original_gt_list = []
    correct_resize_gt_list = []
    reg_resize_list = []
    mae_list_between_correct_resize = []
    mae_list_between_non_correct_resize = []
    original_image_list = []
    path = os.path.join(root, 'results_evel')
    os.makedirs(path, exist_ok=True)
    for i in range(len(img_paths)):
        plane_image = Image.open(img_paths[i]).convert('RGB')
        img = transform(plane_image).cuda()
        gt_file = h5py.File(
            img_paths[i].replace('.jpg', '.h5').replace('images', 'ground'),
            'r')
        groundtruth = np.asarray(gt_file['density'])
        # 8x8 sum filter + stride-8 slicing downsamples the GT density map
        # to the model's output resolution while preserving total count.
        sum_convovled_kernel = np.ones((8, 8))
        target = sg.convolve2d(groundtruth,
                               sum_convovled_kernel[::-1, ::-1],
                               mode='valid')[::8, ::8]
        output_turch = model(img.unsqueeze(0))
        output = np.array(output_turch.data.cpu()[0, 0, :, :])
        # Count-level errors against the full-resolution ground truth.
        cur_mae = abs(output.sum() - np.sum(groundtruth))
        cur_mse = np.square(output.sum() - np.sum(groundtruth)).sum()
        mae += cur_mae
        mse += cur_mse
        # Map-level quality metrics against the downsampled target.
        cur_psnr = compare_psnr(target, output, data_range=1.0)
        psnr += cur_psnr
        cur_ssim = compare_ssim(target, output)
        ssim += cur_ssim
        gt_sum = np.sum(groundtruth)
        # Sliced Wasserstein distance between GT and predicted maps.
        target_turch = torch.from_numpy(target)
        target_turch = target_turch.type(
            torch.FloatTensor).unsqueeze(0).unsqueeze(0).cuda()
        target_turch = Variable(target_turch)
        cur_swd = swd.swd(
            target_turch,
            output_turch).detach().float().unsqueeze(0).data[0].float().item()
        swd_result += cur_swd
        # Assign this image to a crowd-size bucket by its GT count.
        if gt_sum <= very_small_criterion:
            criterion = very_small
        elif gt_sum > very_small_criterion and gt_sum <= small_criterion:
            criterion = small
        elif gt_sum > small_criterion and gt_sum <= medium_criterion:
            criterion = medium
        elif gt_sum > medium_criterion and gt_sum <= large_criterion:
            criterion = large
        else:
            criterion = very_large
        result_dict_swd[criterion].append(cur_swd)
        result_dict_gt[criterion].append(gt_sum)
        result_dict_mea[criterion].append(cur_mae)
        result_dict_msa[criterion].append(cur_mse)
        result_dict_psnr[criterion].append(cur_psnr)
        result_dict_ssim[criterion].append(cur_ssim)
        # Upsample both maps back to the original image size; /64 keeps the
        # total count invariant under the 8x8 area change.
        resize_output = cv2.resize(
            output, (int(groundtruth.shape[1]), int(groundtruth.shape[0])),
            interpolation=cv2.INTER_CUBIC) / 64
        resize_target = cv2.resize(
            target, (int(groundtruth.shape[1]), int(groundtruth.shape[0])),
            interpolation=cv2.INTER_CUBIC) / 64
        original_image_list.append(np.array(plane_image).astype(np.uint8))
        original_gt_list.append(groundtruth)
        correct_resize_gt_list.append(target)
        reg_resize_list.append(resize_target)
        # Compare count drift of the sum-filter resize vs the naive resize.
        cur_mae_correct = abs(target.sum() - np.sum(groundtruth))
        cur_mae_non_correct = abs(resize_target.sum() - np.sum(groundtruth))
        mae_list_between_correct_resize.append(round(cur_mae_correct, 2))
        mae_list_between_non_correct_resize.append(
            round(cur_mae_non_correct, 2))
        # Heat-map comparison figures are disabled; flip run_it to enable.
        run_it = False
        if run_it:
            image_list.append(np.array(plane_image).astype(np.uint8))
            gt_list.append(resize_target)
            predict_list.append(resize_output)
            ssim_list.append(round(cur_ssim, 2))
            mae_list.append(round(cur_mae, 2))
            swd_list.append(round(cur_swd, 2))
            psnr_list.append(round(cur_psnr, 2))
            gt_number_list.append(round(np.sum(groundtruth), 2))
            predict_number_list.append(round(output.sum(), 2))
        if run_it and len(image_list) > 4:
            # Flush a batch of 5 images into one comparison figure.
            fig = create_heat_map_compare(image_list, gt_list, predict_list,
                                          swd_list, mae_list, psnr_list,
                                          gt_number_list, predict_number_list)
            name = model_name.split('/')[-1].split('.')[0]
            cur_path = os.path.join(path, name)
            os.makedirs(cur_path, exist_ok=True)
            fig.savefig(
                os.path.join(cur_path,
                             name + '_compare_images_to_gt_' + str(i) + '.png'))
            image_list = []
            gt_list = []
            predict_list = []
            swd_list = []
            mae_list = []
            psnr_list = []
            gt_number_list = []
            predict_number_list = []
        # Per-image progress report.
        print('---------')
        print('gt: ', np.sum(groundtruth))
        print('pred: ', output.sum())
        print('the mae')
        print(i, cur_mae)
        print('cur avg mae')
        print(i, mae / (i + 1))
        print('the mse')
        print(i, cur_mse)
        print('the ssim')
        print(i, cur_ssim)
        print('the psnr')
        print(i, cur_psnr)
        print('swd')
        print(cur_swd)
        print('---------')
    # Aggregate per-bucket MAE (normalized by total GT count) and RMSE.
    name = model_name.split('/')[-1].split('.')[0]
    cur_path = os.path.join(path, name)
    cur_result_dict_mea = {}
    cur_result_dict_msa = {}
    for key in result_dict_mea.keys():
        cur_result_dict_mea[key] = np.sum(result_dict_mea[key]) / np.sum(
            result_dict_gt[key]) if np.sum(result_dict_gt[key]) > 0 else 0
        cur_result_dict_msa[key] = np.sqrt(
            np.sum(result_dict_msa[key]) /
            len(result_dict_gt[key])) if np.sum(result_dict_gt[key]) > 0 else 0
    # One bar chart per metric, bucketed by GT crowd size.
    fig_mea = create_graph_per_number_of_gt(deepcopy(cur_result_dict_mea),
                                            'MAE')
    fig_msa = create_graph_per_number_of_gt(deepcopy(cur_result_dict_msa),
                                            'MSE')
    fig_ssim = create_graph_per_number_of_gt(deepcopy(result_dict_ssim),
                                             'SSIM')
    fig_psnr = create_graph_per_number_of_gt(deepcopy(result_dict_psnr),
                                             'PSNR')
    fig_swd = create_graph_per_number_of_gt(deepcopy(result_dict_swd), 'SWD')
    os.makedirs(cur_path, exist_ok=True)
    fig_mea.savefig(
        os.path.join(cur_path, name + '_mea_bar_' + str(i) + '.png'))
    os.makedirs(cur_path, exist_ok=True)
    fig_msa.savefig(
        os.path.join(cur_path, name + '_MSA_bar_' + str(i) + '.png'))
    os.makedirs(cur_path, exist_ok=True)
    fig_psnr.savefig(
        os.path.join(cur_path, name + '_PSNR_bar_' + str(i) + '.png'))
    os.makedirs(cur_path, exist_ok=True)
    fig_swd.savefig(
        os.path.join(cur_path, name + '_swd_bar_' + str(i) + '.png'))
    os.makedirs(cur_path, exist_ok=True)
    fig_ssim.savefig(
        os.path.join(cur_path, name + '_ssim_bar_' + str(i) + '.png'))
    # Dataset-wide summary.
    print('---------total results --------')
    print('the mae')
    print(mae / len(img_paths))
    print('the mse')
    print(np.sqrt(mse / len(img_paths)))
    print('the ssim')
    print(ssim / len(img_paths))
    print('the ssim_resize')
    print(ssim_resize / len(img_paths))
    print('the psnr')
    print(psnr / len(img_paths))
    print('the psnr resize')
    print(psnr_resize / len(img_paths))
    print('the swd result')
    print(swd_result / len(img_paths))
def main():
    """Train CSRNet, tracking best MAE and best MSE, with TensorBoard logging."""
    global args, best_prec1
    best_prec1 = 1e6  # best (lowest) validation MAE so far
    best_prec2 = 1e6  # best (lowest) validation MSE so far
    args = parser.parse_args()

    args.original_lr = 1e-6  # learning rate raised from the usual 1e-7 to 1e-6
    args.lr = 1e-6
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.epochs = 200  # shortened from 400
    args.steps = [-1, 1, 100, 150]
    args.scales = [1, 1, 0.1, 0.1]  # learning-rate decay factors per step
    args.workers = 4
    args.seed = int(time.time())  # fix: torch manual_seed requires an int
    args.print_freq = 100

    with open(args.train_json, 'r') as outfile:
        train_list = json.load(outfile)
    with open(args.test_json, 'r') as outfile:
        val_list = json.load(outfile)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    torch.cuda.manual_seed(args.seed)

    model = CSRNet().cuda()
    # Fix: size_average=False is deprecated; reduction='sum' is equivalent.
    criterion = nn.MSELoss(reduction='sum').cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    # Optionally resume from a checkpoint (restores both best metrics).
    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            best_prec2 = checkpoint['best_prec2']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))

    for epoch in range(args.start_epoch, args.epochs):
        start = time.time()
        adjust_learning_rate(optimizer, epoch)
        train(train_list, model, criterion, optimizer, epoch)
        prec1, mse = validate(val_list, model, criterion, epoch)

        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        best_prec2 = min(mse, best_prec2)
        # NOTE(review): the original logs the *best* MAE but the *current*
        # MSE to TensorBoard — asymmetry preserved; confirm intent.
        writer.add_scalar('MAE(MSE)/mae', best_prec1, epoch)
        writer.add_scalar('MAE(MSE)/mse', mse, epoch)
        print(' * best MAE {mae:.3f},best MSE {mse:.3f} '
              .format(mae=best_prec1, mse=best_prec2))
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.pre,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            # Fix: the resume path above reads 'best_prec2' as the best MSE,
            # but the original stored the current epoch's mse here, which
            # corrupted the running best on restart.
            'best_prec2': best_prec2,
            'optimizer': optimizer.state_dict(),
        }, is_best, args.task)
        during = time.time() - start
        print('Training complete in {:.0f}m {:.0f}s'.format(
            during / 60, during % 60))
def main():
    """Train CSRNet using the sliced Wasserstein distance (swd) as criterion."""
    global args, best_prec1
    best_prec1 = 1e6  # best (lowest) validation MAE so far
    args = parser.parse_args()

    args.original_lr = 1e-7
    args.lr = 1e-7
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.epochs = 400
    args.steps = [-1, 1, 100, 150]
    args.scales = [1, 1, 1, 1]
    args.workers = 4
    args.seed = int(time.time())  # fix: torch manual_seed requires an int
    args.print_freq = 30

    with open(args.train_json, 'r') as outfile:
        train_list = json.load(outfile)
    with open(args.test_json, 'r') as outfile:
        val_list = json.load(outfile)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    torch.cuda.manual_seed(args.seed)

    model = CSRNet().cuda()
    # Sliced Wasserstein distance replaces the usual summed-MSE criterion.
    criterion = swd
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    # Optionally resume from a checkpoint.
    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))

    # Both loaders share the same ImageNet normalization pipeline
    # (deduplicated from two identical Compose definitions).
    normalize = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    data_loader = dataset.listDataset(train_list,
                                      shuffle=True,
                                      transform=normalize,
                                      train=True,
                                      seen=model.seen,
                                      batch_size=args.batch_size,
                                      num_workers=args.workers)
    data_loader_val = dataset.listDataset(val_list,
                                          shuffle=False,
                                          transform=normalize,
                                          train=False)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(model, criterion, optimizer, epoch, data_loader)
        prec1 = validate(model, args.task, data_loader_val)
        # Reshuffle both datasets between epochs.
        data_loader.shuffle()
        data_loader_val.shuffle()

        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        print(' * best MAE {mae:.3f} '.format(mae=best_prec1))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.pre,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.task)
def main():
    """Train CSRNet on the fish dataset with Visdom curve plotting."""
    global args, best_prec1
    best_prec1 = 1e6  # best (lowest) validation MAE so far
    args = parser.parse_args()

    args.original_lr = 1e-5
    args.lr = 1e-5
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.epochs = 100
    args.steps = [-1, 1, 100, 150]
    args.scales = [1, 1, 1, 1]
    args.workers = 4
    args.seed = int(time.time())  # fix: torch manual_seed requires an int
    args.print_freq = 30

    # Path lists are plain comma-separated text files.
    with open(args.fish_train, 'r') as outfile:
        train_list = outfile.read().split(',')
    with open(args.fish_val, 'r') as outfile:
        val_list = outfile.read().split(',')

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    torch.cuda.manual_seed(args.seed)

    # Build the model.
    model = CSRNet().cuda()
    # Fix: size_average=False is deprecated; reduction='sum' is equivalent.
    criterion = nn.MSELoss(reduction='sum').cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    # Resume from a saved checkpoint if one is given.
    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))

    # Visualization setup.
    viz = Visdom()
    viz.line([0.], [0], win='train_loss', opts=dict(title='train_loss'))
    viz.line([0.], [0], win='val_acc', opts=dict(title='val_loss'))
    global_step = 0
    # NOTE(review): `losses` and `accuracy` are never reassigned below, so
    # the curves plot constants; wire them to the train/validate returns if
    # live curves are intended.
    losses = 0
    accuracy = 0
    Loss_list = []
    Accuracy_list = []

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(train_list, model, criterion, optimizer, epoch)
        # Fix: append to the 'train_loss' window created above (the original
        # targeted a never-created window named 'loss').
        viz.line([losses], [global_step], win='train_loss', update='append')
        Loss_list.append(losses)

        # Validation.
        prec1 = validate(val_list, model, criterion)
        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        print(' * best MAE {mae:.3f} '.format(mae=best_prec1))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.pre,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.task)
        # Fix: likewise, append to the 'val_acc' window created above.
        viz.line([accuracy], [global_step], win='val_acc', update='append')
        Accuracy_list.append(accuracy)
        global_step += 1

    csv_save(Loss_list)
    csv_save(Accuracy_list)
def main():
    """Train CSRNet on ShanghaiTech part A with hard-coded paths/task name."""
    global args, best_prec1
    best_prec1 = 1e6  # best (lowest) validation MAE so far
    args = parser.parse_args()

    args.original_lr = 1e-7
    args.lr = 1e-7
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.epochs = 800
    args.steps = [-1, 1, 100, 150]
    args.scales = [1, 1, 1, 1]
    args.workers = 4
    args.seed = int(time.time())  # fix: torch manual_seed requires an int
    args.print_freq = 30
    # Hard-coded run configuration overriding the CLI values.
    args.train_json = './json/mypart_A_train.json'
    args.test_json = './json/mypart_A_test.json'
    args.gpu = '0'
    args.task = 'shanghaiA'

    with open(args.train_json, 'r') as outfile:
        train_list = json.load(outfile)
    with open(args.test_json, 'r') as outfile:
        val_list = json.load(outfile)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    torch.cuda.manual_seed(args.seed)

    model = CSRNet().cuda()
    # Fix: size_average=False is deprecated; reduction='sum' is equivalent.
    criterion = nn.MSELoss(reduction='sum').cuda()
    criterion1 = nn.L1Loss().cuda()  # auxiliary L1 term passed to train()
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    # Optionally resume from a checkpoint.
    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(train_list, model, criterion, criterion1, optimizer, epoch)
        prec1 = validate(val_list, model, criterion)

        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        print(' * best MAE {mae:.3f} '.format(mae=best_prec1))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.pre,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.task)
# Collect every test image path, then build the trained CSRNet on the GPU.
img_paths = []
for path in path_sets:
    img_paths.extend(glob.glob(os.path.join(path, '*.jpg')))

model = CSRNet()
model = model.cuda()

checkpoint = torch.load('0model_best.pth.tar')
model.load_state_dict(checkpoint['state_dict'])
app = Flask(__name__)
app.config['TEMPLATES_AUTO_RELOAD'] = True

from VideoGet import VideoGet
import tensorflow as tf
from torchvision import datasets, transforms
import time
import yaml

# ImageNet normalization used by the pretrained CSRNet.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

model = CSRNet()
if torch.cuda.is_available():
    model.cuda()

from flask import g

graph = tf.get_default_graph()

DATABASE = 'peopleCount.db'
table_name = 'peopleCount'
fn_yaml = "cam1.yml"
last_pos = 0

# Fix: yaml.load without an explicit Loader is deprecated and can construct
# arbitrary Python objects from the file; safe_load parses plain data only.
with open(fn_yaml, 'r') as stream:
    observ_points = yaml.safe_load(stream)

contours = []
bounding_rects = []
sec_to_wait = 4
allpoints = []
def run_test(model_path_1, model_path_2, test):
    """Compare two trained CSRNet checkpoints on a ShanghaiTech test split.

    For every test image both models are evaluated; per-image MAE, count MSE,
    per-pixel MSE, PSNR and SWD are printed and bucketed by ground-truth crowd
    size.  Every 5 images a side-by-side heat-map figure is saved, and bar
    charts comparing the two models per bucket are written under
    ``compare_models/<name_1>_<name_2>``.

    Args:
        model_path_1: path to the first checkpoint (.pth.tar with 'state_dict').
        model_path_2: path to the second checkpoint, same format.
        test: 'B' selects part_B's test set (sparser crowds, smaller bucket
            thresholds); any other value selects part_A.
    """
    # ImageNet normalization matching CSRNet's VGG-16 frontend.
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    root = os.path.dirname(os.path.abspath(__file__))
    # Bucket labels and upper thresholds, chosen per dataset density.
    very_small = '<10' if test == 'B' else '<200'
    small = '10-50' if test == 'B' else '200-500'
    medium = '50-100' if test == 'B' else '500-1000'
    large = '100-200' if test == 'B' else '1000-1500'
    very_large = '200<' if test == 'B' else '1500<'
    very_small_criterion = 10 if test == 'B' else 200
    small_criterion = 50 if test == 'B' else 500
    medium_criterion = 100 if test == 'B' else 1000
    large_criterion = 200 if test == 'B' else 1500
    # Per-bucket metric accumulators: unsuffixed dicts are for model 1,
    # the *_1 suffixed dicts are for model 2 (confusing but preserved).
    result_dict_mea = {very_small: [], small: [], medium: [], large: [], very_large: []}
    result_dict_msa = {very_small: [], small: [], medium: [], large: [], very_large: []}
    result_dict_msa_pixel = {very_small: [], small: [], medium: [], large: [], very_large: []}
    result_dict_psnr = {very_small: [], small: [], medium: [], large: [], very_large: []}
    result_dict_gt = {very_small: [], small: [], medium: [], large: [], very_large: []}
    result_dict_swd = {very_small: [], small: [], medium: [], large: [], very_large: []}
    result_dict_mea_1 = {very_small: [], small: [], medium: [], large: [], very_large: []}
    result_dict_msa_1 = {very_small: [], small: [], medium: [], large: [], very_large: []}
    result_dict_msa_pixel_1 = {very_small: [], small: [], medium: [], large: [], very_large: []}
    result_dict_psnr_1 = {very_small: [], small: [], medium: [], large: [], very_large: []}
    result_dict_gt_1 = {very_small: [], small: [], medium: [], large: [], very_large: []}
    result_dict_swd_1 = {very_small: [], small: [], medium: [], large: [], very_large: []}
    # now generate the ShanghaiA's ground truth
    part_A_train = os.path.join(root, 'part_A_final/train_data', 'images')
    part_A_test = os.path.join(root, 'part_A_final/test_data', 'images')
    part_B_train = os.path.join(root, 'part_B_final/train_data', 'images')
    part_B_test = os.path.join(root, 'part_B_final/test_data', 'images')
    path_sets = [part_B_test] if test == 'B' else [part_A_test]
    model_name_1 = model_path_1
    model_name_2 = model_path_2
    img_paths = []
    for path in path_sets:
        for img_path in glob.glob(os.path.join(path, '*.jpg')):
            img_paths.append(img_path)
    # Load both checkpoints onto the GPU.
    model_1 = CSRNet()
    model_1 = model_1.cuda()
    checkpoint = torch.load(model_name_1)
    model_1.load_state_dict(checkpoint['state_dict'])
    model_2 = CSRNet()
    model_2 = model_2.cuda()
    checkpoint = torch.load(model_name_2)
    model_2.load_state_dict(checkpoint['state_dict'])
    from matplotlib import cm
    # Rolling buffers for the 5-image comparison figure (reset after each save).
    image_list = []
    gt_list = []
    predict_list_1 = []
    predict_list_2 = []
    mse_list = []
    mae_list = []
    psnr_list = []
    gt_number_list = []
    predict_number_list = []
    swd_list = []
    original_gt_list = []
    correct_resize_gt_list = []
    reg_resize_list = []
    original_image_list = []
    for i in range(len(img_paths)):
        plane_image = Image.open(img_paths[i]).convert('RGB')
        img = transform(plane_image).cuda()
        # Ground-truth density map lives next to the image, as .h5 under 'ground'.
        gt_file = h5py.File(img_paths[i].replace('.jpg', '.h5').replace('images', 'ground'), 'r')
        groundtruth = np.asarray(gt_file['density'])
        # Downsample the density map 8x by summing 8x8 blocks, so it matches
        # the network's output resolution while preserving total count.
        sum_convovled_kernel = np.ones((8, 8))
        target = sg.convolve2d(groundtruth, sum_convovled_kernel[::-1, ::-1], mode='valid')[::8, ::8]
        output_turch_1 = model_1(img.unsqueeze(0))
        output_1 = np.array(output_turch_1.data.cpu()[0, 0, :, :])
        cur_mae_1 = abs(output_1.sum() - np.sum(groundtruth))
        cur_mse_1 = np.square(output_1.sum() - target.sum())  # squared count error
        cur_image_mse_1 = np.square(output_1 - target).sum() / output_1.size
        gt_sum = np.sum(groundtruth)  # true crowd count, used for bucketing
        output_turch_2 = model_2(img.unsqueeze(0))
        output_2 = np.array(output_turch_2.data.cpu()[0, 0, :, :])
        cur_mae_2 = abs(output_2.sum() - np.sum(groundtruth))
        cur_mse_2 = np.square(output_2.sum() - target.sum())
        cur_image_mse_2 = np.square(output_2 - target).sum() / (output_2.shape[0] * output_2.shape[1])
        cur_psnr_1 = compare_psnr(target, output_1, data_range=1.0)
        cur_ssim_1 = compare_ssim(target, output_1)  # NOTE(review): computed but never used
        target_turch = torch.from_numpy(target)
        target_turch = target_turch.type(torch.FloatTensor).unsqueeze(0).unsqueeze(0).cuda()
        target_turch = Variable(target_turch)
        cur_swd_1 = swd.swd(target_turch, output_turch_1).detach().float().unsqueeze(0).data[0].float().item()
        # Upsample outputs/target back to image resolution for visualization;
        # /64 compensates for cubic interpolation scaling the 8x8-summed values.
        resize_output_1 = cv2.resize(output_1, (int(groundtruth.shape[1]), int(groundtruth.shape[0])), interpolation=cv2.INTER_CUBIC) / 64
        resize_output_2 = cv2.resize(output_2, (int(groundtruth.shape[1]), int(groundtruth.shape[0])), interpolation=cv2.INTER_CUBIC) / 64
        resize_target = cv2.resize(target, (int(groundtruth.shape[1]), int(groundtruth.shape[0])), interpolation=cv2.INTER_CUBIC) / 64
        cur_psnr_2 = compare_psnr(target, output_2, data_range=1.0)
        cur_swd_2 = swd.swd(target_turch, output_turch_2).detach().float().unsqueeze(0).data[0].float().item()
        original_image_list.append(np.array(plane_image).astype(np.uint8))
        original_gt_list.append(groundtruth)
        correct_resize_gt_list.append(target)
        reg_resize_list.append(resize_target)
        image_list.append(np.array(plane_image).astype(np.uint8))
        gt_list.append(resize_target)
        predict_list_1.append(resize_output_1)
        predict_list_2.append(resize_output_2)
        mae_list.append((round(cur_mae_1, 2), round(cur_mae_2, 2)))
        mse_list.append((round(cur_mse_1, 2), round(cur_mse_2, 2)))
        psnr_list.append((round(cur_psnr_1, 2), round(cur_psnr_2, 2)))
        swd_list.append((round(cur_swd_1, 2), round(cur_swd_2, 2)))
        gt_number_list.append(round(np.sum(groundtruth), 2))
        predict_number_list.append((round(output_1.sum(), 2), round(output_2.sum(), 2)))
        run_it = True  # NOTE(review): always True; figure saving is effectively unconditional
        # Assign this image to a crowd-size bucket.
        if gt_sum <= very_small_criterion:
            criterion = very_small
        elif gt_sum > very_small_criterion and gt_sum <= small_criterion:
            criterion = small
        elif gt_sum > small_criterion and gt_sum <= medium_criterion:
            criterion = medium
        elif gt_sum > medium_criterion and gt_sum <= large_criterion:
            criterion = large
        else:
            criterion = very_large
        result_dict_swd[criterion].append(cur_swd_1)
        result_dict_gt[criterion].append(gt_sum)
        result_dict_mea[criterion].append(cur_mae_1)
        result_dict_msa[criterion].append(cur_mse_1)
        result_dict_msa_pixel[criterion].append(cur_image_mse_1)
        result_dict_psnr[criterion].append(cur_psnr_1)
        result_dict_swd_1[criterion].append(cur_swd_2)
        result_dict_gt_1[criterion].append(gt_sum)
        result_dict_mea_1[criterion].append(cur_mae_2)
        result_dict_msa_1[criterion].append(cur_mse_2)
        result_dict_msa_pixel_1[criterion].append(cur_image_mse_2)
        result_dict_psnr_1[criterion].append(cur_psnr_2)
        # Flush a side-by-side comparison figure every 5 accumulated images.
        if run_it and len(image_list) > 4:
            #image_list, gt_list, predict_list_1, predict_list_2, mae_list, psnr_list, gt_number_list, predict_number_list,swd_list
            fig = create_heat_map_compare_two_models(image_list, gt_list, predict_list_1, predict_list_2, mae_list, mse_list, psnr_list, gt_number_list, predict_number_list, swd_list)
            name_1 = model_name_1.split('/')[-1].split('.')[0]
            name_2 = model_name_2.split('/')[-1].split('.')[0]
            path = os.path.join(root, 'compare_models', name_1 + '_' + name_2)
            os.makedirs(path, exist_ok=True)
            fig.savefig(os.path.join(path, name_1 + '_' + name_2 + '_compare_models_' + str(i) + '.png'))
            # fig_mea = crate_graph_per_number_of_gt(deepcopy(result_dict_mea),'MEA')
            # fig_msa = crate_graph_per_number_of_gt(deepcopy(result_dict_msa), 'MSA')
            # fig_ssim = crate_graph_per_number_of_gt(deepcopy(result_dict_ssim), 'SSIM')
            # fig_psnr = crate_graph_per_number_of_gt(deepcopy(result_dict_psnr), 'PSNR')
            # fig_swd = crate_graph_per_number_of_gt(deepcopy(result_dict_swd), 'SWD')
            image_list = []
            gt_list = []
            predict_list_1 = []
            predict_list_2 = []
            ssim_list = []
            mae_list = []
            psnr_list = []
            gt_number_list = []
            predict_number_list = []
        print('the psnr_1')
        print(i, cur_psnr_1)
        print('the psnr_2')
        print(i, cur_psnr_2)
        print('the mae_1')
        print(i, cur_mae_1)
        print('the mae_2')
        print(i, cur_mae_2)
        print('the mse_1')
        print(i, cur_mse_1)
        print('the mse_2')
        print(i, cur_mse_2)
        print('the mse_pixel_1')
        print(i, cur_image_mse_1)
        # NOTE(review): label below says mse_pixel_1 but prints model 2's value
        print('the mse_pixel_1')
        print(i, cur_image_mse_2)
    # Aggregate per-bucket results and save comparison bar charts.
    # NOTE(review): original indentation was lost; this tail is placed after
    # the image loop (it uses the full result dicts and the leaked loop index
    # ``i`` in the filenames) — confirm against the project history.
    name_1 = model_name_1.split('/')[-1].split('.')[0]
    name_2 = model_name_2.split('/')[-1].split('.')[0]
    cur_path = os.path.join(root, 'compare_models', name_1 + '_' + name_2)
    os.makedirs(cur_path, exist_ok=True)
    cur_result_dict_mea_1 = {}
    cur_result_dict_msa_1 = {}
    cur_result_dict_mea_2 = {}
    cur_result_dict_msa_2 = {}
    for key in result_dict_mea.keys():
        # MAE is normalized by the bucket's total ground-truth count.
        cur_result_dict_mea_1[key] = np.sum(result_dict_mea[key]) / np.sum(result_dict_gt[key]) if np.sum(result_dict_gt[key]) > 0 else 0
        cur_result_dict_msa_1[key] = result_dict_msa[key]
        cur_result_dict_mea_2[key] = np.sum(result_dict_mea_1[key]) / np.sum(result_dict_gt[key]) if np.sum(result_dict_gt[key]) > 0 else 0
        cur_result_dict_msa_2[key] = result_dict_msa_1[key]
    fig_mea = create_graph_per_number_of_gt_vs_two_models(deepcopy(cur_result_dict_mea_1), deepcopy(cur_result_dict_mea_2), 'MAE')
    fig_msa = create_graph_per_number_of_gt_vs_two_models(deepcopy(cur_result_dict_msa_1), deepcopy(cur_result_dict_msa_2), 'MSE')
    # fig_ssim = create_graph_per_number_of_gt_vs_two_models(deepcopy(result_dict_ssim),deepcopy(cur_result_dict_mea), 'SSIM')
    fig_psnr = create_graph_per_number_of_gt_vs_two_models(deepcopy(result_dict_psnr), deepcopy(result_dict_psnr_1), 'PSNR')
    fig_swd = create_graph_per_number_of_gt_vs_two_models(deepcopy(result_dict_swd), deepcopy(result_dict_swd_1), 'SWD')
    fig_mse_pixel = create_graph_per_number_of_gt_vs_two_models(deepcopy(result_dict_msa_pixel), deepcopy(result_dict_msa_pixel_1), 'Pixel MSE')
    os.makedirs(cur_path, exist_ok=True)
    fig_mea.savefig(os.path.join(cur_path, 'MEA_bar_' + str(i) + '.png'))
    os.makedirs(cur_path, exist_ok=True)
    fig_msa.savefig(os.path.join(cur_path, 'MSA_bar_' + str(i) + '.png'))
    os.makedirs(cur_path, exist_ok=True)
    fig_psnr.savefig(os.path.join(cur_path, 'PSNR_bar_' + str(i) + '.png'))
    os.makedirs(cur_path, exist_ok=True)
    fig_swd.savefig(os.path.join(cur_path, 'swd_bar_' + str(i) + '.png'))
    os.makedirs(cur_path, exist_ok=True)
    fig_mse_pixel.savefig(os.path.join(cur_path, 'per_pixel_mse_bar_' + str(i) + '.png'))
def main():
    """Train CSRNet with TensorBoard logging.

    Parses CLI args (lr, json paths, gpu, task, epochs come from the parser),
    builds model, loaders and meters, optionally resumes from ``args.pre``,
    then runs the epoch loop, logging to TensorBoard and checkpointing the
    best-MAE model each epoch.
    """
    global args, best_prec1
    global train_loader, test_loader, train_loader_len
    global losses, batch_time, data_time
    global writer
    best_prec1 = 1e6  # lowest validation MAE observed so far
    args = parser.parse_args()
    # Fixed hyper-parameters; lr itself comes from the CLI.
    args.original_lr = args.lr
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.steps = [-1, 1, 100, 150]  # consumed by adjust_learning_rate
    args.scales = [1, 1, 1, 1]
    args.workers = 4
    args.seed = time.time()
    args.print_freq = 30
    with open(args.train_json, 'r') as outfile:
        train_list = json.load(outfile)
    with open(args.test_json, 'r') as outfile:
        val_list = json.load(outfile)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    torch.cuda.manual_seed(args.seed)
    model = CSRNet()
    model = model.cuda()
    # FIX: size_average=False was deprecated and has been removed in recent
    # PyTorch releases; reduction='sum' is the exact equivalent.
    criterion = nn.MSELoss(reduction='sum').cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)
    if args.pre:  # resume from a checkpoint when supplied
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            # NOTE(review): assumes best_prec1 was saved as a CUDA tensor —
            # .cpu() would fail on a plain float; confirm against save_checkpoint.
            best_prec1 = checkpoint['best_prec1'].cpu()
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    writer = SummaryWriter('runs/{}'.format(args.task))
    train_loader = torch.utils.data.DataLoader(
        dataset.listDataset(train_list,
                            shuffle=True,
                            transform=transforms.Compose([
                                transforms.ToTensor(),
                                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                     std=[0.229, 0.224, 0.225])
                            ]),
                            train=True,
                            batch_size=args.batch_size,
                            num_workers=args.workers),
        batch_size=args.batch_size)
    test_loader = torch.utils.data.DataLoader(
        dataset.listDataset(val_list,
                            shuffle=False,
                            transform=transforms.Compose([
                                transforms.ToTensor(),
                                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                     std=[0.229, 0.224, 0.225])
                            ]),
                            train=False),
        batch_size=args.batch_size)
    train_loader_len = len(train_loader)
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(model, criterion, optimizer, epoch)
        print('Epoch time: {} s'.format(batch_time.sum))
        # Reset per-epoch meters and free cached GPU memory before validation.
        losses.reset()
        batch_time.reset()
        data_time.reset()
        torch.cuda.empty_cache()
        prec1 = validate(model)
        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)  # lower MAE is better
        print(' * best MAE {mae:.3f} '
              .format(mae=best_prec1))
        writer.add_scalar('validation_loss', prec1, epoch)
        for param_group in optimizer.param_groups:
            # Log the learning rate of the first parameter group only.
            writer.add_scalar('lr', param_group['lr'], epoch)
            break
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.pre,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer': optimizer.state_dict()
        }, is_best, args.task, '_' + str(epoch) + '.pth.tar')
    writer.close()