def main():
    """Train CSRNet on ShanghaiTech Part A with a joint MSE + L1 objective.

    Reads the train/val image lists from JSON, optionally resumes from a
    checkpoint (``args.pre``), then runs the train/validate loop, saving a
    checkpoint every epoch and tracking the best validation MAE.
    """
    global args, best_prec1

    best_prec1 = 1e6  # best (lowest) validation MAE seen so far
    args = parser.parse_args()
    args.original_lr = 1e-7
    args.lr = 1e-7
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5 * 1e-4  # SGD weight decay
    args.start_epoch = 0
    args.epochs = 800
    args.steps = [-1, 1, 100, 150]  # epochs at which adjust_learning_rate changes scale
    args.scales = [1, 1, 1, 1]      # lr multipliers matching args.steps
    args.workers = 4
    args.seed = time.time()
    args.print_freq = 30
    args.train_json = './json/mypart_A_train.json'
    args.test_json = './json/mypart_A_test.json'
    args.gpu = '0'
    args.task = 'shanghaiA'
    # args.pre = 'shanghaiAcheckpoint.pth.tar'

    with open(args.train_json, 'r') as outfile:
        train_list = json.load(outfile)
    with open(args.test_json, 'r') as outfile:
        val_list = json.load(outfile)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # manual_seed expects an integer seed; time.time() is a float.
    torch.cuda.manual_seed(int(args.seed))

    model = CSRNet()
    model = model.cuda()
    # model = nn.DataParallel(model, device_ids=[0, 1, 2])

    # size_average=False is deprecated (removed in recent PyTorch);
    # reduction='sum' is the equivalent modern spelling.
    criterion = nn.MSELoss(reduction='sum').cuda()
    criterion1 = nn.L1Loss().cuda()
    # criterion1 = myloss().cuda()

    # optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), args.lr)
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    # Optionally resume from a pre-trained checkpoint.
    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(train_list, model, criterion, criterion1, optimizer, epoch)
        prec1 = validate(val_list, model, criterion)

        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        print(' * best MAE {mae:.3f} '.format(mae=best_prec1))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.pre,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.task)
def main():
    """Train CSRNet for 100 epochs on lists given by command-line JSON paths.

    Unlike the other variants, train/test JSON paths, GPU id and task name
    come from the argument parser rather than being hard-coded here.
    """
    global args, best_prec1

    best_prec1 = 1e6  # best (lowest) validation MAE seen so far
    args = parser.parse_args()
    args.original_lr = 1e-7
    args.lr = 1e-7
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    #args.epochs = 400
    args.epochs = 100
    args.steps = [-1, 1, 100, 150]  # lr schedule breakpoints
    args.scales = [1, 1, 1, 1]
    args.workers = 4
    args.seed = time.time()
    args.print_freq = 30

    with open(args.train_json, 'r') as outfile:
        train_list = json.load(outfile)
    with open(args.test_json, 'r') as outfile:
        val_list = json.load(outfile)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # manual_seed expects an integer seed; time.time() is a float.
    torch.cuda.manual_seed(int(args.seed))

    model = CSRNet()
    model = model.cuda()

    # size_average=False is deprecated (removed in recent PyTorch);
    # reduction='sum' is the equivalent modern spelling.
    criterion = nn.MSELoss(reduction='sum').cuda()

    # SGD with a 1e-7 learning rate
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    # Resume from a pre-trained model if one was supplied.
    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))

    # Training loop
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(train_list, model, criterion, optimizer, epoch)
        prec1 = validate(val_list, model, criterion)

        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        print(' * best MAE {mae:.3f} '.format(mae=best_prec1))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.pre,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.task)
def main():
    """Train CSRNet with the sliced-Wasserstein (``swd``) criterion.

    This variant builds the train/val ``listDataset`` objects once up front
    (with ImageNet normalization) and reshuffles them after every epoch.
    """
    global args, best_prec1

    best_prec1 = 1e6  # lowest validation MAE observed so far
    args = parser.parse_args()

    # Hyper-parameters (hard-coded for this experiment).
    args.original_lr = 1e-7
    args.lr = 1e-7
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.epochs = 400
    args.steps = [-1, 1, 100, 150]
    args.scales = [1, 1, 1, 1]
    args.workers = 4
    args.seed = time.time()
    args.print_freq = 30

    # Load the image path lists for training and validation.
    with open(args.train_json, 'r') as outfile:
        train_list = json.load(outfile)
    with open(args.test_json, 'r') as outfile:
        val_list = json.load(outfile)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    torch.cuda.manual_seed(args.seed)

    model = CSRNet().cuda()

    # criterion = nn.MSELoss(size_average=False).cuda()
    criterion = swd  # sliced-Wasserstein distance instead of MSE

    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    # Resume from a checkpoint when one is provided.
    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))

    # ImageNet mean/std normalization shared by both datasets.
    normalize = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

    data_loader = dataset.listDataset(train_list,
                                      shuffle=True,
                                      transform=normalize,
                                      train=True,
                                      seen=model.seen,
                                      batch_size=args.batch_size,
                                      num_workers=args.workers)
    data_loader_val = dataset.listDataset(val_list,
                                          shuffle=False,
                                          transform=normalize,
                                          train=False)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(model, criterion, optimizer, epoch, data_loader)
        prec1 = validate(model, args.task, data_loader_val)

        # Reshuffle both datasets for the next epoch.
        data_loader.shuffle()
        data_loader_val.shuffle()

        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        print(' * best MAE {mae:.3f} '.format(mae=best_prec1))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.pre,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.task)
def main():
    """Train CSRNet using the OneFlow (``flow``) backend.

    Loads train/val lists from JSON, optionally resumes from a checkpoint,
    and saves a checkpoint (named by epoch) under ``args.modelPath`` each
    epoch, tracking the best validation MAE.
    """
    global args, best_prec1

    best_prec1 = 1e6  # lowest validation MAE observed so far
    args = parser.parse_args()

    # Hyper-parameters for this run.
    args.original_lr = 1e-7
    args.lr = 1e-7
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.epochs = 400
    args.steps = [-1, 1, 100, 150]
    args.scales = [1, 1, 1, 1]
    args.workers = 0
    args.seed = time.time()
    args.print_freq = 30

    # Read the image path lists.
    with open(args.train_json, "r") as outfile:
        train_list = json.load(outfile)
    with open(args.test_json, "r") as outfile:
        val_list = json.load(outfile)

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    model = CSRNet().to("cuda")
    criterion = nn.MSELoss(reduction="sum").to("cuda")
    optimizer = flow.optim.SGD(model.parameters(), args.lr,
                               momentum=args.momentum,
                               weight_decay=args.decay)

    # Resume from a checkpoint when one is provided.
    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = flow.load(args.pre)
            args.start_epoch = checkpoint["epoch"]
            best_prec1 = checkpoint["best_prec1"]
            model.load_state_dict(checkpoint["state_dict"])
            optimizer.load_state_dict(checkpoint["optimizer"])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.pre, checkpoint["epoch"]))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(train_list, model, criterion, optimizer, epoch)
        prec1 = validate(val_list, model, criterion)

        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        print(" * best MAE {mae:.3f} ".format(mae=best_prec1))
        # NOTE: this variant does not persist the optimizer state.
        save_checkpoint(
            {
                "epoch": epoch + 1,
                "arch": args.pre,
                "state_dict": model.state_dict(),
                "best_prec1": best_prec1,
            },
            is_best,
            str(epoch + 1),
            args.modelPath,
        )
def main():
    """Train CSRNet from CSV-listed data, recording the full MAE history.

    This variant reads train/test CSV paths from the parser, picks CUDA when
    available (CPU otherwise), and uses a decaying lr schedule
    (scales of 0.1 at epochs 20/40/60).
    """
    global args, best_prec1

    best_prec1 = 1e6  # best (lowest) validation MAE seen so far
    args = parser.parse_args()
    args.original_lr = 1e-5
    args.lr = 1e-5
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.epochs = 100
    args.steps = [-1, 20, 40, 60]       # lr decays at these epochs ...
    args.scales = [1, 0.1, 0.1, 0.1]    # ... by these factors
    args.workers = 4
    args.seed = time.time()
    args.print_freq = 30

    # with open(args.train_json, 'r') as outfile:
    #     train_list = json.load(outfile)
    # with open(args.test_json, 'r') as outfile:
    #     val_list = json.load(outfile)
    csv_train_path = args.train_csv
    csv_test_path = args.test_csv

    # os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # torch.cuda.manual_seed(args.seed)
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    model = CSRNet()
    #summary(model, (3, 256, 256))
    model = model.to(device)

    # size_average=False is deprecated (removed in recent PyTorch);
    # reduction='sum' is the equivalent modern spelling.
    criterion = nn.MSELoss(reduction='sum').to(device)
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    # Optionally resume from a checkpoint.
    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))

    precs = []  # per-epoch validation MAE, stored in every checkpoint
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(csv_train_path, model, criterion, optimizer, epoch)
        prec1 = validate(csv_test_path, model, criterion)
        precs.append(prec1)

        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        print(' * best MAE {mae:.3f} '.format(mae=best_prec1))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.pre,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
                'MAE_history': precs
            }, is_best, args.task)
def main():
    """Train CSRNet on file lists discovered from train/test directories.

    Each epoch, the training list is re-split 80/20 into train/validation
    subsets via ``getTrainAndValidateList``. Runs on CUDA when available.
    """
    global args, best_prec1

    best_prec1 = 1e6  # best (lowest) validation MAE seen so far
    args = parser.parse_args()
    print(args)
    args.original_lr = 1e-7
    args.lr = 1e-7
    # args.batch_size = 9
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.epochs = 400
    args.steps = [-1, 1, 100, 150]  # lr schedule breakpoints
    args.scales = [1, 1, 1, 1]
    args.workers = 4
    args.seed = time.time()
    args.print_freq = 30

    train_list, test_list = getTrainAndTestListFromPath(args.train_path,
                                                        args.test_path)
    splitRatio = 0.8  # fraction of train_list used for training each epoch

    print('batch size is ', args.batch_size)
    print('cuda available? {}'.format(torch.cuda.is_available()))
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')
    # os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # torch.cuda.manual_seed(args.seed)

    model = CSRNet()
    model = model.to(device)

    # size_average=False is deprecated (removed in recent PyTorch);
    # reduction='sum' is the equivalent modern spelling.
    criterion = nn.MSELoss(reduction='sum').to(device)
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    # Optionally resume from a checkpoint.
    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        # Fresh train/validation split each epoch.
        subsetTrain, subsetValid = getTrainAndValidateList(train_list,
                                                          splitRatio)
        train(subsetTrain, model, criterion, optimizer, epoch, device)
        prec1 = validate(subsetValid, model, criterion, device)

        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        print(' * best MAE {mae:.3f} '
              .format(mae=best_prec1))
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.pre,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer': optimizer.state_dict(),
        }, is_best, args.task)
cfg.writer.add_scalar('Train_Loss', epoch_loss / len(train_dataloader), epoch) model.eval() with torch.no_grad(): epoch_mae = 0.0 for i, data in enumerate(tqdm(test_dataloader)): image = data['image'].to(cfg.device) gt_densitymap = data['densitymap'].to(cfg.device) et_densitymap = model(image).detach() # forward propagation mae = abs(et_densitymap.data.sum() - gt_densitymap.data.sum()) epoch_mae += mae.item() epoch_mae /= len(test_dataloader) if epoch_mae < min_mae: min_mae, min_mae_epoch = epoch_mae, epoch torch.save(model.state_dict(), os.path.join(cfg.checkpoints, str(epoch) + ".pth")) # save checkpoints print('Epoch ', epoch, ' MAE: ', epoch_mae, ' Min MAE: ', min_mae, ' Min Epoch: ', min_mae_epoch) # print information cfg.writer.add_scalar('Val_MAE', epoch_mae, epoch) cfg.writer.add_image( str(epoch) + '/Image', denormalize(image[0].cpu())) cfg.writer.add_image( str(epoch) + '/Estimate density count:' + str('%.2f' % (et_densitymap[0].cpu().sum())), et_densitymap[0] / torch.max(et_densitymap[0])) cfg.writer.add_image( str(epoch) + '/Ground Truth count:' + str('%.2f' % (gt_densitymap[0].cpu().sum())),
def main():
    """Train the CSE547 CSRNet-original model on ShanghaiTech Part A.

    Appends the best-MAE line of every epoch to a timestamped log file under
    ``logs/`` in addition to printing it.
    """
    global args, best_prec1

    best_prec1 = 1e6  # best (lowest) validation MAE seen so far
    args = parser.parse_args()
    args.original_lr = 1e-7
    args.lr = 1e-7
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.epochs = 100
    args.steps = [-1, 1, 100, 150]  # adjust learning rate
    args.scales = [1, 1, 1, 1]      # adjust learning rate
    args.workers = 4
    args.seed = time.time()
    args.print_freq = 30
    args.arch = 'cse547_CSRNet_original_A'

    with open(args.train_json, 'r') as outfile:
        train_list = json.load(outfile)
    with open(args.test_json, 'r') as outfile:
        val_list = json.load(outfile)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # Seed the CUDA RNG for reproducible GPU random generation
    # (e.g. torch.cuda.FloatTensor(100).uniform_()). manual_seed expects
    # an integer; time.time() is a float.
    torch.cuda.manual_seed(int(args.seed))

    model = CSRNet()
    model = model.cuda()

    # size_average=False is deprecated (removed in recent PyTorch);
    # reduction='sum' is the equivalent modern spelling.
    criterion = nn.MSELoss(reduction='sum').cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    # Optionally resume from a checkpoint.
    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(train_list, model, criterion, optimizer, epoch)
        prec1 = validate(val_list, model, criterion)

        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        line = ' * best MAE {mae:.3f} '.format(mae=best_prec1)
        # Mirror the best-MAE line into the run's log file.
        with open('logs/{}_{}.log'.format(time_stp, args.arch), 'a+') as flog:
            print(line)
            flog.write('{}\n'.format(line))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.pre,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.task)
def main():
    """Train CSRNet with TensorBoard logging and shared AverageMeter state.

    Builds the train/test ``DataLoader``s once, exposes them (plus the
    meters and the ``SummaryWriter``) as globals for ``train``/``validate``,
    and logs validation loss and learning rate per epoch.
    """
    global args, best_prec1
    global train_loader, test_loader, train_loader_len
    global losses, batch_time, data_time
    global writer

    best_prec1 = 1e6  # best (lowest) validation MAE seen so far
    args = parser.parse_args()
    args.original_lr = args.lr  # lr (and epochs) come from the parser
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.steps = [-1, 1, 100, 150]  # lr schedule breakpoints
    args.scales = [1, 1, 1, 1]
    args.workers = 4
    args.seed = time.time()
    args.print_freq = 30

    with open(args.train_json, 'r') as outfile:
        train_list = json.load(outfile)
    with open(args.test_json, 'r') as outfile:
        val_list = json.load(outfile)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # manual_seed expects an integer seed; time.time() is a float.
    torch.cuda.manual_seed(int(args.seed))

    model = CSRNet()
    model = model.cuda()

    # size_average=False is deprecated (removed in recent PyTorch);
    # reduction='sum' is the equivalent modern spelling.
    criterion = nn.MSELoss(reduction='sum').cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    # Optionally resume from a checkpoint.
    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            # best_prec1 was checkpointed as a CUDA tensor; move it to CPU.
            best_prec1 = checkpoint['best_prec1'].cpu()
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))

    # Meters shared with train()/validate() via the globals above.
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    writer = SummaryWriter('runs/{}'.format(args.task))

    train_loader = torch.utils.data.DataLoader(
        dataset.listDataset(train_list,
                            shuffle=True,
                            transform=transforms.Compose([
                                transforms.ToTensor(),
                                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                     std=[0.229, 0.224, 0.225])
                            ]),
                            train=True,
                            batch_size=args.batch_size,
                            num_workers=args.workers),
        batch_size=args.batch_size)
    test_loader = torch.utils.data.DataLoader(
        dataset.listDataset(val_list,
                            shuffle=False,
                            transform=transforms.Compose([
                                transforms.ToTensor(),
                                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                     std=[0.229, 0.224, 0.225])
                            ]),
                            train=False),
        batch_size=args.batch_size)
    train_loader_len = len(train_loader)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(model, criterion, optimizer, epoch)
        print('Epoch time: {} s'.format(batch_time.sum))

        # Reset the shared meters and free cached GPU memory before eval.
        losses.reset()
        batch_time.reset()
        data_time.reset()
        torch.cuda.empty_cache()

        prec1 = validate(model)
        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        print(' * best MAE {mae:.3f} '
              .format(mae=best_prec1))

        writer.add_scalar('validation_loss', prec1, epoch)
        # Log the lr of the first parameter group only.
        for param_group in optimizer.param_groups:
            writer.add_scalar('lr', param_group['lr'], epoch)
            break

        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.pre,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer': optimizer.state_dict()
        }, is_best, args.task, '_' + str(epoch) + '.pth.tar')

    writer.close()