Esempio n. 1
0
def main():
    """Train CSRNet for crowd counting and checkpoint the best-MAE model.

    Reads train/val image-path lists from the JSON files named in the
    parsed command-line arguments, then runs the standard train/validate
    loop, saving a checkpoint every epoch and tagging the best one.
    """
    global args, best_prec1
    best_prec1 = 1e6  # best validation MAE seen so far (lower is better)
    args = parser.parse_args()  # namespace of command-line options
    # Fixed hyper-parameters; these override anything parsed above.
    args.original_lr = 1e-7
    args.lr = 1e-7
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.epochs = 400
    args.steps = [-1, 1, 100, 150]  # epoch milestones used by adjust_learning_rate
    args.scales = [1, 1, 1, 1]      # per-milestone LR multipliers
    args.workers = 4
    args.seed = time.time()  # NOTE(review): time-based seed -> runs are not reproducible
    args.print_freq = 30
    # Each JSON file holds a list of absolute image paths.
    with open(args.train_json, 'r') as outfile:
        train_list = json.load(outfile)
    with open(args.test_json, 'r') as outfile:
        val_list = json.load(outfile)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    torch.cuda.manual_seed(args.seed)
    model = CSRNet()
    model = model.cuda()
    # reduction='sum' is the non-deprecated spelling of size_average=False.
    criterion = nn.MSELoss(reduction='sum').cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)
    if args.pre:  # resume from a pre-trained checkpoint when one was given
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(train_list, model, criterion, optimizer, epoch)
        prec1 = validate(val_list, model, criterion)  # validation MAE
        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)  # keep the lowest MAE
        print(' * best MAE {mae:.3f} '.format(mae=best_prec1))
        save_checkpoint({  # persist full training state every epoch
            'epoch': epoch + 1,
            'arch': args.pre,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer' : optimizer.state_dict(),
        }, is_best,args.task)
Esempio n. 2
0
def main():
    """Train CSRNet on ShanghaiTech part A, checkpointing the best-MAE model.

    Unlike the JSON-driven variants, the train/val image lists are derived
    from a hard-coded dataset directory via ``get_train_val_list``.
    """
    global args, best_prec1
    args = make_meow_args()

    best_prec1 = 1e6  # best validation MAE seen so far (lower is better)

    # Fixed hyper-parameters; these override anything in the parsed args.
    args.original_lr = 1e-7
    args.lr = 1e-7
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.epochs = 400
    args.steps = [-1, 1, 100, 150]  # milestones consumed by adjust_learning_rate
    args.scales = [1, 1, 1, 1]      # per-milestone LR multipliers
    args.workers = 4
    args.seed = time.time()  # NOTE(review): time-based seed -> not reproducible
    args.print_freq = 30

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    torch.cuda.manual_seed(args.seed)

    # NOTE(review): dataset location is hard-coded; consider promoting it to
    # a command-line argument.
    DATA_PATH = "/data/cv_data/shanghaitech-with-people-density-map/ShanghaiTech/part_A/train_data"
    train_list, val_list = get_train_val_list(DATA_PATH)

    model = CSRNet()
    model = model.cuda()

    # reduction='sum' is the non-deprecated spelling of size_average=False.
    criterion = nn.MSELoss(reduction='sum').cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    # Resume from a pre-trained checkpoint when one was given.
    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        train(train_list, model, criterion, optimizer, epoch)
        prec1 = validate(val_list, model, criterion)  # validation MAE

        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        print(' * best MAE {mae:.3f} '.format(mae=best_prec1))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.pre,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.task)
def main():
    """Train CSRNet, building train/val lists by listing image directories.

    ``args.train_path`` and ``args.test_path`` are directories whose files
    are the training and validation images respectively.
    """
    global args, best_prec1
    best_prec1 = 1e6  # best validation MAE seen so far (lower is better)

    args = parser.parse_args()
    # Fixed hyper-parameters; these override anything parsed above.
    args.original_lr = 1e-7
    args.lr = 1e-7
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.epochs = 400
    args.steps = [-1, 1, 100, 150]  # milestones consumed by adjust_learning_rate
    args.scales = [1, 1, 1, 1]
    args.workers = 4
    args.seed = time.time()  # NOTE(review): time-based seed -> not reproducible
    args.print_freq = 30

    # Every file under train_path is treated as a training image.
    train_list = [
        os.path.join(args.train_path, i) for i in os.listdir(args.train_path)
    ]
    # debug: dump the resolved training paths
    print(train_list)
    # BUGFIX: the validation paths must be rooted at test_path; the original
    # joined train_path with the listing of test_path, producing paths that
    # generally do not exist.
    val_list = [
        os.path.join(args.test_path, j) for j in os.listdir(args.test_path)
    ]

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    torch.cuda.manual_seed(args.seed)

    model = CSRNet()
    model = model.to(device)

    # reduction='sum' is the non-deprecated spelling of size_average=False.
    criterion = nn.MSELoss(reduction='sum').to(device)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    # Resume from a pre-trained checkpoint when one was given.
    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        train(train_list, model, criterion, optimizer, epoch)
        prec1 = validate(val_list, model, criterion)  # validation MAE

        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        print(' * best MAE {mae:.3f} '.format(mae=best_prec1))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.pre,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.task)
Esempio n. 4
0
--train_image_gt_root /home/gohyojun/바탕화면/Anthroprocene/Dataset/crane_labeled
--train_image_density_root /home/gohyojun/바탕화면/Anthroprocene/Dataset/density_map

--test_image_root /home/gohyojun/바탕화면/Anthroprocene/Dataset/crane
--test_image_gt_root /home/gohyojun/바탕화면/Anthroprocene/Dataset/crane_labeled
--test_image_density_root /home/gohyojun/바탕화면/Anthroprocene/Dataset/density_map
"""


if __name__=="__main__":
    # argument parsing.
    args = parser.parse_args()
    cfg = Config(args)                                                          # configuration
    model = CSRNet().to(cfg.device)                                         # model
    # NOTE(review): size_average=False is deprecated; reduction='sum' is the
    # modern, behavior-identical spelling.
    criterion = nn.MSELoss(size_average=False)                              # objective
    optimizer = torch.optim.Adam(model.parameters(),lr=cfg.lr)              # optimizer

    train_dataloader = create_train_dataloader(cfg.train_dataset_root, use_flip=True, batch_size=cfg.batch_size)
    test_dataloader  = create_test_dataloader(cfg.test_dataset_root)             # dataloader

    # Track the best (lowest) validation MAE and the epoch it occurred at.
    min_mae = sys.maxsize
    min_mae_epoch = -1
    for epoch in range(1, cfg.epochs):                          # start training
        model.train()
        epoch_loss = 0.0
        for i, data in enumerate(tqdm(train_dataloader)):
            image = data['image'].to(cfg.device)
            # The x16 factor presumably compensates the 1/4 spatial
            # down-scaling of the density map (1/4 per axis -> 1/16 area);
            # TODO confirm against the dataloader.
            gt_densitymap = data['densitymap'].to(cfg.device) * 16# todo: because of the 1/4 rescale effect
            et_densitymap = model(image)                        # forward propagation
            loss = criterion(et_densitymap,gt_densitymap)       # calculate loss
            epoch_loss += loss.item()
            # NOTE(review): this excerpt is truncated — the backward pass and
            # optimizer step are not visible here.


def save_dictionary(dictpath_json, dictionary_data):
    """Serialize *dictionary_data* as JSON to the file *dictpath_json*.

    Parameters
    ----------
    dictpath_json : str
        Destination path; the file is created or overwritten.
    dictionary_data : dict
        JSON-serializable mapping to write.
    """
    # A context manager guarantees the handle is closed even if
    # ``json.dump`` raises (the original leaked it on error).
    with open(dictpath_json, "w") as a_file:
        json.dump(dictionary_data, a_file)

# Evaluation-script preamble: load test image paths, build the model, and
# restore the best checkpoint. (Uses `args` parsed earlier in the file.)

# The JSON file holds a list of test-image paths.
with open(args.test_json, 'r') as outfile:
    img_paths = json.load(outfile)

# Report the number of trainable parameters.
model = CSRNet()
pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(pytorch_total_params)

# Move the model to the GPU.
model = model.cuda()

# Load the trained weights from the best checkpoint under args.output.
checkpoint = torch.load(os.path.join(args.output,'model_best.pth.tar'))
# Restore model parameters only (optimizer state is not needed for eval).
model.load_state_dict(checkpoint['state_dict'])

# Accumulators for the evaluation loop — presumably filled further below
# (this excerpt ends before the loop).
mae = 0
pred= []
gt = []

dictionary_counts={}
Esempio n. 6
0
    # Parameter configuration: unpack everything from the YAML config file.
    # (The file name 'gcc2shhb.yml' suggests GCC -> ShanghaiTech B domain
    # adaptation — confirm against the config.)
    cfg_path = './config/gcc2shhb.yml'
    (dataset, data_path, target_data_path, log_path, pre_trained_path,
     batch_size, lr, epoch_num, steps, decay_rate, start_epoch, snap_shot,
     resize, val_size) = parse_params_and_print(cfg_path)

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    logger = Logger(log_path)

    # Source-domain training data; target-domain data is used for validation.
    train_loader, _ = loading_data(data_path,
                                   mode='train',
                                   batch_size=batch_size)
    val_loader, _ = loading_data(target_data_path, mode='val')

    net = CSRNet().to(device)
    # Only parameters with requires_grad are optimized (frozen layers excluded).
    optimizer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()),
                          lr=lr,
                          momentum=0.9)
    # NOTE(review): size_average=False is deprecated; reduction='sum' is the
    # modern, behavior-identical spelling for both losses.
    criterion_dens = nn.MSELoss(size_average=False)
    criterion_count = nn.L1Loss(size_average=False)

    # Start training and validation.
    best_mae = sys.maxsize
    for epoch in range(start_epoch, epoch_num + 1):
        print('Epoch {}/{}'.format(epoch, epoch_num))
        # Training phase: step the LR schedule, then switch to train mode.
        optimizer = update_lr(optimizer, epoch, steps, decay_rate)
        net.train()

        running_loss = 0.0
        running_mse = 0.0
        # NOTE(review): excerpt ends here — the rest of the epoch loop is not
        # visible in this chunk.
Esempio n. 7
0
def main():
    """Train CSRNet on the fish-counting splits with Visdom curve plotting.

    Train/val image lists come from comma-separated text files
    (``args.fish_train`` / ``args.fish_val``); loss and accuracy curves are
    streamed to Visdom and dumped to CSV at the end.
    """
    global args, best_prec1

    best_prec1 = 1e6  # best validation MAE seen so far (lower is better)

    args = parser.parse_args()
    # Fixed hyper-parameters; these override anything parsed above.
    args.original_lr = 1e-5
    args.lr = 1e-5
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.epochs = 100
    args.steps = [-1, 1, 100, 150]  # milestones consumed by adjust_learning_rate
    args.scales = [1, 1, 1, 1]
    args.workers = 4
    args.seed = time.time()  # NOTE(review): time-based seed -> not reproducible
    args.print_freq = 30
    # The list files contain comma-separated image paths.
    with open(args.fish_train, 'r') as outfile:
        train_list = outfile.read().split(',')
    with open(args.fish_val, 'r') as outfile:
        val_list = outfile.read().split(',')

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    torch.cuda.manual_seed(args.seed)

    # Build the model on the GPU.
    model = CSRNet()
    model = model.cuda()

    # reduction='sum' is the non-deprecated spelling of size_average=False.
    criterion = nn.MSELoss(reduction='sum').cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    # Resume from a checkpoint saved at a previous interruption point.
    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))

    # Visualization setup.
    # NOTE(review): the windows created here ('train_loss', 'val_acc') are not
    # the ones updated inside the loop ('loss', 'accuracy') — confirm intent.
    viz = Visdom()
    viz.line([0.], [0], win='train_loss', opts=dict(title='train_loss'))
    viz.line([0.], [0], win='val_acc', opts=dict(title='val_loss'))
    global_step = 0
    # NOTE(review): `losses` and `accuracy` are never reassigned, so the
    # plotted curves are constant zero; `train`/`validate` would need to
    # return these values for the plots to be meaningful.
    losses = 0
    accuracy = 0
    Loss_list = []
    Accuracy_list = []
    # Main train/validate/checkpoint loop.
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        train(train_list, model, criterion, optimizer, epoch)
        # Stream the live loss curve to Visdom.
        viz.line([losses], [global_step], win='loss', update='append')
        Loss_list.append(losses)

        # Validation pass (returns the MAE).
        prec1 = validate(val_list, model, criterion)

        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        print(' * best MAE {mae:.3f} '.format(mae=best_prec1))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.pre,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.task)

        # Stream the accuracy curve to Visdom.
        viz.line([accuracy], [global_step], win='accuracy', update='append')
        Accuracy_list.append(accuracy)
        global_step += 1

    csv_save(Loss_list)
    csv_save(Accuracy_list)
Esempio n. 8
0
def main():
    """Train CSRNet tracking both best MAE and best MSE via TensorBoard.

    Uses JSON train/test lists, logs MAE/MSE scalars through the
    module-level ``writer``, and checkpoints every epoch.
    """
    global args, best_prec1

    best_prec1 = 1e6  # best validation MAE so far
    best_prec2 = 1e6  # best validation MSE so far

    args = parser.parse_args()
    # Fixed hyper-parameters (LR raised from the usual 1e-7 to 1e-6).
    args.original_lr = 1e-6
    args.lr = 1e-6

    args.batch_size = 1
    args.momentum = 0.95

    args.decay = 5*1e-4
    args.start_epoch = 0
    args.epochs = 200

    args.steps = [-1,1,100,150]
    args.scales = [1, 1, 0.1, 0.1]  # LR decays 10x at the later milestones
    args.workers = 4
    args.seed = time.time()  # NOTE(review): time-based seed -> not reproducible
    args.print_freq = 100

    # Each JSON file holds a list of image paths.
    with open(args.train_json, 'r') as outfile:
        train_list = json.load(outfile)
    with open(args.test_json, 'r') as outfile:
        val_list = json.load(outfile)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    torch.cuda.manual_seed(args.seed)

    model = CSRNet()
    model = model.cuda()

    # reduction='sum' is the non-deprecated spelling of size_average=False.
    criterion = nn.MSELoss(reduction='sum').cuda()

    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    # Resume from a pre-trained checkpoint when one was given.
    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            best_prec2 = checkpoint['best_prec2']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))

    for epoch in range(args.start_epoch, args.epochs):

        start = time.time()  # wall-clock timing for this epoch
        adjust_learning_rate(optimizer, epoch)

        train(train_list, model, criterion, optimizer, epoch)
        prec1, mse = validate(val_list, model, criterion, epoch)

        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        best_prec2 = min(mse, best_prec2)
        writer.add_scalar('MAE(MSE)/mae', best_prec1, epoch)
        writer.add_scalar('MAE(MSE)/mse', mse, epoch)

        print(' * best MAE {mae:.3f},best MSE {mse:.3f} '
              .format(mae=best_prec1, mse=best_prec2))
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.pre,
            'state_dict': model.state_dict(),
            # BUGFIX: save the best MSE, not the current epoch's value — the
            # resume path above reads 'best_prec2' back as the running best.
            'best_prec2': best_prec2,
            'best_prec1': best_prec1,
            'optimizer' : optimizer.state_dict(),
        }, is_best, args.task)

        during = time.time()-start
        print('Training complete in {:.0f}m {:.0f}s'.format(during/60, during % 60))
Esempio n. 9
0
def main():
    """Train CSRNet on ShanghaiTech part A with a combined MSE + L1 loss.

    All paths and the GPU/task names are hard-coded below; ``train`` takes
    both criteria while ``validate`` uses only the MSE criterion.
    """
    global args, best_prec1

    best_prec1 = 1e6  # best validation MAE seen so far (lower is better)

    args = parser.parse_args()
    # Fixed hyper-parameters; these override anything parsed above.
    args.original_lr = 1e-7
    args.lr = 1e-7
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.epochs = 800
    args.steps = [-1, 1, 100, 150]  # milestones consumed by adjust_learning_rate
    args.scales = [1, 1, 1, 1]
    args.workers = 4
    args.seed = time.time()  # NOTE(review): time-based seed -> not reproducible
    args.print_freq = 30
    # Hard-coded dataset/run configuration.
    args.train_json = './json/mypart_A_train.json'
    args.test_json = './json/mypart_A_test.json'
    args.gpu = '0'
    args.task = 'shanghaiA'
    # To resume, set args.pre to a checkpoint path,
    # e.g. 'shanghaiAcheckpoint.pth.tar'.
    with open(args.train_json, 'r') as outfile:
        train_list = json.load(outfile)
    with open(args.test_json, 'r') as outfile:
        val_list = json.load(outfile)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    torch.cuda.manual_seed(args.seed)

    model = CSRNet()
    model = model.cuda()

    # Density loss: reduction='sum' is the non-deprecated spelling of
    # size_average=False.  criterion1 is an auxiliary (mean) L1 loss that
    # only the training routine consumes.
    criterion = nn.MSELoss(reduction='sum').cuda()
    criterion1 = nn.L1Loss().cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    # Resume from a pre-trained checkpoint when one was given.
    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))

    for epoch in range(args.start_epoch, args.epochs):

        adjust_learning_rate(optimizer, epoch)

        train(train_list, model, criterion, criterion1, optimizer, epoch)
        prec1 = validate(val_list, model, criterion)  # validation MAE

        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        print(' * best MAE {mae:.3f} '.format(mae=best_prec1))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.pre,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.task)
Esempio n. 10
0
def main():
    """Run the CSRNet training loop (OneFlow variant), keeping the best-MAE model."""
    global args, best_prec1

    best_prec1 = 1e6  # running best validation MAE (lower is better)

    args = parser.parse_args()
    # Hard-wired hyper-parameters, applied over the parsed namespace.
    for key, value in {
        "original_lr": 1e-7,
        "lr": 1e-7,
        "batch_size": 1,
        "momentum": 0.95,
        "decay": 5 * 1e-4,
        "start_epoch": 0,
        "epochs": 400,
        "steps": [-1, 1, 100, 150],
        "scales": [1, 1, 1, 1],
        "workers": 0,
        "seed": time.time(),
        "print_freq": 30,
    }.items():
        setattr(args, key, value)

    # Each JSON file holds a list of image paths.
    with open(args.train_json, "r") as fp:
        train_list = json.load(fp)
    with open(args.test_json, "r") as fp:
        val_list = json.load(fp)

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    model = CSRNet().to("cuda")
    criterion = nn.MSELoss(reduction="sum").to("cuda")
    optimizer = flow.optim.SGD(model.parameters(),
                               args.lr,
                               momentum=args.momentum,
                               weight_decay=args.decay)

    # Resume when a checkpoint path was supplied.
    if args.pre:
        if not os.path.isfile(args.pre):
            print("=> no checkpoint found at '{}'".format(args.pre))
        else:
            print("=> loading checkpoint '{}'".format(args.pre))
            ckpt = flow.load(args.pre)
            args.start_epoch = ckpt["epoch"]
            best_prec1 = ckpt["best_prec1"]
            model.load_state_dict(ckpt["state_dict"])

            optimizer.load_state_dict(ckpt["optimizer"])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.pre, ckpt["epoch"]))

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(train_list, model, criterion, optimizer, epoch)
        mae = validate(val_list, model, criterion)
        improved = mae < best_prec1
        if improved:
            best_prec1 = mae
        print(" * best MAE {mae:.3f} ".format(mae=best_prec1))
        state = {
            "epoch": epoch + 1,
            "arch": args.pre,
            "state_dict": model.state_dict(),
            "best_prec1": best_prec1,
        }
        save_checkpoint(state, improved, str(epoch + 1), args.modelPath)
Esempio n. 11
0
def main():
    """Train CSRNet using the sliced-Wasserstein criterion, tracking best MAE."""
    global args, best_prec1

    best_prec1 = 1e6  # lowest validation MAE observed so far

    args = parser.parse_args()
    # Hard-coded hyper-parameters override the parsed command line.
    vars(args).update(
        original_lr=1e-7,
        lr=1e-7,
        batch_size=1,
        momentum=0.95,
        decay=5 * 1e-4,
        start_epoch=0,
        epochs=400,
        steps=[-1, 1, 100, 150],
        scales=[1, 1, 1, 1],
        workers=4,
        seed=time.time(),
        print_freq=30,
    )
    # Each JSON file holds a list of image paths.
    with open(args.train_json, 'r') as f:
        train_list = json.load(f)
    with open(args.test_json, 'r') as f:
        val_list = json.load(f)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    torch.cuda.manual_seed(args.seed)

    model = CSRNet().cuda()

    # Sliced-Wasserstein distance stands in for the usual MSE loss here.
    criterion = swd
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    # Resume when a checkpoint path was supplied.
    if args.pre:
        if not os.path.isfile(args.pre):
            print("=> no checkpoint found at '{}'".format(args.pre))
        else:
            print("=> loading checkpoint '{}'".format(args.pre))
            ckpt = torch.load(args.pre)
            args.start_epoch = ckpt['epoch']
            best_prec1 = ckpt['best_prec1']
            model.load_state_dict(ckpt['state_dict'])
            optimizer.load_state_dict(ckpt['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.pre, ckpt['epoch']))

    # ImageNet normalization shared by both dataset views.
    normalize = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    train_data = dataset.listDataset(train_list,
                                     shuffle=True,
                                     transform=normalize,
                                     train=True,
                                     seen=model.seen,
                                     batch_size=args.batch_size,
                                     num_workers=args.workers)
    val_data = dataset.listDataset(val_list,
                                   shuffle=False,
                                   transform=normalize,
                                   train=False)
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        train(model, criterion, optimizer, epoch, train_data)
        mae = validate(model, args.task, val_data)
        # Reshuffle both datasets between epochs.
        train_data.shuffle()
        val_data.shuffle()
        improved = mae < best_prec1
        if improved:
            best_prec1 = mae
        print(' * best MAE {mae:.3f} '.format(mae=best_prec1))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.pre,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, improved, args.task)
Esempio n. 12
0
def main():
    """Train CSRNet from CSV-listed data, recording the full MAE history.

    Train/val inputs are CSV paths (``args.train_csv`` / ``args.test_csv``)
    passed straight through to ``train``/``validate``; the per-epoch MAE
    history is stored in every checkpoint under ``'MAE_history'``.
    """
    global args, best_prec1

    best_prec1 = 1e6  # best validation MAE seen so far (lower is better)

    args = parser.parse_args()
    # Fixed hyper-parameters; these override anything parsed above.
    args.original_lr = 1e-5
    args.lr = 1e-5
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.epochs = 100
    args.steps = [-1, 20, 40, 60]      # LR decays 10x at epochs 20/40/60
    args.scales = [1, 0.1, 0.1, 0.1]
    args.workers = 4
    args.seed = time.time()  # NOTE(review): time-based seed -> not reproducible
    args.print_freq = 30

    csv_train_path = args.train_csv
    csv_test_path = args.test_csv

    # Fall back to CPU when CUDA is unavailable.
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    model = CSRNet()
    model = model.to(device)

    # reduction='sum' is the non-deprecated spelling of size_average=False.
    criterion = nn.MSELoss(reduction='sum').to(device)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    # Resume from a pre-trained checkpoint when one was given.
    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))
    precs = []  # per-epoch validation MAE history
    for epoch in range(args.start_epoch, args.epochs):

        adjust_learning_rate(optimizer, epoch)

        train(csv_train_path, model, criterion, optimizer, epoch)
        prec1 = validate(csv_test_path, model, criterion)
        precs.append(prec1)
        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        print(' * best MAE {mae:.3f} '.format(mae=best_prec1))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.pre,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
                'MAE_history': precs
            }, is_best, args.task)
Esempio n. 13
0
def main():
    """Train CSRNet with a fresh 80/20 train/validation split every epoch.

    Image lists come from ``getTrainAndTestListFromPath``; each epoch
    re-splits the training list via ``getTrainAndValidateList``.
    """
    global args, best_prec1

    best_prec1 = 1e6  # best validation MAE seen so far (lower is better)

    args = parser.parse_args()
    print(args)
    # Fixed hyper-parameters; these override anything parsed above.
    args.original_lr = 1e-7
    args.lr = 1e-7
    # NOTE(review): args.batch_size is printed below but not set here — it
    # must come from the parser; confirm the parser defines it.
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.epochs = 400
    args.steps = [-1, 1, 100, 150]  # milestones consumed by adjust_learning_rate
    args.scales = [1, 1, 1, 1]
    args.workers = 4
    args.seed = time.time()  # NOTE(review): time-based seed -> not reproducible
    args.print_freq = 30

    train_list, test_list = getTrainAndTestListFromPath(args.train_path, args.test_path)
    splitRatio = 0.8  # fraction of train_list used for training each epoch

    print('batch size is ', args.batch_size)
    print('cuda available? {}'.format(torch.cuda.is_available()))

    # Fall back to CPU when CUDA is unavailable.
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    model = CSRNet()
    model = model.to(device)

    # reduction='sum' is the non-deprecated spelling of size_average=False.
    criterion = nn.MSELoss(reduction='sum').to(device)

    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    # Resume from a pre-trained checkpoint when one was given.
    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))

    for epoch in range(args.start_epoch, args.epochs):

        adjust_learning_rate(optimizer, epoch)

        # Re-split train/validation subsets each epoch.
        subsetTrain, subsetValid = getTrainAndValidateList(train_list, splitRatio)

        train(subsetTrain, model, criterion, optimizer, epoch, device)
        prec1 = validate(subsetValid, model, criterion, device)

        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        print(' * best MAE {mae:.3f} '
              .format(mae=best_prec1))
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.pre,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer' : optimizer.state_dict(),
        }, is_best,args.task)
Esempio n. 14
0
def main():
    """Train CSRNet (cse547 variant) and append the best MAE to a log file.

    Uses JSON train/test lists; the per-epoch best-MAE line is both printed
    and appended to ``logs/<time_stp>_<arch>.log`` (``time_stp`` is a
    module-level value; the ``logs/`` directory must already exist).
    """
    global args, best_prec1

    best_prec1 = 1e6  # best validation MAE seen so far (lower is better)

    args = parser.parse_args()
    # Fixed hyper-parameters; these override anything parsed above.
    args.original_lr = 1e-7
    args.lr = 1e-7
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.epochs = 100
    args.steps = [-1, 1, 100, 150]  # adjust learning rate
    args.scales = [1, 1, 1, 1]  # adjust learning rate
    args.workers = 4
    args.seed = time.time()  # NOTE(review): time-based seed -> not reproducible
    args.print_freq = 30
    args.arch = 'cse547_CSRNet_original_A'
    # Each JSON file holds a list of image paths.
    with open(args.train_json, 'r') as outfile:
        train_list = json.load(outfile)
    with open(args.test_json, 'r') as outfile:
        val_list = json.load(outfile)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # Seeding the CUDA RNG makes GPU random generation reproducible
    # (e.g. torch.cuda.FloatTensor(100).uniform_()).
    torch.cuda.manual_seed(args.seed)

    model = CSRNet()
    model = model.cuda()

    # reduction='sum' is the non-deprecated spelling of size_average=False.
    criterion = nn.MSELoss(reduction='sum').cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    # Resume from a pre-trained checkpoint when one was given.
    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))

    for epoch in range(args.start_epoch, args.epochs):

        adjust_learning_rate(optimizer, epoch)

        train(train_list, model, criterion, optimizer, epoch)
        prec1 = validate(val_list, model, criterion)  # validation MAE

        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)

        # Print and append the running best MAE to the run's log file.
        line = ' * best MAE {mae:.3f} '.format(mae=best_prec1)
        with open('logs/{}_{}.log'.format(time_stp, args.arch), 'a+') as flog:
            print(line)
            flog.write('{}\n'.format(line))

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.pre,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.task)
Esempio n. 15
0
def main():
    """Entry point: parse CLI args, build CSRNet, optionally resume from a
    checkpoint, then run the train/validate loop, logging metrics to
    TensorBoard and saving a checkpoint after every epoch.

    Communicates with the module-level helpers (``train``, ``validate``,
    ``adjust_learning_rate``, ...) through globals, as the rest of this
    script expects.
    """
    global args, best_prec1
    global train_loader, test_loader, train_loader_len
    global losses, batch_time, data_time
    global writer

    best_prec1 = 1e6  # tracked metric is MAE: lower is better

    args = parser.parse_args()
    # Hyper-parameters fixed by the CSRNet training recipe.
    # NOTE(review): args.epochs and args.lr are assumed to come from the
    # argument parser — confirm the parser defines them.
    args.original_lr = args.lr
    args.batch_size = 1
    args.momentum = 0.95
    args.decay = 5e-4
    args.start_epoch = 0
    args.steps = [-1, 1, 100, 150]
    args.scales = [1, 1, 1, 1]
    args.workers = 4
    # torch.cuda.manual_seed requires an int; time.time() returns a float.
    args.seed = int(time.time())
    args.print_freq = 30

    # The JSON files hold lists of absolute image paths.
    with open(args.train_json, 'r') as f:
        train_list = json.load(f)
    with open(args.test_json, 'r') as f:
        val_list = json.load(f)

    # CUDA_VISIBLE_DEVICES only takes effect if CUDA has not been
    # initialised yet — keep this before the first .cuda() call.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    torch.cuda.manual_seed(args.seed)

    model = CSRNet().cuda()

    # Summed (not averaged) MSE over the predicted density map.
    # ``size_average=False`` was removed from torch; ``reduction='sum'``
    # is the equivalent modern spelling.
    criterion = nn.MSELoss(reduction='sum').cuda()

    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    if args.pre:
        if os.path.isfile(args.pre):
            print("=> loading checkpoint '{}'".format(args.pre))
            checkpoint = torch.load(args.pre)
            args.start_epoch = checkpoint['epoch']
            # best_prec1 may have been saved either as a plain float or as
            # a (possibly GPU-resident) tensor; normalise to a CPU value so
            # the min()/comparison below always works.
            saved_best = checkpoint['best_prec1']
            best_prec1 = saved_best.cpu() if torch.is_tensor(saved_best) else saved_best
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre))

    # Shared meters the train loop updates; reset once per epoch below.
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    writer = SummaryWriter('runs/{}'.format(args.task))

    # ImageNet normalisation statistics (shared by both loaders).
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_loader = torch.utils.data.DataLoader(
        dataset.listDataset(train_list,
                            shuffle=True,
                            transform=transforms.Compose(
                                [transforms.ToTensor(), normalize]),
                            train=True,
                            batch_size=args.batch_size,
                            num_workers=args.workers),
        batch_size=args.batch_size)
    test_loader = torch.utils.data.DataLoader(
        dataset.listDataset(val_list,
                            shuffle=False,
                            transform=transforms.Compose(
                                [transforms.ToTensor(), normalize]),
                            train=False),
        batch_size=args.batch_size)
    train_loader_len = len(train_loader)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        train(model, criterion, optimizer, epoch)
        print('Epoch time: {} s'.format(batch_time.sum))
        losses.reset()
        batch_time.reset()
        data_time.reset()

        torch.cuda.empty_cache()

        prec1 = validate(model)

        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        print(' * best MAE {mae:.3f} '.format(mae=best_prec1))

        writer.add_scalar('validation_loss', prec1, epoch)
        # All param groups share one lr here; log the first group's value.
        writer.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)

        save_checkpoint({
            'epoch': epoch + 1,
            # NOTE(review): stores the checkpoint *path*, not an arch name —
            # kept as-is for checkpoint compatibility; confirm downstream use.
            'arch': args.pre,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer': optimizer.state_dict()
        }, is_best, args.task, '_' + str(epoch) + '.pth.tar')

    writer.close()