Example #1
def evaluate_model(trained_model,
                   data_loader,
                   save_test_results=False,
                   plot_save_dir="/tmp/"):
    """Compute MAE and RMSE of a trained CrowdCounter checkpoint over a test loader."""
    net = CrowdCounter()
    network.load_net(trained_model, net)  # restore weights from the .h5 checkpoint
    net.cuda()
    net.eval()
    mae = 0.0
    mse = 0.0

    for blob in data_loader:
        im_data = blob['data']
        gt_data = blob['gt_density']
        idx_data = blob['idx']
        im_data_norm = im_data / 255.0  # normalize to [0, 1]
        density_map = net(im_data_norm, gt_data)
        density_map = density_map.data.cpu().numpy()
        gt_count = np.sum(gt_data)
        et_count = np.sum(density_map)
        mae += abs(gt_count - et_count)
        mse += ((gt_count - et_count) * (gt_count - et_count))

        if save_test_results:
            print("Plotting results")
            mkdir_if_missing(plot_save_dir)
            save_results(im_data, gt_data, density_map, idx_data,
                         plot_save_dir)
    mae = mae / data_loader.get_num_samples()
    mse = np.sqrt(mse / data_loader.get_num_samples())  # square root makes this an RMSE
    return mae, mse
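
A minimal usage sketch for this variant. The loader construction mirrors the validation loader built in Example #3; the path variables and constructor arguments are assumptions, not part of this example:

data_loader_val = ImageDataLoader(val_path,      # test images (hypothetical path variable)
                                  val_gt_path,   # ground-truth density maps
                                  shuffle=False,
                                  batch_size=1,
                                  test_loader=True)
mae, rmse = evaluate_model('best_model.h5', data_loader_val,
                           save_test_results=True,
                           plot_save_dir='/tmp/plots/')
print('MAE: {:.4f}, RMSE: {:.4f}'.format(mae, rmse))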
Example #2
def evaluate_model(trained_model,
                   data_loader,
                   epoch=0,
                   save_test_results=False,
                   plot_save_dir="/tmp/",
                   den_factor=1e3):
    net = CrowdCounter()
    network.load_net(trained_model, net)
    net.cuda()
    net.eval()
    mae = 0.0
    mse = 0.0

    for blob in data_loader:
        im_data = blob['data']
        gt_data = blob['gt_density']
        idx_data = blob['idx']
        new_shape = blob['new_shape']
        orig_shape = blob['orig_shape']
        im_data_norm = im_data / 127.5 - 1.  # normalize to [-1, 1]
        gt_data = gt_data * den_factor  # scale tiny per-pixel densities up for the forward pass

        density_map = net(im_data_norm, epoch=epoch)
        density_map = density_map.data.cpu().numpy()
        density_map /= den_factor
        gt_data /= den_factor
        # reassemble the test patches back to the original image shape before counting
        im_data, gt_data = data_loader.recontruct_test(im_data, gt_data,
                                                       orig_shape, new_shape)
        _, density_map = data_loader.recontruct_test(im_data_norm, density_map,
                                                     orig_shape, new_shape)
        gt_count = np.sum(gt_data)
        et_count = np.sum(density_map)
        print("image {} gt {:.3f} es {:.3f}".format(idx_data[0], gt_count,
                                                    et_count))
        mae += abs(gt_count - et_count)
        mse += ((gt_count - et_count) * (gt_count - et_count))

        if save_test_results:
            print("Plotting results")
            mkdir_if_missing(plot_save_dir)
            utils.save_results(im_data, gt_data, density_map, idx_data,
                               plot_save_dir)

    mae = mae / data_loader.get_num_samples()
    mse = np.sqrt(mse / data_loader.get_num_samples())
    return mae, mse
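
The den_factor round trip above multiplies the ground-truth densities by 1e3 before the forward pass and divides both maps back afterwards, so the reported counts are unchanged; per-pixel density values are tiny (they sum to the head count over the whole map), and the scaling keeps the loss in a numerically friendlier range. A self-contained NumPy check of that invariant (toy values, not project data):

import numpy as np

den_factor = 1e3
gt = np.full((1, 1, 64, 64), 2.5e-4)   # toy density map; count = 64*64*2.5e-4 = 1.024
scaled = gt * den_factor               # what the network and loss actually see
restored = scaled / den_factor         # undone before counting, as in the loop above
assert np.isclose(gt.sum(), restored.sum())  # counts are unaffected by the scaling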
Example #3
File: train.py  Project: EillotY/paper
def train(train_test_unit, out_dir_root):
    output_dir = osp.join(out_dir_root, train_test_unit.metadata['name'])
    mkdir_if_missing(output_dir)
    sys.stdout = Logger(osp.join(output_dir, 'log_train.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    dataset_name = train_test_unit.metadata['name']
    train_path = train_test_unit.train_dir_img
    train_gt_path = train_test_unit.train_dir_den
    val_path = train_test_unit.test_dir_img
    val_gt_path = train_test_unit.test_dir_den

    # training configuration
    start_step = args.start_epoch
    end_step = args.max_epoch
    lr = args.lr

    # log frequency
    disp_interval = args.train_batch * 20

    # ------------
    rand_seed = args.seed
    if rand_seed is not None:
        np.random.seed(rand_seed)
        torch.manual_seed(rand_seed)
        torch.cuda.manual_seed(rand_seed)

    # load net
    net = CrowdCounter()
    if not args.resume:
        network.weights_normal_init(net, dev=0.01)
    else:
        # network.weights_normal_init(net, dev=0.01) #init all layers in case of partial net load
        if args.resume[-3:] == '.h5':
            pretrained_model = args.resume
        else:
            resume_dir = osp.join(args.resume, train_test_unit.metadata['name'])
            pretrained_model = osp.join(resume_dir, 'best_model.h5')
        network.load_net(pretrained_model, net)
        print('Will apply fine-tuning over', pretrained_model)
    net.cuda()
    net.train()

    # separate Adam optimizers for the small- and large-scale discriminators and generators
    optimizer_d_large = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                                net.d_large.parameters()),
                                         lr=lr,
                                         betas=(args.beta1, args.beta2))
    optimizer_d_small = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                                net.d_small.parameters()),
                                         lr=lr,
                                         betas=(args.beta1, args.beta2))
    optimizer_g_large = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                                net.g_large.parameters()),
                                         lr=lr,
                                         betas=(args.beta1, args.beta2))
    optimizer_g_small = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                                net.g_small.parameters()),
                                         lr=lr,
                                         betas=(args.beta1, args.beta2))

    # training
    train_loss = 0
    step_cnt = 0
    re_cnt = False
    t = Timer()
    t.tic()

    # preprocess flags
    overlap_test = bool(args.overlap_test)

    data_loader = ImageDataLoader(train_path,
                                  train_gt_path,
                                  shuffle=True,
                                  batch_size=args.train_batch,
                                  test_loader=False)
    data_loader_val = ImageDataLoader(val_path,
                                      val_gt_path,
                                      shuffle=False,
                                      batch_size=1,
                                      test_loader=True,
                                      img_width=args.size_x,
                                      img_height=args.size_y,
                                      test_overlap=overlap_test)
    best_mae = sys.maxsize

    for epoch in range(start_step, end_step + 1):
        step = 0
        train_loss_gen_small = 0
        train_loss_gen_large = 0
        train_loss_dis_small = 0
        train_loss_dis_large = 0

        for blob in data_loader:
            step = step + args.train_batch
            im_data = blob['data']
            gt_data = blob['gt_density']
            idx_data = blob['idx']
            im_data_norm = im_data / 127.5 - 1.  # normalize between -1 and 1
            gt_data = gt_data * args.den_factor

            # discriminator step: update d_small and d_large on the discriminator losses
            optimizer_d_large.zero_grad()
            optimizer_d_small.zero_grad()
            density_map = net(im_data_norm,
                              gt_data,
                              epoch=epoch,
                              mode="discriminator")
            loss_d_small = net.loss_dis_small
            loss_d_large = net.loss_dis_large
            loss_d_small.backward()
            loss_d_large.backward()
            optimizer_d_small.step()
            optimizer_d_large.step()

            # generator step: fresh forward pass, update g_small and g_large on the combined loss
            optimizer_g_large.zero_grad()
            optimizer_g_small.zero_grad()
            density_map = net(im_data_norm,
                              gt_data,
                              epoch=epoch,
                              mode="generator")
            loss_g_small = net.loss_gen_small
            loss_g_large = net.loss_gen_large
            loss_g = net.loss_gen
            loss_g.backward()  # loss_g_large + loss_g_small
            optimizer_g_small.step()
            optimizer_g_large.step()

            density_map /= args.den_factor  # undo the density scaling before counting
            gt_data /= args.den_factor

            train_loss_gen_small += loss_g_small.data.item()
            train_loss_gen_large += loss_g_large.data.item()
            train_loss_dis_small += loss_d_small.data.item()
            train_loss_dis_large += loss_d_large.data.item()

            step_cnt += 1
            if step % disp_interval == 0:
                duration = t.toc(average=False)
                fps = step_cnt / duration
                density_map = density_map.data.cpu().numpy()
                train_batch_size = gt_data.shape[0]
                gt_count = np.sum(gt_data.reshape(train_batch_size, -1),
                                  axis=1)
                et_count = np.sum(density_map.reshape(train_batch_size, -1),
                                  axis=1)

                if args.save_plots:
                    plot_save_dir = osp.join(output_dir, 'plot-results-train/')
                    mkdir_if_missing(plot_save_dir)
                    utils.save_results(im_data,
                                       gt_data,
                                       density_map,
                                       idx_data,
                                       plot_save_dir,
                                       loss=args.loss)

                print(
                    "epoch: {0}, step {1}/{5}, Time: {2:.4f}s, gt_cnt: {3:.4f}, et_cnt: {4:.4f}, mean_diff: {6:.4f}"
                    .format(epoch, step, 1. / fps, gt_count[0], et_count[0],
                            data_loader.num_samples,
                            np.mean(np.abs(gt_count - et_count))))
                re_cnt = True

            if re_cnt:
                t.tic()
                re_cnt = False

        save_name = os.path.join(
            output_dir, '{}_{}_{}.h5'.format(train_test_unit.to_string(),
                                             dataset_name, epoch))
        network.save_net(save_name, net)

        # calculate error on the validation dataset
        mae, mse = evaluate_model(save_name,
                                  data_loader_val,
                                  epoch=epoch,
                                  den_factor=args.den_factor)
        if mae < best_mae:
            best_mae = mae
            best_mse = mse
            best_model = '{}_{}_{}.h5'.format(train_test_unit.to_string(),
                                              dataset_name, epoch)
            network.save_net(os.path.join(output_dir, "best_model.h5"), net)

        print(
            "Epoch: {0}, MAE: {1:.4f}, MSE: {2:.4f}, loss gen small: {3:.4f}, loss gen large: {4:.4f}, loss dis small: {5:.4f}, loss dis large: {6:.4f}, loss: {7:.4f}"
            .format(
                epoch, mae, mse, train_loss_gen_small, train_loss_gen_large,
                train_loss_dis_small, train_loss_dis_large,
                train_loss_gen_small + train_loss_gen_large +
                train_loss_dis_small + train_loss_dis_large))
        print("Best MAE: {0:.4f}, Best MSE: {1:.4f}, Best model: {2}".format(
            best_mae, best_mse, best_model))
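
The inner loop above is a standard alternating GAN update, done twice over (a small and a large scale): step the discriminator optimizers on the discriminator losses, then re-run the forward pass and step the generator optimizers on the combined generator loss. A stripped-down, runnable sketch of that pattern with a single toy generator/discriminator pair (the modules and losses here are stand-ins, not the project's networks):

import torch
import torch.nn as nn

g = nn.Linear(8, 8)   # toy generator
d = nn.Linear(8, 1)   # toy discriminator
opt_g = torch.optim.Adam(g.parameters(), lr=1e-4, betas=(0.5, 0.999))
opt_d = torch.optim.Adam(d.parameters(), lr=1e-4, betas=(0.5, 0.999))
bce = nn.BCEWithLogitsLoss()
x, real = torch.randn(4, 8), torch.randn(4, 8)

# 1) discriminator step: real -> 1, fake (detached so g gets no gradient) -> 0
opt_d.zero_grad()
fake = g(x)
loss_d = bce(d(real), torch.ones(4, 1)) + bce(d(fake.detach()), torch.zeros(4, 1))
loss_d.backward()
opt_d.step()

# 2) generator step: a fresh forward pass, trained to fool the discriminator
opt_g.zero_grad()
loss_g = bce(d(g(x)), torch.ones(4, 1))
loss_g.backward()
opt_g.step()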
Example #4
def evaluate_model(trained_model,
                   data_loader,
                   model,
                   save_test_results=False,
                   plot_save_dir="/tmp/",
                   den_scale_factor=1e3,
                   cam=False,
                   **kwargs):
    mkdir_if_missing(plot_save_dir)
    net = CrowdCounter(model=model, channel_param=kwargs['channel_param'])
    network.load_net(trained_model, net)
    net.cuda()
    net.eval()
    mae = 0.0
    mse = 0.0
    if cam:
        net.net.cam = True  # enable CAM mode on the wrapped network
    res = {
        'img': [],
        'et': [],
        'gt': [],
        'abs_err': [],
        'err^2': [],
        'err/gt': []
    }

    for blob in data_loader:
        im_data = blob['data']
        gt_data = blob['gt_density']
        idx_data = blob['idx']
        im_data_norm = im_data / 127.5 - 1.  # normalize to [-1, 1]
        density_map = net(im_data_norm)
        if cam in ['grad-cam', 'all']:
            apply_cam(net, blob, density_map,
                      osp.join(plot_save_dir, 'grad-cam'), 'grad-cam')
        if cam in ['grad-cam++', 'all']:
            apply_cam(net, blob, density_map,
                      osp.join(plot_save_dir, 'grad-cam++'), 'grad-cam++')
        #if cam in ['eigen-cam', 'all']:
        #    apply_cam(net, blob, density_map, osp.join(plot_save_dir, 'eigen-cam'), 'eigen-cam')
        if cam in ['layer-cam', 'all']:
            apply_cam(net, blob, density_map,
                      osp.join(plot_save_dir, 'layer-cam'), 'layer-cam')
        density_map = density_map.data.cpu().numpy()
        density_map /= den_scale_factor
        gt_count = np.sum(gt_data)
        et_count = np.sum(density_map)
        mae += abs(gt_count - et_count)
        mse += ((gt_count - et_count) * (gt_count - et_count))

        res['img'].append(idx_data)
        res['et'].append(et_count)
        res['gt'].append(gt_count)
        res['abs_err'].append(abs(gt_count - et_count))
        res['err^2'].append((gt_count - et_count) * (gt_count - et_count))
        res['err/gt'].append(abs(gt_count - et_count) / gt_count)  # relative error; assumes gt_count > 0

        if save_test_results:
            print("Plotting results")
            mkdir_if_missing(plot_save_dir)
            save_results(im_data, gt_data, density_map, idx_data,
                         plot_save_dir)
    mae = mae / data_loader.get_num_samples()
    mse = np.sqrt(mse / data_loader.get_num_samples())
    df = pd.DataFrame(res).sort_values('err/gt', axis=0, ascending=False)
    df.to_csv(osp.join(plot_save_dir, 'error_by_img.csv'),
              sep=',',
              index=False,
              float_format='%.5f')

    return mae, mse
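
A usage sketch for this variant. The model name, channel_param value, and cam mode below are placeholders; only the keyword names come from the signature and body above:

mae, rmse = evaluate_model('best_model.h5',
                           data_loader_val,        # built as in Example #3
                           model='model-name',     # placeholder; forwarded to CrowdCounter
                           save_test_results=True,
                           plot_save_dir='/tmp/plots/',
                           cam='grad-cam',         # any truthy value also sets net.net.cam
                           channel_param=1.0)      # placeholder; passed via **kwargs
# per-image errors are written to /tmp/plots/error_by_img.csv,
# sorted by relative error (err/gt) in descending order
print('MAE: {:.4f}, RMSE: {:.4f}'.format(mae, rmse))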