def eval_test(net, test_path):
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False 

    output_dir = './output'
    model_path = args.Dataset + '_trained_model.h5'
    model_name = os.path.basename(model_path).split('.')[0]

    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    output_dir = os.path.join(output_dir, 'dm_' + model_name)
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)


    network.load_net(model_path, net)
    net.cuda()
    net.eval()

    test_loader = ImageDataLoader(test_path, None, 'test_split', shuffle=False,
                                  gt_downsample=True, pre_load=True, Dataset=args.Dataset)

    with torch.no_grad():
        for blob in test_loader:
            im_data = blob['data']
            density_map = net(im_data)
            density_map = density_map.data.cpu().numpy()
            new_dm = density_map.reshape(density_map.shape[2], density_map.shape[3])
            np.savetxt(os.path.join(output_dir, 'output_' + blob['fname'].split('.')[0] + '.csv'),
                       new_dm, delimiter=',', fmt='%.6f')
   
    return net
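All of these examples restore weights through network.load_net, whose definition is not included in this listing. As a point of reference, here is a minimal sketch of how such an HDF5 checkpoint loader is commonly written in crowd-counting repos of this vintage (an assumption, not the listing's own code):

import h5py
import numpy as np
import torch

def load_net_sketch(fname, net):
    # Hypothetical stand-in for network.load_net: copy each dataset in the
    # .h5 checkpoint into the matching entry of the model's state_dict.
    with torch.no_grad(), h5py.File(fname, mode='r') as h5f:
        for k, v in net.state_dict().items():
            v.copy_(torch.from_numpy(np.asarray(h5f[k])))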
def evaluate_model_minibatch(trained_model, data_loader):
    # net = CrowdCounter()
    # net = CrowdCounter_counterr()
    # net = CrowdCounter_cnterr_l1()
    net = CrowdCounter_cnterr_l1_out_minibatch()
    # net = torch.nn.DataParallel(CrowdCounter_cnterr_l1_out(), device_ids=[4, 5])

    network.load_net(trained_model, net)

    net.cuda()

    net.eval()
    mae = 0.0
    mse = 0.0
    for blob in data_loader:
        im_data = blob['data']
        gt_data = blob['gt_density']
        density_map = net(im_data, gt_data)
        density_map = density_map.data.cpu().numpy()
        gt_count = np.sum(gt_data)
        et_count = np.sum(density_map)
        mae += abs(gt_count - et_count)
        mse += ((gt_count - et_count) * (gt_count - et_count))
    mae = mae / data_loader.get_num_samples()
    mse = np.sqrt(mse / data_loader.get_num_samples())
    return mae, mse
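For reference, the accumulators above implement the two standard crowd-counting metrics; note that the quantity this code reports as MSE is really the root mean squared error:

\mathrm{MAE} = \frac{1}{N}\sum_{i=1}^{N}\lvert gt_i - et_i\rvert,
\qquad
\mathrm{MSE} = \sqrt{\frac{1}{N}\sum_{i=1}^{N}\left(gt_i - et_i\right)^2}

where N is data_loader.get_num_samples(), gt_i is the ground-truth count of image i, and et_i is the estimated count (the sum over the predicted density map).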
Example #3
    def init_model(self, model_path=None):
        if model_path is not None:
            network.load_net(model_path, self.model)
        else:
            network.weights_normal_init(self.model, dev=1e-6)
            # network.load_net('../../pruned_VGG.h5', self.model.front_end, skip=True)
            # network.load_net("../../vgg16.h5", self.model.front_end, skip=True)

        def calpara(model):
            print('---------- Networks initialized -------------')
            num_params = 0
            for param in model.parameters():
                num_params += param.numel()
            print('[Network] Total number of parameters : %.3f M' %
                  (num_params / 1e6))
            print('-----------------------------------------------')

        calpara(self.model)

        network.weights_normal_init(self.loss_fn_, dev=0.01)

        if len(self.opt.gpus) > 0:
            assert (torch.cuda.is_available())
            self.model.to(self.device)
            #self.model = torch.nn.DataParallel(self.model, self.opt.gpus)  # multi-GPUs
            if self.opt.loss is not None and 'SSIM' in self.opt.loss:
                self.loss_fn_.to(self.device)
                #self.loss_fn = torch.nn.DataParallel(self.loss_fn_, self.opt.gpus)  # multi-GPUs
                self.loss_fn = self.loss_fn_
            else:
                self.loss_fn = self.loss_fn_
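network.weights_normal_init (called here with dev=1e-6 and dev=0.01) is likewise not shown in this listing. A plausible sketch of what such an initializer does, assuming the usual convention of drawing conv and linear weights from a zero-mean normal with standard deviation dev (an assumption, not the listing's code):

import torch
import torch.nn as nn

def weights_normal_init_sketch(model, dev=0.01):
    # Hypothetical stand-in: re-draw every conv/linear weight from N(0, dev).
    with torch.no_grad():
        for m in model.modules():
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                m.weight.normal_(0.0, dev)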
def evaluate_model(trained_model, data_loader):
    #net = CrowdCounter_cnterr_l1_out()
    net = CrowdCounter_cnterr_LP()
    network.load_net(trained_model, net)

    net.cuda()
    net.eval()
    mae = 0.0
    mse = 0.0

    # no gradient calculation
    with torch.no_grad():
        for blob in data_loader:
            im_data = blob['data']
            gt_data = blob['gt_density']
            im_data = im_data.cuda()
            gt_data = gt_data.cuda()
            density_map = net(im_data, gt_data)
            density_map = density_map.data.cpu().numpy()
            gt_data = gt_data.data.cpu().numpy()
            gt_count = np.sum(gt_data)
            et_count = np.sum(density_map)
            mae += abs(gt_count - et_count)
            mse += ((gt_count - et_count) * (gt_count - et_count))
    mae = mae / data_loader.get_num_samples()
    mse = np.sqrt(mse / data_loader.get_num_samples())
    return mae, mse
Example #5
def pre_MCNN():

    MCNN_model_path = './saved_models/mcnn_shtechA_58.h5'  # path to the trained MCNN model
    net = CrowdCounter()
    network.load_net(MCNN_model_path, net)
    net.cuda()
    net.eval()
    return net
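A minimal usage sketch for the helper above; the image path is hypothetical, and it assumes CrowdCounter's forward accepts the same 1 x 1 x H x W float array layout that blob['data'] carries in the evaluation loops elsewhere in this listing:

import cv2
import numpy as np

net = pre_MCNN()
img = cv2.imread('test.jpg', cv2.IMREAD_GRAYSCALE).astype(np.float32)  # hypothetical image
im_data = img.reshape(1, 1, img.shape[0], img.shape[1])
density_map = net(im_data)
print('estimated count: %.1f' % float(density_map.data.sum()))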
Example #6
    def __init__(self):
        test_model_path = r'E:\PycharmProjects\crowdcount_8\best_models\model_20190318_DPLNet\shanghaitechA\pool_4_64.06\saved_models_shtA\shtechA_13.h5'
        # test_model_path = r'.\best_models\model_20190318_DPLNet\trancos\pool_4_3.08_4.39_5.79_7.62\saved_models_trancos\trancos_889.h5'

        self.alpha = 0.5

        self.net = CrowdCount()
        network.load_net(test_model_path, self.net)
        self.net.cuda()
        self.net.eval()
Example #7
def evaluate_model(trained_model, data_loader):
    net = CrowdCounter()
    network.load_net(trained_model, net)
    net.cuda()
    net.eval()
    mae = 0.0
    mse = 0.0
    for blob in data_loader:
        im_data = blob['data']
        gt_data = blob['gt_density']
        density_map = net(im_data, gt_data)
        density_map = density_map.data.cpu().numpy()
        gt_count = np.sum(gt_data)
        et_count = np.sum(density_map)
        mae += abs(gt_count - et_count)
        mse += ((gt_count - et_count) * (gt_count - et_count))
    mae = mae / data_loader.get_num_samples()
    mse = np.sqrt(mse / data_loader.get_num_samples())
    return mae, mse
Example #8
import os
import torch
import numpy as np
import cv2
from src.utils import gray_to_bgr
from src.crowd_count import CrowdCount
from src import network
from src.utils import ndarray_to_tensor

alpha = 0.5

test_model_path = r'./saved_models_shtA/shtechA_31_5217.h5'
net = CrowdCount()
network.load_net(test_model_path, net)

net.cuda()
net.eval()

# cap = cv2.VideoCapture(r'E:\PycharmProjects\data\video\MVI_1582.MOV')
cap = cv2.VideoCapture(r'E:\PycharmProjects\data\video\DJI_0001.MOV')
# cap = cv2.VideoCapture(0)
if not cap.isOpened():
    raise Exception('cannot open video source')

out = cv2.VideoWriter('test_cam_output.mp4', cv2.VideoWriter_fourcc(*'H264'),
                      30.0, (1920, 1080))

index = 0

# calculate error on the test dataset
while cap.isOpened():
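    # The loop body is truncated in the original listing. What follows is a
    # hypothetical sketch of one iteration, assuming the net accepts a
    # 1 x 1 x H x W float array and that `out` receives 1920x1080 BGR frames.
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY).astype(np.float32)
    im_data = gray.reshape(1, 1, gray.shape[0], gray.shape[1])
    with torch.no_grad():
        density_map = net(im_data)
    count = float(density_map.data.sum())
    cv2.putText(frame, 'count: %.1f' % count, (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)
    out.write(frame)
    index += 1

cap.release()
out.release()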
Example #9
model_path = 'C:/Users/jalee/Desktop/FYP/saved_models/mall/MCNN-final-1/MCNN-final-1_mall_6_crop_50.h5'
#model_path = 'C:/Users/jalee/Desktop/FYP/saved_models/schtechA/MCNN-ver3/MCNN-ver3_schtechA_35_crop_50.h5'
#model_path = 'C:/Users/jalee/Desktop/FYP/saved_models/schtechA/MCNN-ver2/MCNN-ver2_schtechA_54_crop_50.h5'
#input_video_path = 'C:/Users/jalee/Desktop/FYP/test/video/output.mp4'

input_video_path = 'C:/Users/jalee/Desktop/FYP/Test1.mp4'

vs = FileVideoStream(input_video_path).start()
# vs = VideoStream(src=0).start()
time.sleep(2.0)
fps = FPS().start()

net = CrowdCounter_cnterr_LP()
net.cuda()
network.load_net(model_path, net)

frame_num = 0
clear_count = 0
array_init = []
array_final = []
path_list = []
new_pts_added = []

min_dist = 8
max_dist = 30

while True:
    frame = vs.read()
    frame = cv2.resize(frame, (640, 480))  # downscale for speed (720p would be 1280 x 720)
    unedited_frame = frame.copy()
Example #10
def open_model(model_path):
    model = CrowdCounter()
    network.load_net(model_path, model)
    model.eval()
    return model
def evaluate_model(model_path, data):
    net = CrowdCount()
    network.load_net(model_path, net)
    net.cuda()
    net.eval()

    build_ssim = SSIM(window_size=11)

    game = GridAverageMeanAbsoluteError()

    mae = 0.0
    mse = 0.0
    psnr = 0.0
    ssim = 0.0
    game_0 = 0.0
    game_1 = 0.0
    game_2 = 0.0
    game_3 = 0.0
    index = 0

    for blob in data:
        image_data = blob['image']
        ground_truth_data = blob['density']
        roi = blob['roi']
        # filename = blob['filename']

        if image_data.shape[0] != 1:
            raise Exception('invalid image batch size (%d) for evaluation' %
                            image_data.shape[0])

        estimate_map, _ = net(image_data, roi=roi)

        ground_truth_data = ground_truth_data.data.cpu().numpy()
        density_map = estimate_map.data.cpu().numpy()

        ground_truth_count = np.sum(ground_truth_data)
        estimate_count = np.sum(density_map)

        mae += abs(ground_truth_count - estimate_count)
        mse += (ground_truth_count - estimate_count)**2
        psnr += build_psnr(ground_truth_data, density_map)
        ssim += build_ssim(ndarray_to_tensor(ground_truth_data),
                           ndarray_to_tensor(density_map)).item()
        game_0 += game.calculate_error(ground_truth_data, density_map, 0)
        game_1 += game.calculate_error(ground_truth_data, density_map, 1)
        game_2 += game.calculate_error(ground_truth_data, density_map, 2)
        game_3 += game.calculate_error(ground_truth_data, density_map, 3)
        index += 1

    result_dict = dict()
    result_dict['name'] = os.path.basename(model_path)
    result_dict['number'] = int(index)
    result_dict['mae'] = float(mae / index)
    result_dict['mse'] = float(np.sqrt(mse / index))
    result_dict['psnr'] = float(psnr / index)
    result_dict['ssim'] = float(ssim / index)
    result_dict['game_0'] = float(game_0 / index)
    result_dict['game_1'] = float(game_1 / index)
    result_dict['game_2'] = float(game_2 / index)
    result_dict['game_3'] = float(game_3 / index)

    return result_dict
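GridAverageMeanAbsoluteError is not defined anywhere in this listing. The GAME(L) metric it evidently implements splits each density map into 4^L non-overlapping cells and sums the per-cell absolute count differences, so GAME(0) reduces to the plain MAE. A self-contained numpy sketch of that computation (an assumption about the class's internals, not its actual code):

import numpy as np

def game_sketch(gt_map, et_map, level):
    # Split both density maps into a 2^level x 2^level grid and sum the
    # absolute count difference per cell; level=0 gives one cell (MAE).
    gt = gt_map.reshape(gt_map.shape[-2], gt_map.shape[-1])
    et = et_map.reshape(et_map.shape[-2], et_map.shape[-1])
    n = 2 ** level
    h_edges = np.linspace(0, gt.shape[0], n + 1, dtype=int)
    w_edges = np.linspace(0, gt.shape[1], n + 1, dtype=int)
    err = 0.0
    for i in range(n):
        for j in range(n):
            gt_c = gt[h_edges[i]:h_edges[i + 1], w_edges[j]:w_edges[j + 1]].sum()
            et_c = et[h_edges[i]:h_edges[i + 1], w_edges[j]:w_edges[j + 1]].sum()
            err += abs(gt_c - et_c)
    return err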
Example #12
    f.write('MAE: %0.2f, MSE: %0.2f' % (mae,mse))
    f.close()


if __name__ == '__main__':

    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False

    # data_path = './data/original/shanghaitech/part_B_final/test_data/images/'
    # gt_path = './data/original/shanghaitech/part_B_final/test_data/ground_truth_csv/'

    data_path = './data/original/shanghaitech/part_A_final/test_data/images/'
    gt_path = './data/original/shanghaitech/part_A_final/test_data/ground_truth_csv/'

    model_path = './Shanghai_A_Retrain_eps_1_5/saved_models/MCNN_Shanghai_A.h5'

    model = CrowdCounter()
    network.load_net(model_path, model)
    model.to(device)
    model.eval()

    data_loader = ImageDataLoader(data_path, gt_path, shuffle=False, gt_downsample=True, pre_load=True)

    eps_list = [0, 0.25, 0.5, 0.75, 1]

    print(model_path)

    epoch_robust_bound(data_loader, model, device, epsilon_try=0.25)
Example #13
# load net
# net = CrowdCounter()
# net = CrowdCounter_cnterr_l1_out()
# net = CrowdCounter_cnterr_LA()
net = CrowdCounter_cnterr_LP()

use_model = True
if use_model:
    #model = 'C:/Users/jalee/Desktop/FYP/saved_models/UCSD/MCNN-no_prior_training/MCNN-no_prior_training_UCSD_3_crop_50.h5'
    #model = 'C:/Users/jalee/Desktop/FYP/saved_models/schtechA/MCNN-ver2/MCNN-ver2_schtechA_54_crop_50.h5'
    #model = 'C:/Users/jalee/Desktop/FYP/saved_models/schtechA/MCNN-ver1/MCNN-ver1_schtechA_43_crop_50.h5'
    #model = 'C:/Users/jalee/Desktop/FYP/saved_models/schtechA/MCNN-ver3/MCNN-ver3_schtechA_35_crop_50.h5'
    model = 'C:/Users/jalee/Desktop/FYP/saved_models/mall/MCNN-final-1/MCNN-final-1_mall_35_crop_50.h5'
    #model = 'C:/Users/jalee/Desktop/FYP/saved_models/mall/MCNN-final-2/MCNN-final-2_mall_191_crop_50.h5'

    network.load_net(model, net)
else:
    network.weights_normal_init(net, dev=0.01)


# load pretrained model
# model_path = './final_models/acspnet_shtechA_400_crop_9.h5'

# single branch loading
# trained_model = os.path.join(model_path)
# network.load_net(trained_model, net)

net.cuda()
net.train()

params = list(net.parameters())
Example #14
def testimage(modelname, camname):
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    vis = False
    save_output = False

    #test data and model file path
    if camname == 0:
        data_path = '../data/test/images/'
    else:
        data_path = '../data/test/images2/'

    if modelname == 'A':
        model_path = './final_models/cmtl_shtechA_204.h5'
    else:
        model_path = './final_models/cmtl_shtechB_768.h5'
    print("Model name:", modelname, " Camname: ", camname)
    gt_flag = False
    if gt_flag:
        gt_path = '../dataset/ShanghaiTech/part_A/test_data/ground_truth/'

    # =============================================================================
    # for i in range(1, 4):
    #     gt_name = os.path.join(gt_path,'img_' + format(i, '04') + '_ann.mat')
    #     print(gt_name)
    #     x = loadmat(gt_name)
    #     print (len(x['annPoints']))
    #
    # =============================================================================
    output_dir = './output/'

    model_name = os.path.basename(model_path).split('.')[0]
    file_results = os.path.join(output_dir, 'results_' + model_name + '_.txt')
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    output_dir = os.path.join(output_dir, 'density_maps_' + model_name)
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    #load test data
    data_loader = ImageDataLoader(data_path,
                                  shuffle=False,
                                  gt_downsample=True,
                                  pre_load=True)

    net = CrowdCounter()

    network.load_net(model_path, net)
    net.cuda()
    net.eval()
    mae = 0.0
    mse = 0.0
    i = 1
    #df = pd.read_csv("../etcount.csv")
    #df = df.set_index('IMG_NAME')
    #df['GROUND_TRUTH'] = 0.0
    #df['MTL-v4-A10'] = 0.0

    for blob in data_loader:
        if gt_flag:
            gt_name = os.path.join(
                gt_path, 'GT_' + blob['fname'].split('.')[0] + '.mat')
            x = loadmat(gt_name)
            #gt_count = len(x['image_info'][0][0][0][0][0])
            #df.at[blob['fname'].split('.')[0], 'GROUND_TRUTH'] = gt_count
            i += 1
        im_data = blob['data']
        density_map = net(im_data)
        density_map = density_map.data.cpu().numpy()
        x = density_map.shape[2]
        y = density_map.shape[3]
        half = x // 2
        density_map1 = density_map[0][0][:half]
        density_map2 = density_map[0][0][half:]

        print(x, y)
        et_c1 = np.sum(density_map1)
        et_c2 = np.sum(density_map2)
        side = 'right' if et_c1 > et_c2 else 'left'
        print(et_c1, et_c2)
        et_count = np.sum(density_map)

        print(blob['fname'].split('.')[0], ' Model Estimated count : ',
              et_count)
        #df.at[blob['fname'].split('.')[0], 'MTL-v4-A'] = et_count
        if vis:
            utils.display_results(im_data, density_map)
        if save_output:
            utils.save_density_map(
                density_map, output_dir,
                'output_' + blob['fname'].split('.')[0] + '.png')

    return (et_count, side)

    #df.to_csv('../etcount.csv')


#testimage('A', 1)
def single_img_estimate(input_path):
    data_path = input_path
    gt_path = './data/formatted_trainval/mall_dataset/rgb_val_den/' + input_path.split(
        '/')[-1].replace('.jpg', '.csv')
    # branch pre-train
    model_path = './final_models/mcnn_mall_perspective_28_ms.h5'

    output_dir = './demo_output/'
    gt_dir = './demo_gt/'
    model_name = os.path.basename(model_path).split('.')[0]
    file_results = os.path.join(output_dir, 'results_' + model_name + '_.txt')
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    output_dir = os.path.join(output_dir, 'density_maps_' + model_name)
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    if not os.path.exists(gt_dir):
        os.mkdir(gt_dir)
    gt_dir = os.path.join(gt_dir, 'density_maps_' + model_name)
    if not os.path.exists(gt_dir):
        os.mkdir(gt_dir)

    net = CrowdCounter()

    network.load_net(model_path, net)
    net.cuda()
    net.eval()
    mae = 0.0
    mse = 0.0

    # load test data
    # downsample = True
    data_loader = SingleImageDataLoader(data_path,
                                        gt_path,
                                        shuffle=False,
                                        gt_downsample=True,
                                        pre_load=False)

    # downsample = False
    # data_loader = ImageDataLoader(data_path, gt_path, shuffle=False, gt_downsample=True, pre_load=False)

    for blob in data_loader:
        im_data = blob['data']
        gt_data = blob['gt_density']
        t.tic()
        density_map = net(im_data, gt_data, False)
        density_map = density_map.data.cpu().numpy()
        duration = t.toc()
        print("time duration:" + str(duration))
        gt_count = np.sum(gt_data)
        et_count = np.sum(density_map)
        mae += abs(gt_count - et_count)
        mse += ((gt_count - et_count) * (gt_count - et_count))
        if vis:
            utils.display_results(im_data, gt_data, density_map)
        if save_output:
            utils.save_demo_density_map(
                density_map, output_dir,
                'output_' + blob['fname'].split('.')[0] + '.png')

            gt_data = 255 * gt_data / np.max(gt_data)
            gt_data = gt_data.astype(np.uint8)
            gt_data = cv2.applyColorMap(gt_data, cv2.COLORMAP_JET)
            cv2.imwrite(
                os.path.join(gt_dir,
                             'gt_' + blob['fname'].split('.')[0] + '.png'),
                gt_data)

        print('\nMAE: %0.2f, MSE: %0.2f' % (mae, mse))
        with open(file_results, 'w') as f:
            f.write('MAE: %0.2f, MSE: %0.2f' % (mae, mse))

        return (output_dir + '/output_' + blob['fname'].split('.')[0] + '.png',
                gt_dir + '/gt_' + blob['fname'].split('.')[0] + '.png', mae,
                mse, gt_count, et_count)
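A usage sketch for the function above; the image path is hypothetical and must have a matching density CSV under ./data/formatted_trainval/mall_dataset/rgb_val_den/:

et_png, gt_png, mae, mse, gt_count, et_count = single_img_estimate(
    './data/demo/seq_000001.jpg')  # hypothetical input image
print('GT %.1f vs estimate %.1f (density map: %s)' % (gt_count, et_count, et_png))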
Example #16
gt_path = './data-oilpalm/oilpalm-test/ground_truth_csv/'
model_path = './oilpalm_saved_models/mcnn_oilpalm_70.h5'

output_dir = './output/'
model_name = os.path.basename(model_path).split('.')[0]
file_results = os.path.join(output_dir, 'results_' + model_name + '_.txt')
if not os.path.exists(output_dir):
    os.mkdir(output_dir)
output_dir = os.path.join(output_dir, 'density_maps_' + model_name)
if not os.path.exists(output_dir):
    os.mkdir(output_dir)

net = CrowdCounter()

network.load_net(model_path, net)
net.cuda()
net.eval()
mae = 0.0
rmse = 0.0
mrmse = 0.0

# load test data
data_loader = ImageDataLoader(data_path,
                              gt_path,
                              shuffle=False,
                              gt_downsample=True,
                              pre_load=True)

for blob in data_loader:
    im_data = blob['data']
Example #17
def single_img_estimate(input_img):
    self_data_path = input_img
    # print('get data path!')
    gt_path = './data/mall_demo/rgb_den/' + self_data_path.split(
        '/')[-1].replace('.jpg', '.csv')
    # print('get gt path!')
    # branch pre-train
    model_path = './final_models/ms_pool_deconv_skip_mall_60_ms.h5'

    self_output_dir = './data/mall_demo/demo_output/'
    self_gt_dir = './data/mall_demo/demo_gt/'

    model_name = os.path.basename(model_path).split('.')[0]
    file_results = os.path.join(self_output_dir,
                                'results_' + model_name + '.txt')
    if not os.path.exists(self_output_dir):
        os.mkdir(self_output_dir)
    self_output_dir = os.path.join(self_output_dir, 'den_' + model_name)
    if not os.path.exists(self_output_dir):
        os.mkdir(self_output_dir)

    if not os.path.exists(self_gt_dir):
        os.mkdir(self_gt_dir)
    gt_dir = os.path.join(self_gt_dir, 'gt_' + model_name)
    if not os.path.exists(gt_dir):
        os.mkdir(gt_dir)

    # print('mkdir done!')
    net = CrowdCounter_cnterr_l1_out()

    network.load_net(model_path, net)
    net.cuda(3)
    net.eval()
    mae = 0.0
    mse = 0.0

    # load test data
    # downsample = True
    # data_loader = ImageDataLoader(data_path, gt_path, shuffle=False, gt_downsample=True, pre_load=False)

    # downsample = False
    data_loader = SingleImageDataLoader(self_data_path,
                                        gt_path,
                                        shuffle=False,
                                        gt_downsample=False,
                                        pre_load=False)

    for blob in data_loader:
        im_data = blob['data']
        gt_data = blob['gt_density']
        t.tic()
        density_map = net(im_data, gt_data, is_training=False)
        density_map = density_map.data.cpu().numpy()
        duration = t.toc()
        print("time duration:" + str(duration))
        gt_count = np.sum(gt_data)
        et_count = np.sum(density_map)
        mae += abs(gt_count - et_count)
        mse += ((gt_count - et_count) * (gt_count - et_count))
        if vis:
            utils.display_results(im_data, gt_data, density_map)
        if save_output:
            utils.save_demo_density_map(
                density_map, self_output_dir,
                'output_' + blob['fname'].split('.')[0] + '.png')

            gt_data = 255 * gt_data / np.max(gt_data)
            gt_data = gt_data.astype(np.uint8)
            gt_data = cv2.applyColorMap(gt_data, cv2.COLORMAP_JET)
            cv2.imwrite(
                os.path.join(gt_dir,
                             'gt_' + blob['fname'].split('.')[0] + '.png'),
                gt_data)

        print('\nMAE: %0.2f, MSE: %0.2f' % (mae, mse))
        # f = open(file_results, 'w')
        # f.write('MAE: %0.2f, MSE: %0.2f' % (mae, mse))
        # f.close()
        # et_path = self.output_dir + 'output_' + blob['fname'].split('.')[0] + '.png'
        # gt_path = self.gt_dir+'gt_'+blob['fname'].split('.')[0]+'.png'

        return (self_output_dir + '/output_' + blob['fname'].split('.')[0] +
                '.png',
                gt_dir + '/gt_' + blob['fname'].split('.')[0] + '.png',
                mae, mse, gt_count, et_count)
data_loader = ImageDataLoader(train_path_list,
                              train_gt_path_list,
                              shuffle=True,
                              gt_downsample=False,
                              pre_load=True)
class_wts = data_loader.get_classifier_weights()
data_loader_val = ImageDataLoader(val_path_list,
                                  val_gt_path_list,
                                  shuffle=False,
                                  gt_downsample=False,
                                  pre_load=True)

#load net and initialize it
net = CrowdCounter(ce_weights=class_wts)
# network.weights_normal_init(net, dev=0.01)
network.load_net(
    '/home/yangxu/Experiments/crowdcount-cascaded-mtl/UCF_partB/final_models/cmtl_ucf_partB_1172.h5',
    net)
net.cuda()
net.train()

params = list(net.parameters())
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                    net.parameters()),
                             lr=lr)

if not os.path.exists(output_dir):
    os.mkdir(output_dir)

# tensorboard
use_tensorboard = use_tensorboard and CrayonClient is not None
if use_tensorboard: