Code example #1
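Video inference: frames are read with OpenCV, resized, converted to grayscale, and a density map is estimated on every estimate_rate-th frame; show() displays the map and count.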
def main():
    args = parse_args()

    net = CrowdCounter(is_cuda)
    network.load_net(args.model_path, net)
    if is_cuda:
        net.cuda()
    print('eval:', net.eval())

    video = cv2.VideoCapture(args.video)
    nframe = 0
    hist = []  # buffer for the (disabled) density-map averaging below
    while True:
        ok, frame = video.read()
        if not ok:  # stop at end of stream or on a read error
            break
        frame = cv2.resize(frame, (0, 0), fx=args.resize_fx, fy=args.resize_fx)
        nframe += 1
        if nframe % args.estimate_rate != 0:
            continue
        print('nframe', nframe)

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        imgarr = image_to_array(gray)

        den, cnt = run(net, imgarr)
        """
        if len(hist) > 5:
            hist.pop(0)
        hist.append(den)
        histden = sum(hist) / len(hist)
        """
        if show(frame, den, cnt):
            break
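The disabled block above hints at temporal smoothing. A minimal standalone sketch of the same idea (the helper name smooth_density is an assumption, not part of the original code):

def smooth_density(hist, den, window=5):
    # Keep only the last `window` density maps and return their average,
    # damping frame-to-frame jitter in the estimated count.
    hist.append(den)
    if len(hist) > window:
        hist.pop(0)
    return sum(hist) / len(hist)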
Code example #2
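Still-image inference: run the counter over each file in args.image_files and show the per-image count with display_results_count().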
def main():
    args = parse_args()

    net = CrowdCounter(is_cuda)
    network.load_net(args.model_path, net)
    if is_cuda:
        net.cuda()
    print('eval:', net.eval())

    for filename in args.image_files:
        img = cv2.imread(filename)
        if img is None:  # skip files OpenCV cannot read
            print('could not read:', filename)
            continue
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        imgarr = image_to_array(gray)
        den, cnt = run(net, imgarr)
        #if show(img, den, cnt): break
        display_results_count(img, den, cnt)
Code example #3
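Evaluation helper: compute the MAE and RMSE of a trained model over a data loader.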
def evaluate_model(trained_model, data_loader):
    net = CrowdCounter()
    network.load_net(trained_model, net)
    net.cuda()
    net.eval()
    mae = 0.0
    mse = 0.0
    for blob in data_loader:
        im_data = blob['data']
        gt_data = blob['gt_density']
        density_map = net(im_data, gt_data)
        density_map = density_map.data.cpu().numpy()
        gt_count = np.sum(gt_data)
        et_count = np.sum(density_map)
        mae += abs(gt_count - et_count)
        mse += (gt_count - et_count) ** 2
    mae = mae / data_loader.get_num_samples()
    mse = np.sqrt(mse / data_loader.get_num_samples())  # RMSE, despite the name
    return mae, mse
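A minimal usage sketch for evaluate_model, reusing the ImageDataLoader and paths from code example #4:

data_path = './data/original/shanghaitech/part_A_final/test_data/images/'
gt_path = './data/original/shanghaitech/part_A_final/test_data/ground_truth_csv/'
loader = ImageDataLoader(data_path, gt_path, shuffle=False,
                         gt_downsample=True, pre_load=True)
mae, mse = evaluate_model('./final_models/mcnn_shtechA_2000.h5', loader)
print('MAE: %0.2f, MSE: %0.2f' % (mae, mse))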
Code example #4
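Test-script excerpt for ShanghaiTech Part A: load a trained MCNN checkpoint, prepare the output directories, and evaluate the model over the test set.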
data_path = './data/original/shanghaitech/part_A_final/test_data/images/'
gt_path = './data/original/shanghaitech/part_A_final/test_data/ground_truth_csv/'
model_path = './final_models/mcnn_shtechA_2000.h5'

output_dir = './output/'
model_name = os.path.basename(model_path).split('.')[0]
file_results = os.path.join(output_dir,'results_' + model_name + '_.txt')
if not os.path.exists(output_dir):
    os.mkdir(output_dir)
output_dir = os.path.join(output_dir, 'density_maps_' + model_name)
if not os.path.exists(output_dir):
    os.mkdir(output_dir)


net = CrowdCounter()

trained_model = model_path
network.load_net(trained_model, net)
net.cuda()
net.eval()
mae = 0.0
mse = 0.0

#load test data
data_loader = ImageDataLoader(data_path, gt_path, shuffle=False, gt_downsample=True, pre_load=True)

for blob in data_loader:
    im_data = blob['data']
    gt_data = blob['gt_density']
    density_map = net(im_data, gt_data)
    density_map = density_map.data.cpu().numpy()
    # accumulate the error metrics as in code example #3
    gt_count = np.sum(gt_data)
    et_count = np.sum(density_map)
    mae += abs(gt_count - et_count)
    mse += (gt_count - et_count) ** 2

mae = mae / data_loader.get_num_samples()
mse = np.sqrt(mse / data_loader.get_num_samples())
print('\nMAE: %0.2f, MSE: %0.2f' % (mae, mse))
Code example #5
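Training-setup excerpt: fix the random seeds, initialize the network weights, build the Adam optimizer, and optionally connect to TensorBoard through CrayonClient.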
# TensorBoard config
use_tensorboard = False
save_exp_name = method + '_' + dataset_name + '_' + 'v1'
remove_all_log = False  # remove all historical experiments in TensorBoard
exp_name = None  # the previous experiment name in TensorBoard

# ------------
rand_seed = 64678
if rand_seed is not None:
    np.random.seed(rand_seed)
    torch.manual_seed(rand_seed)
    torch.cuda.manual_seed(rand_seed)

# load net
net = CrowdCounter()
network.weights_normal_init(net, dev=0.01)
net.cuda()
net.train()

params = list(net.parameters())
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                    net.parameters()),
                             lr=lr)

if not os.path.exists(output_dir):
    os.mkdir(output_dir)

# tensorboard
use_tensorboard = use_tensorboard and CrayonClient is not None
if use_tensorboard:
    cc = CrayonClient(hostname='127.0.0.1')
    if remove_all_log:
        cc.remove_all_experiments()
    if exp_name is None:
        exp_name = save_exp_name
        exp = cc.create_experiment(exp_name)
    else:
        exp = cc.open_experiment(exp_name)
Code example #6
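Full training script for the WorldExpo'10 data: a training loop with periodic logging, a checkpoint every other epoch, and best-model tracking on the validation set.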
def main():
    parser = argparse.ArgumentParser(description='mcnn worldexp.')
    parser.add_argument('--preload', type=int, default=1)
    parser.add_argument('--data', type=str, default="/mnt/m2/mzcc/crowd_data/worldexpo", help='dataset root directory')
    args = parser.parse_args()
    method = 'mcnn'
    dataset_name = 'worldexpo'
    output_dir = './saved_models/'

    data_path = args.data
    train_path = os.path.join(data_path, 'train_frame')
    train_gt_path = os.path.join(data_path, 'train_dmap')
    train_mask_path = os.path.join(data_path, 'train_roi')
    val_path = os.path.join(data_path, 'test_frame')
    val_gt_path = os.path.join(data_path, 'test_dmap')
    val_mask_path = os.path.join(data_path, 'test_roi')

    #training configuration
    start_step = 0
    end_step = 3000
    lr = 0.000001
    momentum = 0.9
    disp_interval = 500
    log_interval = 250


    # TensorBoard config
    use_tensorboard = False
    save_exp_name = method + '_' + dataset_name + '_' + 'v1'
    remove_all_log = False   # remove all historical experiments in TensorBoard
    exp_name = None # the previous experiment name in TensorBoard

    # ------------
    rand_seed = 64678
    if rand_seed is not None:
        np.random.seed(rand_seed)
        torch.manual_seed(rand_seed)
        torch.cuda.manual_seed(rand_seed)


    # load net
    net = CrowdCounter()
    network.weights_normal_init(net, dev=0.01)
    # network.weights_xavier_init(net, gain=0.01)
    net.cuda()
    net.train()

    params = list(net.parameters())
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=lr)

    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    # tensorboard
    use_tensorboard = use_tensorboard and CrayonClient is not None
    if use_tensorboard:
        cc = CrayonClient(hostname='127.0.0.1')
        if remove_all_log:
            cc.remove_all_experiments()
        if exp_name is None:
            exp_name = save_exp_name
            exp = cc.create_experiment(exp_name)
        else:
            exp = cc.open_experiment(exp_name)

    # training
    train_loss = 0
    step_cnt = 0
    re_cnt = False
    t = Timer()
    t.tic()

    data_loader = ExrImageDataLoader(train_path, train_gt_path, mask_path=train_mask_path,
                                     shuffle=True, gt_downsample=True, pre_load=args.preload)
    data_loader_val = ExrImageDataLoader(val_path, val_gt_path, mask_path=val_mask_path,
                                         shuffle=False, gt_downsample=True, pre_load=False)
    best_mae = 10000000

    for epoch in range(start_step, end_step+1):
        step = -1
        train_loss = 0
        for blob in data_loader:
            step = step + 1
            im_data = blob['data']
            gt_data = blob['gt_density']
            mask = blob['mask']
            density_map = net(im_data, gt_data, mask=mask)
            loss = net.loss
            train_loss += loss.item()
            step_cnt += 1
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if step % disp_interval == 0:
                print("current loss: {}".format(loss.item()))
                duration = t.toc(average=False)
                fps = step_cnt / duration
                gt_count = np.sum(gt_data)
                density_map = density_map.data.cpu().numpy()
                et_count = np.sum(density_map)
                utils.save_results(im_data, gt_data, density_map, output_dir)
                log_text = 'epoch: %4d, step %4d, Time: %.4fs, gt_cnt: %4.1f, et_cnt: %4.1f' % (
                    epoch, step, 1. / fps, gt_count, et_count)
                log_print(log_text, color='green', attrs=['bold'])
                re_cnt = True


            if re_cnt:
                t.tic()
                re_cnt = False

        if (epoch % 2 == 0):
            save_name = os.path.join(output_dir, '{}_{}_{}.h5'.format(method, dataset_name, epoch))
            network.save_net(save_name, net)
            #calculate error on the validation dataset
            mae, mse = evaluate_model(save_name, data_loader_val)
            if mae < best_mae:
                best_mae = mae
                best_mse = mse
                best_model = '{}_{}_{}.h5'.format(method, dataset_name, epoch)
            log_text = 'EPOCH: %d, MAE: %.1f, MSE: %0.1f' % (epoch, mae, mse)
            log_print(log_text, color='green', attrs=['bold'])
            log_text = 'BEST MAE: %0.1f, BEST MSE: %0.1f, BEST MODEL: %s' % (best_mae, best_mse, best_model)
            log_print(log_text, color='green', attrs=['bold'])
            if use_tensorboard:
                exp.add_scalar_value('MAE', mae, step=epoch)
                exp.add_scalar_value('MSE', mse, step=epoch)
                exp.add_scalar_value('train_loss', train_loss/data_loader.get_num_samples(), step=epoch)
Code example #7
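Inference script for the minibus data: run the model over a directory of frames, print per-frame counts, and optionally save or display the density maps.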
def main():
    parser = argparse.ArgumentParser(description='mcnn worldexp.')
    parser.add_argument('--preload', type=int, default=0)
    parser.add_argument('--data',
                        type=str,
                        default="./minibus_data",
                        help='data path')
    parser.add_argument('--model',
                        type=str,
                        default="./saved_models/mcnn_worldexpo_2000.h5",
                        help='model path')
    parser.add_argument('--saveden', type=int, default=1)
    parser.add_argument('--vis', type=int, default=0)

    args = parser.parse_args()
    method = 'mcnn'
    dataset_name = 'minibus'
    output_dir = './saved_models/'

    data_path = args.data
    vis = args.vis

    gt_path = None
    model_path = args.model
    save_output = args.saveden

    output_dir = './output/'
    model_name = os.path.basename(model_path).split('.')[0]
    file_results = os.path.join(output_dir, 'results_' + model_name + '_.txt')
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    output_dir = os.path.join(output_dir, 'density_maps_' + model_name)
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    net = CrowdCounter()

    trained_model = model_path
    network.load_net(trained_model, net)
    print("loaded: {}".format(trained_model))
    net.cuda()
    net.eval()
    mae = 0.0
    mse = 0.0

    #load test data
    data_loader = ExrImageDataLoader(data_path,
                                     None,
                                     mask_path=-1,
                                     shuffle=False,
                                     gt_downsample=True,
                                     pre_load=args.preload)

    for blob in data_loader:
        im_data = blob['data']
        gt_data = blob['gt_density']
        mask = blob["mask"]
        density_map = net(im_data, gt_data, mask)
        density_map = density_map.data.cpu().numpy()
        gt_count = np.sum(gt_data).item()  # .item() replaces the deprecated np.asscalar()
        et_count = np.sum(density_map).item()
        log_text = 'fname: %s gt_cnt: %4.1f, et_cnt: %4.1f' % (
            blob['fname'], gt_count, et_count)
        print(log_text)
        mae += abs(gt_count - et_count)
        mse += ((gt_count - et_count) * (gt_count - et_count))
        if vis:
            utils.display_results(im_data, gt_data, density_map)
        if save_output:
            utils.save_density_map(
                density_map, output_dir,
                'output_' + blob['fname'].split('.')[0] + '.png')

    mae = mae / data_loader.get_num_samples()
    mse = np.sqrt(mse / data_loader.get_num_samples())
    print('\nMAE: %0.2f, MSE: %0.2f' % (mae, mse))