Example #1
    def getCrowdCount(self, img_path):
        # Read image files
        im = loadImage(img_path, color=self.is_colored)

        if self.resize_im > 0:
            # Resize image
            im = utl.resizeMaxSize(im, self.resize_im)

        s = time.time()
        #	print 'pw:',self.pw
        npred, resImg, realmap = testOnImg(self.CNN, im, self.pw, self.mask)
        return npred, time.time() - s
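A minimal usage sketch for this method. The enclosing class is not shown in the snippet, so the class name CrowdCounter and its constructor arguments below are assumptions; the snippet only requires that the instance carry CNN, pw, mask, resize_im and is_colored attributes.

# Hypothetical call site: 'CrowdCounter' and its constructor arguments are
# assumed names, not taken from the snippet above.
counter = CrowdCounter(prototxt_path, caffemodel_path, n_scales)
npred, elapsed = counter.getCrowdCount('path/to/image.jpg')
print "predicted count = %.2f (%.2f sec)" % (npred, elapsed)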
Example #2
def main(argv):
    # Init parameters
    use_cpu = False
    gpu_dev = 0

    # GAME max level
    mx_game = 4  # Max game target

    # Batch size
    b_size = -1

    # CNN vars
    prototxt_path = 'models/trancos/hydra2/hydra_deploy.prototxt'
    caffemodel_path = 'models/trancos/hydra2/trancos_hydra2.caffemodel'

    # Get parameters
    try:
        opts, _ = getopt.getopt(
            argv, "h:",
            ["prototxt=", "caffemodel=", "cpu_only", "dev=", "cfg="])
    except getopt.GetoptError as err:
        print "Error while parsing parameters: ", err
        dispHelp(argv[0])
        return

    for opt, arg in opts:
        if opt == '-h':
            dispHelp(argv[0])
            return
        elif opt in ("--prototxt"):
            prototxt_path = arg
        elif opt in ("--caffemodel"):
            caffemodel_path = arg
        elif opt in ("--cpu_only"):
            use_cpu = True
        elif opt in ("--dev"):
            gpu_dev = int(arg)
        elif opt in ("--cfg"):
            cfg_file = arg

    print "Loading configuration file: ", cfg_file
    (dataset, use_mask, mask_file, test_names_file, im_folder, dot_ending, pw,
     sigmadots, n_scales, perspective_path, use_perspective, is_colored,
     results_file, resize_im) = initTestFromCfg(cfg_file)

    print "Choosen parameters:"
    print "-------------------"
    print "Use only CPU: ", use_cpu
    print "GPU devide: ", gpu_dev
    print "Dataset: ", dataset
    print "Results files: ", results_file
    print "Test data base location: ", im_folder
    print "Test inmage names: ", test_names_file
    print "Dot image ending: ", dot_ending
    print "Use mask: ", use_mask
    print "Mask pattern: ", mask_file
    print "Patch width (pw): ", pw
    print "Sigma for each dot: ", sigmadots
    print "Number of scales: ", n_scales
    print "Perspective map: ", perspective_path
    print "Use perspective:", use_perspective
    print "Prototxt path: ", prototxt_path
    print "Caffemodel path: ", caffemodel_path
    print "Batch size: ", b_size
    print "Resize images: ", resize_im
    print "==================="

    print "----------------------"
    print "Preparing for Testing"
    print "======================"

    # Set GPU CPU setting
    if use_cpu:
        caffe.set_mode_cpu()
    else:
        # Use GPU
        caffe.set_device(gpu_dev)
        caffe.set_mode_gpu()

    print "Reading perspective file"
    if use_perspective:
        pers_file = h5py.File(perspective_path, 'r')
        pmap = np.array(pers_file['pmap'])
        pers_file.close()

    mask = None
    if dataset == 'UCSD':
        print "Reading mask"
        if use_mask:
            mask_f = h5py.File(mask_file, 'r')
            mask = np.array(mask_f['mask'])
            mask_f.close()

    print "Reading image file names:"
    im_names = np.loadtxt(test_names_file, dtype='str')

    # Perform test
    ntrueall = []
    npredall = []

    # Init GAME
    n_im = len(im_names)
    game_table = np.zeros((n_im, mx_game))

    # Init CNN
    CNN = CaffePredictor(prototxt_path, caffemodel_path, n_scales)

    print
    print "Start prediction ..."
    count = 0
    gt_vector = np.zeros((len(im_names)))
    pred_vector = np.zeros((len(im_names)))

    for ix, name in enumerate(im_names):
        # Get image paths
        im_path = utl.extendName(name, im_folder)
        dot_im_path = utl.extendName(name,
                                     im_folder,
                                     use_ending=True,
                                     pattern=dot_ending)

        # Read image files
        im = loadImage(im_path, color=is_colored)
        dot_im = loadImage(dot_im_path, color=True)

        # Generate features
        if use_perspective:
            dens_im = genPDensity(dot_im, sigmadots, pmap)
        else:
            dens_im = genDensity(dot_im, sigmadots)

        if resize_im > 0:
            # Resize image
            im = utl.resizeMaxSize(im, resize_im)
            gt_sum = dens_im.sum()
            dens_im = utl.resizeMaxSize(dens_im, resize_im)
            dens_im = dens_im * gt_sum / dens_im.sum()

        # Get mask if needed
        if dataset != 'UCSD':
            if use_mask:
                mask_im_path = utl.extendName(name,
                                              im_folder,
                                              use_ending=True,
                                              pattern=mask_file)
                mask = sio.loadmat(mask_im_path,
                                   chars_as_strings=1,
                                   matlab_compatible=1)
                mask = mask.get('BW')

        s = time.time()
        ntrue, npred, resImg, gtdots = testOnImg(CNN, im, dens_im, pw, mask)
        print "image : %d , ntrue = %.2f ,npred = %.2f , time =%.2f sec" % (
            count, ntrue, npred, time.time() - s)

        # Keep individual predictions
        gt_vector[ix] = ntrue
        pred_vector[ix] = npred

        # Hold predictions and originals
        ntrueall.append(ntrue)
        npredall.append(npred)

        # Compute game metric
        for l in range(mx_game):
            game_table[count, l] = gameMetric(resImg, gtdots, l)

        count = count + 1

    ntrueall = np.asarray(ntrueall)
    npredall = np.asarray(npredall)
    print "done ! mean absolute error %.2f" % np.mean(
        np.abs(ntrueall - npredall))

    # Print Game results
    results = np.zeros(mx_game)
    for l in range(mx_game):
        results[l] = np.mean(game_table[:, l])
        print "GAME for level %d: %.2f " % (l, np.mean(game_table[:, l]))

    # Dump results into a txt file
    np.savetxt(results_file + '_pred.txt', npredall)
    np.savetxt(results_file + '_gt.txt', ntrueall)

    return 0
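Since main() passes argv straight to getopt, the usual entry point strips the program name first. A small sketch (script and config file names are placeholders):

# Sketch of a standard entry point for the getopt-based main() above.
# Example invocation (placeholder names):
#   python test_script.py --cfg <config-file> --dev 0
import sys

if __name__ == '__main__':
    main(sys.argv[1:])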
Example #3
def main(argv):
    # Init cfg file
    cfg_file = ''

    # Get parameters
    try:
        opts, _ = getopt.getopt(argv, "h:", ["cfg="])
    except getopt.GetoptError:
        dispHelp()
        return

    for opt, arg in opts:
        if opt == '-h':
            dispHelp(argv[0])
            return
        elif opt in ("--cfg"):
            cfg_file = arg

    print "Loading configuration file: ", cfg_file
    (dataset, im_folder, im_list_file, output_file, feature_file_path,
     dot_ending, pw_base, pw_norm, pw_dens, sigmadots, Nr, n_scales,
     split_size, do_flip, perspective_path, use_perspective, is_colored,
     resize_im) = initGenFeatFromCfg(cfg_file)

    print "Choosen parameters:"
    print "-------------------"
    print "Dataset: ", dataset
    print "Data base location: ", im_folder
    print "Image names file: ", im_list_file
    print "Output file:", output_file
    print "Output feature names file:", feature_file_path
    print "Dot image ending: ", dot_ending
    print "Patch width (pw_base): ", pw_base
    print "Patch width (pw_norm): ", pw_norm
    print "Patch width (pw_dens): ", pw_dens
    print "Number of patches per image: ", Nr
    print "Perspective map: ", perspective_path
    print "Use perspective:", use_perspective
    print "Sigma for each dot: ", sigmadots
    print "Number of scales: ", n_scales
    print "Split size: ", split_size
    print "Flip images: ", do_flip
    print "Resize images: ", resize_im
    print "==================="

    print "Reading perspective file"
    if use_perspective:
        pers_file = h5py.File(perspective_path, 'r')
        pmap = np.array(pers_file['pmap'])
        pers_file.close()

    print "Creating feature names file:"
    feature_file = open(feature_file_path, 'w')
    feature_file.close()  # Create empty file

    print "Reading image file names:"
    im_names = np.loadtxt(im_list_file, dtype='str')

    ldens = []
    lpos = []
    lpatches = []
    file_count = 0
    for ix, name in enumerate(im_names):
        print "Processing image: ", name
        # Get image paths
        im_path = utl.extendName(name, im_folder)
        dot_im_path = utl.extendName(name,
                                     im_folder,
                                     use_ending=True,
                                     pattern=dot_ending)

        # Read image files
        im = loadImage(im_path, color=is_colored)
        dot_im = loadImage(dot_im_path, color=True)

        # Generate ground-truth density map
        if use_perspective:
            dens_im = genPDensity(dot_im, sigmadots, pmap)
        else:
            dens_im = genDensity(dot_im, sigmadots)

        if resize_im > 0:
            # Resize image
            im = utl.resizeMaxSize(im, resize_im)
            gt_sum = dens_im.sum()
            dens_im = utl.resizeMaxSize(dens_im, resize_im)
            dens_im = dens_im * gt_sum / dens_im.sum()

        # Collect features from random locations
        # height, width, _ = im.shape
        # pos = get_dense_pos(height, width, pw_base=pw_base, stride=5)
        pos = utl.genRandomPos(im.shape, pw_base, Nr)

        # Collect original patches
        patch = cropAtPos(im, pos, pw_base)
        #         patch = cropPerspective(im, pos, pmap, pw_base)

        # Collect dens patches
        dpatch = cropAtPos(dens_im, pos, pw_base)
        #         dpatch = cropPerspective(dens_im, pos, pmap, pw_base)

        # Resize images
        patch = utl.resizePatches(patch, (pw_norm, pw_norm))
        dpatch = utl.resizeListDens(
            dpatch, (pw_dens, pw_dens))  # 18 is the output size used in the paper

        # Flip function
        if do_flip:
            fpatch = hFlipImages(patch)
            fdpatch = hFlipImages(dpatch)

            fscales = extractEscales(fpatch, n_scales)

            # Add flipped data
            lpatches.append(fscales)
            ldens.append(fdpatch)
            lpos.append(pos)

        # Store features and densities
        ldens.append(dpatch)
        lpos.append(pos)
        lpatches.append(extractEscales(patch, n_scales))

        # Save it into a file
        if split_size > 0 and (ix + 1) % split_size == 0:
            # Prepare for saving
            ldens = np.vstack(ldens)
            lpos = np.vstack(lpos)
            patches_list = np.vstack(lpatches[:])

            opt_num_name = output_file + str(file_count) + ".h5"
            print "Saving data file: ", opt_num_name
            print "Saving {} examples".format(len(ldens))

            # Compress data and save
            feature_file = open(feature_file_path, 'a')
            comp_kwargs = {'compression': 'gzip', 'compression_opts': 1}
            with h5py.File(opt_num_name, 'w') as f:
                f.create_dataset('label', data=ldens, **comp_kwargs)

                # Save all scales data
                for s in range(n_scales):
                    dataset_name = 'data_s{}'.format(s)
                    print "Creating dataset: ", dataset_name
                    f.create_dataset(dataset_name,
                                     data=trasposeImages(patches_list[:, s,
                                                                      ...]),
                                     **comp_kwargs)
                f.close()
            feature_file.write(opt_num_name + '\n')
            feature_file.close()

            # Increase file counter
            file_count += 1

            # Clean memory
            ldens = []
            lpos = []
            lpatches = []

    ## Last save
    if len(lpatches) > 0:
        # Prepare for saving
        ldens = np.vstack(ldens)
        lpos = np.vstack(lpos)
        patches_list = np.vstack(lpatches[:])

        opt_num_name = output_file + ".h5"
        print "Saving data file: ", opt_num_name
        print "Saving {} examples".format(len(ldens))

        # Compress data and save
        feature_file = open(feature_file_path, 'a')
        comp_kwargs = {'compression': 'gzip', 'compression_opts': 1}
        with h5py.File(opt_num_name, 'w') as f:
            f.create_dataset('label', data=ldens, **comp_kwargs)
            # Save all scales data
            for s in range(n_scales):
                dataset_name = 'data_s{}'.format(s)
                print "Creating dataset: ", dataset_name
                f.create_dataset(dataset_name,
                                 data=trasposeImages(patches_list[:, s, ...]),
                                 **comp_kwargs)
            f.close()
        feature_file.write(opt_num_name + '\n')
        feature_file.close()

    print "--------------------"
    print "Finish!"
Example #4
def main(argv):
    # Init cfg file
    cfg_file = ''
    
    # Get parameters
    try:
        opts, _ = getopt.getopt(argv, "h:", ["cfg="])
    except getopt.GetoptError:
        dispHelp()
        return
    
    for opt, arg in opts:
        if opt == '-h':
            dispHelp(argv[0])
            return
        elif opt in ("--cfg"):
            cfg_file = arg
            
    print "Loading configuration file: ", cfg_file
    (dataset, im_folder, im_list_file, output_file, feature_file_path, 
    dot_ending, pw_base, pw_norm, pw_dens, sigmadots, Nr, n_scales, split_size, 
    do_flip, perspective_path, use_perspective, is_colored, resize_im) = initGenFeatFromCfg(cfg_file)
    
    print "Choosen parameters:"
    print "-------------------"
    print "Dataset: ", dataset
    print "Data base location: ", im_folder
    print "Image names file: ", im_list_file 
    print "Output file:", output_file
    print "Output feature names file:", feature_file_path
    print "Dot image ending: ", dot_ending
    print "Patch width (pw_base): ", pw_base
    print "Patch width (pw_norm): ", pw_norm
    print "Patch width (pw_dens): ", pw_dens
    print "Number of patches per image: ", Nr
    print "Perspective map: ", perspective_path
    print "Use perspective:", use_perspective
    print "Sigma for each dot: ", sigmadots
    print "Number of scales: ", n_scales
    print "Split size: ", split_size
    print "Flip images: ", do_flip
    print "Resize images: ", resize_im
    print "==================="
    
    print "Reading perspective file"
    if use_perspective:
        pers_file = h5py.File(perspective_path,'r')
        pmap = np.array( pers_file['pmap'] )
        pers_file.close()
    
    print "Creating feature names file:"
    feature_file = open(feature_file_path, 'w')
    feature_file.close() # Create empty file
    
    print "Reading image file names:"
    im_names = np.loadtxt(im_list_file, dtype='str')

    ldens = []
    lpos = []
    lpatches = []
    file_count = 0
    for ix, name in enumerate(im_names):
        print "Processing image: ", name
        # Get image paths
        im_path = utl.extendName(name, im_folder)
        dot_im_path = utl.extendName(name, im_folder, use_ending=True, pattern=dot_ending)
        
        # Read image files
        im = loadImage(im_path, color = is_colored)
        dot_im = loadImage(dot_im_path, color = True)

        # Generate ground-truth density map
        if use_perspective:
            dens_im = genPDensity(dot_im, sigmadots, pmap)
        else:
            dens_im = genDensity(dot_im, sigmadots)

        if resize_im > 0:
            # Resize image
            im = utl.resizeMaxSize(im, resize_im)
            gt_sum = dens_im.sum()
            dens_im = utl.resizeMaxSize(dens_im, resize_im)
            dens_im = dens_im * gt_sum / dens_im.sum()

        # Collect features from random locations
        # height, width, _ = im.shape
        # pos = get_dense_pos(height, width, pw_base=pw_base, stride=5)
        pos = utl.genRandomPos(im.shape, pw_base, Nr)

        # Collect original patches
        patch = cropAtPos(im, pos, pw_base)
        # patch = cropPerspective(im, pos, pmap, pw_base)
        
        # Collect dens patches
        dpatch = cropAtPos(dens_im, pos, pw_base)
        # dpatch = cropPerspective(dens_im, pos, pmap, pw_base)
        
        # Resize images
        patch = utl.resizePatches(patch, (pw_norm,pw_norm))
        dpatch = utl.resizeListDens(dpatch, (pw_dens, pw_dens))  # 18 is the output size used in the paper

        # Flip function
        if do_flip:
            fpatch = hFlipImages(patch)
            fdpatch = hFlipImages(dpatch)

            fscales = extractEscales(fpatch, n_scales)

            # Add flipped data
            lpatches.append( fscales )
            ldens.append( fdpatch )
            lpos.append( pos )

        
        # Store features and densities 
        ldens.append( dpatch )
        lpos.append(pos)
        lpatches.append( extractEscales(patch, n_scales) )
        
        # Save it into a file
        if split_size > 0 and (ix + 1) % split_size == 0:
            # Prepare for saving
            ldens = np.vstack(ldens)
            lpos = np.vstack(lpos)
            patches_list = np.vstack(lpatches[:])
            
            opt_num_name = output_file + str(file_count) + ".h5"
            print "Saving data file: ", opt_num_name
            print "Saving {} examples".format(len(ldens))
        
            # Compress data and save
            feature_file = open(feature_file_path, 'a')
            comp_kwargs = {'compression': 'gzip', 'compression_opts': 1}
            with h5py.File(opt_num_name, 'w') as f:
                f.create_dataset('label', data=ldens, **comp_kwargs)
                
                # Save all scales data
                for s in range(n_scales):
                    dataset_name = 'data_s{}'.format(s) 
                    print "Creating dataset: ", dataset_name
                    f.create_dataset(dataset_name, data=trasposeImages(patches_list[:,s,...]), **comp_kwargs)
                f.close()
            feature_file.write(opt_num_name + '\n')
            feature_file.close()

            # Increase file counter
            file_count += 1

            # Clean memory
            ldens = []
            lpos = []
            lpatches = []
    
    ## Last save
    if len(lpatches) > 0:
        # Prepare for saving
        ldens = np.vstack(ldens)
        lpos = np.vstack(lpos)
        patches_list = np.vstack(lpatches[:])
        
        opt_num_name = output_file + ".h5"
        print "Saving data file: ", opt_num_name
        print "Saving {} examples".format(len(ldens))
    
        # Compress data and save
        feature_file = open(feature_file_path, 'a')
        comp_kwargs = {'compression': 'gzip', 'compression_opts': 1}
        with h5py.File(opt_num_name, 'w') as f:
            f.create_dataset('label', data=ldens, **comp_kwargs)
            # Save all scales data
            for s in range(n_scales):
                dataset_name = 'data_s{}'.format(s) 
                print "Creating dataset: ", dataset_name
                f.create_dataset(dataset_name, data=trasposeImages(patches_list[:,s,...]), **comp_kwargs)
            f.close()
        feature_file.write(opt_num_name + '\n')
        feature_file.close()
    
    print "--------------------"    
    print "Finish!"
Example #5
def main(argv):
    # Init parameters
    use_cpu = True
    gpu_dev = 0

    # Batch size
    b_size = -1

    # CNN vars
    prototxt_path = 'models/ucsd/hydra3/hydra3_deploy.prototxt'
    caffemodel_path = 'models/ucsd/hydra3/trancos_hydra2.caffemodel'

    # Get parameters
    try:
        opts, _ = getopt.getopt(
            argv, "h:",
            ["prototxt=", "caffemodel=", "cpu_only", "dev=", "cfg=", "img="])
    except getopt.GetoptError as err:
        print "Error while parsing parameters: ", err
        dispHelp(argv[0])
        return

    for opt, arg in opts:
        if opt == '-h':
            dispHelp(argv[0])
            return
        elif opt in ("--prototxt"):
            prototxt_path = arg
        elif opt in ("--caffemodel"):
            caffemodel_path = arg
        elif opt in ("--cpu_only"):
            use_cpu = True
        elif opt in ("--dev"):
            gpu_dev = int(arg)
        elif opt in ("--cfg"):
            cfg_file = arg
        elif opt in ("--img"):
            image_file = arg

    print "Loading configuration file: ", cfg_file
    (dataset, use_mask, mask_file, test_names_file, im_folder, dot_ending, pw,
     sigmadots, n_scales, perspective_path, use_perspective, is_colored,
     results_file, resize_im) = initTestFromCfg(cfg_file)

    print "Choosen parameters:"
    print "-------------------"
    print "Use only CPU: ", use_cpu
    print "GPU devide: ", gpu_dev
    print "Dataset: ", dataset
    print "Results files: ", results_file
    print "Test data base location: ", im_folder
    print "Test inmage names: ", test_names_file
    print "Dot image ending: ", dot_ending
    print "Use mask: ", use_mask
    print "Mask pattern: ", mask_file
    print "Patch width (pw): ", pw
    print "Sigma for each dot: ", sigmadots
    print "Number of scales: ", n_scales
    print "Perspective map: ", perspective_path
    print "Use perspective:", use_perspective
    print "Prototxt path: ", prototxt_path
    print "Caffemodel path: ", caffemodel_path
    print "Batch size: ", b_size
    print "Resize images: ", resize_im
    print "==================="

    print "----------------------"
    print "Preparing for Testing"
    print "======================"

    # Set GPU CPU setting
    if use_cpu:
        caffe.set_mode_cpu()
    else:
        # Use GPU
        caffe.set_device(gpu_dev)
        caffe.set_mode_gpu()

    print "Reading perspective file"
    if use_perspective:
        pers_file = h5py.File(perspective_path, 'r')
        pers_file.close()

    mask = None
    if dataset == 'UCSD':
        print "Reading mask"
        if use_mask:
            mask_f = h5py.File(mask_file, 'r')
            mask = np.array(mask_f['mask'])
            mask_f.close()

    # Init CNN
    CNN = CaffePredictor(prototxt_path, caffemodel_path, n_scales)

    # Read image files
    im = loadImage(image_file, color=is_colored)

    if resize_im > 0:
        # Resize image
        im = utl.resizeMaxSize(im, resize_im)

    s = time.time()
    npred, resImg, realImg = testOnImg(CNN, im, pw, mask)

    print "npred = %.2f , time =%.2f sec" % (npred, time.time() - s)
    sio.savemat('predictionmap.mat', {'d_map': realImg})

    return 0
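The saved .mat file can be inspected afterwards; a short sketch, assuming it is read back from the same working directory:

# Sketch: load the density map saved above and report basic statistics.
import numpy as np
import scipy.io as sio

d_map = sio.loadmat('predictionmap.mat')['d_map']
print "density map shape:", d_map.shape, "sum: %.2f" % np.sum(d_map)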
Example #6
def main(argv, image_name):
    use_cpu = False
    gpu_dev = 0
    prototxt_path = 'models/trancos/hydra2/hydra_deploy.prototxt'
    caffemodel_path = 'models/trancos/hydra2/trancos_hydra2.caffemodel'

    try:
        opts, _ = getopt.getopt(
            argv, "h:",
            ["prototxt=", "caffemodel=", "cpu_only", "dev=", "cfg="])
    except getopt.GetoptError as err:
        print("Error while parsing parameters: ", err)
        return

    for opt, arg in opts:
        if opt in ("--prototxt"):
            prototxt_path = arg
        elif opt in ("--caffemodel"):
            caffemodel_path = arg
        elif opt in ("--cpu_only"):
            use_cpu = True
        elif opt in ("--dev"):
            gpu_dev = int(arg)
        elif opt in ("--cfg"):
            cfg_file = arg

    (dataset, use_mask, mask_file, test_names_file, im_folder, dot_ending, pw,
     sigmadots, n_scales, perspective_path, use_perspective, is_colored,
     results_file, resize_im) = init_parameters_from_config(cfg_file)

    if use_cpu:
        caffe.set_mode_cpu()
    else:
        # Use GPU
        caffe.set_device(gpu_dev)
        caffe.set_mode_gpu()

    # Init CNN
    CNN = CaffePredictor(prototxt_path, caffemodel_path, n_scales)

    print("\nStart prediction for " + image_name)

    im_path = utl.extendName(image_name, im_folder)
    im = load_image(im_path, color=is_colored)

    if resize_im > 0:
        im = utl.resizeMaxSize(im, resize_im)

    mask = None
    if use_mask:
        mask_im_path = utl.extendName(image_name,
                                      im_folder,
                                      use_ending=True,
                                      pattern=mask_file)
        mask = sio.loadmat(mask_im_path,
                           chars_as_strings=1,
                           matlab_compatible=1)
        mask = mask.get('BW')

    s = time.time()
    npred, resImg = count_objects(CNN, im, pw, mask)
    print("image : %s, npred = %.2f , time =%.2f sec" %
          (image_name, npred, time.time() - s))

    return npred
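Because this variant takes the image name separately and returns the predicted count, it can be driven from another script. A minimal sketch (the image name is a placeholder):

# Hypothetical caller; command-line flags are forwarded to main() as above.
import sys

if __name__ == '__main__':
    npred = main(sys.argv[1:], 'example_image.jpg')
    print("estimated count: %.2f" % npred)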
Example #7
def main(argv):
    # Init parameters      
    use_cpu = False
    gpu_dev = 0

    # GAME max level
    mx_game = 4 # Max game target

    # Batch size
    b_size = -1

    # CNN vars
    prototxt_path = 'models/trancos/hydra2/hydra_deploy.prototxt'
    caffemodel_path = 'models/trancos/hydra2/trancos_hydra2.caffemodel'
        
        
    # Get parameters
    try:
        opts, _ = getopt.getopt(argv, "h:", ["prototxt=", "caffemodel=", 
                                             "cpu_only", "dev=", "cfg="])
    except getopt.GetoptError as err:
        print "Error while parsing parameters: ", err
        dispHelp(argv[0])
        return
    
    for opt, arg in opts:
        if opt == '-h':
            dispHelp(argv[0])
            return
        elif opt in ("--prototxt"):
            prototxt_path = arg
        elif opt in ("--caffemodel"):
            caffemodel_path = arg
        elif opt in ("--cpu_only"):
            use_cpu = True            
        elif opt in ("--dev"):
            gpu_dev = int(arg)
        elif opt in ("--cfg"):
            cfg_file = arg
            
    print "Loading configuration file: ", cfg_file
    (dataset, use_mask, mask_file, test_names_file, im_folder, 
            dot_ending, pw, sigmadots, n_scales, perspective_path, 
            use_perspective, is_colored, results_file, resize_im) = initTestFromCfg(cfg_file)
            
    print "Choosen parameters:"
    print "-------------------"
    print "Use only CPU: ", use_cpu
    print "GPU devide: ", gpu_dev
    print "Dataset: ", dataset
    print "Results files: ", results_file
    print "Test data base location: ", im_folder
    print "Test inmage names: ", test_names_file
    print "Dot image ending: ", dot_ending
    print "Use mask: ", use_mask
    print "Mask pattern: ", mask_file
    print "Patch width (pw): ", pw
    print "Sigma for each dot: ", sigmadots
    print "Number of scales: ", n_scales
    print "Perspective map: ", perspective_path
    print "Use perspective:", use_perspective
    print "Prototxt path: ", prototxt_path
    print "Caffemodel path: ", caffemodel_path
    print "Batch size: ", b_size
    print "Resize images: ", resize_im
    print "==================="
    
    print "----------------------"
    print "Preparing for Testing"
    print "======================"

    # Set GPU CPU setting
    if use_cpu:
        caffe.set_mode_cpu()
    else:
        # Use GPU
        caffe.set_device(gpu_dev)
        caffe.set_mode_gpu()

    print "Reading perspective file"
    if use_perspective:
        pers_file = h5py.File(perspective_path,'r')
        pmap = np.array( pers_file['pmap'] )
        pers_file.close()
        
    mask = None
    if dataset == 'UCSD':
        print "Reading mask"
        if use_mask:
            mask_f = h5py.File(mask_file,'r')
            mask = np.array(mask_f['mask'])
            mask_f.close()
    
    print "Reading image file names:"
    im_names = np.loadtxt(test_names_file, dtype='str')

    # Perform test
    ntrueall = []
    npredall = []
    
    # Init GAME
    n_im = len( im_names )
    game_table = np.zeros( (n_im, mx_game) )
    
    # Init CNN
    CNN = CaffePredictor(prototxt_path, caffemodel_path, n_scales)
    
    print 
    print "Start prediction ..."
    count = 0
    gt_vector = np.zeros((len(im_names)))
    pred_vector = np.zeros((len(im_names)))    
    
    for ix, name in enumerate(im_names):
        # Get image paths
        im_path = utl.extendName(name, im_folder)
        dot_im_path = utl.extendName(name, im_folder, use_ending=True, pattern=dot_ending)

        # Read image files
        im = loadImage(im_path, color = is_colored)
        dot_im = loadImage(dot_im_path, color = True)
        
        # Generate features
        if use_perspective:
            dens_im = genPDensity(dot_im, sigmadots, pmap)
        else:
            dens_im = genDensity(dot_im, sigmadots)
        
        if resize_im > 0:
            # Resize image
            im = utl.resizeMaxSize(im, resize_im)
            gt_sum = dens_im.sum()
            dens_im = utl.resizeMaxSize(dens_im, resize_im)
            dens_im = dens_im * gt_sum / dens_im.sum()
        
        # Get mask if needed
        if dataset != 'UCSD':
            if use_mask:
                mask_im_path = utl.extendName(name, im_folder, use_ending=True, pattern=mask_file)
                mask = sio.loadmat(mask_im_path, chars_as_strings=1, matlab_compatible=1)
                mask = mask.get('BW')
        
        s = time.time()
        ntrue, npred, resImg, gtdots = testOnImg(CNN, im, dens_im, pw, mask)
        print "image : %d , ntrue = %.2f ,npred = %.2f , time =%.2f sec" % (
            count, ntrue, npred, time.time() - s)
    
        # Keep individual predictions
        gt_vector[ix] = ntrue
        pred_vector[ix] = npred    
    
        # Hold predictions and originals
        ntrueall.append(ntrue)
        npredall.append(npred)
        
        # Compute game metric
        for l in range(mx_game):
            game_table[count, l] = gameMetric(resImg, gtdots, l)
    
        count = count + 1
            
    ntrueall = np.asarray(ntrueall)
    npredall = np.asarray(npredall)
    print "done ! mean absolute error %.2f" % np.mean(np.abs(ntrueall - npredall))

    # Print Game results
    results = np.zeros(mx_game)
    for l in range(mx_game):
        results[l] = np.mean( game_table[:,l] )
        print "GAME for level %d: %.2f " % (l, np.mean( game_table[:,l] ))
    
    # Dump results into a txt file
    np.savetxt(results_file + '_pred.txt', npredall)
    np.savetxt(results_file + '_gt.txt', ntrueall)
    
    return 0