Example #1
def run(dataset_dir, path_file):

    # determine the expected location of necessary files from
    # within the dataset
    config_xml = dataset_filepaths.get_hardware_config_xml(dataset_dir)
    timesync_xml = dataset_filepaths.get_timesync_xml(dataset_dir)
    cmfile = dataset_filepaths.get_carvemap_file(dataset_dir)
    wedgefile = dataset_filepaths.get_wedgefile(dataset_dir)
    fssfiles = dataset_filepaths.get_all_fss_files(dataset_dir)

    # verify input is good
    if fssfiles is None:
        print "Error! Unable to determine fss files to use"
        return -1

    # prepare the command-line arguments for the wedge_gen code
    args = [WEDGE_GEN_EXE, '-c', config_xml, '-m', cmfile, \
     '-w', wedgefile, '-p', os.path.abspath(path_file), \
     '-s', SETTINGS_XML, '-t', timesync_xml] + fssfiles

    # run the wedge_gen code
    ret = subprocess.call(args, executable=WEDGE_GEN_EXE, \
     cwd=dataset_dir, stdout=None, stderr=None, \
     stdin=None, shell=False)
    if ret != 0:
        print "wedge_gen program returned error", ret
        return -4

    # success
    return 0
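
These examples are excerpts from larger pipeline modules, so the imports and executable-path constants they use are defined off-screen. A minimal, purely hypothetical preamble that would let Example #1 run (the real module resolves these paths itself) might be:

import os
import subprocess
import dataset_filepaths

# hypothetical paths -- the original module defines these elsewhere,
# typically relative to its own location in the source tree
WEDGE_GEN_EXE = os.path.abspath('../../bin/wedge_gen')
SETTINGS_XML  = os.path.abspath('../../config/backpack_settings.xml')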
Example #2
def run(dataset_dir):

    # determine the expected location of necessary files from
    # within the dataset
    timefile = dataset_filepaths.get_timesync_xml(dataset_dir)
    conffile = dataset_filepaths.get_hardware_config_xml(dataset_dir)

    # read the xml configuration file
    conf = backpackconfig.Configuration(conffile, True, dataset_dir)

    # iterate through the available URG scanners
    urgSensors = conf.find_sensors_by_type(URG_TYPE)
    for urg in urgSensors:

        # get the output dat file for this sensor
        datfile = os.path.abspath(os.path.join(dataset_dir, \
                conf.find_sensor_prop(urg, URG_DAT_XPATH, URG_TYPE)))

        # get the fss file that will be generated for this sensor
        fssfile = dataset_filepaths.get_fss_file(datfile)

        # prepare the command-line arguments for the filter_urg_scans code
        args = [FILTER_EXE, datfile, fssfile, timefile]

        # run the filter_urg_scans code
        ret = subprocess.call(args, executable=FILTER_EXE, \
            cwd=dataset_dir, stdout=None, stderr=None, \
            stdin=None, shell=False)
        if ret != 0:
            print "filter_urg_scans program returned error", ret
            return -1

    # success
    return 0
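
Example #2 likewise leans on module constants the snippet omits (URG_TYPE, URG_DAT_XPATH, FILTER_EXE). Judging from the sensor-type and XPath strings visible in Examples #5, #6, and #9, plausible, though unverified, definitions would be:

# illustrative guesses inferred from the other examples
URG_TYPE      = 'lasers'
URG_DAT_XPATH = 'configFile/urg_datafile'
FILTER_EXE    = os.path.abspath('../../bin/filter_urg_scans')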
Example #3
def run(dataset_dir, name_of_dataset):

    # check that all required executables exist
    if not os.path.exists(GEN_TEX_EXE):
        print "Error! Could not find generate_tex executable:", \
              GEN_TEX_EXE
        return -1

    # get paths to input files
    configfile = dataset_filepaths.get_hardware_config_xml(dataset_dir)
    pathfile   = dataset_filepaths.get_madfile_from_name(dataset_dir, \
         name_of_dataset)
    fp_files = dataset_filepaths.get_all_floorplan_files(dataset_dir)

    # prepare output directory
    docs_dir = dataset_filepaths.get_docs_dir(dataset_dir)
    if not os.path.exists(docs_dir):
        os.makedirs(docs_dir)
    texfile    = dataset_filepaths.get_tex_file(dataset_dir, \
       name_of_dataset)

    # prepare arguments
    args = [GEN_TEX_EXE, '-c', configfile, '-p', pathfile, '-o', texfile]
    for f in fp_files:
        args += ['-f', f]

    # generate the .tex file
    print "##### generating dataset documentation #####"
    ret = subprocess.call(args, \
                         executable=GEN_TEX_EXE, cwd=dataset_dir, \
                         stdout=None, stderr=None, stdin=None, shell=False)
    if ret != 0:
        print "Error! Tex-file generation program returned", ret
        return -2

    # run pdflatex on the result
    texfile_local = os.path.basename(texfile)
    dnull = open(os.devnull, 'w')
    ret = subprocess.call( \
     ['pdflatex', '-halt-on-error', texfile_local], \
     cwd=docs_dir, stdout=dnull, stderr=None, stdin=None, \
     shell=False)
    dnull.close()
    if ret != 0:
        print "Error! pdflatex returned", ret
        return -3

    # move the output pdf file to the root directory of the dataset
    pdffile_old = os.path.abspath(os.path.join(dataset_dir, docs_dir, \
       (name_of_dataset + '.pdf')))
    pdffile_new = os.path.abspath(os.path.join(dataset_dir, \
       (name_of_dataset + '.pdf')))
    if os.path.exists(pdffile_old):
        shutil.move(pdffile_old, pdffile_new)

    # success
    return 0
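
One caveat on the pdflatex step: LaTeX resolves cross-references iteratively, so a document with a table of contents or \ref links usually needs at least two passes, while the snippet above runs only one. A hedged two-pass variant of that call:

dnull = open(os.devnull, 'w')
for _ in range(2):
    # run pdflatex twice so cross-references settle
    ret = subprocess.call( \
        ['pdflatex', '-halt-on-error', texfile_local], \
        cwd=docs_dir, stdout=dnull, stderr=None, \
        stdin=None, shell=False)
    if ret != 0:
        break
dnull.close()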
Example #4
def run(dataset_dir, madfile, debug=False):

    # check that directories exist
    output_dir = dataset_filepaths.get_carving_fp_dir(dataset_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # determine the expected location of necessary files from
    # within the dataset
    config_xml = dataset_filepaths.get_hardware_config_xml(dataset_dir)
    fssfiles   = dataset_filepaths.get_all_fss_files(dataset_dir)
    pathfile   = dataset_filepaths.get_noisypath_file(dataset_dir)
    octfile    = dataset_filepaths.get_octree(dataset_dir)
    levelsfile = dataset_filepaths.get_carving_levels_file(dataset_dir)
    dqprefix   = dataset_filepaths.get_carving_dq_prefix(dataset_dir)

    # verify input is good
    if fssfiles is None:
        print "Error! Unable to determine fss files to use"
        return -1

    # prepare the command-line arguments for the oct2dq code and run it
    args = [OCT2DQ_EXE, '-c', config_xml, '-s', SETTINGS_XML, \
        pathfile, octfile, levelsfile, '-o', dqprefix] + fssfiles
    ret = callproc(OCT2DQ_EXE, args, dataset_dir, debug)
    if ret != 0:
        print "oct2dq program returned error",ret
        return -2

    # get the list of generated wall sampling (.dq) files
    dqfiles = dataset_filepaths.get_carving_dq_files(dataset_dir)
    
    # run the floorplan generation code on each dq file
    for dq in dqfiles:

        # get path to corresponding output fp file
        fpfile = dataset_filepaths.get_fp_file(dq)

        # run the code
        args = [FLOORPLAN_EXE, dq, os.path.abspath(madfile), \
                config_xml, fpfile, '-s', '-1']
        ret = callproc(FLOORPLAN_EXE, args, dataset_dir, debug)
        if ret != 0:
            print "floorplan_gen program returned error",ret
            print "\tInput: %s" % dq
	    return -3

    # success
    return 0
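
Example #4 routes all process invocation through a callproc helper that the snippet does not define. A minimal sketch consistent with how it is called, and with the gdb wrapping shown in Example #7, might be:

def callproc(exe, args, cwd, debug):
    # hypothetical helper: run exe with the given argument list
    # in directory cwd, under gdb when debug is True, and
    # return the child's exit code
    if debug:
        args = ['gdb', '--args'] + args
        exe = 'gdb'
    return subprocess.call(args, executable=exe, cwd=cwd, \
            stdout=None, stderr=None, stdin=None, shell=False)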
Example #5
def run(datasetDir):

    # Read in the hardware config file
    configFile = dataset_filepaths.get_hardware_config_xml(datasetDir)
    conf = backpackconfig.Configuration(configFile, True, datasetDir)

    # Find all the active cameras in the file
    cameraList = conf.find_sensors_by_type("cameras")
    if len(cameraList) == 0:
        print "No active cameras found in dataset " + datasetDir
        return 0

    # get location of masks for any cameras
    MASK_DIR = dataset_filepaths.get_camera_masks_dir(datasetDir)

    # Then we loop over the cameraList generating the required things
    for cameraName in cameraList :

        print ""
        print "==== RECTIFYING : " + cameraName + " ===="

        # now we need to find out what the input names should be
        calibFile = conf.find_sensor_prop(cameraName, 
            "configFile/dalsa_fisheye_calibration_file", "cameras")
        calibFile = os.path.join(datasetDir, calibFile)
        metaDataFile = os.path.join(datasetDir, \
            conf.find_sensor_prop(cameraName, \
            "configFile/dalsa_metadata_outfile", "cameras"))
        metaDataFile = dataset_filepaths.get_color_metadata_file( \
            cameraName, metaDataFile)
        rToCommon = conf.find_sensor_prop(cameraName,
            "rToCommon", "cameras").split(",")
        tToCommon = conf.find_sensor_prop(cameraName,
            "tToCommon", "cameras").split(",")
        extrinStr =  " ".join((rToCommon + tToCommon))
        outputDirectory = conf.find_sensor_prop(cameraName, 
            "configFile/dalsa_output_directory", "cameras")
        outputDirectory = os.path.abspath(os.path.join( \
            datasetDir, outputDirectory, '..', 'rectified'))

        # Adjust the maskdir
        maskDir = os.path.join(MASK_DIR, cameraName)

        # Get the camera serial number
        camSerial = conf.find_sensor_prop(cameraName, 
            "serialNum", "cameras")
        camSerial = re.sub("[^0-9]", "", camSerial)

        ### Render the three virtual cameras (up, level, down)
        vcams = [("Up",    'up',    FUP,    "90 0 0",  "0"),
                 ("Level", 'level', FLEVEL, "0 0 0",   "1"),
                 ("Down",  'down',  FDOWN,  "-90 0 0", "2")]
        for (label, vcam, focal, rotation, suffix) in vcams:

            print "---" + label + " Images"

            # Make vcam specific inputs
            outDir = os.path.join(outputDirectory, vcam)
            K = " ".join([str(max(IMAGESIZE)/focal), "0", str(IMAGESIZE[1]/2),
                          "0", str(max(IMAGESIZE)/focal), str(IMAGESIZE[0]/2),
                          "0", "0", "1"])
            maskfile = os.path.join(maskDir, vcam + ".jpg")
            serial = camSerial + suffix

            # Run the command
            command = [RECTIFY_EXE,
                "-ic", calibFile,
                "-id", datasetDir,
                "-iv", maskfile,
                "-ie", extrinStr,
                "-ik", K,
                "-im", metaDataFile,
                "-ir", rotation,
                "-is", str(IMAGESIZE[0]) + " " + str(IMAGESIZE[1]),
                "-od", outDir,
                "-os", serial]
            ret = os.system(" ".join(command))
            if ret != 0:
                print "Error Processing " + cameraName \
                    + " " + vcam + " images"
                return 2

    return 0
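
A note on the os.system call above: the shell is what splits multi-token values such as K, extrinStr, and rotation into separate argv entries, but it also mangles any path containing a space. A shell-free sketch using subprocess (assumed imported) that performs the splitting explicitly:

# sketch only: expand multi-token values by hand instead of
# relying on the shell, so paths with spaces survive intact
argv  = [RECTIFY_EXE, "-ic", calibFile, "-id", datasetDir]
argv += ["-iv", maskfile]
argv += ["-ie"] + extrinStr.split()
argv += ["-ik"] + K.split()
argv += ["-im", metaDataFile]
argv += ["-ir"] + rotation.split()
argv += ["-is", str(IMAGESIZE[0]), str(IMAGESIZE[1])]
argv += ["-od", outDir, "-os", serial]
ret = subprocess.call(argv, shell=False)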
Example #6
def run(dataset_dir, pathfile, use_cameras=True):

    # check that executable exists
    if not os.path.exists(POINTCLOUD_EXE):
        print "Error!  Could not find pointcloud", \
         "generation executable:", \
         POINTCLOUD_EXE
        return None

    # get configuration files for this dataset
    timesync_xml = dataset_filepaths.get_timesync_xml(dataset_dir)
    config_xml = dataset_filepaths.get_hardware_config_xml(dataset_dir)

    # parse the hardware configuration file
    sensor_types, sensor_cfn, sensor_names = \
      config.parse_backpack_xml(config_xml)
    if sensor_types is None or sensor_cfn is None \
      or len(sensor_types) != len(sensor_cfn):
        return None  # could not parse the xml file

    # find the laser and camera files to use as input
    geomfiles = []
    geomnames = []
    cam_metas = []
    cam_calibs = []
    cam_dirs = []
    for si in range(len(sensor_types)):

        # check for laser scanners
        if sensor_types[si] == 'laser':

            # check if this sensor is in the whitelist
            if sensor_names[si] not in laser_whitelist:
                continue  # ignore this one

            # parse the settings file for this laser
            urg_settings = config.parse_settings_xml( \
              os.path.normpath(os.path.join( \
              dataset_dir, sensor_cfn[si])))
            if urg_settings is None:
                print "Error! Could not parse laser", \
                 sensor_cfn[si]
                return None  # unable to parse settings

            # get the laser data file (relative to dataset)
            urg_filename = urg_settings["urg_datafile"]

            # add to our list
            geomfiles.append(urg_filename)
            geomnames.append(sensor_names[si])

        # check for cameras
        if sensor_types[si] == 'camera':

            # check if this camera is in the whitelist
            if sensor_names[si] not in camera_whitelist:
                continue  # ignore this one

            # parse the settings file for camera
            cam_settings = config.parse_settings_xml( \
              os.path.normpath(os.path.join( \
              dataset_dir, sensor_cfn[si])))
            if cam_settings is None:
                print "Error! Could not parse cam", \
                 sensor_cfn[si]
                return None  # unable to parse settings

            # get the color metadata file (relative to dataset)
            cam_metas.append( \
             dataset_filepaths.get_color_metadata_file( \
             sensor_names[si], \
             cam_settings["dalsa_metadata_outfile"]))

            # get calibration file (relative to dataset)
            cam_calibs.append(cam_settings[ \
             "dalsa_fisheye_calibration_file"])

            # get image directory for this camera (relative)
            cam_dirs.append( \
             dataset_filepaths.get_color_image_dir( \
             cam_settings["dalsa_output_directory"]))

    # prepare output directory for pointclouds
    name = dataset_filepaths.get_file_body(pathfile)
    if use_cameras:
        pc_output_dir = dataset_filepaths.get_colored_pc_dir( \
          dataset_dir)
    else:
        pc_output_dir = dataset_filepaths.get_pointcloud_dir( \
          dataset_dir)
    if not os.path.exists(pc_output_dir):
        os.makedirs(pc_output_dir)

    # choose appropriate parameters based on input
    if use_cameras:
        range_limit = '10'
    else:
        range_limit = '30'

    # now that we have a list of geometry sensors to use, we
    # want to make pointcloud files for each sensor
    output_files = []
    for si in range(len(geomfiles)):

        # specify output file for this scanner
        xyzfile = os.path.join(pc_output_dir, name + '_' \
         + geomnames[si] + '.xyz')

        # Prepare arguments for pointcloud generation program.
        # This will generate pointclouds in units of millimeters
        # and remove points farther than range_limit meters
        # from the sensor
        args = [POINTCLOUD_EXE, '-c', config_xml, \
          '-t', timesync_xml, \
          '-o', xyzfile, \
          '-p', os.path.abspath(pathfile), \
          '-u', '1000', '-r', range_limit, \
          '-l', geomnames[si], geomfiles[si]]

        # add camera information if we want to color
        if use_cameras:
            args += ['--remove_noncolored_points', \
             '--time_buffer', '2.0', '0.5']
            for ci in range(len(cam_metas)):
                args += ['-f', cam_metas[ci], \
                 cam_calibs[ci], cam_dirs[ci]]

        # run pointcloud generation program
        ret = subprocess.call(args, executable=POINTCLOUD_EXE, \
                cwd=dataset_dir, stdout=None, stderr=None, \
                stdin=None, shell=False)
        if ret != 0:
            print "Error! Pointcloud generation program", \
             "returned",ret
            return None

        # record where we wrote the output
        output_files.append(xyzfile)

    # return the final list of files
    return output_files
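
A hedged usage sketch for Example #6 (paths hypothetical): the function returns the list of .xyz files it wrote, or None on any failure, so callers should test for None rather than truthiness, since an empty list is a valid result:

files = run('/data/backpack/run01', '/data/backpack/run01/path.mad', \
        use_cameras=False)
if files is None:
    print "pointcloud generation failed"
else:
    for f in files:
        print "wrote", f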
Example #7
def run(dataset_dir, pathfile, debug=False):

    # ensure abspath for input files
    dataset_dir = os.path.abspath(dataset_dir)
    pathfile = os.path.abspath(pathfile)

    # check that executable exists
    if not os.path.exists(SCANORAMA_EXE):
        print "Error!  Could not find pointcloud", \
            "generation executable:", \
            SCANORAMA_EXE
        return None

    # verify that output directory exists
    scanoramadir = dataset_filepaths.get_scanorama_dir(dataset_dir)
    if not os.path.exists(scanoramadir):
        os.makedirs(scanoramadir)

    # get necessary files for this dataset
    scanoprefix = dataset_filepaths.get_scanorama_ptx_prefix(dataset_dir)
    metafile = dataset_filepaths.get_scanorama_metadata_file(dataset_dir)
    modelfile = dataset_filepaths.get_full_mesh_file(dataset_dir)
    config_xml = dataset_filepaths.get_hardware_config_xml(dataset_dir)
    conf = backpackconfig.Configuration(config_xml, True, dataset_dir)

    # Prepare arguments for program
    args = [SCANORAMA_EXE, '-c', config_xml, '-s', SETTINGS_XML, \
                '-m', modelfile, \
                '-p', pathfile, \
                '-o', scanoprefix, \
                '--meta', metafile]
    if debug:
        args = ['gdb', '--args'] + args

    #--------------------------------------------
    # find all active FISHEYE cameras in the file
    #--------------------------------------------
    cam_metas = []
    cam_calibs = []
    cam_dirs = []
    cameraList = conf.find_sensors_by_type("cameras")
    for cameraName in cameraList:

        # check if this sensor is enabled
        e = conf.find_sensor_prop(cameraName, 'enable', 'cameras')
        if e == '0':
            continue

        # prepare the command-line arguments for this camera
        metafile = os.path.join(dataset_dir, \
                conf.find_sensor_prop(cameraName, \
                    'configFile/dalsa_metadata_outfile', 'cameras'))
        metafile = dataset_filepaths.get_color_metadata_file( \
                cameraName, metafile)
        calibfile = os.path.join(dataset_dir, \
                conf.find_sensor_prop(cameraName, \
                    'configFile/dalsa_fisheye_calibration_file', 'cameras'))
        imgdir = os.path.join(dataset_dir, \
                dataset_filepaths.get_color_image_dir( \
                    conf.find_sensor_prop(cameraName, \
                        'configFile/dalsa_output_directory', 'cameras')))

        # add to arguments lists
        cam_metas.append(metafile)
        cam_calibs.append(calibfile)
        cam_dirs.append(imgdir)

    # add FISHEYE camera information
    for ci in range(len(cam_metas)):
        args += ['-f', cam_metas[ci], \
                cam_calibs[ci], cam_dirs[ci]]

    #-----------------------------------------
    # find all active FLIR cameras in the file
    #-----------------------------------------
    cam_metas = []
    cam_calibs = []
    cam_dirs = []
    cameraList = conf.find_sensors_by_type("flirs")
    for cameraName in cameraList:

        # check if this sensor is enabled
        e = conf.find_sensor_prop(cameraName, 'enable', 'flirs')
        if e == '0':
            continue

        # prepare the command-line arguments for this camera
        metafile = os.path.join(dataset_dir, \
                conf.find_sensor_prop(cameraName, \
                    'configFile/flir_metadata_outfile', 'flirs'))
        metafile = dataset_filepaths.get_ir_normalized_metadata_file( \
                cameraName, metafile)
        calibfile = os.path.join(dataset_dir, \
                conf.find_sensor_prop(cameraName, \
                    'configFile/flir_rectilinear_calibration_file', \
                    'flirs'))
        imgdir = os.path.join(dataset_dir, \
                dataset_filepaths.get_ir_normalized_image_dir( \
                    conf.find_sensor_prop(cameraName, \
                        'configFile/flir_output_directory', 'flirs')))

        # add to arguments lists
        cam_metas.append(metafile)
        cam_calibs.append(calibfile)
        cam_dirs.append(imgdir)

    # add RECTILINEAR camera information
    for ci in range(len(cam_metas)):
        args += ['-r', cam_metas[ci], \
                cam_calibs[ci], cam_dirs[ci]]

    #----------------------------------
    # run scanorama generation program
    #----------------------------------
    exe_to_run = SCANORAMA_EXE
    if debug:
        exe_to_run = 'gdb'
    ret = subprocess.call(args, executable=exe_to_run, \
            cwd=dataset_dir, stdout=None, stderr=None, \
            stdin=None, shell=False)
    if ret != 0:
        print "Error! Scanorama generation program", \
                "returned",ret
        return ret

    # success
    return 0
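
The debug branch above leans on gdb's --args convention: everything after --args becomes the inferior program and its argument list, so the executable handed to subprocess must be gdb itself. Reduced to its essentials:

# minimal sketch of the gdb wrapping used in Example #7
args = [SCANORAMA_EXE, '-c', config_xml, '-s', SETTINGS_XML]
if debug:
    args = ['gdb', '--args'] + args
exe_to_run = 'gdb' if debug else SCANORAMA_EXE
ret = subprocess.call(args, executable=exe_to_run, shell=False)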
Example #8
def run(datasetDir, datasetName):

    # The first thing we need to do is import the backpack
    # configuration file so that we can deduce the locations
    # of all the file names
    configFile = dataset_filepaths.get_hardware_config_xml(datasetDir)
    conf = backpackconfig.Configuration(configFile, True, datasetDir)
    meshfile = dataset_filepaths.get_full_mesh_file(datasetDir)

    # Then we need to get a list of all active cameras for this dataset
    cameras = conf.find_sensors_by_type("cameras")
    if len(cameras) == 0:
        print "No active cameras found in dataset " + datasetDir
        return 0

    # For each camera create the triplet of inputs
    code_args = []
    for camera in cameras:

        # get the folder for this camera in the dataset
        datadir = conf.find_sensor_prop(camera,
                                        "configFile/dalsa_output_directory",
                                        "cameras")
        imageDir = path.normpath(path.join(datasetDir, datadir, '..'))

        # Then find the mcd file for this camera
        mcdFiles = glob.glob(path.join(imageDir, \
            'rectified', 'level', '*.mcd'))
        if (len(mcdFiles) == 0):
            print "No .mcd files found in " \
                + path.join(imageDir,'rectified', 'level')
            return 1
        if (len(mcdFiles) > 1):
            print(
                str(len(mcdFiles)) + " .mcd files found in " +
                path.join(imageDir, 'rectified', 'level'))
            return 2

        # Store the final mcd file
        mcdFile = mcdFiles[0]

        # Then locate the pose file for the camera
        poseFiles = glob.glob(
            path.join(datasetDir, 'localization', datasetName, 'cameraposes',
                      camera + "_poses.txt"))
        if (len(poseFiles) == 0):
            print(
                "No camera pose file for " + camera + " found in " +
                path.join(datasetDir, 'localization', datasetName,
                          'cameraposes'))
            return 1

        # Store the pose file
        poseFile = poseFiles[0]

        # Create the output directory name
        outDir = path.join(datasetDir, 'imagemaps', camera)

        # Store as a tuple
        code_args.append(tuple([mcdFile, poseFile, outDir, camera]))

    # Create the arguments for the C++ code
    args = [
        MESH2IMAGE_EXE, "-dir", datasetDir, "-model", meshfile, "-depth", '12'
    ]
    for argset in code_args:
        args += ["-i"]
        args += argset[:]

    # Run the code
    ret = system(" ".join(args))
    if ret != 0:
        print "Depth mapping returned error : " + str(ret)

    # Return the exit code (0 on success)
    return ret
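
Example #8 shells out through system(" ".join(args)), so a space in datasetDir or meshfile would split one argument into two. Every element of args is already a single token here, so the list could be handed to subprocess unchanged; a hedged alternative:

import subprocess
ret = subprocess.call(args, shell=False)
if ret != 0:
    print "Depth mapping returned error : " + str(ret)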
Example #9
def pointcloud_gen(dataset_dir, madfile):

	# the following constants are used to interface with the system
	# hardware configuration
	CAMERA_TYPE            = 'cameras'
	CAMERA_METAFILE_XPATH  = 'configFile/dalsa_metadata_outfile'
	CAMERA_RECTCALIB_XPATH = 'configFile/flir_rectilinear_calibration_file'
	CAMERA_IMAGEDIR_XPATH  = 'configFile/dalsa_output_directory'
	CAMERA_WHITELIST       = ['tango_color']

	# get the necessary file paths from the dataset
	conffile = dataset_filepaths.get_hardware_config_xml(dataset_dir)
	timefile = dataset_filepaths.get_timesync_xml(dataset_dir)
	fssfiles = dataset_filepaths.get_all_fss_files(dataset_dir)
	xyzfile  = os.path.join( \
			dataset_filepaths.get_colored_pc_dir(dataset_dir), \
			dataset_filepaths.get_name_from_madfile(madfile) \
				+ '_tango.xyz')

	# check if the appropriate camera(s) is/are defined
	conf = backpackconfig.Configuration(conffile, True, dataset_dir)
	cameraSensors = conf.find_sensors_by_type(CAMERA_TYPE)

	# prepare the command-line arguments for this executable
	args = [POINTCLOUD_EXE, \
			'-t', timefile, \
			'-c', conffile, \
			'-p', madfile, \
			'-u', '1000', \
			]

	# add all available sensors
	for f in fssfiles:
		args += ['--fss', f]

	# add all available cameras that are in the whitelist
	colorbycamera = False	# tracked for bookkeeping; not read below
	for c in cameraSensors:
		if c in CAMERA_WHITELIST:
			# we should use this camera for color
			colorbycamera = True
			
			# get the argument files for this camera
			cmetadata = dataset_filepaths.get_color_metadata_file( \
					c, \
					os.path.join(dataset_dir, \
					conf.find_sensor_prop(c, \
					CAMERA_METAFILE_XPATH, \
					CAMERA_TYPE)))
			crectcalib = os.path.join(dataset_dir, \
					conf.find_sensor_prop(c, \
					CAMERA_RECTCALIB_XPATH, \
					CAMERA_TYPE))
			cimagedir = os.path.join(dataset_dir, \
					conf.find_sensor_prop(c, \
					CAMERA_IMAGEDIR_XPATH, \
					CAMERA_TYPE))

			# add to the command-line args
			args += ['-q', cmetadata, crectcalib, cimagedir]
	
	# prepare output file
	outdir = os.path.dirname(xyzfile)
	if not os.path.exists(outdir):
		os.makedirs(outdir)
	args += ['-o', xyzfile]

	# call the pointcloud executable
	ret = subprocess.call(args, executable=POINTCLOUD_EXE, \
			cwd=dataset_dir, stdout=None, stderr=None, \
			stdin=None, shell=False)
	if ret != 0:
		print "Pointcloud generation code returned error",ret
		return None

	# return the xyzfile as success
	return xyzfile
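
A hedged usage sketch for Example #9 (paths hypothetical): the function returns the path of the .xyz file it wrote, or None on failure:

xyz = pointcloud_gen('/data/tango/run01', \
		'/data/tango/run01/localization/run01.mad')
if xyz is None:
	print "colored pointcloud generation failed"
else:
	print "wrote", xyz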