class PrepareDenseScene(desc.CommandLineNode):
    commandLine = 'aliceVision_prepareDenseScene {allParams}'
    size = desc.DynamicNodeSize('input')
    parallelization = desc.Parallelization(blockSize=40)
    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

    inputs = [
        desc.File(
            name='input',
            label='Input',
            description='SfMData file.',
            value='',
            uid=[0],
        ),
        desc.ChoiceParam(
            name='outputFileType',
            label='Output File Type',
            description='Output file type for the undistorted images.',
            value='exr',
            values=['jpg', 'png', 'tif', 'exr'],
            exclusive=True,
            uid=[0],
        ),
        desc.BoolParam(
            name='saveMetadata',
            label='Save Metadata',
            description='Save projections and intrinsics information in image metadata (only for .exr images).',
            value=True,
            uid=[0],
        ),
        desc.BoolParam(
            name='saveMatricesTxtFiles',
            label='Save Matrices Text Files',
            description='Save projections and intrinsics information in text files.',
            value=False,
            uid=[0],
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='Verbosity level (fatal, error, warning, info, debug, trace).',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        desc.File(
            name='output',
            label='Output',
            description='Output folder.',
            value=desc.Node.internalFolder,
            uid=[],
        ),
    ]
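# Illustrative sketch (not Meshroom's actual scheduler): how Parallelization(blockSize=40)
# together with the commandLineRange template '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'
# could split N input views into per-chunk command lines. The helper name, the view count and
# the flag spelling of the base command are hypothetical, used only for the demonstration.
def iter_chunk_ranges(nb_views, block_size=40):
    """Yield (rangeStart, rangeBlockSize) pairs covering nb_views items."""
    for start in range(0, nb_views, block_size):
        yield start, min(block_size, nb_views - start)

# list(iter_chunk_ranges(100)) -> [(0, 40), (40, 40), (80, 20)]
# i.e. three chunks, each run as:
#   aliceVision_prepareDenseScene ... --rangeStart 0 --rangeSize 40   (and so on)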
class FeatureExtraction(desc.CommandLineNode):
    commandLine = 'aliceVision_featureExtraction {allParams}'
    size = desc.DynamicNodeSize('input')
    parallelization = desc.Parallelization(blockSize=40)
    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

    inputs = [
        desc.File(
            name='input',
            label='Input',
            description='SfMData file.',
            value='',
            uid=[0],
        ),
        desc.ChoiceParam(
            name='describerTypes',
            label='Describer Types',
            description='Describer types used to describe an image.',
            value=['sift'],
            values=['sift', 'sift_float', 'sift_upright', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'],
            exclusive=False,
            uid=[0],
            joinChar=',',
        ),
        desc.ChoiceParam(
            name='describerPreset',
            label='Describer Preset',
            description='Control the ImageDescriber configuration (low, medium, normal, high, ultra). The "ultra" configuration can take a long time!',
            value='normal',
            values=['low', 'medium', 'normal', 'high', 'ultra'],
            exclusive=True,
            uid=[0],
        ),
        desc.BoolParam(
            name='forceCpuExtraction',
            label='Force CPU Extraction',
            description='Use only CPU feature extraction.',
            value=True,
            uid=[],
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='Verbosity level (fatal, error, warning, info, debug, trace).',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        desc.File(
            name='output',
            label='Output Folder',
            description='Output path for the features and descriptors files (*.feat, *.desc).',
            value=desc.Node.internalFolder,
            uid=[],
        ),
    ]
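# Illustrative sketch: a non-exclusive ChoiceParam with joinChar=',' is expected to end up
# as one comma-joined token on the command line. The join itself is plain string handling;
# the selected describer types below are just an example, not a recommendation.
def join_choice_values(values, join_char=','):
    return join_char.join(values)

# join_choice_values(['sift', 'akaze']) -> 'sift,akaze'
# which would appear on the command line as something like: --describerTypes sift,akaze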
def attributeDescFromValue(attrName, value, isOutput):
    """ Generate an attribute description (desc.Attribute) that best matches 'value'.

    Args:
        attrName (str): the name of the attribute
        value: the value of the attribute
        isOutput (bool): whether the attribute is an output

    Returns:
        desc.Attribute: the generated attribute description
    """
    params = {
        "name": attrName,
        "label": attrName,
        "description": "Incompatible parameter",
        "value": value,
        "uid": (),
        "group": "incompatible"
    }
    if isinstance(value, bool):
        return desc.BoolParam(**params)
    if isinstance(value, int):
        return desc.IntParam(range=None, **params)
    elif isinstance(value, float):
        return desc.FloatParam(range=None, **params)
    elif isinstance(value, pyCompatibility.basestring):
        if isOutput or os.path.isabs(value) or Attribute.isLinkExpression(value):
            return desc.File(**params)
        else:
            return desc.StringParam(**params)
    # List/GroupAttribute: recursively build descriptions
    elif isinstance(value, (list, dict)):
        del params["value"]
        del params["uid"]
        attrDesc = None
        if isinstance(value, list):
            elt = value[0] if value else ""  # fallback: empty string value if list is empty
            eltDesc = CompatibilityNode.attributeDescFromValue("element", elt, isOutput)
            attrDesc = desc.ListAttribute(elementDesc=eltDesc, **params)
        elif isinstance(value, dict):
            groupDesc = []
            for key, value in value.items():
                eltDesc = CompatibilityNode.attributeDescFromValue(key, value, isOutput)
                groupDesc.append(eltDesc)
            attrDesc = desc.GroupAttribute(groupDesc=groupDesc, **params)
        # override the empty default value with the actual value
        attrDesc._value = value
        return attrDesc
    # handle any other type of parameters as Strings
    return desc.StringParam(**params)
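# Simplified, standalone illustration of the same type dispatch. It returns label strings
# instead of desc.* objects so it can run without Meshroom, and it omits the link-expression
# check on strings; treat it as a sketch of the branching above, not a replacement for it.
import os

def attribute_kind(value, is_output=False):
    if isinstance(value, bool):        # bool must be tested before int (bool is a subclass of int)
        return 'BoolParam'
    if isinstance(value, int):
        return 'IntParam'
    if isinstance(value, float):
        return 'FloatParam'
    if isinstance(value, str):
        return 'File' if (is_output or os.path.isabs(value)) else 'StringParam'
    if isinstance(value, list):
        return 'ListAttribute'
    if isinstance(value, dict):
        return 'GroupAttribute'
    return 'StringParam'

# attribute_kind(True) -> 'BoolParam';  attribute_kind('/tmp/a.exr') -> 'File'
# attribute_kind('foo') -> 'StringParam';  attribute_kind({'x': 1.0}) -> 'GroupAttribute'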
class MeshFiltering(desc.CommandLineNode): commandLine = 'aliceVision_meshFiltering {allParams}' inputs = [ desc.File( name='inputMesh', label='Input Mesh', description='''Input Mesh (OBJ file format).''', value='', uid=[0], ), desc.FloatParam( name='removeLargeTrianglesFactor', label='Filter Large Triangles Factor', description='Remove all large triangles. We consider a triangle as large if one edge is bigger than N times the average edge length. Put zero to disable it.', value=60.0, range=(1.0, 100.0, 0.1), uid=[0], ), desc.BoolParam( name='keepLargestMeshOnly', label='Keep Only the Largest Mesh', description='Keep only the largest connected triangles group.', value=False, uid=[0], ), desc.IntParam( name='iterations', label='Smoothing Iterations', description='Number of smoothing iterations', value=5, range=(0, 50, 1), uid=[0], ), desc.FloatParam( name='lambda', label='Lambda', description='', value=1.0, range=(0.0, 10.0, 0.1), uid=[0], advanced=True, ), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', description='''verbosity level (fatal, error, warning, info, debug, trace).''', value='info', values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], exclusive=True, uid=[], ), ] outputs = [ desc.File( name='outputMesh', label='Output Mesh', description='''Output mesh (OBJ file format).''', value=desc.Node.internalFolder + 'mesh.obj', uid=[], ), ]
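# Toy illustration of the 'removeLargeTrianglesFactor' criterion described above: a triangle
# is considered large if one of its edges exceeds N times the average edge length
# (N = 60 by default, 0 disables the filter). This is a conceptual sketch, not
# AliceVision's mesh-filtering code.
def is_large_triangle(edge_lengths, avg_edge_length, factor=60.0):
    if factor == 0:
        return False  # filter disabled
    return max(edge_lengths) > factor * avg_edge_length

# is_large_triangle([0.1, 0.2, 9.0], avg_edge_length=0.12) -> True  (9.0 > 60 * 0.12)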
class PanoramaWarping(desc.CommandLineNode): commandLine = 'aliceVision_panoramaWarping {allParams}' size = desc.DynamicNodeSize('input') parallelization = desc.Parallelization(blockSize=5) commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' documentation = ''' Compute the image warping for each input image in the panorama coordinate system. ''' inputs = [ desc.File( name='input', label='Input', description="SfM Data File", value='', uid=[0], ), desc.BoolParam( name='estimateResolution', label='Estimate Resolution', description='Estimate output panorama resolution automatically based on the input images resolution.', value=True, uid=[0], group=None, # skip group from command line ), desc.IntParam( name='panoramaWidth', label='Panorama Width', description='Choose the output panorama width (in pixels).', value=10000, range=(0, 50000, 1000), uid=[0], enabled=lambda node: (not node.estimateResolution.value), ), desc.IntParam( name='percentUpscale', label='Upscale Ratio', description='Percentage of upscaled pixels.\n' '\n' 'How many percent of the pixels will be upscaled (compared to its original resolution):\n' ' * 0: all pixels will be downscaled\n' ' * 50: on average the input resolution is kept (optimal to reduce over/under-sampling)\n' ' * 100: all pixels will be upscaled\n', value=50, range=(0, 100, 1), enabled=lambda node: (node.estimateResolution.value), uid=[0] ), desc.IntParam( name='maxPanoramaWidth', label='Max Panorama Width', description='Choose the maximal output panorama width (in pixels). Zero means no limit.', value=35000, range=(0, 100000, 1000), uid=[0], enabled=lambda node: (node.estimateResolution.value), ), desc.ChoiceParam( name='storageDataType', label='Storage Data Type', description='Storage image data type:\n' ' * float: Use full floating point (32 bits per channel)\n' ' * half: Use half float (16 bits per channel)\n' ' * halfFinite: Use half float, but clamp values to avoid non-finite values\n' ' * auto: Use half float if all values can fit, else use full float\n', value='float', values=['float', 'half', 'halfFinite', 'auto'], exclusive=True, uid=[0], ), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', description='Verbosity level (fatal, error, warning, info, debug, trace).', value='info', values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], exclusive=True, uid=[], ), ] outputs = [ desc.File( name='output', label='Output directory', description='', value=desc.Node.internalFolder, uid=[], ), ]
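# Illustrative sketch of how an 'enabled' predicate such as
#   enabled=lambda node: (not node.estimateResolution.value)
# could be evaluated against a node: 'panoramaWidth' only applies when automatic resolution
# estimation is turned off. The mock classes below are hypothetical stand-ins for node attributes.
class _MockAttr(object):
    def __init__(self, value):
        self.value = value

class _MockNode(object):
    def __init__(self, estimate_resolution):
        self.estimateResolution = _MockAttr(estimate_resolution)

panorama_width_enabled = lambda node: (not node.estimateResolution.value)

# panorama_width_enabled(_MockNode(True))  -> False  (width is estimated automatically)
# panorama_width_enabled(_MockNode(False)) -> True   (the manual panoramaWidth value is used)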
class StructureFromMotion(desc.CommandLineNode): commandLine = 'aliceVision_incrementalSfM {allParams}' size = desc.DynamicNodeSize('input') inputs = [ desc.File( name='input', label='Input', description='SfMData file.', value='', uid=[0], ), desc.ListAttribute( elementDesc=desc.File( name="featuresFolder", label="Features Folder", description="", value="", uid=[0], ), name="featuresFolders", label="Features Folders", description="Folder(s) containing the extracted features and descriptors." ), desc.ListAttribute( elementDesc=desc.File( name="matchesFolder", label="Matches Folder", description="", value="", uid=[0], ), name="matchesFolders", label="Matches Folders", description="Folder(s) in which computed matches are stored." ), desc.ChoiceParam( name='describerTypes', label='Describer Types', description='Describer types used to describe an image.', value=['sift'], values=['sift', 'sift_float', 'sift_upright', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'], exclusive=False, uid=[0], joinChar=',', ), desc.ChoiceParam( name='localizerEstimator', label='Localizer Estimator', description='Estimator type used to localize cameras (acransac, ransac, lsmeds, loransac, maxconsensus).', value='acransac', values=['acransac', 'ransac', 'lsmeds', 'loransac', 'maxconsensus'], exclusive=True, uid=[0], ), desc.BoolParam( name='lockScenePreviouslyReconstructed', label='Lock Scene Previously Reconstructed', description='This option is useful for SfM augmentation. Lock previously reconstructed poses and intrinsics.', value=False, uid=[0], ), desc.BoolParam( name='useLocalBA', label='Local Bundle Adjustment', description='It reduces the reconstruction time, especially for large datasets (500+ images),\n' 'by avoiding computation of the Bundle Adjustment on areas that are not changing.', value=True, uid=[0], ), desc.IntParam( name='localBAGraphDistance', label='LocalBA Graph Distance', description='Graph-distance limit to define the Active region in the Local Bundle Adjustment strategy.', value=1, range=(2, 10, 1), uid=[0], ), desc.IntParam( name='maxNumberOfMatches', label='Maximum Number of Matches', description='Maximum number of matches per image pair (and per feature type). \n' 'This can be useful to have a quick reconstruction overview. 
\n' '0 means no limit.', value=0, range=(0, 50000, 1), uid=[0], ), desc.IntParam( name='minInputTrackLength', label='Min Input Track Length', description='Minimum track length in input of SfM', value=2, range=(2, 10, 1), uid=[0], ), desc.IntParam( name='minNumberOfObservationsForTriangulation', label='Min Observation For Triangulation', description='Minimum number of observations to triangulate a point.\n' 'Set it to 3 (or more) reduces drastically the noise in the point cloud,\n' 'but the number of final poses is a little bit reduced\n' '(from 1.5% to 11% on the tested datasets).', value=2, range=(2, 10, 1), uid=[0], ), desc.FloatParam( name='minAngleForTriangulation', label='Min Angle For Triangulation', description='Minimum angle for triangulation.', value=3.0, range=(0.1, 10, 0.1), uid=[0], ), desc.FloatParam( name='minAngleForLandmark', label='Min Angle For Landmark', description='Minimum angle for landmark.', value=2.0, range=(0.1, 10, 0.1), uid=[0], ), desc.FloatParam( name='maxReprojectionError', label='Max Reprojection Error', description='Maximum reprojection error.', value=4.0, range=(0.1, 10, 0.1), uid=[0], ), desc.FloatParam( name='minAngleInitialPair', label='Min Angle Initial Pair', description='Minimum angle for the initial pair.', value=5.0, range=(0.1, 10, 0.1), uid=[0], ), desc.FloatParam( name='maxAngleInitialPair', label='Max Angle Initial Pair', description='Maximum angle for the initial pair.', value=40.0, range=(0.1, 60, 0.1), uid=[0], ), desc.BoolParam( name='useOnlyMatchesFromInputFolder', label='Use Only Matches From Input Folder', description='Use only matches from the input matchesFolder parameter.\n' 'Matches folders previously added to the SfMData file will be ignored.', value=False, uid=[], ), desc.File( name='initialPairA', label='Initial Pair A', description='Filename of the first image (without path).', value='', uid=[0], ), desc.File( name='initialPairB', label='Initial Pair B', description='Filename of the second image (without path).', value='', uid=[0], ), desc.ChoiceParam( name='interFileExtension', label='Inter File Extension', description='Extension of the intermediate file export.', value='.abc', values=('.abc', '.ply'), exclusive=True, uid=[], ), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', description='Verbosity level (fatal, error, warning, info, debug, trace).', value='info', values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], exclusive=True, uid=[], ) ] outputs = [ desc.File( name='output', label='Output SfMData File', description='Path to the output sfmdata file', value=desc.Node.internalFolder + 'sfm.abc', uid=[], ), desc.File( name='outputViewsAndPoses', label='Output SfMData File', description='''Path to the output sfmdata file with cameras (views and poses).''', value=desc.Node.internalFolder + 'cameras.sfm', uid=[], ), desc.File( name='extraInfoFolder', label='Output Folder', description='Folder for intermediate reconstruction files and additional reconstruction information files.', value=desc.Node.internalFolder, uid=[], ), ] @staticmethod def getViewsAndPoses(node): """ Parse SfM result and return views and poses as two dict with viewId and poseId as keys. """ reportFile = node.outputViewsAndPoses.value if not os.path.exists(reportFile): return {}, {} with open(reportFile) as jsonFile: report = json.load(jsonFile) views = dict() poses = dict() for view in report['views']: views[view['viewId']] = view for pose in report['poses']: poses[pose['poseId']] = pose['pose'] return views, poses
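# Minimal example of the structure getViewsAndPoses() expects in 'cameras.sfm'. The field
# values are illustrative only; real files carry many more view, intrinsic and pose fields.
example_cameras_sfm = {
    "views": [{"viewId": "1001", "poseId": "1001", "path": "/images/IMG_0001.jpg"}],
    "poses": [{"poseId": "1001", "pose": {"transform": {"rotation": [], "center": []}}}],
}

def views_and_poses(report):
    """Same indexing as StructureFromMotion.getViewsAndPoses, on an already-loaded dict."""
    views = {view['viewId']: view for view in report['views']}
    poses = {pose['poseId']: pose['pose'] for pose in report['poses']}
    return views, poses

# views, poses = views_and_poses(example_cameras_sfm)
# poses['1001'] -> the 'pose' entry associated with that poseId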
class DepthMap(desc.CommandLineNode): commandLine = 'aliceVision_depthMapEstimation {allParams}' gpu = desc.Level.INTENSIVE size = desc.DynamicNodeSize('input') parallelization = desc.Parallelization(blockSize=3) commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' category = 'Dense Reconstruction' documentation = ''' For each camera that have been estimated by the Structure-From-Motion, it estimates the depth value per pixel. Adjust the downscale factor to compute depth maps at a higher/lower resolution. Use a downscale factor of one (full-resolution) only if the quality of the input images is really high (camera on a tripod with high-quality optics). ## Online [https://alicevision.org/#photogrammetry/depth_maps_estimation](https://alicevision.org/#photogrammetry/depth_maps_estimation) ''' inputs = [ desc.File( name='input', label='SfMData', description='SfMData file.', value='', uid=[0], ), desc.File( name='imagesFolder', label='Images Folder', description= 'Use images from a specific folder instead of those specify in the SfMData file.\nFilename should be the image uid.', value='', uid=[0], ), desc.ChoiceParam( name='downscale', label='Downscale', description='Image downscale factor.', value=2, values=[1, 2, 4, 8, 16], exclusive=True, uid=[0], ), desc.FloatParam( name='minViewAngle', label='Min View Angle', description='Minimum angle between two views.', value=2.0, range=(0.0, 10.0, 0.1), uid=[0], advanced=True, ), desc.FloatParam( name='maxViewAngle', label='Max View Angle', description='Maximum angle between two views.', value=70.0, range=(10.0, 120.0, 1), uid=[0], advanced=True, ), desc.IntParam( name='sgmMaxTCams', label='SGM: Nb Neighbour Cameras', description='Semi Global Matching: Number of neighbour cameras.', value=10, range=(1, 100, 1), uid=[0], ), desc.IntParam( name='sgmWSH', label='SGM: WSH', description= 'Semi Global Matching: Half-size of the patch used to compute the similarity.', value=4, range=(1, 20, 1), uid=[0], advanced=True, ), desc.FloatParam( name='sgmGammaC', label='SGM: GammaC', description='Semi Global Matching: GammaC Threshold.', value=5.5, range=(0.0, 30.0, 0.5), uid=[0], advanced=True, ), desc.FloatParam( name='sgmGammaP', label='SGM: GammaP', description='Semi Global Matching: GammaP Threshold.', value=8.0, range=(0.0, 30.0, 0.5), uid=[0], advanced=True, ), desc.IntParam( name='refineMaxTCams', label='Refine: Nb Neighbour Cameras', description='Refine: Number of neighbour cameras.', value=6, range=(1, 20, 1), uid=[0], ), desc.IntParam( name='refineNSamplesHalf', label='Refine: Number of Samples', description='Refine: Number of samples.', value=150, range=(1, 500, 10), uid=[0], advanced=True, ), desc.IntParam( name='refineNDepthsToRefine', label='Refine: Number of Depths', description='Refine: Number of depths.', value=31, range=(1, 100, 1), uid=[0], advanced=True, ), desc.IntParam( name='refineNiters', label='Refine: Number of Iterations', description='Refine:: Number of iterations.', value=100, range=(1, 500, 10), uid=[0], advanced=True, ), desc.IntParam( name='refineWSH', label='Refine: WSH', description= 'Refine: Half-size of the patch used to compute the similarity.', value=3, range=(1, 20, 1), uid=[0], advanced=True, ), desc.FloatParam( name='refineSigma', label='Refine: Sigma', description='Refine: Sigma Threshold.', value=15, range=(0.0, 30.0, 0.5), uid=[0], advanced=True, ), desc.FloatParam( name='refineGammaC', label='Refine: GammaC', description='Refine: GammaC Threshold.', value=15.5, range=(0.0, 30.0, 0.5), uid=[0], 
advanced=True, ), desc.FloatParam( name='refineGammaP', label='Refine: GammaP', description='Refine: GammaP threshold.', value=8.0, range=(0.0, 30.0, 0.5), uid=[0], advanced=True, ), desc.BoolParam( name='refineUseTcOrRcPixSize', label='Refine: Tc or Rc pixel size', description= 'Refine: Use minimum pixel size of neighbour cameras (Tc) or current camera pixel size (Rc)', value=False, uid=[0], advanced=True, ), desc.BoolParam( name='exportIntermediateResults', label='Export Intermediate Results', description= 'Export intermediate results from the SGM and Refine steps.', value=False, uid=[], advanced=True, ), desc.IntParam( name='nbGPUs', label='Number of GPUs', description= 'Number of GPUs to use (0 means use all available GPUs).', value=0, range=(0, 5, 1), uid=[], advanced=True, ), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', description= '''verbosity level (fatal, error, warning, info, debug, trace).''', value='info', values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], exclusive=True, uid=[], ), ] outputs = [ desc.File( name='output', label='Output', description='Output folder for generated depth maps.', value=desc.Node.internalFolder, uid=[], ), ]
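# Back-of-envelope helper for the 'downscale' parameter above: a downscale of 2 on a
# 6000x4000 image yields 3000x2000 depth maps, i.e. one quarter of the pixels to estimate.
# Purely illustrative arithmetic, not part of aliceVision_depthMapEstimation.
def depth_map_size(width, height, downscale=2):
    return width // downscale, height // downscale

# depth_map_size(6000, 4000, downscale=2) -> (3000, 2000)
# depth_map_size(6000, 4000, downscale=4) -> (1500, 1000)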
class SfMAlignment(desc.CommandLineNode): commandLine = 'aliceVision_utils_sfmAlignment {allParams}' size = desc.DynamicNodeSize('input') inputs = [ desc.File( name='input', label='Input', description='''SfMData file .''', value='', uid=[0], ), desc.File( name='reference', label='Reference', description= '''Path to the scene used as the reference coordinate system.''', value='', uid=[0], ), desc.ChoiceParam( name='method', label='Alignment Method', description="Alignment Method:\n" " * from_cameras_viewid: Align cameras with same view Id\n" " * from_cameras_poseid: Align cameras with same pose Id\n" " * from_cameras_filepath: Align cameras with a filepath matching, using 'fileMatchingPattern'\n" " * from_cameras_metadata: Align cameras with matching metadata, using 'metadataMatchingList'\n" " * from_markers: Align from markers with the same Id\n", value='from_cameras_viewid', values=[ 'from_cameras_viewid', 'from_cameras_poseid', 'from_cameras_filepath', 'from_cameras_metadata', 'from_markers' ], exclusive=True, uid=[0], ), desc.StringParam( name='fileMatchingPattern', label='File Matching Pattern', description= 'Matching regular expression for the "from_cameras_filepath" method. ' 'You should capture specific parts of the filepath with parenthesis to define matching elements.\n' 'Some examples of patterns:\n' ' - Match the filename without extension (default value): ".*\/(.*?)\.\w{3}"\n' ' - Match the filename suffix after "_": ".*\/.*(_.*?\.\w{3})"\n' ' - Match the filename prefix before "_": ".*\/(.*?)_.*\.\w{3}"\n', value='.*\/(.*?)\.\w{3}', uid=[0], ), desc.ListAttribute( elementDesc=desc.File( name="metadataMatching", label="Metadata", description="", value="", uid=[0], ), name="metadataMatchingList", label="Metadata Matching List", description= 'List of metadata that should match to create the correspondences. If the list is empty, the default value will be used: ["Make", "Model", "Exif:BodySerialNumber", "Exif:LensSerialNumber"].', ), desc.BoolParam(name='applyScale', label='Scale', description='Apply scale transformation.', value=True, uid=[0]), desc.BoolParam(name='applyRotation', label='Rotation', description='Apply rotation transformation.', value=True, uid=[0]), desc.BoolParam(name='applyTranslation', label='Translation', description='Apply translation transformation.', value=True, uid=[0]), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', description= '''verbosity level (fatal, error, warning, info, debug, trace).''', value='info', values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], exclusive=True, uid=[], ), ] outputs = [ desc.File( name='output', label='Output', description='''Aligned SfMData file .''', value=desc.Node.internalFolder + 'alignedSfM.abc', uid=[], ), ]
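# Runnable illustration of the 'fileMatchingPattern' examples documented above: the default
# pattern ".*\/(.*?)\.\w{3}" captures the filename without its 3-character extension.
# The sample paths are made up.
import re

default_pattern = r'.*\/(.*?)\.\w{3}'
m = re.match(default_pattern, '/datasets/scan_A/IMG_0001.jpg')
# m.group(1) -> 'IMG_0001'

suffix_pattern = r'.*\/.*(_.*?\.\w{3})'   # match the filename suffix after '_'
m2 = re.match(suffix_pattern, '/datasets/scan_A/IMG_0001.jpg')
# m2.group(1) -> '_0001.jpg'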
class CameraRigLocalization(desc.CommandLineNode): commandLine = 'aliceVision_rigLocalization {allParams}' inputs = [ desc.File( name='sfmdata', label='Sfm Data', description='''The sfmData file.''', value='', uid=[0], ), desc.File( name='mediapath', label='Media Path', description= '''The path to the video file, the folder of the image sequence or a text file (one image path per line) for each camera of the rig (eg. --mediapath /path/to/cam1.mov /path/to/cam2.mov).''', value='', uid=[0], ), desc.File( name='calibration', label='Rig Calibration File', description= '''The file containing the calibration data for the rig (subposes)''', value='', uid=[0], ), desc.File( name='cameraIntrinsics', label='Camera Intrinsics', description= '''The intrinsics calibration file for each camera of the rig. (eg. --cameraIntrinsics /path/to/calib1.txt /path/to/calib2.txt).''', value='', uid=[0], ), desc.File( name='descriptorPath', label='Descriptor Path', description='''Folder containing the .desc.''', value='', uid=[0], ), desc.ChoiceParam( name='matchDescTypes', label='Match Describer Types', description='''The describer types to use for the matching''', value=['sift'], values=[ 'sift', 'sift_float', 'sift_upright', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv' ], exclusive=False, uid=[0], joinChar=',', ), desc.ChoiceParam( name='preset', label='Preset', description= '''Preset for the feature extractor when localizing a new image (low, medium, normal, high, ultra)''', value='normal', values=['low', 'medium', 'normal', 'high', 'ultra'], exclusive=True, uid=[0], ), desc.ChoiceParam( name='resectionEstimator', label='Resection Estimator', description= '''The type of *sac framework to use for resection (acransac, loransac)''', value='acransac', values=['acransac', 'loransac'], exclusive=True, uid=[0], ), desc.ChoiceParam( name='matchingEstimator', label='Matching Estimator', description= '''The type of *sac framework to use for matching (acransac, loransac)''', value='acransac', values=['acransac', 'loransac'], exclusive=True, uid=[0], ), desc.StringParam( name='refineIntrinsics', label='Refine Intrinsics', description= '''Enable/Disable camera intrinsics refinement for each localized image''', value='', uid=[0], ), desc.FloatParam( name='reprojectionError', label='Reprojection Error', description= '''Maximum reprojection error (in pixels) allowed for resectioning. If set to 0 it lets the ACRansac select an optimal value.''', value=4.0, range=(0.0, 10.0, 0.1), uid=[0], ), desc.BoolParam( name='useLocalizeRigNaive', label='Use Localize Rig Naive', description= '''Enable/Disable the naive method for rig localization: naive method tries to localize each camera separately.''', value=False, uid=[0], ), desc.FloatParam( name='angularThreshold', label='Angular Threshold', description= '''The maximum angular threshold in degrees between feature bearing vector and 3D point direction. 
Used only with the opengv method.''', value=0.1, range=(0.0, 10.0, 0.01), uid=[0], ), desc.File( name='voctree', label='Voctree', description='''[voctree] Filename for the vocabulary tree''', value=os.environ.get('ALICEVISION_VOCTREE', ''), uid=[0], ), desc.File( name='voctreeWeights', label='Voctree Weights', description= '''[voctree] Filename for the vocabulary tree weights''', value='', uid=[0], ), desc.ChoiceParam( name='algorithm', label='Algorithm', description='''[voctree] Algorithm type: {FirstBest,AllResults}''', value='AllResults', values=['FirstBest', 'AllResults'], exclusive=True, uid=[0], ), desc.IntParam( name='nbImageMatch', label='Nb Image Match', description= '''[voctree] Number of images to retrieve in the database''', value=4, range=(0, 100, 1), uid=[0], ), desc.IntParam( name='maxResults', label='Max Results', description= '''[voctree] For algorithm AllResults, it stops the image matching when this number of matched images is reached. If 0 it is ignored.''', value=10, range=(0, 100, 1), uid=[0], ), desc.FloatParam( name='matchingError', label='Matching Error', description= '''[voctree] Maximum matching error (in pixels) allowed for image matching with geometric verification. If set to 0 it lets the ACRansac select an optimal value.''', value=4.0, range=(0.0, 10.0, 0.1), uid=[0], ), desc.IntParam( name='nNearestKeyFrames', label='N Nearest Key Frames', description='''[cctag] Number of images to retrieve in database''', value=5, range=(0, 50, 1), uid=[0], ), ] outputs = [ desc.File( name='outputAlembic', label='Output Alembic', description= '''Filename for the SfMData export file (where camera poses will be stored).''', value=desc.Node.internalFolder + 'trackedcameras.abc', uid=[], ), ]
class DepthMapFilter(desc.CommandLineNode): commandLine = 'aliceVision_depthMapFiltering {allParams}' gpu = desc.Level.NORMAL size = desc.DynamicNodeSize('input') parallelization = desc.Parallelization(blockSize=10) commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' documentation = ''' Filter depth map values that are not coherent in multiple depth maps. This allows to filter unstable points before starting the fusion of all depth maps in the Meshing node. ''' inputs = [ desc.File( name='input', label='SfMData', description='SfMData file.', value='', uid=[0], ), desc.File( name="depthMapsFolder", label="DepthMaps Folder", description="Input depth maps folder", value="", uid=[0], ), desc.FloatParam( name='minViewAngle', label='Min View Angle', description='Minimum angle between two views.', value=2.0, range=(0.0, 10.0, 0.1), uid=[0], advanced=True, ), desc.FloatParam( name='maxViewAngle', label='Max View Angle', description='Maximum angle between two views.', value=70.0, range=(10.0, 120.0, 1), uid=[0], advanced=True, ), desc.IntParam( name="nNearestCams", label="Number of Nearest Cameras", description="Number of nearest cameras used for filtering.", value=10, range=(0, 20, 1), uid=[0], advanced=True, ), desc.IntParam( name="minNumOfConsistentCams", label="Min Consistent Cameras", description="Min Number of Consistent Cameras", value=3, range=(0, 10, 1), uid=[0], ), desc.IntParam( name="minNumOfConsistentCamsWithLowSimilarity", label="Min Consistent Cameras Bad Similarity", description= "Min Number of Consistent Cameras for pixels with weak similarity value", value=4, range=(0, 10, 1), uid=[0], ), desc.IntParam( name="pixSizeBall", label="Filtering Size in Pixels", description="Filtering size in pixels", value=0, range=(0, 10, 1), uid=[0], advanced=True, ), desc.IntParam( name="pixSizeBallWithLowSimilarity", label="Filtering Size in Pixels Bad Similarity", description="Filtering size in pixels", value=0, range=(0, 10, 1), uid=[0], advanced=True, ), desc.BoolParam( name='computeNormalMaps', label='Compute Normal Maps', description='Compute normal maps per depth map.', value=False, uid=[0], advanced=True, ), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', description= '''verbosity level (fatal, error, warning, info, debug, trace).''', value='info', values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], exclusive=True, uid=[], ), ] outputs = [ desc.File( name='output', label='Filtered DepthMaps Folder', description='Output folder for generated depth maps.', value=desc.Node.internalFolder, uid=[], ), ]
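# Conceptual toy version of the consistency test behind 'minNumOfConsistentCams': a depth
# value is kept only if enough neighbouring cameras agree with it within some tolerance.
# This is NOT AliceVision's implementation; the tolerance and sample values are made up.
def keep_depth(depth, neighbour_depths, min_consistent_cams=3, tolerance=0.05):
    consistent = sum(1 for d in neighbour_depths if abs(d - depth) <= tolerance * depth)
    return consistent >= min_consistent_cams

# keep_depth(2.0, [1.98, 2.02, 2.05, 3.4]) -> True   (three neighbours agree within 5%)
# keep_depth(2.0, [1.5, 2.6, 3.4])         -> False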
class PanoramaEstimation(desc.CommandLineNode): commandLine = 'aliceVision_panoramaEstimation {allParams}' size = desc.DynamicNodeSize('input') documentation = ''' Estimate relative camera rotations between input images. ''' inputs = [ desc.File( name='input', label='Input', description="SfM Data File", value='', uid=[0], ), desc.ListAttribute( elementDesc=desc.File( name='featuresFolder', label='Features Folder', description="", value='', uid=[0], ), name='featuresFolders', label='Features Folders', description="Folder(s) containing the extracted features." ), desc.ListAttribute( elementDesc=desc.File( name='matchesFolder', label='Matches Folder', description="", value='', uid=[0], ), name='matchesFolders', label='Matches Folders', description="Folder(s) in which computed matches are stored." ), desc.ChoiceParam( name='describerTypes', label='Describer Types', description='Describer types used to describe an image.', value=['sift'], values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'], exclusive=False, uid=[0], joinChar=',', ), desc.FloatParam( name='offsetLongitude', label='Longitude offset (deg.)', description='''Offset to the panorama longitude''', value=0.0, range=(-180.0, 180.0, 1.0), uid=[0], ), desc.FloatParam( name='offsetLatitude', label='Latitude offset (deg.)', description='''Offset to the panorama latitude''', value=0.0, range=(-90.0, 90.0, 1.0), uid=[0], ), desc.ChoiceParam( name='rotationAveraging', label='Rotation Averaging Method', description="Method for rotation averaging :\n" " * L1 minimization\n" " * L2 minimization\n", values=['L1_minimization', 'L2_minimization'], value='L2_minimization', exclusive=True, uid=[0], advanced=True, ), desc.ChoiceParam( name='relativeRotation', label='Relative Rotation Method', description="Method for relative rotation :\n" " * from essential matrix\n" " * from homography matrix\n" " * from rotation matrix", values=['essential_matrix', 'homography_matrix', 'rotation_matrix'], value='rotation_matrix', exclusive=True, uid=[0], advanced=True, ), desc.BoolParam( name='refine', label='Refine', description='Refine camera relative poses, points and optionally internal camera parameter', value=True, uid=[0], ), desc.BoolParam( name='lockAllIntrinsics', label='Force Lock of All Intrinsics', description='Force to keep constant all the intrinsics parameters of the cameras (focal length, \n' 'principal point, distortion if any) during the reconstruction.\n' 'This may be helpful if the input cameras are already fully calibrated.', value=False, uid=[0], ), desc.FloatParam( name='maxAngleToPrior', label='Max Angle To Priors (deg.)', description='''Maximal angle allowed regarding the input prior (in degrees).''', value=20.0, range=(0.0, 360.0, 1.0), uid=[0], advanced=True, ), desc.FloatParam( name='maxAngularError', label='Max Angular Error (deg.)', description='''Maximal angular error in global rotation averging (in degrees).''', value=100.0, range=(0.0, 360.0, 1.0), uid=[0], advanced=True, ), desc.BoolParam( name='intermediateRefineWithFocal', label='Intermediate Refine: Focal', description='Intermediate refine with rotation and focal length only.', value=False, uid=[0], advanced=True, ), desc.BoolParam( name='intermediateRefineWithFocalDist', label='Intermediate Refine: Focal And Distortion', description='Intermediate refine with rotation, focal length and distortion.', value=False, uid=[0], advanced=True, ), desc.ChoiceParam( name='verboseLevel', label='Verbose 
Level', description='Verbosity level (fatal, error, warning, info, debug, trace).', value='info', values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], exclusive=True, uid=[], ), ] outputs = [ desc.File( name='output', label='Output SfMData File', description='Path to the output sfmdata file', value=desc.Node.internalFolder + 'panorama.abc', uid=[], ), desc.File( name='outputViewsAndPoses', label='Output Poses', description='''Path to the output sfmdata file with cameras (views and poses).''', value=desc.Node.internalFolder + 'cameras.sfm', uid=[], ), ]
class CameraInit(desc.CommandLineNode): commandLine = 'aliceVision_cameraInit {allParams} --allowSingleView 1' # don't throw an error if there is only one image size = desc.DynamicNodeSize('viewpoints') documentation = ''' This node describes your dataset. It lists the Viewpoints candidates, the guess about the type of optic, the initial focal length and which images are sharing the same internal camera parameters, as well as potential cameras rigs. When you import new images into Meshroom, this node is automatically configured from the analysis of the image metadata. The software can support images without any metadata but it is recommended to have them for robustness. ### Metadata Metadata allows images to be grouped together and provides an initialization of the focal length (in pixel unit). The metadata needed are: * **Focal Length**: the focal length in mm. * **Make** & **Model**: this information allows to convert the focal in mm into a focal length in pixel using an embedded sensor database. * **Serial Number**: allows to uniquely identify a device so multiple devices with the same Make, Model can be differentiated and their internal parameters are optimized separately (in the photogrammetry case). ''' inputs = [ desc.ListAttribute( name="viewpoints", elementDesc=desc.GroupAttribute(name="viewpoint", label="Viewpoint", description="", groupDesc=Viewpoint), label="Viewpoints", description="Input viewpoints", group="", ), desc.ListAttribute( name="intrinsics", elementDesc=desc.GroupAttribute(name="intrinsic", label="Intrinsic", description="", groupDesc=Intrinsic), label="Intrinsics", description="Camera Intrinsics", group="", ), desc.File( name='sensorDatabase', label='Sensor Database', description='''Camera sensor width database path.''', value=os.environ.get('ALICEVISION_SENSOR_DB', ''), uid=[], ), desc.FloatParam( name='defaultFieldOfView', label='Default Field Of View', description='Empirical value for the field of view in degree.', value=45.0, range=(0, 180.0, 1), uid=[], advanced=True, ), desc.ChoiceParam( name='groupCameraFallback', label='Group Camera Fallback', description= "If there is no serial number in image metadata, devices cannot be accurately identified.\n" "Therefore, internal camera parameters cannot be shared among images reliably.\n" "A fallback grouping strategy must be chosen:\n" " * global: group images from comparable devices (same make/model/focal) globally.\n" " * folder: group images from comparable devices only within the same folder.\n" " * image: never group images from comparable devices", values=['global', 'folder', 'image'], value='folder', exclusive=True, uid=[], advanced=True, ), desc.ChoiceParam( name='allowedCameraModels', label='Allowed Camera Models', description='the Camera Models that can be attributed.', value=[ 'pinhole', 'radial1', 'radial3', 'brown', 'fisheye4', 'fisheye1' ], values=[ 'pinhole', 'radial1', 'radial3', 'brown', 'fisheye4', 'fisheye1' ], exclusive=False, uid=[], joinChar=',', advanced=True, ), desc.BoolParam( name='useInternalWhiteBalance', label='Apply internal white balance', description='Apply image white balance (Only for raw images)', value=True, uid=[0], ), desc.ChoiceParam( name='viewIdMethod', label='ViewId Method', description="Allows to choose the way the viewID is generated:\n" " * metadata : Generate viewId from image metadata.\n" " * filename : Generate viewId from file names using regex.", value='metadata', values=['metadata', 'filename'], exclusive=True, uid=[], advanced=True, ), desc.StringParam( 
name='viewIdRegex', label='ViewId Regex', description='Regex used to catch number used as viewId in filename.' 'You should capture specific parts of the filename with parenthesis to define matching elements. (only number will works)\n' 'Some examples of patterns:\n' ' - Match the longest number at the end of filename (default value): ".*?(\d+)"\n' ' - Match the first number found in filename : "(\d+).*"\n', value='.*?(\d+)', uid=[], advanced=True, enabled=lambda node: node.viewIdMethod.value == 'filename', ), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', description= '''verbosity level (fatal, error, warning, info, debug, trace).''', value='info', values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], exclusive=True, uid=[], ), ] outputs = [ desc.File( name='output', label='SfMData', description='''Output SfMData.''', value=desc.Node.internalFolder + 'cameraInit.sfm', uid=[], ), ] def readSfMData(self, sfmFile): return readSfMData(sfmFile) def buildIntrinsics(self, node, additionalViews=()): """ Build intrinsics from node current views and optional additional views Args: node: the CameraInit node instance to build intrinsics for additionalViews: (optional) the new views (list of path to images) to add to the node's viewpoints Returns: The updated views and intrinsics as two separate lists """ assert isinstance(node.nodeDesc, CameraInit) if node.graph: # make a copy of the node outside the graph # to change its cache folder without modifying the original node node = node.graph.copyNode(node)[0] tmpCache = tempfile.mkdtemp() node.updateInternals(tmpCache) try: os.makedirs(os.path.join(tmpCache, node.internalFolder)) self.createViewpointsFile(node, additionalViews) cmd = self.buildCommandLine(node.chunks[0]) logging.debug(' - commandLine: {}'.format(cmd)) proc = psutil.Popen(cmd, stdout=None, stderr=None, shell=True) stdout, stderr = proc.communicate() # proc.wait() if proc.returncode != 0: raise RuntimeError( 'CameraInit failed with error code {}.\nCommand was: "{}".\n' .format(proc.returncode, cmd)) # Reload result of aliceVision_cameraInit cameraInitSfM = node.output.value return readSfMData(cameraInitSfM) except Exception as e: logging.debug( "[CameraInit] Error while building intrinsics: {}".format( str(e))) raise finally: if os.path.exists(tmpCache): logging.debug( "[CameraInit] Remove temp files in: {}".format(tmpCache)) shutil.rmtree(tmpCache) def createViewpointsFile(self, node, additionalViews=()): node.viewpointsFile = "" if node.viewpoints or additionalViews: newViews = [] for path in additionalViews: # format additional views to match json format newViews.append({"path": path}) intrinsics = node.intrinsics.getPrimitiveValue(exportDefault=True) for intrinsic in intrinsics: intrinsic['principalPoint'] = [ intrinsic['principalPoint']['x'], intrinsic['principalPoint']['y'] ] views = node.viewpoints.getPrimitiveValue(exportDefault=False) # convert the metadata string into a map for view in views: if 'metadata' in view: view['metadata'] = json.loads(view['metadata']) sfmData = { "version": [1, 0, 0], "views": views + newViews, "intrinsics": intrinsics, "featureFolder": "", "matchingFolder": "", } node.viewpointsFile = (node.nodeDesc.internalFolder + '/viewpoints.sfm').format(**node._cmdVars) with open(node.viewpointsFile, 'w') as f: json.dump(sfmData, f, indent=4) def buildCommandLine(self, chunk): cmd = desc.CommandLineNode.buildCommandLine(self, chunk) if chunk.node.viewpointsFile: cmd += ' --input "{}"'.format(chunk.node.viewpointsFile) return cmd def 
processChunk(self, chunk):
        self.createViewpointsFile(chunk.node)
        desc.CommandLineNode.processChunk(self, chunk)
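# Minimal example of the JSON document written by createViewpointsFile() above. The view
# entry is illustrative; real files carry full view and intrinsic descriptions.
import json

sfm_data_example = {
    "version": [1, 0, 0],
    "views": [{"path": "/images/IMG_0001.jpg"}],   # additional views are added with just a path
    "intrinsics": [],
    "featureFolder": "",
    "matchingFolder": "",
}

viewpoints_json = json.dumps(sfm_data_example, indent=4)
# This is what ends up in '<node folder>/viewpoints.sfm', and buildCommandLine() then
# appends:  --input "<node folder>/viewpoints.sfm"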
    advanced=True),
    desc.ListAttribute(
        name="distortionParams",
        elementDesc=desc.FloatParam(name="p", label="", description="", value=0.0, uid=[0], range=(-0.1, 0.1, 0.01)),
        label="Distortion Params",
        description="Distortion Parameters",
    ),
    desc.BoolParam(
        name='locked',
        label='Locked',
        description='If the camera has been calibrated, the internal camera parameters (intrinsics) can be locked. '
                    'It should improve robustness and speed up the reconstruction.',
        value=False,
        uid=[0]),
]


def readSfMData(sfmFile):
    """ Read views and intrinsics from a .sfm file.

    Args:
        sfmFile: the .sfm file containing views and intrinsics

    Returns:
        The views and intrinsics of the .sfm as two separate lists
    """
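# The body of readSfMData is not shown in this excerpt. A minimal sketch, assuming the .sfm
# file is the same JSON structure used by CameraInit above (top-level 'views' and
# 'intrinsics' arrays), could look like the following; treat it as an illustration, not the
# actual implementation.
import json

def read_sfm_views_and_intrinsics(sfm_file):
    with open(sfm_file) as f:
        data = json.load(f)
    return data.get('views', []), data.get('intrinsics', [])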
class CameraLocalization(desc.CommandLineNode): commandLine = 'aliceVision_cameraLocalization {allParams}' inputs = [ desc.File( name='sfmdata', label='SfM Data', description= '''The sfm_data.json kind of file generated by AliceVision.''', value='', uid=[0], ), desc.File( name='mediafile', label='Media File', description= '''The folder path or the filename for the media to track''', value='', uid=[0], ), desc.File( name='visualDebug', label='Visual Debug Folder', description= '''If a folder is provided it enables visual debug and saves all the debugging info in that folder''', value='', uid=[0], ), desc.File( name='descriptorPath', label='Descriptor Path', description= '''Folder containing the descriptors for all the images (ie the *.desc.)''', value='', uid=[0], ), desc.ChoiceParam( name='matchDescTypes', label='Match Desc Types', description='''Describer types to use for the matching.''', value=['sift'], values=[ 'sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv' ], exclusive=False, uid=[0], joinChar=',', ), desc.ChoiceParam( name='preset', label='Preset', description= '''Preset for the feature extractor when localizing a new image (low, medium, normal, high, ultra)''', value='normal', values=['low', 'medium', 'normal', 'high', 'ultra'], exclusive=True, uid=[0], ), desc.ChoiceParam( name='resectionEstimator', label='Resection Estimator', description= '''The type of *sac framework to use for resection (acransac, loransac)''', value='acransac', values=['acransac', 'loransac'], exclusive=True, uid=[0], ), desc.ChoiceParam( name='matchingEstimator', label='Matching Estimator', description= '''The type of *sac framework to use for matching (acransac, loransac)''', value='acransac', values=['acransac', 'loransac'], exclusive=True, uid=[0], ), desc.File( name='calibration', label='Calibration', description='''Calibration file''', value='', uid=[0], ), desc.BoolParam( name='refineIntrinsics', label='Refine Intrinsics', description= '''Enable/Disable camera intrinsics refinement for each localized image''', value=False, uid=[0], ), desc.FloatParam( name='reprojectionError', label='Reprojection Error', description= '''Maximum reprojection error (in pixels) allowed for resectioning. If set to 0 it lets the ACRansac select an optimal value.''', value=4.0, range=(0.1, 50.0, 0.1), uid=[0], ), desc.IntParam( name='nbImageMatch', label='Nb Image Match', description= '''[voctree] Number of images to retrieve in database''', value=4, range=(1, 1000, 1), uid=[0], ), desc.IntParam( name='maxResults', label='Max Results', description= '''[voctree] For algorithm AllResults, it stops the image matching when this number of matched images is reached. 
If 0 it is ignored.''', value=10, range=(1, 100, 1), uid=[0], ), desc.IntParam( name='commonviews', label='Commonviews', description= '''[voctree] Number of minimum images in which a point must be seen to be used in cluster tracking''', value=3, range=(2, 50, 1), uid=[0], ), desc.File( name='voctree', label='Voctree', description='''[voctree] Filename for the vocabulary tree''', value=os.environ.get('ALICEVISION_VOCTREE', ''), uid=[0], ), desc.File( name='voctreeWeights', label='Voctree Weights', description= '''[voctree] Filename for the vocabulary tree weights''', value='', uid=[0], ), desc.ChoiceParam( name='algorithm', label='Algorithm', description='''[voctree] Algorithm type: FirstBest, AllResults''', value='AllResults', values=['FirstBest', 'AllResults'], exclusive=True, uid=[0], ), desc.FloatParam( name='matchingError', label='Matching Error', description= '''[voctree] Maximum matching error (in pixels) allowed for image matching with geometric verification. If set to 0 it lets the ACRansac select an optimal value.''', value=4.0, range=(0.0, 50.0, 1.0), uid=[0], ), desc.IntParam( name='nbFrameBufferMatching', label='Nb Frame Buffer Matching', description= '''[voctree] Number of previous frame of the sequence to use for matching (0 = Disable)''', value=10, range=(0, 100, 1), uid=[0], ), desc.BoolParam( name='robustMatching', label='Robust Matching', description= '''[voctree] Enable/Disable the robust matching between query and database images, all putative matches will be considered.''', value=True, uid=[0], ), desc.IntParam( name='nNearestKeyFrames', label='N Nearest Key Frames', description= '''[cctag] Number of images to retrieve in the database Parameters specific for final (optional) bundle adjustment optimization of the sequence:''', value=5, range=(1, 100, 1), uid=[0], ), desc.StringParam( name='globalBundle', label='Global Bundle', description= '''[bundle adjustment] If --refineIntrinsics is not set, this option allows to run a final global bundle adjustment to refine the scene.''', value='', uid=[0], ), desc.BoolParam( name='noDistortion', label='No Distortion', description= '''[bundle adjustment] It does not take into account distortion during the BA, it consider the distortion coefficients all equal to 0''', value=False, uid=[0], ), desc.BoolParam( name='noBArefineIntrinsics', label='No BA Refine Intrinsics', description= '''[bundle adjustment] It does not refine intrinsics during BA''', value=False, uid=[0], ), desc.IntParam( name='minPointVisibility', label='Min Point Visibility', description= '''[bundle adjustment] Minimum number of observation that a point must have in order to be considered for bundle adjustment''', value=2, range=(2, 50, 1), uid=[0], ), ] outputs = [ desc.File( name='outputAlembic', label='Output Alembic', description= '''Filename for the SfMData export file (where camera poses will be stored)''', value=desc.Node.internalFolder + 'trackedCameras.abc', uid=[], ), desc.File( name='outputJSON', label='Output JSON', description='''Filename for the localization results as .json''', value=desc.Node.internalFolder + 'trackedCameras.json', uid=[], ), ]
class Meshing(desc.CommandLineNode): commandLine = 'aliceVision_meshing {allParams}' cpu = desc.Level.INTENSIVE ram = desc.Level.INTENSIVE category = 'Dense Reconstruction' documentation = ''' This node creates a dense geometric surface representation of the scene. First, it fuses all the depth maps into a global dense point cloud with an adaptive resolution. It then performs a 3D Delaunay tetrahedralization and a voting procedure is done to compute weights on cells and weights on facets connecting the cells. A Graph Cut Max-Flow is applied to optimally cut the volume. This cut represents the extracted mesh surface. ## Online [https://alicevision.org/#photogrammetry/meshing](https://alicevision.org/#photogrammetry/meshing) ''' inputs = [ desc.File( name='input', label='SfmData', description='SfMData file.', value='', uid=[0], ), desc.File( name="depthMapsFolder", label='Depth Maps Folder', description='Input depth maps folder.', value='', uid=[0], ), desc.BoolParam( name='useBoundingBox', label='Custom Bounding Box', description= 'Edit the meshing bounding box. If enabled, it takes priority over the Estimate From SfM option. Parameters can be adjusted in advanced settings.', value=False, uid=[0], group=''), desc.GroupAttribute( name="boundingBox", label="Bounding Box Settings", description="Translation, rotation and scale of the bounding box.", groupDesc=[ desc.GroupAttribute( name="bboxTranslation", label="Translation", description="Position in space.", groupDesc=[ desc.FloatParam(name="x", label="x", description="X Offset", value=0.0, uid=[0], range=(-20.0, 20.0, 0.01)), desc.FloatParam(name="y", label="y", description="Y Offset", value=0.0, uid=[0], range=(-20.0, 20.0, 0.01)), desc.FloatParam(name="z", label="z", description="Z Offset", value=0.0, uid=[0], range=(-20.0, 20.0, 0.01)) ], joinChar=","), desc.GroupAttribute( name="bboxRotation", label="Euler Rotation", description="Rotation in Euler degrees.", groupDesc=[ desc.FloatParam(name="x", label="x", description="Euler X Rotation", value=0.0, uid=[0], range=(-90.0, 90.0, 1)), desc.FloatParam(name="y", label="y", description="Euler Y Rotation", value=0.0, uid=[0], range=(-180.0, 180.0, 1)), desc.FloatParam(name="z", label="z", description="Euler Z Rotation", value=0.0, uid=[0], range=(-180.0, 180.0, 1)) ], joinChar=","), desc.GroupAttribute( name="bboxScale", label="Scale", description="Scale of the bounding box.", groupDesc=[ desc.FloatParam(name="x", label="x", description="X Scale", value=1.0, uid=[0], range=(0.0, 20.0, 0.01)), desc.FloatParam(name="y", label="y", description="Y Scale", value=1.0, uid=[0], range=(0.0, 20.0, 0.01)), desc.FloatParam(name="z", label="z", description="Z Scale", value=1.0, uid=[0], range=(0.0, 20.0, 0.01)) ], joinChar=",") ], joinChar=",", enabled=lambda node: node.useBoundingBox.value, ), desc.BoolParam( name='estimateSpaceFromSfM', label='Estimate Space From SfM', description='Estimate the 3d space from the SfM', value=True, uid=[0], advanced=True, ), desc.IntParam( name='estimateSpaceMinObservations', label='Min Observations For SfM Space Estimation', description= 'Minimum number of observations for SfM space estimation.', value=3, range=(0, 100, 1), uid=[0], advanced=True, enabled=lambda node: node.estimateSpaceFromSfM.value, ), desc.FloatParam( name='estimateSpaceMinObservationAngle', label='Min Observations Angle For SfM Space Estimation', description= 'Minimum angle between two observations for SfM space estimation.', value=10, range=(0, 120, 1), uid=[0], enabled=lambda node: 
node.estimateSpaceFromSfM.value, ), desc.IntParam( name='maxInputPoints', label='Max Input Points', description='Max input points loaded from depth map images.', value=50000000, range=(500000, 500000000, 1000), uid=[0], ), desc.IntParam( name='maxPoints', label='Max Points', description='Max points at the end of the depth maps fusion.', value=5000000, range=(100000, 10000000, 1000), uid=[0], ), desc.IntParam( name='maxPointsPerVoxel', label='Max Points Per Voxel', description='Max points per voxel', value=1000000, range=(500000, 30000000, 1000), uid=[0], advanced=True, ), desc.IntParam( name='minStep', label='Min Step', description= 'The step used to load depth values from depth maps is computed from maxInputPts. ' 'Here we define the minimal value for this step, so on small datasets we will not spend ' 'too much time at the beginning loading all depth values.', value=2, range=(1, 20, 1), uid=[0], advanced=True, ), desc.ChoiceParam( name='partitioning', label='Partitioning', description='', value='singleBlock', values=('singleBlock', 'auto'), exclusive=True, uid=[0], advanced=True, ), desc.ChoiceParam( name='repartition', label='Repartition', description='', value='multiResolution', values=('multiResolution', 'regularGrid'), exclusive=True, uid=[0], advanced=True, ), desc.FloatParam( name='angleFactor', label='angleFactor', description='angleFactor', value=15.0, range=(0.0, 200.0, 1.0), uid=[0], advanced=True, ), desc.FloatParam( name='simFactor', label='simFactor', description='simFactor', value=15.0, range=(0.0, 200.0, 1.0), uid=[0], advanced=True, ), desc.FloatParam( name='pixSizeMarginInitCoef', label='pixSizeMarginInitCoef', description='pixSizeMarginInitCoef', value=2.0, range=(0.0, 10.0, 0.1), uid=[0], advanced=True, ), desc.FloatParam( name='pixSizeMarginFinalCoef', label='pixSizeMarginFinalCoef', description='pixSizeMarginFinalCoef', value=4.0, range=(0.0, 10.0, 0.1), uid=[0], advanced=True, ), desc.FloatParam( name='voteMarginFactor', label='voteMarginFactor', description='voteMarginFactor', value=4.0, range=(0.1, 10.0, 0.1), uid=[0], advanced=True, ), desc.FloatParam( name='contributeMarginFactor', label='contributeMarginFactor', description='contributeMarginFactor', value=2.0, range=(0.0, 10.0, 0.1), uid=[0], advanced=True, ), desc.FloatParam( name='simGaussianSizeInit', label='simGaussianSizeInit', description='simGaussianSizeInit', value=10.0, range=(0.0, 50.0, 0.1), uid=[0], advanced=True, ), desc.FloatParam( name='simGaussianSize', label='simGaussianSize', description='simGaussianSize', value=10.0, range=(0.0, 50.0, 0.1), uid=[0], advanced=True, ), desc.FloatParam( name='minAngleThreshold', label='minAngleThreshold', description='minAngleThreshold', value=1.0, range=(0.0, 10.0, 0.01), uid=[0], advanced=True, ), desc.BoolParam( name='refineFuse', label='Refine Fuse', description= 'Refine depth map fusion with the new pixels size defined by angle and similarity scores.', value=True, uid=[0], advanced=True, ), desc.IntParam( name='helperPointsGridSize', label='Helper Points Grid Size', description='Grid Size for the helper points.', value=10, range=(0, 50, 1), uid=[0], advanced=True, ), desc.BoolParam( name='densify', label='Densify', description='Densify scene with helper points around vertices.', value=False, uid=[], advanced=True, group='', ), desc.IntParam( name='densifyNbFront', label='Densify: Front', description='Densify vertices: front.', value=1, range=(0, 5, 1), uid=[0], advanced=True, enabled=lambda node: node.densify.value, ), desc.IntParam( name='densifyNbBack', 
label='Densify: Back', description='Densify vertices: back.', value=1, range=(0, 5, 1), uid=[0], advanced=True, enabled=lambda node: node.densify.value, ), desc.FloatParam( name='densifyScale', label='Densify Scale', description='Scale between points used to densify the scene.', value=20.0, range=(0.0, 10.0, 0.1), uid=[0], advanced=True, enabled=lambda node: node.densify.value, ), desc.FloatParam( name='nPixelSizeBehind', label='Nb Pixel Size Behind', description= 'Number of pixel size units to vote behind the vertex as FULL status.', value=4.0, range=(0.0, 10.0, 0.1), uid=[0], advanced=True, ), desc.FloatParam( name='fullWeight', label='Full Weight', description='Weighting for full status.', value=1.0, range=(0.0, 10.0, 0.1), uid=[0], advanced=True, ), desc.BoolParam( name='voteFilteringForWeaklySupportedSurfaces', label='Weakly Supported Surface Support', description= 'Improve support of weakly supported surfaces with a tetrahedra fullness score filtering.', value=True, uid=[0], ), desc.BoolParam( name='addLandmarksToTheDensePointCloud', label='Add Landmarks To The Dense Point Cloud', description='Add SfM Landmarks to the dense point cloud.', value=False, uid=[0], advanced=True, ), desc.IntParam( name='invertTetrahedronBasedOnNeighborsNbIterations', label='Tretrahedron Neighbors Coherency Nb Iterations', description= 'Invert cells status around surface to improve smoothness. Zero to disable.', value=10, range=(0, 30, 1), uid=[0], advanced=True, ), desc.FloatParam( name='minSolidAngleRatio', label='minSolidAngleRatio', description= 'Change cells status on surface around vertices to improve smoothness using solid angle ratio between full/empty parts. Zero to disable.', value=0.2, range=(0.0, 0.5, 0.01), uid=[0], advanced=True, ), desc.IntParam( name='nbSolidAngleFilteringIterations', label='Nb Solid Angle Filtering Iterations', description= 'Filter cells status on surface around vertices to improve smoothness using solid angle ratio between full/empty parts. Zero to disable.', value=2, range=(0, 30, 1), uid=[0], advanced=True, ), desc.BoolParam( name='colorizeOutput', label='Colorize Output', description= 'Whether to colorize output dense point cloud and mesh.', value=False, uid=[0], ), desc.BoolParam( name='addMaskHelperPoints', label='Add Mask Helper Points', description= 'Add Helper points on the outline of the depth maps masks.', value=False, uid=[], advanced=True, group='', ), desc.FloatParam( name='maskHelperPointsWeight', label='Mask Helper Points Weight', description= 'Weight value for mask helper points. Zero means no helper point.', value=1.0, range=(0.0, 20.0, 1.0), uid=[0], advanced=True, enabled=lambda node: node.addMaskHelperPoints.value, ), desc.IntParam( name='maskBorderSize', label='Mask Border Size', description='How many pixels on mask borders?', value=4, range=(0, 20, 1), uid=[0], advanced=True, enabled=lambda node: node.addMaskHelperPoints.value, ), desc.IntParam( name='maxNbConnectedHelperPoints', label='Helper Points: Max Segment Size', description= 'Maximum size of a segment of connected helper points before we remove it. Small segments of helper points can be on the real surface and should not be removed to avoid the creation of holes. 0 means that we remove all helper points. 
-1 means that we do not filter helper points at all.', value=50, range=(-1, 100, 1), uid=[0], advanced=True, ), desc.BoolParam( name='saveRawDensePointCloud', label='Save Raw Dense Point Cloud', description='Save dense point cloud before cut and filtering.', value=False, uid=[], advanced=True, ), desc.BoolParam( name='exportDebugTetrahedralization', label='Export DEBUG Tetrahedralization', description= 'Export debug cells score as tetrahedral mesh.\nWARNING: Could create HUGE meshes, only use on very small datasets.', value=False, uid=[], advanced=True, ), desc.IntParam( name='seed', label='Seed', description= 'Seed used for random operations. Zero means use of random device instead of a fixed seed.', value=0, range=(0, 10000, 1), uid=[0], advanced=True, ), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', description= '''verbosity level (fatal, error, warning, info, debug, trace).''', value='info', values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], exclusive=True, uid=[], ), ] outputs = [ desc.File( name="outputMesh", label="Mesh", description="Output mesh (OBJ file format).", value="{cache}/{nodeType}/{uid0}/mesh.obj", uid=[], ), desc.File( name="output", label="Dense SfMData", description= "Output dense point cloud with visibilities (SfMData file format).", value="{cache}/{nodeType}/{uid0}/densePointCloud.abc", uid=[], ), ]
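# --- Illustrative sketch (not part of the Meshing node above) -----------------
# Several Meshing parameters ('densifyNbFront', 'densifyNbBack', 'densifyScale',
# 'maskHelperPointsWeight', ...) use `enabled=lambda node: ...` so they are only
# active when their toggle attribute is on. The stand-in classes below are
# hypothetical, minimal objects used to show how such a callable 'enabled'
# condition can be evaluated; this is not Meshroom's attribute implementation.

class _FakeAttr(object):
    def __init__(self, value):
        self.value = value

class _FakeNode(object):
    def __init__(self, **attrs):
        for name, value in attrs.items():
            setattr(self, name, _FakeAttr(value))

def _is_enabled(enabled, node):
    # 'enabled' may be a plain bool or a callable taking the node, as above.
    return enabled(node) if callable(enabled) else bool(enabled)

# Example: the densify-dependent parameters follow the 'densify' toggle.
_densify_node = _FakeNode(densify=False)
assert _is_enabled(lambda n: n.densify.value, _densify_node) is False
_densify_node.densify.value = True
assert _is_enabled(lambda n: n.densify.value, _densify_node) is True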
class LdrToHdrMerge(desc.CommandLineNode): commandLine = 'aliceVision_LdrToHdrMerge {allParams}' size = desc.DynamicNodeSize('input') parallelization = desc.Parallelization(blockSize=2) commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' documentation = ''' Calibrate LDR to HDR response curve from samples ''' inputs = [ desc.File( name='input', label='Input', description='SfMData file.', value='', uid=[0], ), desc.File( name='response', label='Response file', description='Response file', value='', uid=[0], ), desc.IntParam( name='userNbBrackets', label='Number of Brackets', description= 'Number of exposure brackets per HDR image (0 for automatic detection).', value=0, range=(0, 15, 1), uid=[], group='user', # not used directly on the command line ), desc.IntParam( name='nbBrackets', label='Automatic Nb Brackets', description= 'Number of exposure brackets used per HDR image. It is detected automatically from input Viewpoints metadata if "userNbBrackets" is 0, else it is equal to "userNbBrackets".', value=0, range=(0, 10, 1), uid=[0], ), desc.IntParam( name='offsetRefBracketIndex', label='Offset Ref Bracket Index', description= 'Zero to use the center bracket. +N to use a more exposed bracket or -N to use a less exposed backet.', value=1, range=(-4, 4, 1), uid=[0], enabled=lambda node: node.nbBrackets.value != 1, ), desc.BoolParam( name='byPass', label='Bypass', description= "Bypass HDR creation and use the medium bracket as the source for the next steps.", value=False, uid=[0], enabled=lambda node: node.nbBrackets.value != 1, ), desc.ChoiceParam( name='fusionWeight', label='Fusion Weight', description="Weight function used to fuse all LDR images together:\n" " * gaussian \n" " * triangle \n" " * plateau", value='gaussian', values=['gaussian', 'triangle', 'plateau'], exclusive=True, uid=[0], enabled=lambda node: node.byPass.enabled and not node.byPass.value, ), desc.IntParam( name='channelQuantizationPower', label='Channel Quantization Power', description='Quantization level like 8 bits or 10 bits.', value=10, range=(8, 14, 1), uid=[0], advanced=True, enabled=lambda node: node.byPass.enabled and not node.byPass.value, ), desc.FloatParam( name='highlightCorrectionFactor', label='Highlights Correction', description= 'Pixels saturated in all input images have a partial information about their real luminance.\n' 'We only know that the value should be >= to the standard hdr fusion.\n' 'This parameter allows to perform a post-processing step to put saturated pixels to a constant\n' 'value defined by the `highlightsMaxLuminance` parameter.\n' 'This parameter is float to enable to weight this correction.', value=1.0, range=(0.0, 1.0, 0.01), uid=[0], enabled=lambda node: node.byPass.enabled and not node.byPass.value, ), desc.FloatParam( name='highlightTargetLux', label='Highlight Target Luminance (Lux)', description= 'This is an arbitrary target value (in Lux) used to replace the unknown luminance value of the saturated pixels.\n' '\n' 'Some Outdoor Reference Light Levels:\n' ' * 120,000 lux: Brightest sunlight\n' ' * 110,000 lux: Bright sunlight\n' ' * 20,000 lux: Shade illuminated by entire clear blue sky, midday\n' ' * 1,000 lux: Typical overcast day, midday\n' ' * 400 lux: Sunrise or sunset on a clear day\n' ' * 40 lux: Fully overcast, sunset/sunrise\n' '\n' 'Some Indoor Reference Light Levels:\n' ' * 20000 lux: Max Usually Used Indoor\n' ' * 750 lux: Supermarkets\n' ' * 500 lux: Office Work\n' ' * 150 lux: Home\n', value=120000.0, range=(1000.0, 150000.0, 1.0), uid=[0], 
enabled=lambda node: node.byPass.enabled and not node.byPass.value and node.highlightCorrectionFactor.value != 0, ), desc.ChoiceParam( name='storageDataType', label='Storage Data Type', description='Storage image data type:\n' ' * float: Use full floating point (32 bits per channel)\n' ' * half: Use half float (16 bits per channel)\n' ' * halfFinite: Use half float, but clamp values to avoid non-finite values\n' ' * auto: Use half float if all values can fit, else use full float\n', value='float', values=['float', 'half', 'halfFinite', 'auto'], exclusive=True, uid=[0], ), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', description= 'verbosity level (fatal, error, warning, info, debug, trace).', value='info', values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], exclusive=True, uid=[], ) ] outputs = [ desc.File( name='outSfMData', label='Output SfMData File', description='Path to the output sfmdata file', value=desc.Node.internalFolder + 'sfmData.sfm', uid=[], ) ] @classmethod def update(cls, node): if not isinstance(node.nodeDesc, cls): raise ValueError("Node {} is not an instance of type {}".format( node, cls)) # TODO: use Node version for this test if 'userNbBrackets' not in node.getAttributes().keys(): # Old version of the node return if node.userNbBrackets.value != 0: node.nbBrackets.value = node.userNbBrackets.value return # logging.info("[LDRToHDR] Update start: version:" + str(node.packageVersion)) cameraInitOutput = node.input.getLinkParam(recursive=True) if not cameraInitOutput: node.nbBrackets.value = 0 return if not cameraInitOutput.node.hasAttribute('viewpoints'): if cameraInitOutput.node.hasAttribute('input'): cameraInitOutput = cameraInitOutput.node.input.getLinkParam( recursive=True) if cameraInitOutput and cameraInitOutput.node and cameraInitOutput.node.hasAttribute( 'viewpoints'): viewpoints = cameraInitOutput.node.viewpoints.value else: # No connected CameraInit node.nbBrackets.value = 0 return # logging.info("[LDRToHDR] Update start: nb viewpoints:" + str(len(viewpoints))) inputs = [] for viewpoint in viewpoints: jsonMetadata = viewpoint.metadata.value if not jsonMetadata: # no metadata, we cannot found the number of brackets node.nbBrackets.value = 0 return d = json.loads(jsonMetadata) fnumber = findMetadata( d, ["FNumber", "Exif:ApertureValue", "ApertureValue", "Aperture"], "") shutterSpeed = findMetadata(d, [ "Exif:ShutterSpeedValue", "ShutterSpeedValue", "ShutterSpeed" ], "") iso = findMetadata( d, ["Exif:ISOSpeedRatings", "ISOSpeedRatings", "ISO"], "") if not fnumber and not shutterSpeed: # If one image without shutter or fnumber, we cannot found the number of brackets. # We assume that there is no multi-bracketing, so nothing to do. 
node.nbBrackets.value = 1 return inputs.append((viewpoint.path.value, (fnumber, shutterSpeed, iso))) inputs.sort() exposureGroups = [] exposures = [] for path, exp in inputs: if exposures and exp != exposures[-1] and exp == exposures[0]: exposureGroups.append(exposures) exposures = [exp] else: exposures.append(exp) exposureGroups.append(exposures) exposures = None bracketSizes = set() if len(exposureGroups) == 1: if len(set(exposureGroups[0])) == 1: # Single exposure and multiple views node.nbBrackets.value = 1 else: # Single view and multiple exposures node.nbBrackets.value = len(exposureGroups[0]) else: for expGroup in exposureGroups: bracketSizes.add(len(expGroup)) if len(bracketSizes) == 1: node.nbBrackets.value = bracketSizes.pop() # logging.info("[LDRToHDR] nb bracket size:" + str(node.nbBrackets.value)) else: node.nbBrackets.value = 0
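# --- Illustrative sketch: the bracket counting performed in update() above ----
# Standalone rewrite of the exposure-grouping logic, kept only to make the
# algorithm easier to read. It takes (path, (fnumber, shutterSpeed, iso))
# tuples and returns the bracket count, or 0 when the grouping is ambiguous,
# mirroring how 'nbBrackets' is set. '_count_brackets' is a hypothetical helper.

def _count_brackets(inputs):
    inputs = sorted(inputs)
    exposure_groups, exposures = [], []
    for _path, exp in inputs:
        # A new group starts when the exposure cycles back to the first value.
        if exposures and exp != exposures[-1] and exp == exposures[0]:
            exposure_groups.append(exposures)
            exposures = [exp]
        else:
            exposures.append(exp)
    exposure_groups.append(exposures)
    if len(exposure_groups) == 1:
        # Either one exposure over many views, or one view with several exposures.
        return 1 if len(set(exposure_groups[0])) == 1 else len(exposure_groups[0])
    sizes = set(len(group) for group in exposure_groups)
    return sizes.pop() if len(sizes) == 1 else 0

# Example: two views shot with the same 3-bracket sequence -> 3 brackets.
assert _count_brackets([
    ('a_1.jpg', ('2.8', '1/100', '100')), ('a_2.jpg', ('2.8', '1/50', '100')),
    ('a_3.jpg', ('2.8', '1/25', '100')), ('b_1.jpg', ('2.8', '1/100', '100')),
    ('b_2.jpg', ('2.8', '1/50', '100')), ('b_3.jpg', ('2.8', '1/25', '100')),
]) == 3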
class MeshDecimate(desc.CommandLineNode): commandLine = 'aliceVision_meshDecimate {allParams}' cpu = desc.Level.NORMAL ram = desc.Level.NORMAL inputs = [ desc.File( name="input", label='Input Mesh (OBJ file format).', description='', value='', uid=[0], ), desc.FloatParam( name='simplificationFactor', label='Simplification factor', description='Simplification factor', value=0.5, range=(0.0, 1.0, 0.01), uid=[0], ), desc.IntParam( name='nbVertices', label='Fixed Number of Vertices', description='Fixed number of output vertices.', value=0, range=(0, 1000000, 1), uid=[0], ), desc.IntParam( name='minVertices', label='Min Vertices', description='Min number of output vertices.', value=0, range=(0, 1000000, 1), uid=[0], ), desc.IntParam( name='maxVertices', label='Max Vertices', description='Max number of output vertices.', value=0, range=(0, 1000000, 1), uid=[0], ), desc.BoolParam( name='flipNormals', label='Flip Normals', description='Option to flip face normals.\n' 'It can be needed as it depends on the vertices order in triangles\n' 'and the convention change from one software to another.', value=False, uid=[0], advanced=True, ), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', description= '''verbosity level (fatal, error, warning, info, debug, trace).''', value='info', values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], exclusive=True, uid=[], ), ] outputs = [ desc.File( name="output", label="Output mesh", description="Output mesh (OBJ file format).", value=desc.Node.internalFolder + 'mesh.obj', uid=[], ), ]
class SketchfabUpload(desc.Node): size = desc.DynamicNodeSize('inputFiles') documentation = ''' Upload a textured mesh on Sketchfab. ''' inputs = [ desc.ListAttribute( elementDesc=desc.File( name="input", label="Input", description="", value="", uid=[0], ), name="inputFiles", label="Input Files", description="Input Files to export.", group="", ), desc.StringParam( name='apiToken', label='API Token', description= 'Get your token from https://sketchfab.com/settings/password', value='', uid=[0], ), desc.StringParam( name='title', label='Title', description='Title cannot be longer than 48 characters.', value='', uid=[0], ), desc.StringParam( name='description', label='Description', description='Description cannot be longer than 1024 characters.', value='', uid=[0], ), desc.ChoiceParam( name='license', label='License', description='License label.', value='CC Attribution', values=[ 'CC Attribution', 'CC Attribution-ShareAlike', 'CC Attribution-NoDerivs', 'CC Attribution-NonCommercial', 'CC Attribution-NonCommercial-ShareAlike', 'CC Attribution-NonCommercial-NoDerivs' ], exclusive=True, uid=[0], ), desc.ListAttribute( elementDesc=desc.StringParam( name='tag', label='Tag', description='Tag cannot be longer than 48 characters.', value='', uid=[0], ), name="tags", label="Tags", description="Maximum of 42 separate tags.", group="", ), desc.ChoiceParam( name='category', label='Category', description= 'Adding categories helps improve the discoverability of your model.', value='none', values=[ 'none', 'animals-pets', 'architecture', 'art-abstract', 'cars-vehicles', 'characters-creatures', 'cultural-heritage-history', 'electronics-gadgets', 'fashion-style', 'food-drink', 'furniture-home', 'music', 'nature-plants', 'news-politics', 'people', 'places-travel', 'science-technology', 'sports-fitness', 'weapons-military' ], exclusive=True, uid=[0], ), desc.BoolParam( name='isPublished', label='Publish', description= 'If the model is not published it will be saved as a draft.', value=False, uid=[0], ), desc.BoolParam( name='isInspectable', label='Inspectable', description='Allow 2D view in model inspector.', value=True, uid=[0], ), desc.BoolParam( name='isPrivate', label='Private', description='Requires a pro account.', value=False, uid=[0], ), desc.StringParam( name='password', label='Password', description='Requires a pro account.', value='', uid=[0], ), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', description= '''verbosity level (critical, error, warning, info, debug).''', value='info', values=['critical', 'error', 'warning', 'info', 'debug'], exclusive=True, uid=[], ), ] def upload(self, apiToken, modelFile, data, chunk): modelEndpoint = 'https://api.sketchfab.com/v3/models' f = open(modelFile, 'rb') file = {'modelFile': (os.path.basename(modelFile), f.read())} file.update(data) f.close() (files, contentType ) = requests.packages.urllib3.filepost.encode_multipart_formdata(file) headers = { 'Authorization': 'Token {}'.format(apiToken), 'Content-Type': contentType } body = BufferReader(files, progressUpdate, cb_kwargs={'logManager': chunk.logManager}, stopped=self.stopped) chunk.logger.info('Uploading...') try: r = requests.post(modelEndpoint, **{ 'data': body, 'headers': headers }) chunk.logManager.completeProgressBar() except requests.exceptions.RequestException as e: chunk.logger.error(u'An error occured: {}'.format(e)) raise RuntimeError() if r.status_code != requests.codes.created: chunk.logger.error(u'Upload failed with error: {}'.format( r.json())) raise RuntimeError() def 
resolvedPaths(self, inputFiles): paths = [] for inputFile in inputFiles: if os.path.isdir(inputFile.value): for path, subdirs, files in os.walk(inputFile.value): for name in files: paths.append(os.path.join(path, name)) else: for f in glob.glob(inputFile.value): paths.append(f) return paths def stopped(self): return self._stopped def processChunk(self, chunk): try: self._stopped = False chunk.logManager.start(chunk.node.verboseLevel.value) uploadFile = '' if not chunk.node.inputFiles: chunk.logger.warning('Nothing to upload') return if chunk.node.apiToken.value == '': chunk.logger.error('Need API token.') raise RuntimeError() if len(chunk.node.title.value) > 48: chunk.logger.error( 'Title cannot be longer than 48 characters.') raise RuntimeError() if len(chunk.node.description.value) > 1024: chunk.logger.error( 'Description cannot be longer than 1024 characters.') raise RuntimeError() tags = [ i.value.replace(' ', '-') for i in chunk.node.tags.value.values() ] if any(len(i) > 48 for i in tags): chunk.logger.error('Tags cannot be longer than 48 characters.') raise RuntimeError() if len(tags) > 42: chunk.logger.error('Maximum of 42 separate tags.') raise RuntimeError() data = { 'name': chunk.node.title.value, 'description': chunk.node.description.value, 'license': chunk.node.license.value, 'tags': str(tags), 'isPublished': chunk.node.isPublished.value, 'isInspectable': chunk.node.isInspectable.value, 'private': chunk.node.isPrivate.value, 'password': chunk.node.password.value } if chunk.node.category.value != 'none': data.update({'categories': chunk.node.category.value}) chunk.logger.debug('Data to be sent: {}'.format(str(data))) # pack files into .zip to reduce file size and simplify process uploadFile = os.path.join(chunk.node.internalFolder, 'temp.zip') files = self.resolvedPaths(chunk.node.inputFiles.value) zf = zipfile.ZipFile(uploadFile, 'w') for file in files: zf.write(file, os.path.basename(file)) zf.close() chunk.logger.debug('Files added to zip: {}'.format(str(files))) chunk.logger.debug('Created {}'.format(uploadFile)) chunk.logger.info('File size: {}MB'.format( round(os.path.getsize(uploadFile) / (1024 * 1024), 3))) self.upload(chunk.node.apiToken.value, uploadFile, data, chunk) chunk.logger.info( 'Upload successful. Your model is being processed on Sketchfab. It may take some time to show up on your "models" page.' ) except Exception as e: chunk.logger.error(e) raise RuntimeError() finally: if os.path.isfile(uploadFile): os.remove(uploadFile) chunk.logger.debug('Deleted {}'.format(uploadFile)) chunk.logManager.end() def stopProcess(self, chunk): self._stopped = True
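# --- Illustrative sketch: a minimal Sketchfab upload without progress reporting -
# Simplified version of SketchfabUpload.upload()/processChunk() above: it zips
# the files and posts them in a single multipart request using the plain
# 'requests' API instead of the custom BufferReader used for the progress bar.
# The endpoint and the 'Authorization: Token ...' header follow the code above;
# '_upload_zip' and its arguments are hypothetical.

import os
import zipfile
import requests

def _upload_zip(api_token, file_paths, data, archive_path='temp.zip'):
    # Pack everything into one .zip, as processChunk() does, to reduce file size.
    with zipfile.ZipFile(archive_path, 'w') as zf:
        for path in file_paths:
            zf.write(path, os.path.basename(path))
    headers = {'Authorization': 'Token {}'.format(api_token)}
    with open(archive_path, 'rb') as fh:
        r = requests.post('https://api.sketchfab.com/v3/models',
                          data=data,
                          files={'modelFile': (os.path.basename(archive_path), fh)},
                          headers=headers)
    if r.status_code != requests.codes.created:
        raise RuntimeError('Upload failed: {}'.format(r.text))
    return r.json()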
class SfMTransform(desc.CommandLineNode): commandLine = 'aliceVision_utils_sfmTransform {allParams}' size = desc.DynamicNodeSize('input') documentation = ''' This node allows to change the coordinate system of one SfM scene. The transformation can be based on: * transformation: Apply a given transformation * auto_from_cameras: Fit all cameras into a box [-1,1] * auto_from_landmarks: Fit all landmarks into a box [-1,1] * from_single_camera: Use a specific camera as the origin of the coordinate system * from_markers: Align specific markers to custom coordinates ''' inputs = [ desc.File( name='input', label='Input', description='''SfMData file .''', value='', uid=[0], ), desc.ChoiceParam( name='method', label='Transformation Method', description="Transformation method:\n" " * transformation: Apply a given transformation\n" " * manual: Apply the gizmo transformation (show the transformed input)\n" " * auto_from_cameras: Use cameras\n" " * auto_from_landmarks: Use landmarks\n" " * from_single_camera: Use a specific camera as the origin of the coordinate system\n" " * from_markers: Align specific markers to custom coordinates", value='auto_from_landmarks', values=['transformation', 'manual', 'auto_from_cameras', 'auto_from_landmarks', 'from_single_camera', 'from_markers'], exclusive=True, uid=[0], ), desc.StringParam( name='transformation', label='Transformation', description="Required only for 'transformation' and 'from_single_camera' methods:\n" " * transformation: Align [X,Y,Z] to +Y-axis, rotate around Y by R deg, scale by S; syntax: X,Y,Z;R;S\n" " * from_single_camera: Camera UID or image filename", value='', uid=[0], enabled=lambda node: node.method.value == "transformation" or node.method.value == "from_single_camera", ), desc.GroupAttribute( name="manualTransform", label="Manual Transform (Gizmo)", description="Translation, rotation (Euler ZXY) and uniform scale.", groupDesc=[ desc.GroupAttribute( name="manualTranslation", label="Translation", description="Translation in space.", groupDesc=[ desc.FloatParam( name="x", label="x", description="X Offset", value=0.0, uid=[0], range=(-20.0, 20.0, 0.01) ), desc.FloatParam( name="y", label="y", description="Y Offset", value=0.0, uid=[0], range=(-20.0, 20.0, 0.01) ), desc.FloatParam( name="z", label="z", description="Z Offset", value=0.0, uid=[0], range=(-20.0, 20.0, 0.01) ) ], joinChar="," ), desc.GroupAttribute( name="manualRotation", label="Euler Rotation", description="Rotation in Euler degrees.", groupDesc=[ desc.FloatParam( name="x", label="x", description="Euler X Rotation", value=0.0, uid=[0], range=(-90.0, 90.0, 1) ), desc.FloatParam( name="y", label="y", description="Euler Y Rotation", value=0.0, uid=[0], range=(-180.0, 180.0, 1) ), desc.FloatParam( name="z", label="z", description="Euler Z Rotation", value=0.0, uid=[0], range=(-180.0, 180.0, 1) ) ], joinChar="," ), desc.FloatParam( name="manualScale", label="Scale", description="Uniform Scale.", value=1.0, uid=[0], range=(0.0, 20.0, 0.01) ) ], joinChar=",", enabled=lambda node: node.method.value == "manual", ), desc.ChoiceParam( name='landmarksDescriberTypes', label='Landmarks Describer Types', description='Image describer types used to compute the mean of the point cloud. 
(only for "landmarks" method).', value=['sift', 'dspsift', 'akaze'], values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv', 'unknown'], exclusive=False, uid=[0], joinChar=',', ), desc.FloatParam( name='scale', label='Additional Scale', description='Additional scale to apply.', value=1.0, range=(0.0, 100.0, 0.1), uid=[0], ), desc.ListAttribute( name="markers", elementDesc=desc.GroupAttribute(name="markerAlign", label="Marker Align", description="", joinChar=":", groupDesc=[ desc.IntParam(name="markerId", label="Marker", description="Marker Id", value=0, uid=[0], range=(0, 32, 1)), desc.GroupAttribute(name="markerCoord", label="Coord", description="", joinChar=",", groupDesc=[ desc.FloatParam(name="x", label="x", description="", value=0.0, uid=[0], range=(-2.0, 2.0, 1.0)), desc.FloatParam(name="y", label="y", description="", value=0.0, uid=[0], range=(-2.0, 2.0, 1.0)), desc.FloatParam(name="z", label="z", description="", value=0.0, uid=[0], range=(-2.0, 2.0, 1.0)), ]) ]), label="Markers", description="Markers alignment points", ), desc.BoolParam( name='applyScale', label='Scale', description='Apply scale transformation.', value=True, uid=[0], enabled=lambda node: node.method.value != "manual", ), desc.BoolParam( name='applyRotation', label='Rotation', description='Apply rotation transformation.', value=True, uid=[0], enabled=lambda node: node.method.value != "manual", ), desc.BoolParam( name='applyTranslation', label='Translation', description='Apply translation transformation.', value=True, uid=[0], enabled=lambda node: node.method.value != "manual", ), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', description='''verbosity level (fatal, error, warning, info, debug, trace).''', value='info', values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], exclusive=True, uid=[], ), ] outputs = [ desc.File( name='output', label='Output SfMData File', description='''Aligned SfMData file .''', value=lambda attr: desc.Node.internalFolder + (os.path.splitext(os.path.basename(attr.node.input.value))[0] or 'sfmData') + '.abc', uid=[], ), desc.File( name='outputViewsAndPoses', label='Output Poses', description='''Path to the output sfmdata file with cameras (views and poses).''', value=desc.Node.internalFolder + 'cameras.sfm', uid=[], ), ]
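# --- Illustrative sketch: building the 'transformation' string ----------------
# For method='transformation' the parameter expects the "X,Y,Z;R;S" syntax
# described above (align the given axis to +Y, rotate around Y by R degrees,
# scale by S). '_format_transformation' is a hypothetical helper showing how
# such a string can be assembled.

def _format_transformation(axis, rotation_deg, scale):
    x, y, z = axis
    return '{},{},{};{};{}'.format(x, y, z, rotation_deg, scale)

# Example: align the +Z axis to +Y, no extra rotation, keep the current scale.
assert _format_transformation((0, 0, 1), 0, 1) == '0,0,1;0;1'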
class LDRToHDR(desc.CommandLineNode): commandLine = 'aliceVision_convertLDRToHDR {allParams}' inputs = [ desc.ListAttribute( elementDesc=desc.File( name='inputFolder', label='Input File/Folder', description="Folder containing LDR images", value='', uid=[0], ), name="input", label="Input Files or Folders", description='Folders containing LDR images.', ), desc.BoolParam( name='fisheyeLens', label='Fisheye Lens', description="Enable if a fisheye lens has been used.\n " "This will improve the estimation of the Camera's Response Function by considering only the pixels in the center of the image\n" "and thus ignore undefined/noisy pixels outside the circle defined by the fisheye lens.", value=True, uid=[0], ), desc.ChoiceParam( name='calibrationMethod', label='Calibration Method', description="Method used for camera calibration \n" " * linear \n" " * robertson \n" " * debevec \n" " * grossberg", values=['linear', 'robertson', 'debevec', 'grossberg'], value='linear', exclusive=True, uid=[0], ), desc.File( name='inputResponse', label='Input Response', description="external camera response file path to fuse all LDR images together.", value='', uid=[0], ), desc.StringParam( name='targetExposureImage', label='Target Exposure Image', description="LDR image(s) name(s) at the target exposure for the output HDR image(s) to be centered.", value='', uid=[0], ), desc.ChoiceParam( name='calibrationWeight', label='Calibration Weight', description="Weight function used to calibrate camera response \n" " * default (automatically selected according to the calibrationMethod) \n" " * gaussian \n" " * triangle \n" " * plateau", value='default', values=['default', 'gaussian', 'triangle', 'plateau'], exclusive=True, uid=[0], ), desc.ChoiceParam( name='fusionWeight', label='Fusion Weight', description="Weight function used to fuse all LDR images together \n" " * gaussian \n" " * triangle \n" " * plateau", value='gaussian', values=['gaussian', 'triangle', 'plateau'], exclusive=True, uid=[0], ), desc.FloatParam( name='expandDynamicRange', label='Expand Dynamic Range', description="Correction of clamped high values in dynamic range: \n" " - use 0 for no correction \n" " - use 0.5 for interior lighting \n" " - use 1 for outdoor lighting", value=1, range=(0, 1, 0.1), uid=[0], ), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', description="Verbosity level (fatal, error, warning, info, debug, trace).", value='info', values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], exclusive=True, uid=[], ), desc.File( name='recoverPath', label='Output Recovered Files', description="(debug) Folder for recovered LDR images at target exposures.", advanced=True, value='', uid=[], ), ] outputs = [ desc.File( name='output', label='Output Folder', description="Output folder for HDR images", value=desc.Node.internalFolder, uid=[], ), desc.File( name='outputResponse', label='Output Response', description="Output response function path.", value=desc.Node.internalFolder + 'response.csv', uid=[], ), ]
class ImageProcessing(desc.CommandLineNode): commandLine = 'aliceVision_utils_imageProcessing {allParams}' size = desc.DynamicNodeSize('input') # parallelization = desc.Parallelization(blockSize=40) # commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' documentation = ''' Convert or apply filtering to the input images. ''' inputs = [ desc.File( name='input', label='Input', description= 'SfMData file input, image filenames or regex(es) on the image file path.\nsupported regex: \'#\' matches a single digit, \'@\' one or more digits, \'?\' one character and \'*\' zero or more.', value='', uid=[0], ), desc.ListAttribute( elementDesc=desc.File( name="inputFolder", label="input Folder", description="", value="", uid=[0], ), name="inputFolders", label="Images input Folders", description='Use images from specific folder(s).', ), desc.ListAttribute( elementDesc=desc.StringParam( name="metadataFolder", label="Metadata Folder", description="", value="", uid=[0], ), name="metadataFolders", label="Metadata input Folders", description='Use images metadata from specific folder(s).', ), desc.ChoiceParam( name='extension', label='Output File Extension', description='Output Image File Extension.', value='', values=['', 'exr', 'jpg', 'tiff', 'png'], exclusive=True, uid=[0], ), desc.BoolParam( name='reconstructedViewsOnly', label='Only Reconstructed Views', description='Process Only Reconstructed Views', value=False, uid=[0], ), desc.BoolParam( name='fixNonFinite', label='Fix Non-Finite', description= 'Fix non-finite pixels based on neighboring pixels average.', value=False, uid=[0], ), desc.BoolParam( name='exposureCompensation', label='Exposure Compensation', description='Exposure Compensation', value=False, uid=[0], ), desc.FloatParam( name='scaleFactor', label='ScaleFactor', description='Scale Factor.', value=1.0, range=(0.0, 1.0, 0.01), uid=[0], ), desc.FloatParam( name='contrast', label='Contrast', description='Contrast.', value=1.0, range=(0.0, 100.0, 0.1), uid=[0], ), desc.IntParam( name='medianFilter', label='Median Filter', description='Median Filter.', value=0, range=(0, 10, 1), uid=[0], ), desc.BoolParam( name='fillHoles', label='Fill Holes', description='Fill holes based on the alpha channel.\n' 'Note: It will enable fixNonFinite, as it is required for the image pyramid construction used to fill holes.', value=False, uid=[0], ), desc.GroupAttribute(name="sharpenFilter", label="Sharpen Filter", description="Sharpen Filtering Parameters.", joinChar=":", groupDesc=[ desc.BoolParam( name='sharpenFilterEnabled', label='Enable', description='Use sharpen.', value=False, uid=[0], ), desc.IntParam( name='width', label='Width', description='Sharpen Width.', value=3, range=(1, 9, 2), uid=[0], enabled=lambda node: node.sharpenFilter. sharpenFilterEnabled.value, ), desc.FloatParam( name='contrast', label='Contrast', description='Sharpen Contrast.', value=1.0, range=(0.0, 100.0, 0.1), uid=[0], enabled=lambda node: node.sharpenFilter. sharpenFilterEnabled.value, ), desc.FloatParam( name='threshold', label='Threshold', description='Sharpen Threshold.', value=0.0, range=(0.0, 1.0, 0.01), uid=[0], enabled=lambda node: node.sharpenFilter. 
sharpenFilterEnabled.value, ), ]), desc.GroupAttribute( name="bilateralFilter", label="Bilateral Filter", description="Bilateral Filtering Parameters.", joinChar=":", groupDesc=[ desc.BoolParam( name='bilateralFilterEnabled', label='Enable', description='Bilateral Filter.', value=False, uid=[0], ), desc.IntParam( name='bilateralFilterDistance', label='Distance', description= 'Diameter of each pixel neighborhood that is used during bilateral filtering.\nCould be very slow for large filters, so it is recommended to use 5.', value=0, range=(0, 9, 1), uid=[0], enabled=lambda node: node.bilateralFilter. bilateralFilterEnabled.value, ), desc.FloatParam( name='bilateralFilterSigmaSpace', label='Sigma Coordinate Space', description= 'Bilateral Filter sigma in the coordinate space.', value=0.0, range=(0.0, 150.0, 0.01), uid=[0], enabled=lambda node: node.bilateralFilter. bilateralFilterEnabled.value, ), desc.FloatParam( name='bilateralFilterSigmaColor', label='Sigma Color Space', description='Bilateral Filter sigma in the color space.', value=0.0, range=(0.0, 150.0, 0.01), uid=[0], enabled=lambda node: node.bilateralFilter. bilateralFilterEnabled.value, ), ]), desc.GroupAttribute( name="claheFilter", label="Clahe Filter", description="Clahe Filtering Parameters.", joinChar=":", groupDesc=[ desc.BoolParam( name='claheEnabled', label='Enable', description= 'Use Contrast Limited Adaptive Histogram Equalization (CLAHE) Filter.', value=False, uid=[0], ), desc.FloatParam( name='claheClipLimit', label='Clip Limit', description='Sets Threshold For Contrast Limiting.', value=4.0, range=(0.0, 8.0, 1.0), uid=[0], enabled=lambda node: node.claheFilter.claheEnabled.value, ), desc.IntParam( name='claheTileGridSize', label='Tile Grid Size', description= 'Sets Size Of Grid For Histogram Equalization. 
Input Image Will Be Divided Into Equally Sized Rectangular Tiles.', value=8, range=(4, 64, 4), uid=[0], enabled=lambda node: node.claheFilter.claheEnabled.value, ), ]), desc.GroupAttribute( name="noiseFilter", label="Noise Filter", description="Noise Filtering Parameters.", joinChar=":", groupDesc=[ desc.BoolParam( name='noiseEnabled', label='Enable', description='Add Noise.', value=False, uid=[0], ), desc.ChoiceParam( name='noiseMethod', label='Method', description= " * method: There are several noise types to choose from:\n" " * uniform: adds noise values uninformly distributed on range [A,B).\n" " * gaussian: adds Gaussian (normal distribution) noise values with mean value A and standard deviation B.\n" " * salt: changes to value A a portion of pixels given by B.\n", value='uniform', values=['uniform', 'gaussian', 'salt'], exclusive=True, uid=[0], enabled=lambda node: node.noiseFilter.noiseEnabled.value, ), desc.FloatParam( name='noiseA', label='A', description= 'Parameter that have a different interpretation depending on the method chosen.', value=0.0, range=(0.0, 1.0, 0.0001), uid=[0], enabled=lambda node: node.noiseFilter.noiseEnabled.value, ), desc.FloatParam( name='noiseB', label='B', description= 'Parameter that have a different interpretation depending on the method chosen.', value=1.0, range=(0.0, 1.0, 0.0001), uid=[0], enabled=lambda node: node.noiseFilter.noiseEnabled.value, ), desc.BoolParam( name='noiseMono', label='Mono', description= 'If is Checked, a single noise value will be applied to all channels otherwise a separate noise value will be computed for each channel.', value=True, uid=[0], enabled=lambda node: node.noiseFilter.noiseEnabled.value, ), ]), desc.ChoiceParam( name='outputFormat', label='Output Image Format', description='Allows you to choose the format of the output image.', value='rgba', values=['rgba', 'rgb', 'grayscale'], exclusive=True, uid=[0], ), desc.ChoiceParam( name='storageDataType', label='Storage Data Type for EXR output', description='Storage image data type:\n' ' * float: Use full floating point (32 bits per channel)\n' ' * half: Use half float (16 bits per channel)\n' ' * halfFinite: Use half float, but clamp values to avoid non-finite values\n' ' * auto: Use half float if all values can fit, else use full float\n', value='float', values=['float', 'half', 'halfFinite', 'auto'], exclusive=True, uid=[0], ), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', description= 'verbosity level (fatal, error, warning, info, debug, trace).', value='info', values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], exclusive=True, uid=[], ) ] outputs = [ desc.File( name='outSfMData', label='Output sfmData', description='Output sfmData.', value=lambda attr: (desc.Node.internalFolder + os.path.basename( attr.node.input.value)) if (os.path.splitext( attr.node.input.value)[1] in ['.abc', '.sfm']) else '', uid=[], group='', # do not export on the command line ), desc.File( name='output', label='Output Folder', description='Output Images Folder.', value=desc.Node.internalFolder, uid=[], ), desc.File( name='outputImages', label='Output Images', description='Output Image Files.', value=outputImagesValueFunct, group='', # do not export on the command line uid=[], ), ]
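# --- Illustrative sketch: how a group with joinChar=':' flattens to one value --
# The filter groups above (sharpenFilter, bilateralFilter, claheFilter,
# noiseFilter) declare joinChar=':' so their members are passed as a single
# colon-separated argument. The helper below only illustrates that joining with
# a plain list of values; it is not Meshroom's GroupAttribute serialization.

def _join_group(values, join_char=':'):
    # 'values' are the member values in groupDesc order.
    return join_char.join(str(v) for v in values)

# Example: a sharpenFilter left at its defaults (enabled, width, contrast, threshold).
assert _join_group([False, 3, 1.0, 0.0]) == 'False:3:1.0:0.0'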
class LdrToHdrSampling(desc.CommandLineNode): commandLine = 'aliceVision_LdrToHdrSampling {allParams}' size = DividedInputNodeSize('input', 'nbBrackets') parallelization = desc.Parallelization(blockSize=2) commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' category = 'Panorama HDR' documentation = ''' Sample pixels from Low range images for HDR creation ''' inputs = [ desc.File( name='input', label='Input', description='SfMData file.', value='', uid=[0], ), desc.IntParam( name='userNbBrackets', label='Number of Brackets', description='Number of exposure brackets per HDR image (0 for automatic detection).', value=0, range=(0, 15, 1), uid=[], group='user', # not used directly on the command line ), desc.IntParam( name='nbBrackets', label='Automatic Nb Brackets', description='Number of exposure brackets used per HDR image. It is detected automatically from input Viewpoints metadata if "userNbBrackets" is 0, else it is equal to "userNbBrackets".', value=0, range=(0, 10, 1), uid=[0], ), desc.BoolParam( name='byPass', label='Bypass', description="Bypass HDR creation and use the medium bracket as the source for the next steps", value=False, uid=[0], group='internal', enabled= lambda node: node.nbBrackets.value != 1, ), desc.IntParam( name='channelQuantizationPower', label='Channel Quantization Power', description='Quantization level like 8 bits or 10 bits.', value=10, range=(8, 14, 1), uid=[0], advanced=True, enabled= lambda node: node.byPass.enabled and not node.byPass.value, ), desc.IntParam( name='blockSize', label='Block Size', description='Size of the image tile to extract a sample.', value=256, range=(8, 1024, 1), uid=[0], advanced=True, enabled= lambda node: node.byPass.enabled and not node.byPass.value, ), desc.IntParam( name='radius', label='Patch Radius', description='Radius of the patch used to analyze the sample statistics.', value=5, range=(0, 10, 1), uid=[0], advanced=True, enabled= lambda node: node.byPass.enabled and not node.byPass.value, ), desc.IntParam( name='maxCountSample', label='Max Number of Samples', description='Max number of samples per image group.', value=200, range=(10, 1000, 10), uid=[0], advanced=True, enabled= lambda node: node.byPass.enabled and not node.byPass.value, ), desc.BoolParam( name='debug', label='Export Debug Files', description="Export debug files to analyze the sampling strategy.", value=False, uid=[], enabled= lambda node: node.byPass.enabled and not node.byPass.value, ), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', description='verbosity level (fatal, error, warning, info, debug, trace).', value='info', values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], exclusive=True, uid=[], ) ] outputs = [ desc.File( name='output', label='Output Folder', description='Output path for the samples.', value=desc.Node.internalFolder, uid=[], ), ] def processChunk(self, chunk): if chunk.node.nbBrackets.value == 1 or chunk.node.byPass.value: return super(LdrToHdrSampling, self).processChunk(chunk) @classmethod def update(cls, node): if not isinstance(node.nodeDesc, cls): raise ValueError("Node {} is not an instance of type {}".format(node, cls)) # TODO: use Node version for this test if 'userNbBrackets' not in node.getAttributes().keys(): # Old version of the node return if node.userNbBrackets.value != 0: node.nbBrackets.value = node.userNbBrackets.value return # logging.info("[LDRToHDR] Update start: version:" + str(node.packageVersion)) cameraInitOutput = node.input.getLinkParam(recursive=True) if not 
cameraInitOutput: node.nbBrackets.value = 0 return if not cameraInitOutput.node.hasAttribute('viewpoints'): if cameraInitOutput.node.hasAttribute('input'): cameraInitOutput = cameraInitOutput.node.input.getLinkParam(recursive=True) if cameraInitOutput and cameraInitOutput.node and cameraInitOutput.node.hasAttribute('viewpoints'): viewpoints = cameraInitOutput.node.viewpoints.value else: # No connected CameraInit node.nbBrackets.value = 0 return # logging.info("[LDRToHDR] Update start: nb viewpoints:" + str(len(viewpoints))) inputs = [] for viewpoint in viewpoints: jsonMetadata = viewpoint.metadata.value if not jsonMetadata: # no metadata, we cannot found the number of brackets node.nbBrackets.value = 0 return d = json.loads(jsonMetadata) fnumber = findMetadata(d, ["FNumber", "Exif:ApertureValue", "ApertureValue", "Aperture"], "") shutterSpeed = findMetadata(d, ["Exif:ShutterSpeedValue", "ShutterSpeedValue", "ShutterSpeed"], "") iso = findMetadata(d, ["Exif:ISOSpeedRatings", "ISOSpeedRatings", "ISO"], "") if not fnumber and not shutterSpeed: # If one image without shutter or fnumber, we cannot found the number of brackets. # We assume that there is no multi-bracketing, so nothing to do. node.nbBrackets.value = 1 return inputs.append((viewpoint.path.value, (fnumber, shutterSpeed, iso))) inputs.sort() exposureGroups = [] exposures = [] for path, exp in inputs: if exposures and exp != exposures[-1] and exp == exposures[0]: exposureGroups.append(exposures) exposures = [exp] else: exposures.append(exp) exposureGroups.append(exposures) exposures = None bracketSizes = set() if len(exposureGroups) == 1: if len(set(exposureGroups[0])) == 1: # Single exposure and multiple views node.nbBrackets.value = 1 else: # Single view and multiple exposures node.nbBrackets.value = len(exposureGroups[0]) else: for expGroup in exposureGroups: bracketSizes.add(len(expGroup)) if len(bracketSizes) == 1: node.nbBrackets.value = bracketSizes.pop() # logging.info("[LDRToHDR] nb bracket size:" + str(node.nbBrackets.value)) else: node.nbBrackets.value = 0
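# --- Illustrative sketch: what DividedInputNodeSize('input', 'nbBrackets') means -
# The size above drives the parallelization (blockSize=2): conceptually it is
# the number of input views divided by the detected bracket count, i.e. the
# number of HDR groups to sample. '_divided_size' is a hypothetical stand-in,
# not the DividedInputNodeSize implementation.

def _divided_size(nb_views, nb_brackets):
    # Guard against the '0 = not yet detected' value used by 'nbBrackets'.
    return nb_views // nb_brackets if nb_brackets > 0 else nb_views

# Example: 15 LDR images shot as 3-bracket groups -> 5 groups to sample.
assert _divided_size(15, 3) == 5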
class MeshResampling(desc.CommandLineNode): commandLine = 'aliceVision_meshResampling {allParams}' cpu = desc.Level.NORMAL ram = desc.Level.NORMAL category = 'Mesh Post-Processing' documentation = ''' This node allows to recompute the mesh surface with a new topology and uniform density. ''' inputs = [ desc.File( name="input", label='Input Mesh (OBJ file format).', description='', value='', uid=[0], ), desc.FloatParam( name='simplificationFactor', label='Simplification factor', description='Simplification factor', value=0.5, range=(0.0, 1.0, 0.01), uid=[0], ), desc.IntParam( name='nbVertices', label='Fixed Number of Vertices', description='Fixed number of output vertices.', value=0, range=(0, 1000000, 1), uid=[0], ), desc.IntParam( name='minVertices', label='Min Vertices', description='Min number of output vertices.', value=0, range=(0, 1000000, 1), uid=[0], ), desc.IntParam( name='maxVertices', label='Max Vertices', description='Max number of output vertices.', value=0, range=(0, 1000000, 1), uid=[0], ), desc.IntParam( name='nbLloydIter', label='Number of Pre-Smoothing Iteration', description='Number of iterations for Lloyd pre-smoothing.', value=40, range=(0, 100, 1), uid=[0], ), desc.BoolParam( name='flipNormals', label='Flip Normals', description= '''Option to flip face normals. It can be needed as it depends on the vertices order in triangles and the convention change from one software to another.''', value=False, uid=[0], ), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', description= '''verbosity level (fatal, error, warning, info, debug, trace).''', value='info', values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], exclusive=True, uid=[], ), ] outputs = [ desc.File( name="output", label="Output mesh", description="Output mesh (OBJ file format).", value=desc.Node.internalFolder + 'mesh.obj', uid=[], ), ]
class FeatureExtraction(desc.CommandLineNode): commandLine = 'aliceVision_featureExtraction {allParams}' size = desc.DynamicNodeSize('input') parallelization = desc.Parallelization(blockSize=40) commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' category = 'Sparse Reconstruction' documentation = ''' This node extracts distinctive groups of pixels that are, to some extent, invariant to changing camera viewpoints during image acquisition. Hence, a feature in the scene should have similar feature descriptions in all images. This node implements multiple methods: * **SIFT** The most standard method. This is the default and recommended value for all use cases. * **AKAZE** AKAZE can be an interesting solution to extract features in challenging conditions. It can match a wider angle than SIFT but has drawbacks: it may extract too many features and their repartition is not always good. It is known to perform well on challenging surfaces such as skin. * **CCTAG** CCTag is a marker type with 3 or 4 crowns. You can put markers in the scene during the shooting session to automatically re-orient and re-scale the scene to a known size. It is robust to motion blur, depth of field and occlusion. Be careful to leave enough white margin around your CCTags. ## Online [https://alicevision.org/#photogrammetry/natural_feature_extraction](https://alicevision.org/#photogrammetry/natural_feature_extraction) ''' inputs = [ desc.File( name='input', label='SfMData', description='SfMData file.', value='', uid=[0], ), desc.ChoiceParam( name='describerTypes', label='Describer Types', description='Describer types used to describe an image.', value=['sift'], values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'], exclusive=False, uid=[0], joinChar=',', ), desc.ChoiceParam( name='describerPreset', label='Describer Density', description='Control the ImageDescriber density (low, medium, normal, high, ultra).\n' 'Warning: Use ULTRA only on small datasets.', value='normal', values=['low', 'medium', 'normal', 'high', 'ultra', 'custom'], exclusive=True, uid=[0], group=lambda node: 'allParams' if node.describerPreset.value != 'custom' else None, ), desc.IntParam( name='maxNbFeatures', label='Max Nb Features', description='Max number of features extracted (0 means default value based on Describer Density).', value=0, range=(0, 100000, 1000), uid=[0], advanced=True, enabled=lambda node: (node.describerPreset.value == 'custom'), ), desc.ChoiceParam( name='describerQuality', label='Describer Quality', description='Control the ImageDescriber quality (low, medium, normal, high, ultra).', value='normal', values=['low', 'medium', 'normal', 'high', 'ultra'], exclusive=True, uid=[0], ), desc.ChoiceParam( name='contrastFiltering', label='Contrast Filtering', description="Contrast filtering method to ignore features with too low contrast that can be considered as noise:\n" "* Static: Fixed threshold.\n" "* AdaptiveToMedianVariance: Based on image content analysis.\n" "* NoFiltering: Disable contrast filtering.\n" "* GridSortOctaves: Grid Sort but per octaves (and only per scale at the end).\n" "* GridSort: Grid sort per octaves and at the end (scale * peakValue).\n" "* GridSortScaleSteps: Grid sort per octaves and at the end (scale and then peakValue).\n" "* NonExtremaFiltering: Filter non-extrema peakValues.\n", value='GridSort', values=['Static', 'AdaptiveToMedianVariance', 'NoFiltering', 'GridSortOctaves', 'GridSort', 'GridSortScaleSteps', 
'GridSortOctaveSteps', 'NonExtremaFiltering'], exclusive=True, advanced=True, uid=[0], ), desc.FloatParam( name='relativePeakThreshold', label='Relative Peak Threshold', description='Peak Threshold relative to median of gradients.', value=0.01, range=(0.01, 1.0, 0.001), advanced=True, uid=[0], enabled=lambda node: (node.contrastFiltering.value == 'AdaptiveToMedianVariance'), ), desc.BoolParam( name='gridFiltering', label='Grid Filtering', description='Enable grid filtering. Highly recommended to ensure a usable number of features.', value=True, advanced=True, uid=[0], ), desc.BoolParam( name='forceCpuExtraction', label='Force CPU Extraction', description='Use only CPU feature extraction.', value=True, uid=[], advanced=True, ), desc.IntParam( name='maxThreads', label='Max Nb Threads', description='Specifies the maximum number of threads to run simultaneously (0 for automatic mode).', value=0, range=(0, 24, 1), uid=[], advanced=True, ), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', description='verbosity level (fatal, error, warning, info, debug, trace).', value='info', values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], exclusive=True, uid=[], ) ] outputs = [ desc.File( name='output', label='Features Folder', description='Output path for the features and descriptors files (*.feat, *.desc).', value=desc.Node.internalFolder, uid=[], ), ]
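# --- Illustrative sketch: the callable 'group' on describerPreset -------------
# 'describerPreset' above uses a callable group so the preset is exported to the
# command line only when it is not 'custom'; in 'custom' mode 'maxNbFeatures'
# takes over through its own 'enabled' lambda. The stand-in classes below are
# hypothetical and only show how such a callable group resolves per node.

class _PresetAttr(object):
    def __init__(self, value):
        self.value = value

class _PresetNode(object):
    def __init__(self, preset):
        self.describerPreset = _PresetAttr(preset)

def _resolve_group(group, node):
    return group(node) if callable(group) else group

_preset_group = lambda node: 'allParams' if node.describerPreset.value != 'custom' else None
assert _resolve_group(_preset_group, _PresetNode('normal')) == 'allParams'  # exported
assert _resolve_group(_preset_group, _PresetNode('custom')) is None         # omitted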
class DepthMap(desc.CommandLineNode): commandLine = 'aliceVision_depthMapEstimation {allParams}' gpu = desc.Level.INTENSIVE size = desc.DynamicNodeSize('ini') parallelization = desc.Parallelization(blockSize=3) commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' inputs = [ desc.File( name="ini", label='MVS Configuration File', description='', value='', uid=[0], ), desc.ChoiceParam( name='downscale', label='Downscale', description='Image downscale factor.', value=2, values=[1, 2, 4, 8, 16], exclusive=True, uid=[0], ), desc.IntParam( name='sgmMaxTCams', label='SGM: Nb Neighbour Cameras', description='Semi Global Matching: Number of neighbour cameras.', value=10, range=(1, 100, 1), uid=[0], ), desc.IntParam( name='sgmWSH', label='SGM: WSH', description= 'Semi Global Matching: Half-size of the patch used to compute the similarity.', value=4, range=(1, 20, 1), uid=[0], ), desc.FloatParam( name='sgmGammaC', label='SGM: GammaC', description='Semi Global Matching: GammaC Threshold.', value=5.5, range=(0.0, 30.0, 0.5), uid=[0], ), desc.FloatParam( name='sgmGammaP', label='SGM: GammaP', description='Semi Global Matching: GammaP Threshold.', value=8.0, range=(0.0, 30.0, 0.5), uid=[0], ), desc.IntParam( name='refineNSamplesHalf', label='Refine: Number of Samples', description='Refine: Number of samples.', value=150, range=(1, 500, 10), uid=[0], ), desc.IntParam( name='refineNDepthsToRefine', label='Refine: Number of Depths', description='Refine: Number of depths.', value=31, range=(1, 100, 1), uid=[0], ), desc.IntParam( name='refineNiters', label='Refine: Number of Iterations', description='Refine:: Number of iterations.', value=100, range=(1, 500, 10), uid=[0], ), desc.IntParam( name='refineWSH', label='Refine: WSH', description= 'Refine: Half-size of the patch used to compute the similarity.', value=3, range=(1, 20, 1), uid=[0], ), desc.IntParam( name='refineMaxTCams', label='Refine: Nb Neighbour Cameras', description='Refine: Number of neighbour cameras.', value=6, range=(1, 20, 1), uid=[0], ), desc.FloatParam( name='refineSigma', label='Refine: Sigma', description='Refine: Sigma Threshold.', value=15, range=(0.0, 30.0, 0.5), uid=[0], ), desc.FloatParam( name='refineGammaC', label='Refine: GammaC', description='Refine: GammaC Threshold.', value=15.5, range=(0.0, 30.0, 0.5), uid=[0], ), desc.FloatParam( name='refineGammaP', label='Refine: GammaP', description='Refine: GammaP threshold.', value=8.0, range=(0.0, 30.0, 0.5), uid=[0], ), desc.BoolParam( name='refineUseTcOrRcPixSize', label='Refine: Tc or Rc pixel size', description= 'Refine: Use minimum pixel size of neighbour cameras (Tc) or current camera pixel size (Rc)', value=False, uid=[0], ), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', description= '''verbosity level (fatal, error, warning, info, debug, trace).''', value='info', values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], exclusive=True, uid=[], ), ] outputs = [ desc.File( name='output', label='Output', description='Output folder for generated depth maps.', value=desc.Node.internalFolder, uid=[], ), ]
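# --- Illustrative sketch: how Parallelization(blockSize=3) splits the views ----
# DepthMap is computed in per-view chunks: with blockSize=3, N views are cut
# into ranges that are passed on the command line as
# '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'. The generator below
# only illustrates that chunking; it is not desc.Parallelization itself.

def _ranges(size, block_size):
    for start in range(0, size, block_size):
        yield start, min(block_size, size - start)

# Example: 10 views with blockSize=3 -> 4 chunks.
assert list(_ranges(10, 3)) == [(0, 3), (3, 3), (6, 3), (9, 1)]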
class ExportAnimatedCamera(desc.CommandLineNode): commandLine = 'aliceVision_exportAnimatedCamera {allParams}' documentation = ''' Convert cameras from an SfM scene into an animated camera in Alembic file format. Based on the input image filenames, it will recognize the input video sequence to create an animated camera. ''' inputs = [ desc.File( name='input', label='Input SfMData', description='SfMData file containing a complete SfM.', value='', uid=[0], ), desc.File( name='viewFilter', label='SfMData Filter', description='An SfMData file used as a filter.', value='', uid=[0], ), desc.BoolParam( name='exportUndistortedImages', label='Export Undistorted Images', description='Export Undistorted Images.', value=True, uid=[0], ), desc.ChoiceParam( name='undistortedImageType', label='Undistort Image Format', description='Image file format to use for undistorted images ("jpg", "png", "tif", "exr (half)").', value='jpg', values=['jpg', 'png', 'tif', 'exr'], exclusive=True, uid=[0], ), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', description='Verbosity level (fatal, error, warning, info, debug, trace).', value='info', values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], exclusive=True, uid=[], ), ] outputs = [ desc.File( name='output', label='Output filepath', description='Output filepath for the alembic animated camera.', value=desc.Node.internalFolder, uid=[], ), desc.File( name='outputCamera', label='Output Camera Filepath', description='Output filename for the alembic animated camera.', value=desc.Node.internalFolder + 'camera.abc', group='', # exclude from command line uid=[], ), ]
class PanoramaInit(desc.CommandLineNode): commandLine = 'aliceVision_panoramaInit {allParams}' size = desc.DynamicNodeSize('input') category = 'Panorama HDR' documentation = ''' This node allows you to set up the Panorama: 1/ Initialize the cameras from known positions in an XML file (provided by ["Roundshot VR Drive"](https://www.roundshot.com/xml_1/internet/fr/application/d394/d395/f396.cfm) ). 2/ Set up Full Fisheye Optics (to use an Equirectangular camera model). 3/ Automatically detect the Fisheye Circle (radius + center) in input images, or adjust it manually. ''' inputs = [ desc.File( name='input', label='Input', description="SfM Data File", value='', uid=[0], ), desc.ChoiceParam( name='initializeCameras', label='Initialize Cameras', description='Initialize cameras.', value='No', values=['No', 'File', 'Horizontal', 'Horizontal+Zenith', 'Zenith+Horizontal', 'Spherical'], exclusive=True, uid=[0], ), desc.File( name='config', label='Xml Config', description="XML Data File", value='', uid=[0], enabled=lambda node: node.initializeCameras.value == 'File', ), desc.BoolParam( name='yawCW', label='Yaw CW', description="Yaw ClockWise or CounterClockWise", value=1, uid=[0], enabled=lambda node: ('Horizontal' in node.initializeCameras.value) or (node.initializeCameras.value == "Spherical"), ), desc.ListAttribute( elementDesc=desc.IntParam( name='nbViews', label='', description='', value=-1, range=[-1, 20], uid=[0], ), name='nbViewsPerLine', label='Spherical: Nb Views Per Line', description='Number of views per line in Spherical acquisition. Assumes angles from [-90,+90deg] for pitch and [-180,+180deg] for yaw. Use -1 to estimate the number of images automatically.', joinChar=',', enabled=lambda node: node.initializeCameras.value == 'Spherical', ), desc.ListAttribute( elementDesc=desc.File( name='dependency', label='', description="", value='', uid=[], ), name='dependency', label='Dependency', description="Folder(s) in which computed features are stored. 
(WORKAROUND for valid Tractor graph submission)", group='forDependencyOnly', # not a command line argument ), desc.BoolParam( name='useFisheye', label='Full Fisheye', description='To declare a full fisheye panorama setup', value=False, uid=[0], ), desc.BoolParam( name='estimateFisheyeCircle', label='Estimate Fisheye Circle', description='Automatically estimate the Fisheye Circle center and radius instead of using user values.', value=True, uid=[0], enabled=lambda node: node.useFisheye.value, ), desc.GroupAttribute( name="fisheyeCenterOffset", label="Fisheye Center", description="Center of the Fisheye circle (XY offset to the center in pixels).", groupDesc=[ desc.FloatParam( name="fisheyeCenterOffset_x", label="x", description="X Offset in pixels", value=0.0, uid=[0], range=(-1000.0, 10000.0, 1.0)), desc.FloatParam( name="fisheyeCenterOffset_y", label="y", description="Y Offset in pixels", value=0.0, uid=[0], range=(-1000.0, 10000.0, 1.0)), ], group=None, # skip group from command line enabled=lambda node: node.useFisheye.value and not node.estimateFisheyeCircle.value, ), desc.FloatParam( name='fisheyeRadius', label='Radius', description='Fisheye visibillity circle radius (% of image shortest side).', value=96.0, range=(0.0, 150.0, 0.01), uid=[0], enabled=lambda node: node.useFisheye.value and not node.estimateFisheyeCircle.value, ), desc.ChoiceParam( name='inputAngle', label='input Angle offset', description='Add a rotation to the input XML given poses (CCW).', value='None', values=['None', 'rotate90', 'rotate180', 'rotate270'], exclusive=True, uid=[0] ), desc.BoolParam( name='debugFisheyeCircleEstimation', label='Debug Fisheye Circle Detection', description='Debug fisheye circle detection.', value=False, uid=[0], enabled=lambda node: node.useFisheye.value, advanced=True, ), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', description='Verbosity level (fatal, error, warning, info, debug, trace).', value='info', values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], exclusive=True, uid=[], ), ] outputs = [ desc.File( name='outSfMData', label='Output SfMData File', description='Path to the output sfmdata file', value=desc.Node.internalFolder + 'sfmData.sfm', uid=[], ) ]
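# --- Illustrative sketch: interpreting 'fisheyeRadius' and 'fisheyeCenterOffset' -
# 'fisheyeRadius' above is a percentage of the shortest image side and
# 'fisheyeCenterOffset' an XY shift from the image center in pixels. The helper
# below converts them to a pixel-space circle under the assumption that 100%
# corresponds to a circle inscribed in the image (radius = half the shortest
# side); both the helper and that reading are illustrative, not AliceVision code.

def _fisheye_circle(width, height, radius_percent, offset_x=0.0, offset_y=0.0):
    center = (width / 2.0 + offset_x, height / 2.0 + offset_y)
    radius = min(width, height) / 2.0 * radius_percent / 100.0
    return center, radius

# Example: the default radius of 96% on a 6000x4000 image.
assert _fisheye_circle(6000, 4000, 96.0) == ((3000.0, 2000.0), 1920.0)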
class Meshing(desc.CommandLineNode): commandLine = 'aliceVision_meshing {allParams}' cpu = desc.Level.INTENSIVE ram = desc.Level.INTENSIVE inputs = [ desc.File( name='input', label='Input', description='SfMData file.', value='', uid=[0], ), desc.File( name='imagesFolder', label='Images Folder', description= 'Use images from a specific folder. Filename should be the image uid.', value='', uid=[0], ), desc.File( name="depthMapFolder", label='Depth Maps Folder', description='Input depth maps folder', value='', uid=[0], ), desc.File( name="depthMapFilterFolder", label='Filtered Depth Maps Folder', description='Input filtered depth maps folder', value='', uid=[0], ), desc.BoolParam( name='estimateSpaceFromSfM', label='Estimate Space From SfM', description='Estimate the 3d space from the SfM', value=True, uid=[0], ), desc.IntParam( name='estimateSpaceMinObservations', label='Min Observations For SfM Space Estimation', description= 'Minimum number of observations for SfM space estimation.', value=3, range=(0, 100, 1), uid=[0], ), desc.FloatParam( name='estimateSpaceMinObservationAngle', label='Min Observations Angle For SfM Space Estimation', description= 'Minimum angle between two observations for SfM space estimation.', value=0.2, range=(0, 10, 0.1), uid=[0], ), desc.IntParam( name='maxInputPoints', label='Max Input Points', description='Max input points loaded from depth map images.', value=50000000, range=(500000, 500000000, 1000), uid=[0], ), desc.IntParam( name='maxPoints', label='Max Points', description='Max points at the end of the depth maps fusion.', value=5000000, range=(100000, 10000000, 1000), uid=[0], ), desc.IntParam( name='maxPointsPerVoxel', label='Max Points Per Voxel', description='Max points per voxel', value=1000000, range=(500000, 30000000, 1000), uid=[0], ), desc.IntParam( name='minStep', label='Min Step', description= 'The step used to load depth values from depth maps is computed from maxInputPts. 
' 'Here we define the minimal value for this step, so on small datasets we will not spend ' 'too much time at the beginning loading all depth values.', value=2, range=(1, 20, 1), uid=[0], ), desc.ChoiceParam( name='partitioning', label='Partitioning', description='', value='singleBlock', values=('singleBlock', 'auto'), exclusive=True, uid=[0], ), desc.ChoiceParam( name='repartition', label='Repartition', description='', value='multiResolution', values=('multiResolution', 'regularGrid'), exclusive=True, uid=[0], ), desc.FloatParam( name='angleFactor', label='angleFactor', description='angleFactor', value=15.0, range=(0.0, 200.0, 1.0), uid=[0], ), desc.FloatParam( name='simFactor', label='simFactor', description='simFactor', value=15.0, range=(0.0, 200.0, 1.0), uid=[0], ), desc.FloatParam( name='pixSizeMarginInitCoef', label='pixSizeMarginInitCoef', description='pixSizeMarginInitCoef', value=2.0, range=(0.0, 10.0, 0.1), uid=[0], ), desc.FloatParam( name='pixSizeMarginFinalCoef', label='pixSizeMarginFinalCoef', description='pixSizeMarginFinalCoef', value=4.0, range=(0.0, 10.0, 0.1), uid=[0], ), desc.FloatParam( name='voteMarginFactor', label='voteMarginFactor', description='voteMarginFactor', value=4.0, range=(0.1, 10.0, 0.1), uid=[0], ), desc.FloatParam( name='contributeMarginFactor', label='contributeMarginFactor', description='contributeMarginFactor', value=2.0, range=(0.0, 10.0, 0.1), uid=[0], ), desc.FloatParam( name='simGaussianSizeInit', label='simGaussianSizeInit', description='simGaussianSizeInit', value=10.0, range=(0.0, 50.0, 0.1), uid=[0], ), desc.FloatParam( name='simGaussianSize', label='simGaussianSize', description='simGaussianSize', value=10.0, range=(0.0, 50.0, 0.1), uid=[0], ), desc.FloatParam( name='minAngleThreshold', label='minAngleThreshold', description='minAngleThreshold', value=1.0, range=(0.0, 10.0, 0.01), uid=[0], ), desc.BoolParam( name='refineFuse', label='Refine Fuse', description= 'Refine depth map fusion with the new pixels size defined by angle and similarity scores.', value=True, uid=[0], ), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', description= '''verbosity level (fatal, error, warning, info, debug, trace).''', value='info', values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], exclusive=True, uid=[], ), ] outputs = [ desc.File( name="output", label="Output mesh", description="Output mesh (OBJ file format).", value="{cache}/{nodeType}/{uid0}/mesh.obj", uid=[], ), desc.File( name="outputDenseReconstruction", label="Output reconstruction", description="Output dense reconstruction (BIN file format).", value="{cache}/{nodeType}/{uid0}/denseReconstruction.bin", uid=[], group="", ), ]
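# --- Illustrative sketch: resolving the '{cache}/{nodeType}/{uid0}/...' values --
# The Meshing outputs above are plain format templates. The snippet shows how
# such a template expands with str.format(); the cache path and uid used here
# are made-up example values, not ones produced by Meshroom.

_mesh_template = '{cache}/{nodeType}/{uid0}/mesh.obj'
_resolved = _mesh_template.format(cache='/tmp/MeshroomCache', nodeType='Meshing',
                                  uid0='0123456789abcdef')
assert _resolved == '/tmp/MeshroomCache/Meshing/0123456789abcdef/mesh.obj'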
class FeatureMatching(desc.CommandLineNode):
    commandLine = 'aliceVision_featureMatching {allParams}'
    size = desc.DynamicNodeSize('input')
    parallelization = desc.Parallelization(blockSize=20)
    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

    inputs = [
        desc.File(
            name='input',
            label='Input',
            description='SfMData file.',
            value='',
            uid=[0],
        ),
        desc.ListAttribute(
            elementDesc=desc.File(
                name="featuresFolder",
                label="Features Folder",
                description="",
                value="",
                uid=[0],
            ),
            name="featuresFolders",
            label="Features Folders",
            description="Folder(s) containing the extracted features and descriptors.",
        ),
        desc.File(
            name='imagePairsList',
            label='Image Pairs List',
            description='Path to a file which contains the list of image pairs to match.',
            value='',
            uid=[0],
        ),
        desc.ChoiceParam(
            name='describerTypes',
            label='Describer Types',
            description='Describer types used to describe an image.',
            value=['sift'],
            values=['sift', 'sift_float', 'sift_upright', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'],
            exclusive=False,
            uid=[0],
            joinChar=',',
        ),
        desc.ChoiceParam(
            name='photometricMatchingMethod',
            label='Photometric Matching Method',
            description='For scalar-based region descriptors:\n'
                        ' * BRUTE_FORCE_L2: L2 BruteForce matching\n'
                        ' * ANN_L2: L2 Approximate Nearest Neighbor matching\n'
                        ' * CASCADE_HASHING_L2: L2 Cascade Hashing matching\n'
                        ' * FAST_CASCADE_HASHING_L2: L2 Cascade Hashing with precomputed hashed regions (faster than CASCADE_HASHING_L2 but uses more memory)\n'
                        'For binary-based descriptors:\n'
                        ' * BRUTE_FORCE_HAMMING: BruteForce Hamming matching',
            value='ANN_L2',
            values=('BRUTE_FORCE_L2', 'ANN_L2', 'CASCADE_HASHING_L2', 'FAST_CASCADE_HASHING_L2', 'BRUTE_FORCE_HAMMING'),
            exclusive=True,
            uid=[0],
        ),
        desc.ChoiceParam(
            name='geometricEstimator',
            label='Geometric Estimator',
            description='Geometric estimator: acransac (A-Contrario Ransac), loransac (LO-Ransac, only available for the "fundamental_matrix" model).',
            value='acransac',
            values=['acransac', 'loransac'],
            exclusive=True,
            uid=[0],
        ),
        desc.ChoiceParam(
            name='geometricFilterType',
            label='Geometric Filter Type',
            description='Geometric validation method to filter feature matches:\n'
                        ' * fundamental_matrix\n'
                        ' * essential_matrix\n'
                        ' * homography_matrix\n'
                        ' * homography_growing\n'
                        ' * no_filtering',
            value='fundamental_matrix',
            values=['fundamental_matrix', 'essential_matrix', 'homography_matrix', 'homography_growing', 'no_filtering'],
            exclusive=True,
            uid=[0],
        ),
        desc.FloatParam(
            name='distanceRatio',
            label='Distance Ratio',
            description='Distance ratio to discard non-meaningful matches.',
            value=0.8,
            range=(0.0, 1.0, 0.01),
            uid=[0],
        ),
        desc.IntParam(
            name='maxIteration',
            label='Max Iteration',
            description='Maximum number of iterations allowed in the ransac step.',
            value=2048,
            range=(1, 20000, 1),
            uid=[0],
        ),
        desc.IntParam(
            name='maxMatches',
            label='Max Matches',
            description='Maximum number of matches to keep.',
            value=0,
            range=(0, 10000, 1),
            uid=[0],
        ),
        desc.BoolParam(
            name='savePutativeMatches',
            label='Save Putative Matches',
            description='Save putative matches.',
            value=False,
            uid=[0],
        ),
        desc.BoolParam(
            name='guidedMatching',
            label='Guided Matching',
            description='Use the found model to improve the pairwise correspondences.',
            value=False,
            uid=[0],
        ),
        desc.BoolParam(
            name='exportDebugFiles',
            label='Export Debug Files',
            description='Export debug files (svg, dot).',
            value=False,
            uid=[],
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='verbosity level (fatal, error, warning, info, debug, trace).',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        )
    ]

    outputs = [
        desc.File(
            name='output',
            label='Output Folder',
            description='Path to a folder in which computed matches will be stored.',
            value=desc.Node.internalFolder,
            uid=[],
        ),
    ]
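FeatureMatching declares desc.Parallelization(blockSize=20) together with a commandLineRange template, so the work can be split into chunks that each receive their own range arguments. The sketch below is only an illustration of that chunking idea (it is not Meshroom's scheduler); the chunk count of 50 views is an arbitrary example:

# Illustrative sketch: split 50 views into blockSize=20 chunks and render the
# range arguments appended to each chunk's command line.
def iter_ranges(total_size, block_size):
    for range_start in range(0, total_size, block_size):
        yield range_start, min(block_size, total_size - range_start)

command_line_range = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

for range_start, range_block_size in iter_ranges(total_size=50, block_size=20):
    print(command_line_range.format(rangeStart=range_start, rangeBlockSize=range_block_size))
# --rangeStart 0 --rangeSize 20
# --rangeStart 20 --rangeSize 20
# --rangeStart 40 --rangeSize 10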
class ConvertSfMFormat(desc.CommandLineNode):
    commandLine = 'aliceVision_convertSfMFormat {allParams}'
    size = desc.DynamicNodeSize('input')

    documentation = '''
Convert an SfM scene from one file format to another.
It can also be used to remove specific parts of an SfM scene (e.g. filter out all 3D landmarks or all 2D observations).
'''

    inputs = [
        desc.File(
            name='input',
            label='Input',
            description='SfMData file.',
            value='',
            uid=[0],
        ),
        desc.ChoiceParam(
            name='fileExt',
            label='SfM File Format',
            description='SfM file format.',
            value='abc',
            values=['abc', 'sfm', 'json', 'ply', 'baf'],
            exclusive=True,
            uid=[0],
            group='',  # exclude from command line
        ),
        desc.ChoiceParam(
            name='describerTypes',
            label='Describer Types',
            description='Describer types to keep.',
            value=['sift'],
            values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv', 'unknown'],
            exclusive=False,
            uid=[0],
            joinChar=',',
        ),
        desc.ListAttribute(
            elementDesc=desc.File(
                name="imageId",
                label="Image id",
                description="",
                value="",
                uid=[0],
            ),
            name="imageWhiteList",
            label="Image White List",
            description='Image white list (uids or image paths).',
        ),
        desc.BoolParam(
            name='views',
            label='Views',
            description='Export views.',
            value=True,
            uid=[0],
        ),
        desc.BoolParam(
            name='intrinsics',
            label='Intrinsics',
            description='Export intrinsics.',
            value=True,
            uid=[0],
        ),
        desc.BoolParam(
            name='extrinsics',
            label='Extrinsics',
            description='Export extrinsics.',
            value=True,
            uid=[0],
        ),
        desc.BoolParam(
            name='structure',
            label='Structure',
            description='Export structure.',
            value=True,
            uid=[0],
        ),
        desc.BoolParam(
            name='observations',
            label='Observations',
            description='Export observations.',
            value=True,
            uid=[0],
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='verbosity level (fatal, error, warning, info, debug, trace).',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[0],
        ),
    ]

    outputs = [
        desc.File(
            name='output',
            label='Output',
            description='Path to the output SfM Data file.',
            value=desc.Node.internalFolder + 'sfm.{fileExtValue}',
            uid=[],
        ),
    ]
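Note that 'fileExt' never appears on the command line (its group is empty); it is only consumed through the '{fileExtValue}' placeholder in the output value, so the chosen format drives the output file extension. A minimal sketch of how such a template could resolve, assuming the '{cache}/{nodeType}/{uid0}/' style of internal folder seen in the Meshing outputs above (the paths and the resolve helper are hypothetical, Meshroom performs this substitution internally):

# Minimal, illustrative resolution of the output path template.
internal_folder = '{cache}/{nodeType}/{uid0}/'   # internalFolder-style template

def resolve_output(cache, node_type, uid0, file_ext):
    """Substitute the template placeholders, including '{fileExtValue}'."""
    template = internal_folder + 'sfm.{fileExtValue}'
    return template.format(cache=cache, nodeType=node_type, uid0=uid0, fileExtValue=file_ext)

print(resolve_output('/tmp/MeshroomCache', 'ConvertSfMFormat', 'f00b4r', 'ply'))
# /tmp/MeshroomCache/ConvertSfMFormat/f00b4r/sfm.ply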