class ImageMasking(desc.CommandLineNode): commandLine = 'mogrify -format png -path {outputValue} -type Grayscale -negate -fill black -fuzz {fuzzValue}% +opaque "#ffffff" -blur {radiusValue}x{sigmaValue} -type Bilevel -depth 1 {inputValue}/*jpg' # Default node parameters demo: # mogrify -format png -path "path/to/output/dir" -type Grayscale -negate -fill black -fuzz 9% +opaque "#ffffff" -blur 0x6 -type Bilevel -depth 1 "path/to/input/dir"/*.jpg cpu = desc.Level.NORMAL ram = desc.Level.NORMAL #define node inputs, use PrepareDenseScene node to convert the input images to jpg inputs = [ desc.File( name="input", label='Input Image Folder', description='', value='', uid=[0], ), # wip black/white/green/* background, background pattern desc.IntParam( name='fuzz', label='fuzz', description='', value=60, range=(0, 100, 1), uid=[0], ), # Documentation: http://www.imagemagick.org/Usage/blur/ desc.IntParam( name='radius', label='Blur radius', description= 'larger value=larger blur radius, 0=auto value (default)', value=0, range=(0, 100, 1), uid=[0], ), desc.FloatParam( name='sigma', label='Blur Sigma', description='blur intensity', value=6, range=(0.0, 100.0, 0.01), uid=[0], ), ] # define node outputs outputs = [ desc.File( name="output", label="Output Masks", description="Output Masks folder (monochrome PNG)", value=desc.Node.internalFolder, uid=[], ), ]
class InstantMeshes(desc.CommandLineNode):
    """
    Remesh a mesh with the Instant Meshes field-aligned remesher.

    Exposes smoothing iterations, crease angle and orientation/position
    symmetry options of the `alicevision_InstantMeshes` executable.
    NOTE(review): another, simpler `InstantMeshes` class definition exists in
    this source; if both live in the same module the later one wins — confirm
    they come from different files/revisions.
    """
    commandLine = 'alicevision_InstantMeshes {inputValue} -S {smoothValue} -c {creaseValue} -r {rosyValue} -p {posyValue} --output {outputValue}'
    cpu = desc.Level.NORMAL
    ram = desc.Level.NORMAL

    inputs = [
        desc.File(
            name="input",
            label='Input Mesh (OBJ file format).',
            description='',
            value='',
            uid=[0],
        ),
        desc.IntParam(
            name='smooth',
            label='Number of smoothing',
            description='Number of smoothing & ray tracing reprojection steps (default: 2)',
            value=2,
            range=(0, 100, 1),
            uid=[0],
        ),
        desc.IntParam(
            name='crease',
            label='Dihedral angle threshold for creases',
            description='Dihedral angle threshold for creases in degrees',
            value=0,
            range=(0, 360, 1),
            uid=[0],
        ),
        desc.IntParam(
            name='rosy',
            label='Orientation symmetry',
            description='Specifies the orientation symmetry type',
            value=2,
            range=(2, 6, 2),
            uid=[0],
        ),
        desc.IntParam(
            name='posy',
            label='Position symmetry',
            description='Specifies the position symmetry type',
            value=4,
            range=(4, 6, 2),
            uid=[0],
        ),
    ]

    outputs = [
        desc.File(
            name="output",
            label="Output mesh",
            description="Output mesh (OBJ file format).",
            value=desc.Node.internalFolder + 'mesh.obj',
            uid=[],
        ),
    ]
class InstantMeshes(desc.CommandLineNode):
    """
    Minimal Instant Meshes remeshing node: only the smoothing-iterations
    option is exposed to `alicevision_InstantMeshes`.
    NOTE(review): duplicates the class name of the fuller `InstantMeshes`
    definition seen earlier in this source — presumably these are two
    revisions/files concatenated; verify before merging into one module.
    """
    commandLine = 'alicevision_InstantMeshes {inputValue} -S {smoothValue} --output {outputValue}'
    cpu = desc.Level.NORMAL
    ram = desc.Level.NORMAL

    inputs = [
        desc.File(
            name="input",
            label='Input Mesh (OBJ file format).',
            description='',
            value='',
            uid=[0],
        ),
        desc.IntParam(
            name='smooth',
            label='Number of smoothing',
            description='Number of smoothing & ray tracing reprojection steps (default: 2)',
            value=2,
            range=(0, 100, 1),
            uid=[0],
        )
    ]

    outputs = [
        desc.File(
            name="output",
            label="Output mesh",
            description="Output mesh (OBJ file format).",
            value=desc.Node.internalFolder + 'mesh.obj',
            uid=[],
        ),
    ]
def attributeDescFromValue(attrName, value, isOutput):
    """
    Generate an attribute description (desc.Attribute) that best matches 'value'.

    Args:
        attrName (str): the name of the attribute
        value: the value of the attribute
        isOutput (bool): whether the attribute is an output

    Returns:
        desc.Attribute: the generated attribute description
    """
    params = {
        "name": attrName,
        "label": attrName,
        "description": "Incompatible parameter",
        "value": value,
        "uid": (),
        "group": "incompatible"
    }
    # bool must be checked before int: isinstance(True, int) is True in Python.
    if isinstance(value, bool):
        return desc.BoolParam(**params)
    if isinstance(value, int):
        return desc.IntParam(range=None, **params)
    elif isinstance(value, float):
        return desc.FloatParam(range=None, **params)
    elif isinstance(value, pyCompatibility.basestring):
        # Absolute paths and link expressions are treated as File attributes.
        if isOutput or os.path.isabs(value) or Attribute.isLinkExpression(value):
            return desc.File(**params)
        else:
            return desc.StringParam(**params)
    # List/GroupAttribute: recursively build descriptions
    elif isinstance(value, (list, dict)):
        del params["value"]
        del params["uid"]
        attrDesc = None
        if isinstance(value, list):
            elt = value[0] if value else ""  # fallback: empty string value if list is empty
            eltDesc = CompatibilityNode.attributeDescFromValue("element", elt, isOutput)
            attrDesc = desc.ListAttribute(elementDesc=eltDesc, **params)
        elif isinstance(value, dict):
            groupDesc = []
            # BUGFIX: use distinct loop variable names so 'value' is not
            # shadowed; previously 'for key, value in value.items()' made the
            # 'attrDesc._value = value' below store only the LAST dict entry
            # instead of the whole dict for GroupAttribute.
            for childName, childValue in value.items():
                eltDesc = CompatibilityNode.attributeDescFromValue(childName, childValue, isOutput)
                groupDesc.append(eltDesc)
            attrDesc = desc.GroupAttribute(groupDesc=groupDesc, **params)
        # override the empty default value with the actual runtime value
        attrDesc._value = value
        return attrDesc
    # handle any other type of parameters as Strings
    return desc.StringParam(**params)
class PanoramaWarping(desc.CommandLineNode):
    """
    Warp each input image into panorama space via `aliceVision_panoramaWarping`.
    Node size follows the number of views in the input SfMData.
    """
    commandLine = 'aliceVision_panoramaWarping {allParams}'
    size = desc.DynamicNodeSize('input')

    inputs = [
        desc.File(
            name='input',
            label='Input',
            description="SfM Data File",
            value='',
            uid=[0],
        ),
        desc.IntParam(
            name='panoramaWidth',
            label='Panorama Width',
            description='Panorama width (pixels). 0 For automatic size',
            value=10000,
            range=(0, 50000, 1000),
            uid=[0]),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='Verbosity level (fatal, error, warning, info, debug, trace).',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        desc.File(
            name='output',
            label='Output directory',
            description='',
            value=desc.Node.internalFolder,
            uid=[],
        ),
    ]
class LightingEstimation(desc.CommandLineNode):
    """
    Estimate scene lighting (as lighting vector files) from an SfMData scene,
    filtered depth maps and the source images, via
    `aliceVision_utils_lightingEstimation`.
    """
    commandLine = 'aliceVision_utils_lightingEstimation {allParams}'

    category = 'Utils'

    inputs = [
        desc.File(
            name='input',
            label='Input',
            description='SfMData file.',
            value='',
            uid=[0],
        ),
        desc.File(
            name="depthMapsFilterFolder",
            label='Filtered Depth Maps Folder',
            description='Input filtered depth maps folder',
            value='',
            uid=[0],
        ),
        desc.File(
            name='imagesFolder',
            label='Images Folder',
            description='Use images from a specific folder instead of those specify in the SfMData file.\nFilename should be the image uid.',
            value='',
            uid=[0],
        ),
        desc.ChoiceParam(
            name='lightingEstimationMode',
            label='Lighting Estimation Mode',
            description='Lighting Estimation Mode.',
            value='global',
            values=['global', 'per_image'],
            exclusive=True,
            uid=[0],
            advanced=True,
        ),
        desc.ChoiceParam(
            name='lightingColor',
            label='Lighting Color Mode',
            description='Lighting Color Mode.',
            value='RGB',
            values=['RGB', 'Luminance'],
            exclusive=True,
            uid=[0],
            advanced=True,
        ),
        desc.ChoiceParam(
            name='albedoEstimationName',
            label='Albedo Estimation Name',
            description='Albedo estimation method used for light estimation.',
            value='constant',
            values=['constant', 'picture', 'median_filter', 'blur_filter'],
            exclusive=True,
            uid=[0],
            advanced=True,
        ),
        desc.IntParam(
            name='albedoEstimationFilterSize',
            label='Albedo Estimation Filter Size',
            description='Albedo filter size for estimation method using filter.',
            value=3,
            range=(0, 100, 1),
            uid=[0],
            advanced=True,
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='Verbosity level (fatal, error, warning, info, debug, trace).',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        desc.File(
            name='output',
            label='Output Folder',
            description='Folder for output lighting vector files.',
            value=desc.Node.internalFolder,
            uid=[],
        ),
    ]
class PanoramaInit(desc.CommandLineNode):
    commandLine = 'aliceVision_panoramaInit {allParams}'
    size = desc.DynamicNodeSize('input')

    category = 'Panorama HDR'
    documentation = '''
This node allows to setup the Panorama:

1/ Enables the initialization the cameras from known position in an XML file (provided by
["Roundshot VR Drive"](https://www.roundshot.com/xml_1/internet/fr/application/d394/d395/f396.cfm) ).

2/ Enables to setup Full Fisheye Optics (to use an Equirectangular camera model).

3/ To automatically detects the Fisheye Circle (radius + center) in input images or manually adjust it.

'''

    inputs = [
        desc.File(
            name='input',
            label='Input',
            description="SfM Data File",
            value='',
            uid=[0],
        ),
        desc.ChoiceParam(
            name='initializeCameras',
            label='Initialize Cameras',
            description='Initialize cameras.',
            value='No',
            values=['No', 'File', 'Horizontal', 'Horizontal+Zenith', 'Zenith+Horizontal', 'Spherical'],
            exclusive=True,
            uid=[0],
        ),
        # Only meaningful when camera poses come from an XML file.
        desc.File(
            name='config',
            label='Xml Config',
            description="XML Data File",
            value='',
            uid=[0],
            enabled=lambda node: node.initializeCameras.value == 'File',
        ),
        desc.BoolParam(
            name='yawCW',
            label='Yaw CW',
            description="Yaw ClockWise or CounterClockWise",
            # NOTE(review): integer default (1) on a BoolParam — presumably
            # coerced to True; confirm against the attribute system.
            value=1,
            uid=[0],
            enabled=lambda node: ('Horizontal' in node.initializeCameras.value) or (node.initializeCameras.value == "Spherical"),
        ),
        desc.ListAttribute(
            elementDesc=desc.IntParam(
                name='nbViews',
                label='',
                description='',
                value=-1,
                range=[-1, 20],
                uid=[0],
            ),
            name='nbViewsPerLine',
            label='Spherical: Nb Views Per Line',
            description='Number of views per line in Spherical acquisition. Assumes angles from [-90,+90deg] for pitch and [-180,+180deg] for yaw.\nUse -1 to estimate the number of images automatically.',
            joinChar=',',
            enabled=lambda node: node.initializeCameras.value == 'Spherical',
        ),
        desc.ListAttribute(
            elementDesc=desc.File(
                name='dependency',
                label='',
                description="",
                value='',
                uid=[],
            ),
            name='dependency',
            label='Dependency',
            description="Folder(s) in which computed features are stored. (WORKAROUND for valid Tractor graph submission)",
            group='forDependencyOnly',  # not a command line argument
        ),
        desc.BoolParam(
            name='useFisheye',
            label='Full Fisheye',
            description='To declare a full fisheye panorama setup',
            value=False,
            uid=[0],
        ),
        desc.BoolParam(
            name='estimateFisheyeCircle',
            label='Estimate Fisheye Circle',
            description='Automatically estimate the Fisheye Circle center and radius instead of using user values.',
            value=True,
            uid=[0],
            enabled=lambda node: node.useFisheye.value,
        ),
        # Manual fisheye circle center; only used when automatic estimation is off.
        desc.GroupAttribute(
            name="fisheyeCenterOffset",
            label="Fisheye Center",
            description="Center of the Fisheye circle (XY offset to the center in pixels).",
            groupDesc=[
                desc.FloatParam(
                    name="fisheyeCenterOffset_x", label="x", description="X Offset in pixels",
                    value=0.0,
                    uid=[0],
                    range=(-1000.0, 10000.0, 1.0)),
                desc.FloatParam(
                    name="fisheyeCenterOffset_y", label="y", description="Y Offset in pixels",
                    value=0.0,
                    uid=[0],
                    range=(-1000.0, 10000.0, 1.0)),
            ],
            group=None,  # skip group from command line
            enabled=lambda node: node.useFisheye.value and not node.estimateFisheyeCircle.value,
        ),
        desc.FloatParam(
            name='fisheyeRadius',
            label='Radius',
            description='Fisheye visibillity circle radius (% of image shortest side).',
            value=96.0,
            range=(0.0, 150.0, 0.01),
            uid=[0],
            enabled=lambda node: node.useFisheye.value and not node.estimateFisheyeCircle.value,
        ),
        desc.ChoiceParam(
            name='inputAngle',
            label='input Angle offset',
            description='Add a rotation to the input XML given poses (CCW).',
            value='None',
            values=['None', 'rotate90', 'rotate180', 'rotate270'],
            exclusive=True,
            uid=[0]
        ),
        desc.BoolParam(
            name='debugFisheyeCircleEstimation',
            label='Debug Fisheye Circle Detection',
            description='Debug fisheye circle detection.',
            value=False,
            uid=[0],
            enabled=lambda node: node.useFisheye.value,
            advanced=True,
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='Verbosity level (fatal, error, warning, info, debug, trace).',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        desc.File(
            name='outSfMData',
            label='Output SfMData File',
            description='Path to the output sfmdata file',
            value=desc.Node.internalFolder + 'sfmData.sfm',
            uid=[],
        )
    ]
class DepthMap(desc.CommandLineNode):
    """
    GPU-intensive depth map estimation (`aliceVision_depthMapEstimation`),
    parallelized over camera blocks of 3. Parameters cover the Semi Global
    Matching (SGM) pass and the subsequent Refine pass.
    """
    commandLine = 'aliceVision_depthMapEstimation {allParams}'
    gpu = desc.Level.INTENSIVE
    size = desc.DynamicNodeSize('ini')
    parallelization = desc.Parallelization(blockSize=3)
    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

    inputs = [
        desc.File(
            name="ini",
            label='MVS Configuration File',
            description='',
            value='',
            uid=[0],
        ),
        desc.ChoiceParam(
            name='downscale',
            label='Downscale',
            description='Image downscale factor.',
            value=2,
            values=[1, 2, 4, 8, 16],
            exclusive=True,
            uid=[0],
        ),
        desc.IntParam(
            name='sgmMaxTCams',
            label='SGM: Nb Neighbour Cameras',
            description='Semi Global Matching: Number of neighbour cameras.',
            value=10,
            range=(1, 100, 1),
            uid=[0],
        ),
        desc.IntParam(
            name='sgmWSH',
            label='SGM: WSH',
            description='Semi Global Matching: Half-size of the patch used to compute the similarity.',
            value=4,
            range=(1, 20, 1),
            uid=[0],
        ),
        desc.FloatParam(
            name='sgmGammaC',
            label='SGM: GammaC',
            description='Semi Global Matching: GammaC Threshold.',
            value=5.5,
            range=(0.0, 30.0, 0.5),
            uid=[0],
        ),
        desc.FloatParam(
            name='sgmGammaP',
            label='SGM: GammaP',
            description='Semi Global Matching: GammaP Threshold.',
            value=8.0,
            range=(0.0, 30.0, 0.5),
            uid=[0],
        ),
        desc.IntParam(
            name='refineNSamplesHalf',
            label='Refine: Number of Samples',
            description='Refine: Number of samples.',
            value=150,
            range=(1, 500, 10),
            uid=[0],
        ),
        desc.IntParam(
            name='refineNDepthsToRefine',
            label='Refine: Number of Depths',
            description='Refine: Number of depths.',
            value=31,
            range=(1, 100, 1),
            uid=[0],
        ),
        desc.IntParam(
            name='refineNiters',
            label='Refine: Number of Iterations',
            description='Refine:: Number of iterations.',
            value=100,
            range=(1, 500, 10),
            uid=[0],
        ),
        desc.IntParam(
            name='refineWSH',
            label='Refine: WSH',
            description='Refine: Half-size of the patch used to compute the similarity.',
            value=3,
            range=(1, 20, 1),
            uid=[0],
        ),
        desc.IntParam(
            name='refineMaxTCams',
            label='Refine: Nb Neighbour Cameras',
            description='Refine: Number of neighbour cameras.',
            value=6,
            range=(1, 20, 1),
            uid=[0],
        ),
        desc.FloatParam(
            name='refineSigma',
            label='Refine: Sigma',
            description='Refine: Sigma Threshold.',
            # NOTE(review): integer default (15) on a FloatParam — presumably
            # coerced to float; confirm against the attribute system.
            value=15,
            range=(0.0, 30.0, 0.5),
            uid=[0],
        ),
        desc.FloatParam(
            name='refineGammaC',
            label='Refine: GammaC',
            description='Refine: GammaC Threshold.',
            value=15.5,
            range=(0.0, 30.0, 0.5),
            uid=[0],
        ),
        desc.FloatParam(
            name='refineGammaP',
            label='Refine: GammaP',
            description='Refine: GammaP threshold.',
            value=8.0,
            range=(0.0, 30.0, 0.5),
            uid=[0],
        ),
        desc.BoolParam(
            name='refineUseTcOrRcPixSize',
            label='Refine: Tc or Rc pixel size',
            description='Refine: Use minimum pixel size of neighbour cameras (Tc) or current camera pixel size (Rc)',
            value=False,
            uid=[0],
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='''verbosity level (fatal, error, warning, info, debug, trace).''',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        desc.File(
            name='output',
            label='Output',
            description='Output folder for generated depth maps.',
            value=desc.Node.internalFolder,
            uid=[],
        ),
    ]
class MeshResampling(desc.CommandLineNode):
    commandLine = 'aliceVision_meshResampling {allParams}'
    cpu = desc.Level.NORMAL
    ram = desc.Level.NORMAL

    category = 'Mesh Post-Processing'
    documentation = '''
This node allows to recompute the mesh surface with a new topology and uniform density.
'''

    inputs = [
        desc.File(
            name="input",
            label='Input Mesh (OBJ file format).',
            description='',
            value='',
            uid=[0],
        ),
        desc.FloatParam(
            name='simplificationFactor',
            label='Simplification factor',
            description='Simplification factor',
            value=0.5,
            range=(0.0, 1.0, 0.01),
            uid=[0],
        ),
        # Vertex-count controls: a fixed count takes over when non-zero.
        desc.IntParam(
            name='nbVertices',
            label='Fixed Number of Vertices',
            description='Fixed number of output vertices.',
            value=0,
            range=(0, 1000000, 1),
            uid=[0],
        ),
        desc.IntParam(
            name='minVertices',
            label='Min Vertices',
            description='Min number of output vertices.',
            value=0,
            range=(0, 1000000, 1),
            uid=[0],
        ),
        desc.IntParam(
            name='maxVertices',
            label='Max Vertices',
            description='Max number of output vertices.',
            value=0,
            range=(0, 1000000, 1),
            uid=[0],
        ),
        desc.IntParam(
            name='nbLloydIter',
            label='Number of Pre-Smoothing Iteration',
            description='Number of iterations for Lloyd pre-smoothing.',
            value=40,
            range=(0, 100, 1),
            uid=[0],
        ),
        desc.BoolParam(
            name='flipNormals',
            label='Flip Normals',
            description='''Option to flip face normals. It can be needed as it depends on the vertices order in triangles and the convention change from one software to another.''',
            value=False,
            uid=[0],
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='''verbosity level (fatal, error, warning, info, debug, trace).''',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        desc.File(
            name="output",
            label="Output mesh",
            description="Output mesh (OBJ file format).",
            value=desc.Node.internalFolder + 'mesh.obj',
            uid=[],
        ),
    ]
class CameraRigCalibration(desc.CommandLineNode):
    """
    Calibrate the relative poses of a multi-camera rig
    (`aliceVision_rigCalibration`) from per-camera media sequences, using
    either a vocabulary-tree or CCTag based localization backend.
    """
    commandLine = 'aliceVision_rigCalibration {allParams}'

    inputs = [
        desc.File(
            name='sfmdata',
            label='SfM Data',
            description='''The sfmData file.''',
            value='',
            uid=[0],
        ),
        desc.File(
            name='mediapath',
            label='Media Path',
            description='''The path to the video file, the folder of the image sequence or a text file (one image path per line) for each camera of the rig (eg. --mediapath /path/to/cam1.mov /path/to/cam2.mov).''',
            value='',
            uid=[0],
        ),
        desc.File(
            name='cameraIntrinsics',
            label='Camera Intrinsics',
            description='''The intrinsics calibration file for each camera of the rig. (eg. --cameraIntrinsics /path/to/calib1.txt /path/to/calib2.txt).''',
            value='',
            uid=[0],
        ),
        desc.File(
            name='export',
            label='Export',
            description='''Filename for the alembic file containing the rig poses with the 3D points. It also saves a file for each camera named 'filename.cam##.abc'.''',
            value='trackedcameras.abc',
            uid=[0],
        ),
        desc.File(
            name='descriptorPath',
            label='Descriptor Path',
            description='''Folder containing the .desc.''',
            value='',
            uid=[0],
        ),
        desc.ChoiceParam(
            name='matchDescTypes',
            label='Match Describer Types',
            description='''The describer types to use for the matching''',
            value=['sift'],
            values=[
                'sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze',
                'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv',
                'akaze_ocv'
            ],
            exclusive=False,
            uid=[0],
            joinChar=',',
        ),
        desc.ChoiceParam(
            name='preset',
            label='Preset',
            description='''Preset for the feature extractor when localizing a new image (low, medium, normal, high, ultra)''',
            value='normal',
            values=['low', 'medium', 'normal', 'high', 'ultra'],
            exclusive=True,
            uid=[0],
        ),
        desc.ChoiceParam(
            name='resectionEstimator',
            label='Resection Estimator',
            description='''The type of *sac framework to use for resection (acransac,loransac)''',
            value='acransac',
            values=['acransac', 'loransac'],
            exclusive=True,
            uid=[0],
        ),
        desc.ChoiceParam(
            name='matchingEstimator',
            label='Matching Estimator',
            description='''The type of *sac framework to use for matching (acransac,loransac)''',
            value='acransac',
            values=['acransac', 'loransac'],
            exclusive=True,
            uid=[0],
        ),
        # NOTE(review): declared as a StringParam (free text), not a BoolParam,
        # unlike CameraLocalization's same-named flag — presumably intentional
        # to allow an empty "unset" value; confirm against the CLI.
        desc.StringParam(
            name='refineIntrinsics',
            label='Refine Intrinsics',
            description='''Enable/Disable camera intrinsics refinement for each localized image''',
            value='',
            uid=[0],
        ),
        desc.FloatParam(
            name='reprojectionError',
            label='Reprojection Error',
            description='''Maximum reprojection error (in pixels) allowed for resectioning. If set to 0 it lets the ACRansac select an optimal value.''',
            value=4.0,
            range=(0.0, 10.0, 0.1),
            uid=[0],
        ),
        desc.IntParam(
            name='maxInputFrames',
            label='Max Input Frames',
            description='''Maximum number of frames to read in input. 0 means no limit.''',
            value=0,
            range=(0, 1000, 1),
            uid=[0],
        ),
        # Default voctree file can be provided through the environment.
        desc.File(
            name='voctree',
            label='Voctree',
            description='''[voctree] Filename for the vocabulary tree''',
            value=os.environ.get('ALICEVISION_VOCTREE', ''),
            uid=[0],
        ),
        desc.File(
            name='voctreeWeights',
            label='Voctree Weights',
            description='''[voctree] Filename for the vocabulary tree weights''',
            value='',
            uid=[0],
        ),
        desc.ChoiceParam(
            name='algorithm',
            label='Algorithm',
            description='''[voctree] Algorithm type: {FirstBest,AllResults}''',
            value='AllResults',
            values=['FirstBest', 'AllResults'],
            exclusive=True,
            uid=[0],
        ),
        desc.IntParam(
            name='nbImageMatch',
            label='Nb Image Match',
            description='''[voctree] Number of images to retrieve in the database''',
            value=4,
            range=(0, 50, 1),
            uid=[0],
        ),
        desc.IntParam(
            name='maxResults',
            label='Max Results',
            description='''[voctree] For algorithm AllResults, it stops the image matching when this number of matched images is reached. If 0 it is ignored.''',
            value=10,
            range=(0, 100, 1),
            uid=[0],
        ),
        desc.FloatParam(
            name='matchingError',
            label='Matching Error',
            description='''[voctree] Maximum matching error (in pixels) allowed for image matching with geometric verification.
If set to 0 it lets the ACRansac select an optimal value.''',
            value=4.0,
            range=(0.0, 10.0, 0.1),
            uid=[0],
        ),
        desc.IntParam(
            name='nNearestKeyFrames',
            label='N Nearest Key Frames',
            description='''[cctag] Number of images to retrieve in database''',
            value=5,
            range=(0, 50, 1),
            uid=[0],
        ),
    ]

    outputs = [
        desc.File(
            name='outfile',
            label='Output File',
            description='''The name of the file where to store the calibration data''',
            value=desc.Node.internalFolder + 'cameraRigCalibration.rigCal',
            uid=[],
        ),
    ]
class SfMTransform(desc.CommandLineNode):
    commandLine = 'aliceVision_utils_sfmTransform {allParams}'
    size = desc.DynamicNodeSize('input')

    documentation = '''
This node allows to change the coordinate system of one SfM scene.

The transformation can be based on:
 * transformation: Apply a given transformation
 * auto_from_cameras: Fit all cameras into a box [-1,1]
 * auto_from_landmarks: Fit all landmarks into a box [-1,1]
 * from_single_camera: Use a specific camera as the origin of the coordinate system
 * from_markers: Align specific markers to custom coordinates
'''

    inputs = [
        desc.File(
            name='input',
            label='Input',
            description='''SfMData file .''',
            value='',
            uid=[0],
        ),
        desc.ChoiceParam(
            name='method',
            label='Transformation Method',
            description="Transformation method:\n"
                        " * transformation: Apply a given transformation\n"
                        " * manual: Apply the gizmo transformation (show the transformed input)\n"
                        " * auto_from_cameras: Use cameras\n"
                        " * auto_from_landmarks: Use landmarks\n"
                        " * from_single_camera: Use a specific camera as the origin of the coordinate system\n"
                        " * from_markers: Align specific markers to custom coordinates",
            value='auto_from_landmarks',
            values=['transformation', 'manual', 'auto_from_cameras', 'auto_from_landmarks', 'from_single_camera', 'from_markers'],
            exclusive=True,
            uid=[0],
        ),
        desc.StringParam(
            name='transformation',
            label='Transformation',
            description="Required only for 'transformation' and 'from_single_camera' methods:\n"
                        " * transformation: Align [X,Y,Z] to +Y-axis, rotate around Y by R deg, scale by S; syntax: X,Y,Z;R;S\n"
                        " * from_single_camera: Camera UID or image filename",
            value='',
            uid=[0],
            enabled=lambda node: node.method.value == "transformation" or node.method.value == "from_single_camera",
        ),
        # Gizmo-driven transform, only active for the 'manual' method.
        desc.GroupAttribute(
            name="manualTransform",
            label="Manual Transform (Gizmo)",
            description="Translation, rotation (Euler ZXY) and uniform scale.",
            groupDesc=[
                desc.GroupAttribute(
                    name="manualTranslation",
                    label="Translation",
                    description="Translation in space.",
                    groupDesc=[
                        desc.FloatParam(
                            name="x", label="x", description="X Offset",
                            value=0.0,
                            uid=[0],
                            range=(-20.0, 20.0, 0.01)
                        ),
                        desc.FloatParam(
                            name="y", label="y", description="Y Offset",
                            value=0.0,
                            uid=[0],
                            range=(-20.0, 20.0, 0.01)
                        ),
                        desc.FloatParam(
                            name="z", label="z", description="Z Offset",
                            value=0.0,
                            uid=[0],
                            range=(-20.0, 20.0, 0.01)
                        )
                    ],
                    joinChar=","
                ),
                desc.GroupAttribute(
                    name="manualRotation",
                    label="Euler Rotation",
                    description="Rotation in Euler degrees.",
                    groupDesc=[
                        desc.FloatParam(
                            name="x", label="x", description="Euler X Rotation",
                            value=0.0,
                            uid=[0],
                            range=(-90.0, 90.0, 1)
                        ),
                        desc.FloatParam(
                            name="y", label="y", description="Euler Y Rotation",
                            value=0.0,
                            uid=[0],
                            range=(-180.0, 180.0, 1)
                        ),
                        desc.FloatParam(
                            name="z", label="z", description="Euler Z Rotation",
                            value=0.0,
                            uid=[0],
                            range=(-180.0, 180.0, 1)
                        )
                    ],
                    joinChar=","
                ),
                desc.FloatParam(
                    name="manualScale",
                    label="Scale",
                    description="Uniform Scale.",
                    value=1.0,
                    uid=[0],
                    range=(0.0, 20.0, 0.01)
                )
            ],
            joinChar=",",
            enabled=lambda node: node.method.value == "manual",
        ),
        desc.ChoiceParam(
            name='landmarksDescriberTypes',
            label='Landmarks Describer Types',
            description='Image describer types used to compute the mean of the point cloud. (only for "landmarks" method).',
            value=['sift', 'dspsift', 'akaze'],
            values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv', 'unknown'],
            exclusive=False,
            uid=[0],
            joinChar=',',
        ),
        desc.FloatParam(
            name='scale',
            label='Additional Scale',
            description='Additional scale to apply.',
            value=1.0,
            range=(0.0, 100.0, 0.1),
            uid=[0],
        ),
        # Marker-to-coordinate alignment pairs for the 'from_markers' method.
        desc.ListAttribute(
            name="markers",
            elementDesc=desc.GroupAttribute(
                name="markerAlign", label="Marker Align", description="", joinChar=":",
                groupDesc=[
                    desc.IntParam(name="markerId", label="Marker", description="Marker Id", value=0, uid=[0], range=(0, 32, 1)),
                    desc.GroupAttribute(
                        name="markerCoord", label="Coord", description="", joinChar=",",
                        groupDesc=[
                            desc.FloatParam(name="x", label="x", description="", value=0.0, uid=[0], range=(-2.0, 2.0, 1.0)),
                            desc.FloatParam(name="y", label="y", description="", value=0.0, uid=[0], range=(-2.0, 2.0, 1.0)),
                            desc.FloatParam(name="z", label="z", description="", value=0.0, uid=[0], range=(-2.0, 2.0, 1.0)),
                        ])
                ]),
            label="Markers",
            description="Markers alignment points",
        ),
        desc.BoolParam(
            name='applyScale',
            label='Scale',
            description='Apply scale transformation.',
            value=True,
            uid=[0],
            enabled=lambda node: node.method.value != "manual",
        ),
        desc.BoolParam(
            name='applyRotation',
            label='Rotation',
            description='Apply rotation transformation.',
            value=True,
            uid=[0],
            enabled=lambda node: node.method.value != "manual",
        ),
        desc.BoolParam(
            name='applyTranslation',
            label='Translation',
            description='Apply translation transformation.',
            value=True,
            uid=[0],
            enabled=lambda node: node.method.value != "manual",
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='''verbosity level (fatal, error, warning, info, debug, trace).''',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        desc.File(
            name='output',
            label='Output SfMData File',
            description='''Aligned SfMData file .''',
            # Output name is derived from the input file's basename ('sfmData' fallback).
            value=lambda attr: desc.Node.internalFolder + (os.path.splitext(os.path.basename(attr.node.input.value))[0] or 'sfmData') + '.abc',
            uid=[],
        ),
        desc.File(
            name='outputViewsAndPoses',
            label='Output Poses',
            description='''Path to the output sfmdata file with cameras (views and poses).''',
            value=desc.Node.internalFolder + 'cameras.sfm',
            uid=[],
        ),
    ]
__version__ = "3.0" import os import json import psutil import shutil import tempfile import logging from meshroom.core import desc Viewpoint = [ desc.IntParam(name="viewId", label="Id", description="Image UID", value=-1, uid=[0], range=None), desc.IntParam(name="poseId", label="Pose Id", description="Pose Id", value=-1, uid=[0], range=None), desc.File(name="path", label="Image Path", description="Image Filepath", value="", uid=[0]), desc.IntParam(name="intrinsicId", label="Intrinsic",
class CameraLocalization(desc.CommandLineNode):
    """
    Localize camera poses of a media sequence against an existing SfM
    reconstruction (`aliceVision_cameraLocalization`), with voctree- or
    CCTag-based image retrieval and optional final bundle adjustment.
    """
    commandLine = 'aliceVision_cameraLocalization {allParams}'

    inputs = [
        desc.File(
            name='sfmdata',
            label='SfM Data',
            description='''The sfm_data.json kind of file generated by AliceVision.''',
            value='',
            uid=[0],
        ),
        desc.File(
            name='mediafile',
            label='Media File',
            description='''The folder path or the filename for the media to track''',
            value='',
            uid=[0],
        ),
        desc.File(
            name='visualDebug',
            label='Visual Debug Folder',
            description='''If a folder is provided it enables visual debug and saves all the debugging info in that folder''',
            value='',
            uid=[0],
        ),
        desc.File(
            name='descriptorPath',
            label='Descriptor Path',
            description='''Folder containing the descriptors for all the images (ie the *.desc.)''',
            value='',
            uid=[0],
        ),
        desc.ChoiceParam(
            name='matchDescTypes',
            label='Match Desc Types',
            description='''Describer types to use for the matching.''',
            value=['sift'],
            values=[
                'sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze',
                'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv',
                'akaze_ocv'
            ],
            exclusive=False,
            uid=[0],
            joinChar=',',
        ),
        desc.ChoiceParam(
            name='preset',
            label='Preset',
            description='''Preset for the feature extractor when localizing a new image (low, medium, normal, high, ultra)''',
            value='normal',
            values=['low', 'medium', 'normal', 'high', 'ultra'],
            exclusive=True,
            uid=[0],
        ),
        desc.ChoiceParam(
            name='resectionEstimator',
            label='Resection Estimator',
            description='''The type of *sac framework to use for resection (acransac, loransac)''',
            value='acransac',
            values=['acransac', 'loransac'],
            exclusive=True,
            uid=[0],
        ),
        desc.ChoiceParam(
            name='matchingEstimator',
            label='Matching Estimator',
            description='''The type of *sac framework to use for matching (acransac, loransac)''',
            value='acransac',
            values=['acransac', 'loransac'],
            exclusive=True,
            uid=[0],
        ),
        desc.File(
            name='calibration',
            label='Calibration',
            description='''Calibration file''',
            value='',
            uid=[0],
        ),
        desc.BoolParam(
            name='refineIntrinsics',
            label='Refine Intrinsics',
            description='''Enable/Disable camera intrinsics refinement for each localized image''',
            value=False,
            uid=[0],
        ),
        desc.FloatParam(
            name='reprojectionError',
            label='Reprojection Error',
            description='''Maximum reprojection error (in pixels) allowed for resectioning. If set to 0 it lets the ACRansac select an optimal value.''',
            value=4.0,
            range=(0.1, 50.0, 0.1),
            uid=[0],
        ),
        desc.IntParam(
            name='nbImageMatch',
            label='Nb Image Match',
            description='''[voctree] Number of images to retrieve in database''',
            value=4,
            range=(1, 1000, 1),
            uid=[0],
        ),
        desc.IntParam(
            name='maxResults',
            label='Max Results',
            description='''[voctree] For algorithm AllResults, it stops the image matching when this number of matched images is reached. If 0 it is ignored.''',
            value=10,
            range=(1, 100, 1),
            uid=[0],
        ),
        desc.IntParam(
            name='commonviews',
            label='Commonviews',
            description='''[voctree] Number of minimum images in which a point must be seen to be used in cluster tracking''',
            value=3,
            range=(2, 50, 1),
            uid=[0],
        ),
        # Default voctree file can be provided through the environment.
        desc.File(
            name='voctree',
            label='Voctree',
            description='''[voctree] Filename for the vocabulary tree''',
            value=os.environ.get('ALICEVISION_VOCTREE', ''),
            uid=[0],
        ),
        desc.File(
            name='voctreeWeights',
            label='Voctree Weights',
            description='''[voctree] Filename for the vocabulary tree weights''',
            value='',
            uid=[0],
        ),
        desc.ChoiceParam(
            name='algorithm',
            label='Algorithm',
            description='''[voctree] Algorithm type: FirstBest, AllResults''',
            value='AllResults',
            values=['FirstBest', 'AllResults'],
            exclusive=True,
            uid=[0],
        ),
        desc.FloatParam(
            name='matchingError',
            label='Matching Error',
            description='''[voctree] Maximum matching error (in pixels) allowed for image matching with geometric verification.
If set to 0 it lets the ACRansac select an optimal value.''',
            value=4.0,
            range=(0.0, 50.0, 1.0),
            uid=[0],
        ),
        desc.IntParam(
            name='nbFrameBufferMatching',
            label='Nb Frame Buffer Matching',
            description='''[voctree] Number of previous frame of the sequence to use for matching (0 = Disable)''',
            value=10,
            range=(0, 100, 1),
            uid=[0],
        ),
        desc.BoolParam(
            name='robustMatching',
            label='Robust Matching',
            description='''[voctree] Enable/Disable the robust matching between query and database images, all putative matches will be considered.''',
            value=True,
            uid=[0],
        ),
        desc.IntParam(
            name='nNearestKeyFrames',
            label='N Nearest Key Frames',
            description='''[cctag] Number of images to retrieve in the database Parameters specific for final (optional) bundle adjustment optimization of the sequence:''',
            value=5,
            range=(1, 100, 1),
            uid=[0],
        ),
        # NOTE(review): StringParam used as an on/off flag (empty = unset) —
        # presumably intentional; confirm against the CLI.
        desc.StringParam(
            name='globalBundle',
            label='Global Bundle',
            description='''[bundle adjustment] If --refineIntrinsics is not set, this option allows to run a final global bundle adjustment to refine the scene.''',
            value='',
            uid=[0],
        ),
        desc.BoolParam(
            name='noDistortion',
            label='No Distortion',
            description='''[bundle adjustment] It does not take into account distortion during the BA, it consider the distortion coefficients all equal to 0''',
            value=False,
            uid=[0],
        ),
        desc.BoolParam(
            name='noBArefineIntrinsics',
            label='No BA Refine Intrinsics',
            description='''[bundle adjustment] It does not refine intrinsics during BA''',
            value=False,
            uid=[0],
        ),
        desc.IntParam(
            name='minPointVisibility',
            label='Min Point Visibility',
            description='''[bundle adjustment] Minimum number of observation that a point must have in order to be considered for bundle adjustment''',
            value=2,
            range=(2, 50, 1),
            uid=[0],
        ),
    ]

    outputs = [
        desc.File(
            name='outputAlembic',
            label='Output Alembic',
            description='''Filename for the SfMData export file (where camera poses will be stored)''',
            value=desc.Node.internalFolder + 'trackedCameras.abc',
            uid=[],
        ),
        desc.File(
            name='outputJSON',
            label='Output JSON',
            description='''Filename for the localization results as .json''',
            value=desc.Node.internalFolder + 'trackedCameras.json',
            uid=[],
        ),
    ]
class Meshing(desc.CommandLineNode):
    # Dense reconstruction node: fuses depth maps into a point cloud, builds a
    # Delaunay tetrahedralization and extracts the surface with a graph-cut.
    commandLine = 'aliceVision_meshing {allParams}'

    # Meshing is one of the heaviest steps of the pipeline.
    cpu = desc.Level.INTENSIVE
    ram = desc.Level.INTENSIVE

    category = 'Dense Reconstruction'
    documentation = '''
This node creates a dense geometric surface representation of the scene.

First, it fuses all the depth maps into a global dense point cloud with an adaptive resolution.
It then performs a 3D Delaunay tetrahedralization and a voting procedure is done to compute weights on cells and weights on facets connecting the cells.
A Graph Cut Max-Flow is applied to optimally cut the volume. This cut represents the extracted mesh surface.

## Online
[https://alicevision.org/#photogrammetry/meshing](https://alicevision.org/#photogrammetry/meshing)
'''

    inputs = [
        desc.File(
            name='input',
            label='SfmData',
            description='SfMData file.',
            value='',
            uid=[0],
        ),
        desc.File(
            name="depthMapsFolder",
            label='Depth Maps Folder',
            description='Input depth maps folder.',
            value='',
            uid=[0],
        ),
        # --- Bounding box: either user-defined (below) or estimated from SfM ---
        desc.BoolParam(
            name='useBoundingBox',
            label='Custom Bounding Box',
            # When enabled, the manual bounding box takes priority over
            # 'Estimate Space From SfM'.
            description='Edit the meshing bounding box. If enabled, it takes priority over the Estimate From SfM option. Parameters can be adjusted in advanced settings.',
            value=False,
            uid=[0],
            group=''),  # group='' keeps it off the command line (UI toggle only)
        desc.GroupAttribute(
            name="boundingBox",
            label="Bounding Box Settings",
            description="Translation, rotation and scale of the bounding box.",
            groupDesc=[
                desc.GroupAttribute(
                    name="bboxTranslation",
                    label="Translation",
                    description="Position in space.",
                    groupDesc=[
                        desc.FloatParam(name="x", label="x", description="X Offset", value=0.0, uid=[0], range=(-20.0, 20.0, 0.01)),
                        desc.FloatParam(name="y", label="y", description="Y Offset", value=0.0, uid=[0], range=(-20.0, 20.0, 0.01)),
                        desc.FloatParam(name="z", label="z", description="Z Offset", value=0.0, uid=[0], range=(-20.0, 20.0, 0.01))
                    ],
                    joinChar=","),
                desc.GroupAttribute(
                    name="bboxRotation",
                    label="Euler Rotation",
                    description="Rotation in Euler degrees.",
                    groupDesc=[
                        desc.FloatParam(name="x", label="x", description="Euler X Rotation", value=0.0, uid=[0], range=(-90.0, 90.0, 1)),
                        desc.FloatParam(name="y", label="y", description="Euler Y Rotation", value=0.0, uid=[0], range=(-180.0, 180.0, 1)),
                        desc.FloatParam(name="z", label="z", description="Euler Z Rotation", value=0.0, uid=[0], range=(-180.0, 180.0, 1))
                    ],
                    joinChar=","),
                desc.GroupAttribute(
                    name="bboxScale",
                    label="Scale",
                    description="Scale of the bounding box.",
                    groupDesc=[
                        desc.FloatParam(name="x", label="x", description="X Scale", value=1.0, uid=[0], range=(0.0, 20.0, 0.01)),
                        desc.FloatParam(name="y", label="y", description="Y Scale", value=1.0, uid=[0], range=(0.0, 20.0, 0.01)),
                        desc.FloatParam(name="z", label="z", description="Z Scale", value=1.0, uid=[0], range=(0.0, 20.0, 0.01))
                    ],
                    joinChar=",")
            ],
            joinChar=",",
            # Only editable when the custom bounding box is enabled.
            enabled=lambda node: node.useBoundingBox.value,
        ),
        desc.BoolParam(
            name='estimateSpaceFromSfM',
            label='Estimate Space From SfM',
            description='Estimate the 3d space from the SfM',
            value=True,
            uid=[0],
            advanced=True,
        ),
        desc.IntParam(
            name='estimateSpaceMinObservations',
            label='Min Observations For SfM Space Estimation',
            description='Minimum number of observations for SfM space estimation.',
            value=3,
            range=(0, 100, 1),
            uid=[0],
            advanced=True,
            enabled=lambda node: node.estimateSpaceFromSfM.value,
        ),
        desc.FloatParam(
            name='estimateSpaceMinObservationAngle',
            label='Min Observations Angle For SfM Space Estimation',
            description='Minimum angle between two observations for SfM space estimation.',
            value=10,
            range=(0, 120, 1),
            uid=[0],
            enabled=lambda node: node.estimateSpaceFromSfM.value,
        ),
        # --- Point budget for the depth-map fusion stage ---
        desc.IntParam(
            name='maxInputPoints',
            label='Max Input Points',
            description='Max input points loaded from depth map images.',
            value=50000000,
            range=(500000, 500000000, 1000),
            uid=[0],
        ),
        desc.IntParam(
            name='maxPoints',
            label='Max Points',
            description='Max points at the end of the depth maps fusion.',
            value=5000000,
            range=(100000, 10000000, 1000),
            uid=[0],
        ),
        desc.IntParam(
            name='maxPointsPerVoxel',
            label='Max Points Per Voxel',
            description='Max points per voxel',
            value=1000000,
            range=(500000, 30000000, 1000),
            uid=[0],
            advanced=True,
        ),
        desc.IntParam(
            name='minStep',
            label='Min Step',
            # Implicit string concatenation: one long description.
            description='The step used to load depth values from depth maps is computed from maxInputPts. '
                        'Here we define the minimal value for this step, so on small datasets we will not spend '
                        'too much time at the beginning loading all depth values.',
            value=2,
            range=(1, 20, 1),
            uid=[0],
            advanced=True,
        ),
        desc.ChoiceParam(
            name='partitioning',
            label='Partitioning',
            description='',
            value='singleBlock',
            values=('singleBlock', 'auto'),
            exclusive=True,
            uid=[0],
            advanced=True,
        ),
        desc.ChoiceParam(
            name='repartition',
            label='Repartition',
            description='',
            value='multiResolution',
            values=('multiResolution', 'regularGrid'),
            exclusive=True,
            uid=[0],
            advanced=True,
        ),
        # --- Advanced fusion/voting tuning knobs (forwarded to aliceVision_meshing) ---
        desc.FloatParam(
            name='angleFactor',
            label='angleFactor',
            description='angleFactor',
            value=15.0,
            range=(0.0, 200.0, 1.0),
            uid=[0],
            advanced=True,
        ),
        desc.FloatParam(
            name='simFactor',
            label='simFactor',
            description='simFactor',
            value=15.0,
            range=(0.0, 200.0, 1.0),
            uid=[0],
            advanced=True,
        ),
        desc.FloatParam(
            name='pixSizeMarginInitCoef',
            label='pixSizeMarginInitCoef',
            description='pixSizeMarginInitCoef',
            value=2.0,
            range=(0.0, 10.0, 0.1),
            uid=[0],
            advanced=True,
        ),
        desc.FloatParam(
            name='pixSizeMarginFinalCoef',
            label='pixSizeMarginFinalCoef',
            description='pixSizeMarginFinalCoef',
            value=4.0,
            range=(0.0, 10.0, 0.1),
            uid=[0],
            advanced=True,
        ),
        desc.FloatParam(
            name='voteMarginFactor',
            label='voteMarginFactor',
            description='voteMarginFactor',
            value=4.0,
            range=(0.1, 10.0, 0.1),
            uid=[0],
            advanced=True,
        ),
        desc.FloatParam(
            name='contributeMarginFactor',
            label='contributeMarginFactor',
            description='contributeMarginFactor',
            value=2.0,
            range=(0.0, 10.0, 0.1),
            uid=[0],
            advanced=True,
        ),
        desc.FloatParam(
            name='simGaussianSizeInit',
            label='simGaussianSizeInit',
            description='simGaussianSizeInit',
            value=10.0,
            range=(0.0, 50.0, 0.1),
            uid=[0],
            advanced=True,
        ),
        desc.FloatParam(
            name='simGaussianSize',
            label='simGaussianSize',
            description='simGaussianSize',
            value=10.0,
            range=(0.0, 50.0, 0.1),
            uid=[0],
            advanced=True,
        ),
        desc.FloatParam(
            name='minAngleThreshold',
            label='minAngleThreshold',
            description='minAngleThreshold',
            value=1.0,
            range=(0.0, 10.0, 0.01),
            uid=[0],
            advanced=True,
        ),
        desc.BoolParam(
            name='refineFuse',
            label='Refine Fuse',
            description='Refine depth map fusion with the new pixels size defined by angle and similarity scores.',
            value=True,
            uid=[0],
            advanced=True,
        ),
        desc.IntParam(
            name='helperPointsGridSize',
            label='Helper Points Grid Size',
            description='Grid Size for the helper points.',
            value=10,
            range=(0, 50, 1),
            uid=[0],
            advanced=True,
        ),
        # --- Densification of the point cloud with helper points ---
        desc.BoolParam(
            name='densify',
            label='Densify',
            description='Densify scene with helper points around vertices.',
            value=False,
            uid=[],
            advanced=True,
            group='',  # UI toggle only, not exported on the command line
        ),
        desc.IntParam(
            name='densifyNbFront',
            label='Densify: Front',
            description='Densify vertices: front.',
            value=1,
            range=(0, 5, 1),
            uid=[0],
            advanced=True,
            enabled=lambda node: node.densify.value,
        ),
        desc.IntParam(
            name='densifyNbBack',
            label='Densify: Back',
            description='Densify vertices: back.',
            value=1,
            range=(0, 5, 1),
            uid=[0],
            advanced=True,
            enabled=lambda node: node.densify.value,
        ),
        desc.FloatParam(
            name='densifyScale',
            label='Densify Scale',
            # NOTE(review): default 20.0 lies outside the declared range
            # (0.0, 10.0) — confirm whether the range or the default is intended.
            description='Scale between points used to densify the scene.',
            value=20.0,
            range=(0.0, 10.0, 0.1),
            uid=[0],
            advanced=True,
            enabled=lambda node: node.densify.value,
        ),
        desc.FloatParam(
            name='nPixelSizeBehind',
            label='Nb Pixel Size Behind',
            description='Number of pixel size units to vote behind the vertex as FULL status.',
            value=4.0,
            range=(0.0, 10.0, 0.1),
            uid=[0],
            advanced=True,
        ),
        desc.FloatParam(
            name='fullWeight',
            label='Full Weight',
            description='Weighting for full status.',
            value=1.0,
            range=(0.0, 10.0, 0.1),
            uid=[0],
            advanced=True,
        ),
        desc.BoolParam(
            name='voteFilteringForWeaklySupportedSurfaces',
            label='Weakly Supported Surface Support',
            description='Improve support of weakly supported surfaces with a tetrahedra fullness score filtering.',
            value=True,
            uid=[0],
        ),
        desc.BoolParam(
            name='addLandmarksToTheDensePointCloud',
            label='Add Landmarks To The Dense Point Cloud',
            description='Add SfM Landmarks to the dense point cloud.',
            value=False,
            uid=[0],
            advanced=True,
        ),
        # --- Surface smoothing / cell-status filtering ---
        desc.IntParam(
            name='invertTetrahedronBasedOnNeighborsNbIterations',
            label='Tretrahedron Neighbors Coherency Nb Iterations',
            description='Invert cells status around surface to improve smoothness. Zero to disable.',
            value=10,
            range=(0, 30, 1),
            uid=[0],
            advanced=True,
        ),
        desc.FloatParam(
            name='minSolidAngleRatio',
            label='minSolidAngleRatio',
            description='Change cells status on surface around vertices to improve smoothness using solid angle ratio between full/empty parts. Zero to disable.',
            value=0.2,
            range=(0.0, 0.5, 0.01),
            uid=[0],
            advanced=True,
        ),
        desc.IntParam(
            name='nbSolidAngleFilteringIterations',
            label='Nb Solid Angle Filtering Iterations',
            description='Filter cells status on surface around vertices to improve smoothness using solid angle ratio between full/empty parts. Zero to disable.',
            value=2,
            range=(0, 30, 1),
            uid=[0],
            advanced=True,
        ),
        desc.BoolParam(
            name='colorizeOutput',
            label='Colorize Output',
            description='Whether to colorize output dense point cloud and mesh.',
            value=False,
            uid=[0],
        ),
        # --- Mask helper points (outline of the depth-map masks) ---
        desc.BoolParam(
            name='addMaskHelperPoints',
            label='Add Mask Helper Points',
            description='Add Helper points on the outline of the depth maps masks.',
            value=False,
            uid=[],
            advanced=True,
            group='',  # UI toggle only, not exported on the command line
        ),
        desc.FloatParam(
            name='maskHelperPointsWeight',
            label='Mask Helper Points Weight',
            description='Weight value for mask helper points. Zero means no helper point.',
            value=1.0,
            range=(0.0, 20.0, 1.0),
            uid=[0],
            advanced=True,
            enabled=lambda node: node.addMaskHelperPoints.value,
        ),
        desc.IntParam(
            name='maskBorderSize',
            label='Mask Border Size',
            description='How many pixels on mask borders?',
            value=4,
            range=(0, 20, 1),
            uid=[0],
            advanced=True,
            enabled=lambda node: node.addMaskHelperPoints.value,
        ),
        desc.IntParam(
            name='maxNbConnectedHelperPoints',
            label='Helper Points: Max Segment Size',
            description='Maximum size of a segment of connected helper points before we remove it. Small segments of helper points can be on the real surface and should not be removed to avoid the creation of holes. 0 means that we remove all helper points. -1 means that we do not filter helper points at all.',
            value=50,
            range=(-1, 100, 1),
            uid=[0],
            advanced=True,
        ),
        # --- Debug / reproducibility ---
        desc.BoolParam(
            name='saveRawDensePointCloud',
            label='Save Raw Dense Point Cloud',
            description='Save dense point cloud before cut and filtering.',
            value=False,
            uid=[],
            advanced=True,
        ),
        desc.BoolParam(
            name='exportDebugTetrahedralization',
            label='Export DEBUG Tetrahedralization',
            description='Export debug cells score as tetrahedral mesh.\nWARNING: Could create HUGE meshes, only use on very small datasets.',
            value=False,
            uid=[],
            advanced=True,
        ),
        desc.IntParam(
            name='seed',
            label='Seed',
            description='Seed used for random operations. Zero means use of random device instead of a fixed seed.',
            value=0,
            range=(0, 10000, 1),
            uid=[0],
            advanced=True,
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='''verbosity level (fatal, error, warning, info, debug, trace).''',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        # NOTE(review): these use the raw '{cache}/{nodeType}/{uid0}/' pattern
        # while sibling nodes use desc.Node.internalFolder — presumably
        # equivalent; confirm against the desc module.
        desc.File(
            name="outputMesh",
            label="Mesh",
            description="Output mesh (OBJ file format).",
            value="{cache}/{nodeType}/{uid0}/mesh.obj",
            uid=[],
        ),
        desc.File(
            name="output",
            label="Dense SfMData",
            description="Output dense point cloud with visibilities (SfMData file format).",
            value="{cache}/{nodeType}/{uid0}/densePointCloud.abc",
            uid=[],
        ),
    ]
class Split360Images(desc.CommandLineNode):
    # Utility node: splits 360° panorama images (equirectangular or
    # dual-fisheye) into several pinhole-like sub-images.
    commandLine = 'aliceVision_utils_split360Images {allParams}'

    inputs = [
        desc.File(
            name='input',
            label='Images Folder',
            description='Images Folder',
            value='',
            uid=[0],
        ),
        desc.ChoiceParam(
            name='splitMode',
            label='Split Mode',
            description='''Split mode (equirectangular, dualfisheye)''',
            value='equirectangular',
            values=['equirectangular', 'dualfisheye'],
            exclusive=True,
            uid=[0],
        ),
        # Only meaningful when splitMode == 'dualfisheye'.
        desc.ChoiceParam(
            name='dualFisheyeSplitPreset',
            label='Dual Fisheye Split Preset',
            description='''Dual-Fisheye split type preset (center, top, bottom)''',
            value='center',
            values=['center', 'top', 'bottom'],
            exclusive=True,
            uid=[0],
        ),
        # Only meaningful when splitMode == 'equirectangular'.
        desc.IntParam(
            name='equirectangularNbSplits',
            label='Equirectangular Nb Splits',
            description='''Equirectangular number of splits''',
            value=2,
            range=(1, 100, 1),
            uid=[0],
        ),
        desc.IntParam(
            name='equirectangularSplitResolution',
            label='Equirectangular Split Resolution',
            description='''Equirectangular split resolution''',
            value=1200,
            range=(100, 10000, 1),
            uid=[0],
        ),
        # String '0'/'1' choice rather than a BoolParam — kept as-is since the
        # command-line tool expects these literal values.
        desc.ChoiceParam(
            name='equirectangularDemoMode',
            label='Equirectangular Demo Mode',
            description='''Export a SVG file that simulate the split''',
            value='0',
            values=['0', '1'],
            exclusive=True,
            uid=[0],
        ),
    ]

    outputs = [
        desc.File(
            name='output',
            label='Output Folder',
            description='''Output folder for extracted frames.''',
            value=desc.Node.internalFolder,
            uid=[],
        ),
    ]
class MeshDecimate(desc.CommandLineNode):
    # Simplifies a mesh by reducing its vertex/face count while preserving
    # its overall shape (wraps aliceVision_meshDecimate).
    commandLine = 'aliceVision_meshDecimate {allParams}'
    cpu = desc.Level.NORMAL
    ram = desc.Level.NORMAL

    inputs = [
        # Fix: the file-format note belongs in 'description' (which was empty),
        # not in 'label' — matches the convention of the other nodes in this file.
        desc.File(
            name="input",
            label='Input Mesh',
            description='Input Mesh (OBJ file format).',
            value='',
            uid=[0],
        ),
        desc.FloatParam(
            name='simplificationFactor',
            label='Simplification factor',
            description='Simplification factor',
            value=0.5,
            range=(0.0, 1.0, 0.01),
            uid=[0],
        ),
        # Vertex-count constraints: 0 means "unconstrained" for each of them.
        desc.IntParam(
            name='nbVertices',
            label='Fixed Number of Vertices',
            description='Fixed number of output vertices.',
            value=0,
            range=(0, 1000000, 1),
            uid=[0],
        ),
        desc.IntParam(
            name='minVertices',
            label='Min Vertices',
            description='Min number of output vertices.',
            value=0,
            range=(0, 1000000, 1),
            uid=[0],
        ),
        desc.IntParam(
            name='maxVertices',
            label='Max Vertices',
            description='Max number of output vertices.',
            value=0,
            range=(0, 1000000, 1),
            uid=[0],
        ),
        desc.BoolParam(
            name='flipNormals',
            label='Flip Normals',
            description='Option to flip face normals.\n'
                        'It can be needed as it depends on the vertices order in triangles\n'
                        'and the convention change from one software to another.',
            value=False,
            uid=[0],
            advanced=True,
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='''verbosity level (fatal, error, warning, info, debug, trace).''',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        desc.File(
            name="output",
            label="Output mesh",
            description="Output mesh (OBJ file format).",
            value=desc.Node.internalFolder + 'mesh.obj',
            uid=[],
        ),
    ]
class ImageMatching(desc.CommandLineNode):
    # Selects the image pairs worth matching so FeatureMatching does not have
    # to consider all N^2 combinations.
    commandLine = 'aliceVision_imageMatching {allParams}'
    size = desc.DynamicNodeSize('input')

    # Fixes vs. previous revision: "VocabularuTree" typo corrected in the
    # user-facing documentation; stray space removed in the input description.
    documentation = '''
The goal of this node is to select the image pairs to match. The ambition is to find the images that are looking to the same areas of the scene.
Thanks to this node, the FeatureMatching node will only compute the matches between the selected image pairs.

It provides multiple methods:
* **VocabularyTree**
It uses image retrieval techniques to find images that share some content without the cost of resolving all feature matches in details.
Each image is represented in a compact image descriptor which allows to compute the distance between all images descriptors very efficiently.
If your scene contains less than "Voc Tree: Minimal Number of Images", all image pairs will be selected.
* **Sequential**
If your input is a video sequence, you can use this option to link images between them over time.
* **SequentialAndVocabularyTree**
Combines sequential approach with Voc Tree to enable connections between keyframes at different times.
* **Exhaustive**
Export all image pairs.
* **Frustum**
If images have known poses, computes the intersection between cameras frustums to create the list of image pairs.
* **FrustumOrVocabularyTree**
If images have known poses, use frustum intersection else use VocabularyTree.

## Online
[https://alicevision.org/#photogrammetry/image_matching](https://alicevision.org/#photogrammetry/image_matching)
'''

    inputs = [
        desc.File(
            name='input',
            label='Input',
            description='SfMData file.',
            value='',
            uid=[0],
        ),
        desc.ListAttribute(
            elementDesc=desc.File(
                name="featuresFolder",
                label="Features Folder",
                description="",
                value="",
                uid=[0],
            ),
            name="featuresFolders",
            label="Features Folders",
            description="Folder(s) containing the extracted features and descriptors."),
        desc.ChoiceParam(
            name='method',
            label='Method',
            description='Method used to select the image pairs to match:\n'
                        ' * VocabularyTree: It uses image retrieval techniques to find images that share some content without the cost of resolving all \n'
                        'feature matches in details. Each image is represented in a compact image descriptor which allows to compute the distance between all \n'
                        'images descriptors very efficiently. If your scene contains less than "Voc Tree: Minimal Number of Images", all image pairs will be selected.\n'
                        ' * Sequential: If your input is a video sequence, you can use this option to link images between them over time.\n'
                        ' * SequentialAndVocabularyTree: Combines sequential approach with VocTree to enable connections between keyframes at different times.\n'
                        ' * Exhaustive: Export all image pairs.\n'
                        ' * Frustum: If images have known poses, computes the intersection between cameras frustums to create the list of image pairs.\n'
                        ' * FrustumOrVocabularyTree: If images have known poses, use frustum intersection else use VocabularyTree.\n',
            value='VocabularyTree',
            values=[
                'VocabularyTree', 'Sequential', 'SequentialAndVocabularyTree',
                'Exhaustive', 'Frustum', 'FrustumOrVocabularyTree'
            ],
            exclusive=True,
            uid=[0],
        ),
        # --- VocabularyTree-specific parameters ---
        desc.File(
            name='tree',
            label='Voc Tree: Tree',
            description='Input name for the vocabulary tree file.',
            # Defaults to the tree shipped with AliceVision, if configured.
            value=os.environ.get('ALICEVISION_VOCTREE', ''),
            uid=[],
            enabled=lambda node: 'VocabularyTree' in node.method.value,
        ),
        desc.File(
            name='weights',
            label='Voc Tree: Weights',
            description='Input name for the weight file, if not provided the weights will be computed on the database built with the provided set.',
            value='',
            uid=[0],
            advanced=True,
            enabled=lambda node: 'VocabularyTree' in node.method.value,
        ),
        desc.IntParam(
            name='minNbImages',
            label='Voc Tree: Minimal Number of Images',
            description='Minimal number of images to use the vocabulary tree. If we have less features than this threshold, we will compute all matching combinations.',
            value=200,
            range=(0, 500, 1),
            uid=[0],
            advanced=True,
            enabled=lambda node: 'VocabularyTree' in node.method.value,
        ),
        desc.IntParam(
            name='maxDescriptors',
            label='Voc Tree: Max Descriptors',
            description='Limit the number of descriptors you load per image. Zero means no limit.',
            value=500,
            range=(0, 100000, 1),
            uid=[0],
            advanced=True,
            enabled=lambda node: 'VocabularyTree' in node.method.value,
        ),
        desc.IntParam(
            name='nbMatches',
            label='Voc Tree: Nb Matches',
            description='The number of matches to retrieve for each image (If 0 it will retrieve all the matches).',
            value=50,
            range=(0, 1000, 1),
            uid=[0],
            advanced=True,
            enabled=lambda node: 'VocabularyTree' in node.method.value,
        ),
        # --- Sequential-specific parameter ---
        desc.IntParam(
            name='nbNeighbors',
            label='Sequential: Nb Neighbors',
            description='The number of neighbors to retrieve for each image (If 0 it will retrieve all the neighbors).',
            value=50,
            range=(0, 1000, 1),
            uid=[0],
            advanced=True,
            enabled=lambda node: 'Sequential' in node.method.value,
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='verbosity level (fatal, error, warning, info, debug, trace).',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        )
    ]

    outputs = [
        desc.File(
            name='output',
            label='Output List File',
            description='Filepath to the output file with the list of selected image pairs.',
            value=desc.Node.internalFolder + 'imageMatches.txt',
            uid=[],
        ),
    ]
class DepthMapFilter(desc.CommandLineNode):
    # Removes depth values that are not consistent across several depth maps,
    # to discard unstable points before fusion in the Meshing node.
    commandLine = 'aliceVision_depthMapFiltering {allParams}'
    gpu = desc.Level.NORMAL
    size = desc.DynamicNodeSize('input')
    # Views are processed in chunks of 10 over the range arguments below.
    parallelization = desc.Parallelization(blockSize=10)
    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

    documentation = '''
Filter depth map values that are not coherent in multiple depth maps.
This allows to filter unstable points before starting the fusion of all depth maps in the Meshing node.
'''

    inputs = [
        desc.File(
            name='input',
            label='SfMData',
            description='SfMData file.',
            value='',
            uid=[0],
        ),
        desc.File(
            name="depthMapsFolder",
            label="DepthMaps Folder",
            description="Input depth maps folder",
            value="",
            uid=[0],
        ),
        # --- View-angle constraints between camera pairs ---
        desc.FloatParam(
            name='minViewAngle',
            label='Min View Angle',
            description='Minimum angle between two views.',
            value=2.0,
            range=(0.0, 10.0, 0.1),
            uid=[0],
            advanced=True,
        ),
        desc.FloatParam(
            name='maxViewAngle',
            label='Max View Angle',
            description='Maximum angle between two views.',
            value=70.0,
            range=(10.0, 120.0, 1),
            uid=[0],
            advanced=True,
        ),
        desc.IntParam(
            name="nNearestCams",
            label="Number of Nearest Cameras",
            description="Number of nearest cameras used for filtering.",
            value=10,
            range=(0, 20, 1),
            uid=[0],
            advanced=True,
        ),
        # --- Consistency thresholds across cameras ---
        desc.IntParam(
            name="minNumOfConsistentCams",
            label="Min Consistent Cameras",
            description="Min Number of Consistent Cameras",
            value=3,
            range=(0, 10, 1),
            uid=[0],
        ),
        desc.IntParam(
            name="minNumOfConsistentCamsWithLowSimilarity",
            label="Min Consistent Cameras Bad Similarity",
            description="Min Number of Consistent Cameras for pixels with weak similarity value",
            value=4,
            range=(0, 10, 1),
            uid=[0],
        ),
        desc.IntParam(
            name="pixSizeBall",
            label="Filtering Size in Pixels",
            description="Filtering size in pixels",
            value=0,
            range=(0, 10, 1),
            uid=[0],
            advanced=True,
        ),
        desc.IntParam(
            name="pixSizeBallWithLowSimilarity",
            label="Filtering Size in Pixels Bad Similarity",
            description="Filtering size in pixels",
            value=0,
            range=(0, 10, 1),
            uid=[0],
            advanced=True,
        ),
        desc.BoolParam(
            name='computeNormalMaps',
            label='Compute Normal Maps',
            description='Compute normal maps per depth map.',
            value=False,
            uid=[0],
            advanced=True,
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='''verbosity level (fatal, error, warning, info, debug, trace).''',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        desc.File(
            name='output',
            label='Filtered DepthMaps Folder',
            description='Output folder for generated depth maps.',
            value=desc.Node.internalFolder,
            uid=[],
        ),
    ]
class ImageProcessing(desc.CommandLineNode):
    # Generic image conversion/filtering node (format change, exposure
    # compensation, sharpen/bilateral/CLAHE/noise filters, ...).
    commandLine = 'aliceVision_utils_imageProcessing {allParams}'
    size = desc.DynamicNodeSize('input')
    # Parallelization deliberately disabled for now (kept for reference).
    # parallelization = desc.Parallelization(blockSize=40)
    # commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

    documentation = '''
Convert or apply filtering to the input images.
'''

    inputs = [
        desc.File(
            name='input',
            label='Input',
            description='SfMData file input, image filenames or regex(es) on the image file path.\nsupported regex: \'#\' matches a single digit, \'@\' one or more digits, \'?\' one character and \'*\' zero or more.',
            value='',
            uid=[0],
        ),
        desc.ListAttribute(
            elementDesc=desc.File(
                name="inputFolder",
                label="input Folder",
                description="",
                value="",
                uid=[0],
            ),
            name="inputFolders",
            label="Images input Folders",
            description='Use images from specific folder(s).',
        ),
        desc.ListAttribute(
            elementDesc=desc.StringParam(
                name="metadataFolder",
                label="Metadata Folder",
                description="",
                value="",
                uid=[0],
            ),
            name="metadataFolders",
            label="Metadata input Folders",
            description='Use images metadata from specific folder(s).',
        ),
        # Empty value means "keep the input file extension".
        desc.ChoiceParam(
            name='extension',
            label='Output File Extension',
            description='Output Image File Extension.',
            value='',
            values=['', 'exr', 'jpg', 'tiff', 'png'],
            exclusive=True,
            uid=[0],
        ),
        desc.BoolParam(
            name='reconstructedViewsOnly',
            label='Only Reconstructed Views',
            description='Process Only Reconstructed Views',
            value=False,
            uid=[0],
        ),
        desc.BoolParam(
            name='fixNonFinite',
            label='Fix Non-Finite',
            description='Fix non-finite pixels based on neighboring pixels average.',
            value=False,
            uid=[0],
        ),
        desc.BoolParam(
            name='exposureCompensation',
            label='Exposure Compensation',
            description='Exposure Compensation',
            value=False,
            uid=[0],
        ),
        desc.FloatParam(
            name='scaleFactor',
            label='ScaleFactor',
            # NOTE(review): range caps the scale at 1.0 (downscale only) —
            # confirm upscaling is intentionally disallowed.
            description='Scale Factor.',
            value=1.0,
            range=(0.0, 1.0, 0.01),
            uid=[0],
        ),
        desc.FloatParam(
            name='contrast',
            label='Contrast',
            description='Contrast.',
            value=1.0,
            range=(0.0, 100.0, 0.1),
            uid=[0],
        ),
        desc.IntParam(
            name='medianFilter',
            label='Median Filter',
            description='Median Filter.',
            value=0,
            range=(0, 10, 1),
            uid=[0],
        ),
        desc.BoolParam(
            name='fillHoles',
            label='Fill Holes',
            description='Fill holes based on the alpha channel.\n'
                        'Note: It will enable fixNonFinite, as it is required for the image pyramid construction used to fill holes.',
            value=False,
            uid=[0],
        ),
        # --- Sharpen filter group (joined on the command line with ':') ---
        desc.GroupAttribute(name="sharpenFilter", label="Sharpen Filter", description="Sharpen Filtering Parameters.", joinChar=":", groupDesc=[
            desc.BoolParam(
                name='sharpenFilterEnabled',
                label='Enable',
                description='Use sharpen.',
                value=False,
                uid=[0],
            ),
            desc.IntParam(
                name='width',
                label='Width',
                description='Sharpen Width.',
                value=3,
                range=(1, 9, 2),
                uid=[0],
                enabled=lambda node: node.sharpenFilter.sharpenFilterEnabled.value,
            ),
            desc.FloatParam(
                name='contrast',
                label='Contrast',
                description='Sharpen Contrast.',
                value=1.0,
                range=(0.0, 100.0, 0.1),
                uid=[0],
                enabled=lambda node: node.sharpenFilter.sharpenFilterEnabled.value,
            ),
            desc.FloatParam(
                name='threshold',
                label='Threshold',
                description='Sharpen Threshold.',
                value=0.0,
                range=(0.0, 1.0, 0.01),
                uid=[0],
                enabled=lambda node: node.sharpenFilter.sharpenFilterEnabled.value,
            ),
        ]),
        # --- Bilateral filter group ---
        desc.GroupAttribute(
            name="bilateralFilter",
            label="Bilateral Filter",
            description="Bilateral Filtering Parameters.",
            joinChar=":",
            groupDesc=[
                desc.BoolParam(
                    name='bilateralFilterEnabled',
                    label='Enable',
                    description='Bilateral Filter.',
                    value=False,
                    uid=[0],
                ),
                desc.IntParam(
                    name='bilateralFilterDistance',
                    label='Distance',
                    description='Diameter of each pixel neighborhood that is used during bilateral filtering.\nCould be very slow for large filters, so it is recommended to use 5.',
                    value=0,
                    range=(0, 9, 1),
                    uid=[0],
                    enabled=lambda node: node.bilateralFilter.bilateralFilterEnabled.value,
                ),
                desc.FloatParam(
                    name='bilateralFilterSigmaSpace',
                    label='Sigma Coordinate Space',
                    description='Bilateral Filter sigma in the coordinate space.',
                    value=0.0,
                    range=(0.0, 150.0, 0.01),
                    uid=[0],
                    enabled=lambda node: node.bilateralFilter.bilateralFilterEnabled.value,
                ),
                desc.FloatParam(
                    name='bilateralFilterSigmaColor',
                    label='Sigma Color Space',
                    description='Bilateral Filter sigma in the color space.',
                    value=0.0,
                    range=(0.0, 150.0, 0.01),
                    uid=[0],
                    enabled=lambda node: node.bilateralFilter.bilateralFilterEnabled.value,
                ),
            ]),
        # --- CLAHE (Contrast Limited Adaptive Histogram Equalization) group ---
        desc.GroupAttribute(
            name="claheFilter",
            label="Clahe Filter",
            description="Clahe Filtering Parameters.",
            joinChar=":",
            groupDesc=[
                desc.BoolParam(
                    name='claheEnabled',
                    label='Enable',
                    description='Use Contrast Limited Adaptive Histogram Equalization (CLAHE) Filter.',
                    value=False,
                    uid=[0],
                ),
                desc.FloatParam(
                    name='claheClipLimit',
                    label='Clip Limit',
                    description='Sets Threshold For Contrast Limiting.',
                    value=4.0,
                    range=(0.0, 8.0, 1.0),
                    uid=[0],
                    enabled=lambda node: node.claheFilter.claheEnabled.value,
                ),
                desc.IntParam(
                    name='claheTileGridSize',
                    label='Tile Grid Size',
                    description='Sets Size Of Grid For Histogram Equalization. Input Image Will Be Divided Into Equally Sized Rectangular Tiles.',
                    value=8,
                    range=(4, 64, 4),
                    uid=[0],
                    enabled=lambda node: node.claheFilter.claheEnabled.value,
                ),
            ]),
        # --- Synthetic noise group ---
        desc.GroupAttribute(
            name="noiseFilter",
            label="Noise Filter",
            description="Noise Filtering Parameters.",
            joinChar=":",
            groupDesc=[
                desc.BoolParam(
                    name='noiseEnabled',
                    label='Enable',
                    description='Add Noise.',
                    value=False,
                    uid=[0],
                ),
                desc.ChoiceParam(
                    name='noiseMethod',
                    label='Method',
                    description=" * method: There are several noise types to choose from:\n"
                                " * uniform: adds noise values uninformly distributed on range [A,B).\n"
                                " * gaussian: adds Gaussian (normal distribution) noise values with mean value A and standard deviation B.\n"
                                " * salt: changes to value A a portion of pixels given by B.\n",
                    value='uniform',
                    values=['uniform', 'gaussian', 'salt'],
                    exclusive=True,
                    uid=[0],
                    enabled=lambda node: node.noiseFilter.noiseEnabled.value,
                ),
                # A/B meaning depends on noiseMethod (see its description).
                desc.FloatParam(
                    name='noiseA',
                    label='A',
                    description='Parameter that have a different interpretation depending on the method chosen.',
                    value=0.0,
                    range=(0.0, 1.0, 0.0001),
                    uid=[0],
                    enabled=lambda node: node.noiseFilter.noiseEnabled.value,
                ),
                desc.FloatParam(
                    name='noiseB',
                    label='B',
                    description='Parameter that have a different interpretation depending on the method chosen.',
                    value=1.0,
                    range=(0.0, 1.0, 0.0001),
                    uid=[0],
                    enabled=lambda node: node.noiseFilter.noiseEnabled.value,
                ),
                desc.BoolParam(
                    name='noiseMono',
                    label='Mono',
                    description='If is Checked, a single noise value will be applied to all channels otherwise a separate noise value will be computed for each channel.',
                    value=True,
                    uid=[0],
                    enabled=lambda node: node.noiseFilter.noiseEnabled.value,
                ),
            ]),
        desc.ChoiceParam(
            name='outputFormat',
            label='Output Image Format',
            description='Allows you to choose the format of the output image.',
            value='rgba',
            values=['rgba', 'rgb', 'grayscale'],
            exclusive=True,
            uid=[0],
        ),
        desc.ChoiceParam(
            name='storageDataType',
            label='Storage Data Type for EXR output',
            description='Storage image data type:\n'
                        ' * float: Use full floating point (32 bits per channel)\n'
                        ' * half: Use half float (16 bits per channel)\n'
                        ' * halfFinite: Use half float, but clamp values to avoid non-finite values\n'
                        ' * auto: Use half float if all values can fit, else use full float\n',
            value='float',
            values=['float', 'half', 'halfFinite', 'auto'],
            exclusive=True,
            uid=[0],
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='verbosity level (fatal, error, warning, info, debug, trace).',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        )
    ]

    outputs = [
        # Only set when the input is an SfMData file (.abc/.sfm); empty otherwise.
        desc.File(
            name='outSfMData',
            label='Output sfmData',
            description='Output sfmData.',
            value=lambda attr: (desc.Node.internalFolder + os.path.basename(attr.node.input.value)) if (os.path.splitext(attr.node.input.value)[1] in ['.abc', '.sfm']) else '',
            uid=[],
            group='',  # do not export on the command line
        ),
        desc.File(
            name='output',
            label='Output Folder',
            description='Output Images Folder.',
            value=desc.Node.internalFolder,
            uid=[],
        ),
        # outputImagesValueFunct is a helper defined elsewhere in this file.
        desc.File(
            name='outputImages',
            label='Output Images',
            description='Output Image Files.',
            value=outputImagesValueFunct,
            group='',  # do not export on the command line
            uid=[],
        ),
    ]
class LdrToHdrSampling(desc.CommandLineNode):
    """
    Sample pixel values from the input LDR exposure brackets. The samples are
    used downstream to calibrate the camera response function for HDR merging.
    """
    commandLine = 'aliceVision_LdrToHdrSampling {allParams}'
    # One logical unit is a full bracket group, so the node size is the input
    # size divided by the detected number of brackets.
    size = DividedInputNodeSize('input', 'nbBrackets')
    parallelization = desc.Parallelization(blockSize=2)
    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

    category = 'Panorama HDR'
    documentation = '''
Sample pixels from Low range images for HDR creation
'''

    inputs = [
        desc.File(
            name='input',
            label='Input',
            description='SfMData file.',
            value='',
            uid=[0],
        ),
        desc.IntParam(
            name='userNbBrackets',
            label='Number of Brackets',
            description='Number of exposure brackets per HDR image (0 for automatic detection).',
            value=0,
            range=(0, 15, 1),
            uid=[],
            group='user',  # not used directly on the command line
        ),
        desc.IntParam(
            name='nbBrackets',
            label='Automatic Nb Brackets',
            description='Number of exposure brackets used per HDR image. It is detected automatically from input Viewpoints metadata if "userNbBrackets" is 0, else it is equal to "userNbBrackets".',
            value=0,
            range=(0, 10, 1),
            uid=[0],
        ),
        desc.BoolParam(
            name='byPass',
            label='Bypass',
            description="Bypass HDR creation and use the medium bracket as the source for the next steps",
            value=False,
            uid=[0],
            group='internal',
            # Bypass is meaningless when there is only a single bracket.
            enabled=lambda node: node.nbBrackets.value != 1,
        ),
        desc.IntParam(
            name='channelQuantizationPower',
            label='Channel Quantization Power',
            description='Quantization level like 8 bits or 10 bits.',
            value=10,
            range=(8, 14, 1),
            uid=[0],
            advanced=True,
            enabled=lambda node: node.byPass.enabled and not node.byPass.value,
        ),
        desc.IntParam(
            name='blockSize',
            label='Block Size',
            description='Size of the image tile to extract a sample.',
            value=256,
            range=(8, 1024, 1),
            uid=[0],
            advanced=True,
            enabled=lambda node: node.byPass.enabled and not node.byPass.value,
        ),
        desc.IntParam(
            name='radius',
            label='Patch Radius',
            description='Radius of the patch used to analyze the sample statistics.',
            value=5,
            range=(0, 10, 1),
            uid=[0],
            advanced=True,
            enabled=lambda node: node.byPass.enabled and not node.byPass.value,
        ),
        desc.IntParam(
            name='maxCountSample',
            label='Max Number of Samples',
            description='Max number of samples per image group.',
            value=200,
            range=(10, 1000, 10),
            uid=[0],
            advanced=True,
            enabled=lambda node: node.byPass.enabled and not node.byPass.value,
        ),
        desc.BoolParam(
            name='debug',
            label='Export Debug Files',
            description="Export debug files to analyze the sampling strategy.",
            value=False,
            uid=[],
            enabled=lambda node: node.byPass.enabled and not node.byPass.value,
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='verbosity level (fatal, error, warning, info, debug, trace).',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        )
    ]

    outputs = [
        desc.File(
            name='output',
            label='Output Folder',
            description='Output path for the samples.',
            value=desc.Node.internalFolder,
            uid=[],
        ),
    ]

    def processChunk(self, chunk):
        """Run the sampling command, unless there is nothing to sample.

        Bug fix: the previous code returned ``super().processChunk(chunk)``
        *inside* the bypass/single-bracket condition, so the command only ran
        in exactly the cases where it should have been skipped. Now we
        early-return on bypass or single bracket and delegate otherwise.
        """
        if chunk.node.nbBrackets.value == 1 or chunk.node.byPass.value:
            return
        super(LdrToHdrSampling, self).processChunk(chunk)

    @classmethod
    def update(cls, node):
        """Auto-detect the number of exposure brackets from upstream CameraInit metadata."""
        if not isinstance(node.nodeDesc, cls):
            raise ValueError("Node {} is not an instance of type {}".format(node, cls))
        # TODO: use Node version for this test
        if 'userNbBrackets' not in node.getAttributes().keys():
            # Old version of the node
            return
        if node.userNbBrackets.value != 0:
            # User forced the bracket count: trust it.
            node.nbBrackets.value = node.userNbBrackets.value
            return
        # logging.info("[LDRToHDR] Update start: version:" + str(node.packageVersion))
        cameraInitOutput = node.input.getLinkParam(recursive=True)
        if not cameraInitOutput:
            node.nbBrackets.value = 0
            return
        if not cameraInitOutput.node.hasAttribute('viewpoints'):
            # Follow one more link upstream to reach the CameraInit node.
            if cameraInitOutput.node.hasAttribute('input'):
                cameraInitOutput = cameraInitOutput.node.input.getLinkParam(recursive=True)
        if cameraInitOutput and cameraInitOutput.node and cameraInitOutput.node.hasAttribute('viewpoints'):
            viewpoints = cameraInitOutput.node.viewpoints.value
        else:
            # No connected CameraInit
            node.nbBrackets.value = 0
            return

        # logging.info("[LDRToHDR] Update start: nb viewpoints:" + str(len(viewpoints)))
        inputs = []
        for viewpoint in viewpoints:
            jsonMetadata = viewpoint.metadata.value
            if not jsonMetadata:
                # no metadata, we cannot found the number of brackets
                node.nbBrackets.value = 0
                return
            d = json.loads(jsonMetadata)
            # Exposure is identified by the (aperture, shutter, iso) triple.
            fnumber = findMetadata(d, ["FNumber", "Exif:ApertureValue", "ApertureValue", "Aperture"], "")
            shutterSpeed = findMetadata(d, ["Exif:ShutterSpeedValue", "ShutterSpeedValue", "ShutterSpeed"], "")
            iso = findMetadata(d, ["Exif:ISOSpeedRatings", "ISOSpeedRatings", "ISO"], "")
            if not fnumber and not shutterSpeed:
                # If one image without shutter or fnumber, we cannot found the number of brackets.
                # We assume that there is no multi-bracketing, so nothing to do.
                node.nbBrackets.value = 1
                return
            inputs.append((viewpoint.path.value, (fnumber, shutterSpeed, iso)))
        inputs.sort()

        # Group consecutive images into bracket groups: a new group starts when
        # the exposure cycles back to the first exposure of the current group.
        exposureGroups = []
        exposures = []
        for path, exp in inputs:
            if exposures and exp != exposures[-1] and exp == exposures[0]:
                exposureGroups.append(exposures)
                exposures = [exp]
            else:
                exposures.append(exp)
        exposureGroups.append(exposures)
        exposures = None
        bracketSizes = set()
        if len(exposureGroups) == 1:
            if len(set(exposureGroups[0])) == 1:
                # Single exposure and multiple views
                node.nbBrackets.value = 1
            else:
                # Single view and multiple exposures
                node.nbBrackets.value = len(exposureGroups[0])
        else:
            for expGroup in exposureGroups:
                bracketSizes.add(len(expGroup))
            if len(bracketSizes) == 1:
                # All groups agree on one bracket count.
                node.nbBrackets.value = bracketSizes.pop()
                # logging.info("[LDRToHDR] nb bracket size:" + str(node.nbBrackets.value))
            else:
                # Ambiguous grouping: fall back to "unknown".
                node.nbBrackets.value = 0
class DepthMap(desc.CommandLineNode):
    """
    Per-pixel depth estimation node (SGM + Refine passes) for every camera
    solved by Structure-From-Motion. GPU-intensive.
    """
    commandLine = 'aliceVision_depthMapEstimation {allParams}'
    gpu = desc.Level.INTENSIVE
    size = desc.DynamicNodeSize('input')
    parallelization = desc.Parallelization(blockSize=3)
    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

    category = 'Dense Reconstruction'
    documentation = '''
For each camera that have been estimated by the Structure-From-Motion, it estimates the depth value per pixel.

Adjust the downscale factor to compute depth maps at a higher/lower resolution.
Use a downscale factor of one (full-resolution) only if the quality of the input images is really high (camera on a tripod with high-quality optics).

## Online
[https://alicevision.org/#photogrammetry/depth_maps_estimation](https://alicevision.org/#photogrammetry/depth_maps_estimation)
'''

    inputs = [
        desc.File(
            name='input',
            label='SfMData',
            description='SfMData file.',
            value='',
            uid=[0],
        ),
        desc.File(
            name='imagesFolder',
            label='Images Folder',
            description='Use images from a specific folder instead of those specify in the SfMData file.\nFilename should be the image uid.',
            value='',
            uid=[0],
        ),
        desc.ChoiceParam(
            name='downscale',
            label='Downscale',
            description='Image downscale factor.',
            value=2,
            values=[1, 2, 4, 8, 16],
            exclusive=True,
            uid=[0],
        ),
        desc.FloatParam(
            name='minViewAngle',
            label='Min View Angle',
            description='Minimum angle between two views.',
            value=2.0,
            range=(0.0, 10.0, 0.1),
            uid=[0],
            advanced=True,
        ),
        desc.FloatParam(
            name='maxViewAngle',
            label='Max View Angle',
            description='Maximum angle between two views.',
            value=70.0,
            range=(10.0, 120.0, 1),
            uid=[0],
            advanced=True,
        ),
        desc.IntParam(
            name='sgmMaxTCams',
            label='SGM: Nb Neighbour Cameras',
            description='Semi Global Matching: Number of neighbour cameras.',
            value=10,
            range=(1, 100, 1),
            uid=[0],
        ),
        desc.IntParam(
            name='sgmWSH',
            label='SGM: WSH',
            description='Semi Global Matching: Half-size of the patch used to compute the similarity.',
            value=4,
            range=(1, 20, 1),
            uid=[0],
            advanced=True,
        ),
        desc.FloatParam(
            name='sgmGammaC',
            label='SGM: GammaC',
            description='Semi Global Matching: GammaC Threshold.',
            value=5.5,
            range=(0.0, 30.0, 0.5),
            uid=[0],
            advanced=True,
        ),
        desc.FloatParam(
            name='sgmGammaP',
            label='SGM: GammaP',
            description='Semi Global Matching: GammaP Threshold.',
            value=8.0,
            range=(0.0, 30.0, 0.5),
            uid=[0],
            advanced=True,
        ),
        desc.IntParam(
            name='refineMaxTCams',
            label='Refine: Nb Neighbour Cameras',
            description='Refine: Number of neighbour cameras.',
            value=6,
            range=(1, 20, 1),
            uid=[0],
        ),
        desc.IntParam(
            name='refineNSamplesHalf',
            label='Refine: Number of Samples',
            description='Refine: Number of samples.',
            value=150,
            range=(1, 500, 10),
            uid=[0],
            advanced=True,
        ),
        desc.IntParam(
            name='refineNDepthsToRefine',
            label='Refine: Number of Depths',
            description='Refine: Number of depths.',
            value=31,
            range=(1, 100, 1),
            uid=[0],
            advanced=True,
        ),
        desc.IntParam(
            name='refineNiters',
            label='Refine: Number of Iterations',
            # Typo fix: was 'Refine:: Number of iterations.'
            description='Refine: Number of iterations.',
            value=100,
            range=(1, 500, 10),
            uid=[0],
            advanced=True,
        ),
        desc.IntParam(
            name='refineWSH',
            label='Refine: WSH',
            description='Refine: Half-size of the patch used to compute the similarity.',
            value=3,
            range=(1, 20, 1),
            uid=[0],
            advanced=True,
        ),
        desc.FloatParam(
            name='refineSigma',
            label='Refine: Sigma',
            description='Refine: Sigma Threshold.',
            value=15.0,  # float literal for consistency with FloatParam range
            range=(0.0, 30.0, 0.5),
            uid=[0],
            advanced=True,
        ),
        desc.FloatParam(
            name='refineGammaC',
            label='Refine: GammaC',
            description='Refine: GammaC Threshold.',
            value=15.5,
            range=(0.0, 30.0, 0.5),
            uid=[0],
            advanced=True,
        ),
        desc.FloatParam(
            name='refineGammaP',
            label='Refine: GammaP',
            description='Refine: GammaP threshold.',
            value=8.0,
            range=(0.0, 30.0, 0.5),
            uid=[0],
            advanced=True,
        ),
        desc.BoolParam(
            name='refineUseTcOrRcPixSize',
            label='Refine: Tc or Rc pixel size',
            description='Refine: Use minimum pixel size of neighbour cameras (Tc) or current camera pixel size (Rc)',
            value=False,
            uid=[0],
            advanced=True,
        ),
        desc.BoolParam(
            name='exportIntermediateResults',
            label='Export Intermediate Results',
            description='Export intermediate results from the SGM and Refine steps.',
            value=False,
            uid=[],
            advanced=True,
        ),
        desc.IntParam(
            name='nbGPUs',
            label='Number of GPUs',
            description='Number of GPUs to use (0 means use all available GPUs).',
            value=0,
            range=(0, 5, 1),
            uid=[],
            advanced=True,
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='''verbosity level (fatal, error, warning, info, debug, trace).''',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        desc.File(
            name='output',
            label='Output',
            description='Output folder for generated depth maps.',
            value=desc.Node.internalFolder,
            uid=[],
        ),
    ]
class FeatureExtraction(desc.CommandLineNode):
    """
    Feature extraction node: detects and describes viewpoint-invariant image
    features (SIFT/AKAZE/CCTag...) for subsequent matching.
    """
    commandLine = 'aliceVision_featureExtraction {allParams}'
    size = desc.DynamicNodeSize('input')
    parallelization = desc.Parallelization(blockSize=40)
    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

    category = 'Sparse Reconstruction'
    documentation = '''
This node extracts distinctive groups of pixels that are, to some extent, invariant to changing camera viewpoints during image acquisition.
Hence, a feature in the scene should have similar feature descriptions in all images.

This node implements multiple methods:
 * **SIFT**
The most standard method. This is the default and recommended value for all use cases.
 * **AKAZE**
AKAZE can be interesting solution to extract features in challenging condition. It could be able to match wider angle than SIFT but has drawbacks.
It may extract too many features, the repartition is not always good.
It is known to be good on challenging surfaces such as skin.
 * **CCTAG**
CCTag is a marker type with 3 or 4 crowns. You can put markers in the scene during the shooting session to automatically re-orient and re-scale the scene to a known size.
It is robust to motion-blur, depth-of-field, occlusion. Be careful to have enough white margin around your CCTags.

## Online
[https://alicevision.org/#photogrammetry/natural_feature_extraction](https://alicevision.org/#photogrammetry/natural_feature_extraction)
'''

    inputs = [
        desc.File(
            name='input',
            label='SfMData',
            description='SfMData file.',
            value='',
            uid=[0],
        ),
        desc.ChoiceParam(
            name='describerTypes',
            label='Describer Types',
            description='Describer types used to describe an image.',
            value=['sift'],
            values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'],
            exclusive=False,
            uid=[0],
            joinChar=',',
        ),
        desc.ChoiceParam(
            name='describerPreset',
            label='Describer Density',
            description='Control the ImageDescriber density (low, medium, normal, high, ultra).\n'
                        'Warning: Use ULTRA only on small datasets.',
            value='normal',
            values=['low', 'medium', 'normal', 'high', 'ultra', 'custom'],
            exclusive=True,
            uid=[0],
            # 'custom' is resolved through dedicated parameters, so it is
            # removed from the generic command line.
            group=lambda node: 'allParams' if node.describerPreset.value != 'custom' else None,
        ),
        desc.IntParam(
            name='maxNbFeatures',
            label='Max Nb Features',
            description='Max number of features extracted (0 means default value based on Describer Density).',
            value=0,
            range=(0, 100000, 1000),
            uid=[0],
            advanced=True,
            enabled=lambda node: (node.describerPreset.value == 'custom'),
        ),
        desc.ChoiceParam(
            name='describerQuality',
            label='Describer Quality',
            description='Control the ImageDescriber quality (low, medium, normal, high, ultra).',
            value='normal',
            values=['low', 'medium', 'normal', 'high', 'ultra'],
            exclusive=True,
            uid=[0],
        ),
        desc.ChoiceParam(
            name='contrastFiltering',
            label='Contrast Filtering',
            description="Contrast filtering method to ignore features with too low contrast that can be considered as noise:\n"
                        "* Static: Fixed threshold.\n"
                        "* AdaptiveToMedianVariance: Based on image content analysis.\n"
                        "* NoFiltering: Disable contrast filtering.\n"
                        "* GridSortOctaves: Grid Sort but per octaves (and only per scale at the end).\n"
                        "* GridSort: Grid sort per octaves and at the end (scale * peakValue).\n"
                        "* GridSortScaleSteps: Grid sort per octaves and at the end (scale and then peakValue).\n"
                        "* NonExtremaFiltering: Filter non-extrema peakValues.\n",
            value='GridSort',
            values=['Static', 'AdaptiveToMedianVariance', 'NoFiltering', 'GridSortOctaves', 'GridSort', 'GridSortScaleSteps', 'GridSortOctaveSteps', 'NonExtremaFiltering'],
            exclusive=True,
            advanced=True,
            uid=[0],
        ),
        desc.FloatParam(
            name='relativePeakThreshold',
            label='Relative Peak Threshold',
            description='Peak Threshold relative to median of gradiants.',
            value=0.01,
            range=(0.01, 1.0, 0.001),
            advanced=True,
            uid=[0],
            enabled=lambda node: (node.contrastFiltering.value == 'AdaptiveToMedianVariance'),
        ),
        desc.BoolParam(
            name='gridFiltering',
            label='Grid Filtering',
            description='Enable grid filtering. Highly recommended to ensure usable number of features.',
            value=True,
            advanced=True,
            uid=[0],
        ),
        desc.BoolParam(
            name='forceCpuExtraction',
            label='Force CPU Extraction',
            description='Use only CPU feature extraction.',
            value=True,
            uid=[],
            advanced=True,
        ),
        desc.IntParam(
            name='maxThreads',
            label='Max Nb Threads',
            description='Specifies the maximum number of threads to run simultaneously (0 for automatic mode).',
            value=0,
            range=(0, 24, 1),
            uid=[],
            advanced=True,
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='verbosity level (fatal, error, warning, info, debug, trace).',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        )
    ]

    outputs = [
        desc.File(
            name='output',
            label='Features Folder',
            description='Output path for the features and descriptors files (*.feat, *.desc).',
            value=desc.Node.internalFolder,
            uid=[],
        ),
    ]
class StructureFromMotion(desc.CommandLineNode):
    """
    Incremental Structure-From-Motion node: estimates camera poses, intrinsics
    and a sparse point cloud from features and matches.
    """
    commandLine = 'aliceVision_incrementalSfM {allParams}'
    size = desc.DynamicNodeSize('input')

    inputs = [
        desc.File(
            name='input',
            label='Input',
            description='SfMData file.',
            value='',
            uid=[0],
        ),
        desc.ListAttribute(
            elementDesc=desc.File(
                name="featuresFolder",
                label="Features Folder",
                description="",
                value="",
                uid=[0],
            ),
            name="featuresFolders",
            label="Features Folders",
            description="Folder(s) containing the extracted features and descriptors."
        ),
        desc.ListAttribute(
            elementDesc=desc.File(
                name="matchesFolder",
                label="Matches Folder",
                description="",
                value="",
                uid=[0],
            ),
            name="matchesFolders",
            label="Matches Folders",
            description="Folder(s) in which computed matches are stored."
        ),
        desc.ChoiceParam(
            name='describerTypes',
            label='Describer Types',
            description='Describer types used to describe an image.',
            value=['sift'],
            values=['sift', 'sift_float', 'sift_upright', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'],
            exclusive=False,
            uid=[0],
            joinChar=',',
        ),
        desc.ChoiceParam(
            name='localizerEstimator',
            label='Localizer Estimator',
            description='Estimator type used to localize cameras (acransac, ransac, lsmeds, loransac, maxconsensus).',
            value='acransac',
            values=['acransac', 'ransac', 'lsmeds', 'loransac', 'maxconsensus'],
            exclusive=True,
            uid=[0],
        ),
        desc.BoolParam(
            name='lockScenePreviouslyReconstructed',
            label='Lock Scene Previously Reconstructed',
            description='This option is useful for SfM augmentation. Lock previously reconstructed poses and intrinsics.',
            value=False,
            uid=[0],
        ),
        desc.BoolParam(
            name='useLocalBA',
            label='Local Bundle Adjustment',
            description='It reduces the reconstruction time, especially for large datasets (500+ images),\n'
                        'by avoiding computation of the Bundle Adjustment on areas that are not changing.',
            value=True,
            uid=[0],
        ),
        desc.IntParam(
            name='localBAGraphDistance',
            label='LocalBA Graph Distance',
            description='Graph-distance limit to define the Active region in the Local Bundle Adjustment strategy.',
            value=1,
            # Fix: the default value (1) was below the previous range minimum (2).
            range=(1, 10, 1),
            uid=[0],
        ),
        desc.IntParam(
            name='maxNumberOfMatches',
            label='Maximum Number of Matches',
            description='Maximum number of matches per image pair (and per feature type). \n'
                        'This can be useful to have a quick reconstruction overview. \n'
                        '0 means no limit.',
            value=0,
            range=(0, 50000, 1),
            uid=[0],
        ),
        desc.IntParam(
            name='minInputTrackLength',
            label='Min Input Track Length',
            description='Minimum track length in input of SfM',
            value=2,
            range=(2, 10, 1),
            uid=[0],
        ),
        desc.IntParam(
            name='minNumberOfObservationsForTriangulation',
            label='Min Observation For Triangulation',
            description='Minimum number of observations to triangulate a point.\n'
                        'Set it to 3 (or more) reduces drastically the noise in the point cloud,\n'
                        'but the number of final poses is a little bit reduced\n'
                        '(from 1.5% to 11% on the tested datasets).',
            value=2,
            range=(2, 10, 1),
            uid=[0],
        ),
        desc.FloatParam(
            name='minAngleForTriangulation',
            label='Min Angle For Triangulation',
            description='Minimum angle for triangulation.',
            value=3.0,
            range=(0.1, 10, 0.1),
            uid=[0],
        ),
        desc.FloatParam(
            name='minAngleForLandmark',
            label='Min Angle For Landmark',
            description='Minimum angle for landmark.',
            value=2.0,
            range=(0.1, 10, 0.1),
            uid=[0],
        ),
        desc.FloatParam(
            name='maxReprojectionError',
            label='Max Reprojection Error',
            description='Maximum reprojection error.',
            value=4.0,
            range=(0.1, 10, 0.1),
            uid=[0],
        ),
        desc.FloatParam(
            name='minAngleInitialPair',
            label='Min Angle Initial Pair',
            description='Minimum angle for the initial pair.',
            value=5.0,
            range=(0.1, 10, 0.1),
            uid=[0],
        ),
        desc.FloatParam(
            name='maxAngleInitialPair',
            label='Max Angle Initial Pair',
            description='Maximum angle for the initial pair.',
            value=40.0,
            range=(0.1, 60, 0.1),
            uid=[0],
        ),
        desc.BoolParam(
            name='useOnlyMatchesFromInputFolder',
            label='Use Only Matches From Input Folder',
            description='Use only matches from the input matchesFolder parameter.\n'
                        'Matches folders previously added to the SfMData file will be ignored.',
            value=False,
            uid=[],
        ),
        desc.File(
            name='initialPairA',
            label='Initial Pair A',
            description='Filename of the first image (without path).',
            value='',
            uid=[0],
        ),
        desc.File(
            name='initialPairB',
            label='Initial Pair B',
            description='Filename of the second image (without path).',
            value='',
            uid=[0],
        ),
        desc.ChoiceParam(
            name='interFileExtension',
            label='Inter File Extension',
            description='Extension of the intermediate file export.',
            value='.abc',
            values=('.abc', '.ply'),
            exclusive=True,
            uid=[],
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='Verbosity level (fatal, error, warning, info, debug, trace).',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        )
    ]

    outputs = [
        desc.File(
            name='output',
            label='Output SfMData File',
            description='Path to the output sfmdata file',
            value=desc.Node.internalFolder + 'sfm.abc',
            uid=[],
        ),
        desc.File(
            name='outputViewsAndPoses',
            # Fix: was labelled 'Output SfMData File' like the main output,
            # which made the two outputs indistinguishable in the UI.
            label='Output Poses',
            description='''Path to the output sfmdata file with cameras (views and poses).''',
            value=desc.Node.internalFolder + 'cameras.sfm',
            uid=[],
        ),
        desc.File(
            name='extraInfoFolder',
            label='Output Folder',
            description='Folder for intermediate reconstruction files and additional reconstruction information files.',
            value=desc.Node.internalFolder,
            uid=[],
        ),
    ]

    @staticmethod
    def getViewsAndPoses(node):
        """
        Parse SfM result and return views and poses as two dict with viewId and poseId as keys.
        """
        reportFile = node.outputViewsAndPoses.value
        if not os.path.exists(reportFile):
            # Reconstruction has not produced its report yet.
            return {}, {}
        with open(reportFile) as jsonFile:
            report = json.load(jsonFile)
        views = dict()
        poses = dict()
        for view in report['views']:
            views[view['viewId']] = view
        for pose in report['poses']:
            poses[pose['poseId']] = pose['pose']
        return views, poses
class Meshing(desc.CommandLineNode):
    """
    Dense mesh creation node: fuses the filtered depth maps into a dense point
    cloud and extracts a mesh from it. CPU/RAM intensive.
    """
    commandLine = 'aliceVision_meshing {allParams}'
    cpu = desc.Level.INTENSIVE
    ram = desc.Level.INTENSIVE

    inputs = [
        desc.File(
            name='input',
            label='Input',
            description='SfMData file.',
            value='',
            uid=[0],
        ),
        desc.File(
            name='imagesFolder',
            label='Images Folder',
            description='Use images from a specific folder. Filename should be the image uid.',
            value='',
            uid=[0],
        ),
        desc.File(
            name="depthMapFolder",
            label='Depth Maps Folder',
            description='Input depth maps folder',
            value='',
            uid=[0],
        ),
        desc.File(
            name="depthMapFilterFolder",
            label='Filtered Depth Maps Folder',
            description='Input filtered depth maps folder',
            value='',
            uid=[0],
        ),
        desc.BoolParam(
            name='estimateSpaceFromSfM',
            label='Estimate Space From SfM',
            description='Estimate the 3d space from the SfM',
            value=True,
            uid=[0],
        ),
        desc.IntParam(
            name='estimateSpaceMinObservations',
            label='Min Observations For SfM Space Estimation',
            description='Minimum number of observations for SfM space estimation.',
            value=3,
            range=(0, 100, 1),
            uid=[0],
        ),
        desc.FloatParam(
            name='estimateSpaceMinObservationAngle',
            label='Min Observations Angle For SfM Space Estimation',
            description='Minimum angle between two observations for SfM space estimation.',
            value=0.2,
            range=(0, 10, 0.1),
            uid=[0],
        ),
        desc.IntParam(
            name='maxInputPoints',
            label='Max Input Points',
            description='Max input points loaded from depth map images.',
            value=50000000,
            range=(500000, 500000000, 1000),
            uid=[0],
        ),
        desc.IntParam(
            name='maxPoints',
            label='Max Points',
            description='Max points at the end of the depth maps fusion.',
            value=5000000,
            range=(100000, 10000000, 1000),
            uid=[0],
        ),
        desc.IntParam(
            name='maxPointsPerVoxel',
            label='Max Points Per Voxel',
            description='Max points per voxel',
            value=1000000,
            range=(500000, 30000000, 1000),
            uid=[0],
        ),
        desc.IntParam(
            name='minStep',
            label='Min Step',
            description='The step used to load depth values from depth maps is computed from maxInputPts. '
                        'Here we define the minimal value for this step, so on small datasets we will not spend '
                        'too much time at the beginning loading all depth values.',
            value=2,
            range=(1, 20, 1),
            uid=[0],
        ),
        desc.ChoiceParam(
            name='partitioning',
            label='Partitioning',
            description='',
            value='singleBlock',
            values=('singleBlock', 'auto'),
            exclusive=True,
            uid=[0],
        ),
        desc.ChoiceParam(
            name='repartition',
            label='Repartition',
            description='',
            value='multiResolution',
            values=('multiResolution', 'regularGrid'),
            exclusive=True,
            uid=[0],
        ),
        desc.FloatParam(
            name='angleFactor',
            label='angleFactor',
            description='angleFactor',
            value=15.0,
            range=(0.0, 200.0, 1.0),
            uid=[0],
        ),
        desc.FloatParam(
            name='simFactor',
            label='simFactor',
            description='simFactor',
            value=15.0,
            range=(0.0, 200.0, 1.0),
            uid=[0],
        ),
        desc.FloatParam(
            name='pixSizeMarginInitCoef',
            label='pixSizeMarginInitCoef',
            description='pixSizeMarginInitCoef',
            value=2.0,
            range=(0.0, 10.0, 0.1),
            uid=[0],
        ),
        desc.FloatParam(
            name='pixSizeMarginFinalCoef',
            label='pixSizeMarginFinalCoef',
            description='pixSizeMarginFinalCoef',
            value=4.0,
            range=(0.0, 10.0, 0.1),
            uid=[0],
        ),
        desc.FloatParam(
            name='voteMarginFactor',
            label='voteMarginFactor',
            description='voteMarginFactor',
            value=4.0,
            range=(0.1, 10.0, 0.1),
            uid=[0],
        ),
        desc.FloatParam(
            name='contributeMarginFactor',
            label='contributeMarginFactor',
            description='contributeMarginFactor',
            value=2.0,
            range=(0.0, 10.0, 0.1),
            uid=[0],
        ),
        desc.FloatParam(
            name='simGaussianSizeInit',
            label='simGaussianSizeInit',
            description='simGaussianSizeInit',
            value=10.0,
            range=(0.0, 50.0, 0.1),
            uid=[0],
        ),
        desc.FloatParam(
            name='simGaussianSize',
            label='simGaussianSize',
            description='simGaussianSize',
            value=10.0,
            range=(0.0, 50.0, 0.1),
            uid=[0],
        ),
        desc.FloatParam(
            name='minAngleThreshold',
            label='minAngleThreshold',
            description='minAngleThreshold',
            value=1.0,
            range=(0.0, 10.0, 0.01),
            uid=[0],
        ),
        desc.BoolParam(
            name='refineFuse',
            label='Refine Fuse',
            description='Refine depth map fusion with the new pixels size defined by angle and similarity scores.',
            value=True,
            uid=[0],
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='''verbosity level (fatal, error, warning, info, debug, trace).''',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        desc.File(
            name="output",
            label="Output mesh",
            description="Output mesh (OBJ file format).",
            # Consistency fix: use the shared internal-folder constant like the
            # other nodes in this file instead of duplicating the
            # "{cache}/{nodeType}/{uid0}/" template inline.
            value=desc.Node.internalFolder + 'mesh.obj',
            uid=[],
        ),
        desc.File(
            name="outputDenseReconstruction",
            label="Output reconstruction",
            description="Output dense reconstruction (BIN file format).",
            value=desc.Node.internalFolder + 'denseReconstruction.bin',
            uid=[],
            group="",
        ),
    ]
class PanoramaWarping(desc.CommandLineNode):
    """Warp each input image into the panorama coordinate system."""
    commandLine = 'aliceVision_panoramaWarping {allParams}'
    size = desc.DynamicNodeSize('input')
    parallelization = desc.Parallelization(blockSize=5)
    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

    documentation = '''
Compute the image warping for each input image in the panorama coordinate system.
'''

    inputs = [
        desc.File(
            name='input',
            label='Input',
            description="SfM Data File",
            value='',
            uid=[0],
        ),
        desc.BoolParam(
            name='estimateResolution',
            label='Estimate Resolution',
            description='Estimate output panorama resolution automatically based on the input images resolution.',
            value=True,
            uid=[0],
            group=None,  # skip group from command line
        ),
        desc.IntParam(
            name='panoramaWidth',
            label='Panorama Width',
            description='Choose the output panorama width (in pixels).',
            value=10000,
            range=(0, 50000, 1000),
            uid=[0],
            # Only relevant when the resolution is set manually.
            enabled=lambda node: (not node.estimateResolution.value),
        ),
        desc.IntParam(
            name='percentUpscale',
            label='Upscale Ratio',
            description='Percentage of upscaled pixels.\n'
                        '\n'
                        'How many percent of the pixels will be upscaled (compared to its original resolution):\n'
                        ' * 0: all pixels will be downscaled\n'
                        ' * 50: on average the input resolution is kept (optimal to reduce over/under-sampling)\n'
                        ' * 100: all pixels will be upscaled\n',
            value=50,
            range=(0, 100, 1),
            # Only relevant in automatic-resolution mode.
            enabled=lambda node: (node.estimateResolution.value),
            uid=[0]
        ),
        desc.IntParam(
            name='maxPanoramaWidth',
            label='Max Panorama Width',
            description='Choose the maximal output panorama width (in pixels). Zero means no limit.',
            value=35000,
            range=(0, 100000, 1000),
            uid=[0],
            enabled=lambda node: (node.estimateResolution.value),
        ),
        desc.ChoiceParam(
            name='storageDataType',
            label='Storage Data Type',
            description='Storage image data type:\n'
                        ' * float: Use full floating point (32 bits per channel)\n'
                        ' * half: Use half float (16 bits per channel)\n'
                        ' * halfFinite: Use half float, but clamp values to avoid non-finite values\n'
                        ' * auto: Use half float if all values can fit, else use full float\n',
            value='float',
            values=['float', 'half', 'halfFinite', 'auto'],
            exclusive=True,
            uid=[0],
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='Verbosity level (fatal, error, warning, info, debug, trace).',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        desc.File(
            name='output',
            label='Output directory',
            description='',
            value=desc.Node.internalFolder,
            uid=[],
        ),
    ]
class MeshDenoising(desc.CommandLineNode):
    """Experimental mesh denoising node (guided normal filtering)."""
    commandLine = 'aliceVision_meshDenoising {allParams}'

    documentation = '''
This experimental node allows to reduce noise from a Mesh.
for now, the parameters are difficult to control and vary a lot from one dataset to another.
'''

    inputs = [
        desc.File(
            name='input',
            label='Input',
            description='''Input Mesh (OBJ file format).''',
            value='',
            uid=[0],
        ),
        desc.IntParam(
            name='denoisingIterations',
            label='Denoising Iterations',
            description='''Number of denoising iterations.''',
            value=5,
            range=(0, 30, 1),
            uid=[0],
        ),
        desc.FloatParam(
            name='meshUpdateClosenessWeight',
            label='Mesh Update Closeness Weight',
            description='''Closeness weight for mesh update, must be positive.''',
            value=0.001,
            range=(0.0, 0.1, 0.001),
            uid=[0],
        ),
        desc.FloatParam(
            # NOTE: the command-line flag is literally named 'lambda';
            # it is just a string here, not the Python keyword.
            name='lambda',
            label='Lambda',
            description='''Regularization weight.''',
            value=2.0,
            range=(0.0, 10.0, 0.01),
            uid=[0],
        ),
        desc.FloatParam(
            name='eta',
            label='Eta',
            description='Gaussian standard deviation for spatial weight, '
                        'scaled by the average distance between adjacent face centroids.\n'
                        'Must be positive.',
            value=1.5,
            range=(0.0, 20.0, 0.01),
            uid=[0],
        ),
        desc.FloatParam(
            name='mu',
            label='Mu',
            description='''Gaussian standard deviation for guidance weight.''',
            value=1.5,
            range=(0.0, 10.0, 0.01),
            uid=[0],
        ),
        desc.FloatParam(
            name='nu',
            label='Nu',
            description='''Gaussian standard deviation for signal weight.''',
            value=0.3,
            range=(0.0, 5.0, 0.01),
            uid=[0],
        ),
        desc.ChoiceParam(
            name='meshUpdateMethod',
            label='Mesh Update Method',
            description='Mesh Update Method\n'
                        ' * ITERATIVE_UPDATE (default): ShapeUp styled iterative solver \n'
                        ' * POISSON_UPDATE: Poisson-based update from [Wang et al. 2015] "Rolling guidance normal filter for geometric processing"',
            value=0,
            values=(0, 1),
            exclusive=True,
            uid=[0],
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='''verbosity level (fatal, error, warning, info, debug, trace).''',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        desc.File(
            name='output',
            label='Output',
            description='''Output mesh (OBJ file format).''',
            value=desc.Node.internalFolder + 'mesh.obj',
            uid=[],
        ),
    ]
class MeshFiltering(desc.CommandLineNode):
    """Filter an input mesh: remove large triangles, keep the largest component, smooth."""
    commandLine = 'aliceVision_meshFiltering {allParams}'

    inputs = [
        desc.File(
            name='inputMesh',
            label='Input Mesh',
            description='''Input Mesh (OBJ file format).''',
            value='',
            uid=[0],
        ),
        desc.FloatParam(
            name='removeLargeTrianglesFactor',
            label='Filter Large Triangles Factor',
            description='Remove all large triangles. We consider a triangle as large if one edge is bigger than N times the average edge length. Put zero to disable it.',
            value=60.0,
            range=(1.0, 100.0, 0.1),
            uid=[0],
        ),
        desc.BoolParam(
            name='keepLargestMeshOnly',
            label='Keep Only the Largest Mesh',
            description='Keep only the largest connected triangles group.',
            value=False,
            uid=[0],
        ),
        desc.IntParam(
            name='iterations',
            label='Smoothing Iterations',
            description='Number of smoothing iterations',
            value=5,
            range=(0, 50, 1),
            uid=[0],
        ),
        desc.FloatParam(
            # NOTE: the command-line flag is literally named 'lambda';
            # it is just a string here, not the Python keyword.
            name='lambda',
            label='Lambda',
            description='',
            value=1.0,
            range=(0.0, 10.0, 0.1),
            uid=[0],
            advanced=True,
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='''verbosity level (fatal, error, warning, info, debug, trace).''',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        desc.File(
            name='outputMesh',
            label='Output Mesh',
            description='''Output mesh (OBJ file format).''',
            value=desc.Node.internalFolder + 'mesh.obj',
            uid=[],
        ),
    ]
import tempfile import os import copy import pytest import meshroom.core from meshroom.core import desc, registerNodeType, unregisterNodeType from meshroom.core.exception import NodeUpgradeError from meshroom.core.graph import Graph, loadGraph from meshroom.core.node import CompatibilityNode, CompatibilityIssue, Node SampleGroupV1 = [ desc.IntParam(name="a", label="a", description="", value=0, uid=[0], range=None), desc.ListAttribute( name="b", elementDesc=desc.FloatParam(name="p", label="", description="", value=0.0, uid=[0], range=None), label="b", description="", ) ] SampleGroupV2 = [ desc.IntParam(name="a", label="a", description="", value=0, uid=[0], range=None), desc.ListAttribute( name="b", elementDesc=desc.GroupAttribute(name="p", label="", description="", groupDesc=SampleGroupV1), label="b", description="",
class FeatureMatching(desc.CommandLineNode):
    """Feature-matching node wrapping the aliceVision_featureMatching executable.

    Matches extracted descriptors between candidate image pairs
    (photometric matching), then filters the putative correspondences with a
    geometric model.
    """
    commandLine = 'aliceVision_featureMatching {allParams}'
    size = desc.DynamicNodeSize('input')
    # Image pairs are processed in chunks of 20 so chunks can run in parallel.
    parallelization = desc.Parallelization(blockSize=20)
    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

    inputs = [
        desc.File(
            name='input',
            label='Input',
            description='SfMData file.',
            value='',
            uid=[0],
        ),
        desc.ListAttribute(
            elementDesc=desc.File(
                name="featuresFolder",
                label="Features Folder",
                description="",
                value="",
                uid=[0],
            ),
            name="featuresFolders",
            label="Features Folders",
            description="Folder(s) containing the extracted features and descriptors.",
        ),
        desc.File(
            name='imagePairsList',
            label='Image Pairs List',
            description='Path to a file which contains the list of image pairs to match.',
            value='',
            uid=[0],
        ),
        desc.ChoiceParam(
            name='describerTypes',
            label='Describer Types',
            description='Describer types used to describe an image.',
            value=['sift'],
            values=['sift', 'sift_float', 'sift_upright', 'akaze', 'akaze_liop',
                    'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'],
            exclusive=False,
            uid=[0],
            joinChar=',',
        ),
        desc.ChoiceParam(
            name='photometricMatchingMethod',
            label='Photometric Matching Method',
            description='For Scalar based regions descriptor\n'
                        ' * BRUTE_FORCE_L2: L2 BruteForce matching\n'
                        ' * ANN_L2: L2 Approximate Nearest Neighbor matching\n'
                        ' * CASCADE_HASHING_L2: L2 Cascade Hashing matching\n'
                        ' * FAST_CASCADE_HASHING_L2: L2 Cascade Hashing with precomputed hashed regions (faster than CASCADE_HASHING_L2 but use more memory) \n'
                        'For Binary based descriptor\n'
                        ' * BRUTE_FORCE_HAMMING: BruteForce Hamming matching',
            value='ANN_L2',
            values=('BRUTE_FORCE_L2', 'ANN_L2', 'CASCADE_HASHING_L2',
                    'FAST_CASCADE_HASHING_L2', 'BRUTE_FORCE_HAMMING'),
            exclusive=True,
            uid=[0],
        ),
        desc.ChoiceParam(
            name='geometricEstimator',
            label='Geometric Estimator',
            description='Geometric estimator: (acransac: A-Contrario Ransac, loransac: LO-Ransac (only available for "fundamental_matrix" model)',
            value='acransac',
            values=['acransac', 'loransac'],
            exclusive=True,
            uid=[0],
        ),
        desc.ChoiceParam(
            name='geometricFilterType',
            label='Geometric Filter Type',
            description='Geometric validation method to filter features matches: \n'
                        ' * fundamental_matrix\n'
                        ' * essential_matrix\n'
                        ' * homography_matrix\n'
                        ' * homography_growing\n'
                        ' * no_filtering',
            value='fundamental_matrix',
            values=['fundamental_matrix', 'essential_matrix', 'homography_matrix',
                    'homography_growing', 'no_filtering'],
            exclusive=True,
            uid=[0],
        ),
        desc.FloatParam(
            name='distanceRatio',
            label='Distance Ratio',
            description='Distance ratio to discard non meaningful matches.',
            value=0.8,
            range=(0.0, 1.0, 0.01),
            uid=[0],
        ),
        desc.IntParam(
            name='maxIteration',
            label='Max Iteration',
            description='Maximum number of iterations allowed in ransac step.',
            value=2048,
            range=(1, 20000, 1),
            uid=[0],
        ),
        desc.IntParam(
            name='maxMatches',
            label='Max Matches',
            description='Maximum number of matches to keep.',
            value=0,
            range=(0, 10000, 1),
            uid=[0],
        ),
        # NOTE(review): the next two descriptions look truncated upstream
        # ("putative matches.", "the found model ..."); they are runtime UI
        # strings and are kept verbatim here.
        desc.BoolParam(
            name='savePutativeMatches',
            label='Save Putative Matches',
            description='putative matches.',
            value=False,
            uid=[0],
        ),
        desc.BoolParam(
            name='guidedMatching',
            label='Guided Matching',
            description='the found model to improve the pairwise correspondences.',
            value=False,
            uid=[0],
        ),
        desc.BoolParam(
            name='exportDebugFiles',
            label='Export Debug Files',
            description='debug files (svg, dot).',
            value=False,
            uid=[],  # debug output does not affect the matches -> not in the uid
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='verbosity level (fatal, error, warning, info, debug, trace).',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        desc.File(
            name='output',
            label='Output Folder',
            description='Path to a folder in which computed matches will be stored.',
            value=desc.Node.internalFolder,
            uid=[],
        ),
    ]
class LdrToHdrMerge(desc.CommandLineNode):
    """Merge each group of bracketed LDR exposures into a single HDR image,
    using a previously-calibrated camera response curve
    (wraps the aliceVision_LdrToHdrMerge executable)."""
    commandLine = 'aliceVision_LdrToHdrMerge {allParams}'
    size = desc.DynamicNodeSize('input')
    parallelization = desc.Parallelization(blockSize=2)
    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'

    # FIX: the previous text ("Calibrate LDR to HDR response curve from
    # samples") described the calibration node, not this merge node.
    documentation = '''
Merge the input LDR exposure brackets into HDR images, using the
previously-calibrated camera response curve.
'''

    inputs = [
        desc.File(
            name='input',
            label='Input',
            description='SfMData file.',
            value='',
            uid=[0],
        ),
        desc.File(
            name='response',
            label='Response file',
            description='Response file',
            value='',
            uid=[0],
        ),
        desc.IntParam(
            name='userNbBrackets',
            label='Number of Brackets',
            description='Number of exposure brackets per HDR image (0 for automatic detection).',
            value=0,
            range=(0, 15, 1),
            uid=[],
            group='user',  # not used directly on the command line
        ),
        desc.IntParam(
            name='nbBrackets',
            label='Automatic Nb Brackets',
            description='Number of exposure brackets used per HDR image. It is detected automatically from input Viewpoints metadata if "userNbBrackets" is 0, else it is equal to "userNbBrackets".',
            value=0,
            range=(0, 10, 1),
            uid=[0],
        ),
        desc.IntParam(
            name='offsetRefBracketIndex',
            label='Offset Ref Bracket Index',
            # FIX: corrected the "backet" typo in the user-facing description.
            description='Zero to use the center bracket. +N to use a more exposed bracket or -N to use a less exposed bracket.',
            value=1,
            range=(-4, 4, 1),
            uid=[0],
            enabled=lambda node: node.nbBrackets.value != 1,
        ),
        desc.BoolParam(
            name='byPass',
            label='Bypass',
            description="Bypass HDR creation and use the medium bracket as the source for the next steps.",
            value=False,
            uid=[0],
            enabled=lambda node: node.nbBrackets.value != 1,
        ),
        desc.ChoiceParam(
            name='fusionWeight',
            label='Fusion Weight',
            description="Weight function used to fuse all LDR images together:\n"
                        " * gaussian \n"
                        " * triangle \n"
                        " * plateau",
            value='gaussian',
            values=['gaussian', 'triangle', 'plateau'],
            exclusive=True,
            uid=[0],
            enabled=lambda node: node.byPass.enabled and not node.byPass.value,
        ),
        desc.IntParam(
            name='channelQuantizationPower',
            label='Channel Quantization Power',
            description='Quantization level like 8 bits or 10 bits.',
            value=10,
            range=(8, 14, 1),
            uid=[0],
            advanced=True,
            enabled=lambda node: node.byPass.enabled and not node.byPass.value,
        ),
        desc.FloatParam(
            name='highlightCorrectionFactor',
            label='Highlights Correction',
            description='Pixels saturated in all input images have a partial information about their real luminance.\n'
                        'We only know that the value should be >= to the standard hdr fusion.\n'
                        'This parameter allows to perform a post-processing step to put saturated pixels to a constant\n'
                        'value defined by the `highlightsMaxLuminance` parameter.\n'
                        'This parameter is float to enable to weight this correction.',
            value=1.0,
            range=(0.0, 1.0, 0.01),
            uid=[0],
            enabled=lambda node: node.byPass.enabled and not node.byPass.value,
        ),
        desc.FloatParam(
            name='highlightTargetLux',
            label='Highlight Target Luminance (Lux)',
            description='This is an arbitrary target value (in Lux) used to replace the unknown luminance value of the saturated pixels.\n'
                        '\n'
                        'Some Outdoor Reference Light Levels:\n'
                        ' * 120,000 lux: Brightest sunlight\n'
                        ' * 110,000 lux: Bright sunlight\n'
                        ' * 20,000 lux: Shade illuminated by entire clear blue sky, midday\n'
                        ' * 1,000 lux: Typical overcast day, midday\n'
                        ' * 400 lux: Sunrise or sunset on a clear day\n'
                        ' * 40 lux: Fully overcast, sunset/sunrise\n'
                        '\n'
                        'Some Indoor Reference Light Levels:\n'
                        ' * 20000 lux: Max Usually Used Indoor\n'
                        ' * 750 lux: Supermarkets\n'
                        ' * 500 lux: Office Work\n'
                        ' * 150 lux: Home\n',
            value=120000.0,
            range=(1000.0, 150000.0, 1.0),
            uid=[0],
            enabled=lambda node: node.byPass.enabled and not node.byPass.value and node.highlightCorrectionFactor.value != 0,
        ),
        desc.ChoiceParam(
            name='storageDataType',
            label='Storage Data Type',
            description='Storage image data type:\n'
                        ' * float: Use full floating point (32 bits per channel)\n'
                        ' * half: Use half float (16 bits per channel)\n'
                        ' * halfFinite: Use half float, but clamp values to avoid non-finite values\n'
                        ' * auto: Use half float if all values can fit, else use full float\n',
            value='float',
            values=['float', 'half', 'halfFinite', 'auto'],
            exclusive=True,
            uid=[0],
        ),
        desc.ChoiceParam(
            name='verboseLevel',
            label='Verbose Level',
            description='verbosity level (fatal, error, warning, info, debug, trace).',
            value='info',
            values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'],
            exclusive=True,
            uid=[],
        ),
    ]

    outputs = [
        desc.File(
            name='outSfMData',
            label='Output SfMData File',
            description='Path to the output sfmdata file',
            value=desc.Node.internalFolder + 'sfmData.sfm',
            uid=[],
        ),
    ]

    @classmethod
    def update(cls, node):
        """Infer 'nbBrackets' for *node* from the connected CameraInit viewpoints.

        Follows the 'input' link back to a node exposing 'viewpoints', reads the
        Exif exposure metadata (aperture, shutter speed, ISO) of every view and
        groups consecutive views into exposure brackets. Sets nbBrackets to the
        detected bracket size, 1 when no multi-bracketing is detected, or 0 when
        detection is impossible.

        Raises:
            ValueError: if *node* is not described by this node type.
        """
        if not isinstance(node.nodeDesc, cls):
            raise ValueError("Node {} is not an instance of type {}".format(node, cls))
        # TODO: use Node version for this test
        if 'userNbBrackets' not in node.getAttributes().keys():
            # Old version of the node: attribute does not exist yet.
            return
        if node.userNbBrackets.value != 0:
            # Explicit user override: trust it and skip automatic detection.
            node.nbBrackets.value = node.userNbBrackets.value
            return
        # logging.info("[LDRToHDR] Update start: version:" + str(node.packageVersion))
        cameraInitOutput = node.input.getLinkParam(recursive=True)
        if not cameraInitOutput:
            node.nbBrackets.value = 0
            return
        if not cameraInitOutput.node.hasAttribute('viewpoints'):
            # Direct upstream node has no viewpoints: follow its own 'input'
            # link one step further (e.g. through an intermediate node).
            if cameraInitOutput.node.hasAttribute('input'):
                cameraInitOutput = cameraInitOutput.node.input.getLinkParam(recursive=True)
        if cameraInitOutput and cameraInitOutput.node and cameraInitOutput.node.hasAttribute('viewpoints'):
            viewpoints = cameraInitOutput.node.viewpoints.value
        else:
            # No connected CameraInit
            node.nbBrackets.value = 0
            return
        # logging.info("[LDRToHDR] Update start: nb viewpoints:" + str(len(viewpoints)))
        inputs = []
        for viewpoint in viewpoints:
            jsonMetadata = viewpoint.metadata.value
            if not jsonMetadata:
                # No metadata: we cannot determine the number of brackets.
                node.nbBrackets.value = 0
                return
            # NOTE(review): 'json' and 'findMetadata' are module-level names
            # imported/defined elsewhere in this file.
            d = json.loads(jsonMetadata)
            fnumber = findMetadata(
                d, ["FNumber", "Exif:ApertureValue", "ApertureValue", "Aperture"], "")
            shutterSpeed = findMetadata(
                d, ["Exif:ShutterSpeedValue", "ShutterSpeedValue", "ShutterSpeed"], "")
            iso = findMetadata(
                d, ["Exif:ISOSpeedRatings", "ISOSpeedRatings", "ISO"], "")
            if not fnumber and not shutterSpeed:
                # An image without shutter or fnumber makes bracket detection
                # impossible; assume no multi-bracketing, so nothing to do.
                node.nbBrackets.value = 1
                return
            inputs.append((viewpoint.path.value, (fnumber, shutterSpeed, iso)))
        inputs.sort()
        exposureGroups = []
        exposures = []
        for path, exp in inputs:
            # A new group starts when the exposure differs from the previous one
            # but matches the first exposure of the current group, i.e. the
            # bracketing sequence wraps around.
            if exposures and exp != exposures[-1] and exp == exposures[0]:
                exposureGroups.append(exposures)
                exposures = [exp]
            else:
                exposures.append(exp)
        exposureGroups.append(exposures)
        exposures = None
        bracketSizes = set()
        if len(exposureGroups) == 1:
            if len(set(exposureGroups[0])) == 1:
                # Single exposure and multiple views
                node.nbBrackets.value = 1
            else:
                # Single view and multiple exposures
                node.nbBrackets.value = len(exposureGroups[0])
        else:
            for expGroup in exposureGroups:
                bracketSizes.add(len(expGroup))
            if len(bracketSizes) == 1:
                # All groups have the same size: that is the bracket count.
                node.nbBrackets.value = bracketSizes.pop()
                # logging.info("[LDRToHDR] nb bracket size:" + str(node.nbBrackets.value))
            else:
                # Inconsistent group sizes: detection failed.
                node.nbBrackets.value = 0