def estimate_rotation_matrices(chunk, i, j):
    groups = copy.copy(chunk.camera_groups)
    groups.append(None)
    for group in groups:
        group_cameras = list(filter(lambda c: c.group == group, chunk.cameras))
        if len(group_cameras) == 0:
            continue
        if len(group_cameras) == 1:
            if group_cameras[0].reference.rotation is None:
                group_cameras[0].reference.rotation = ps.Vector([0, 0, 0])
            continue
        for idx, c in enumerate(group_cameras[0:-1]):
            next_camera = group_cameras[idx + 1]
            if c.reference.rotation is None:
                if c.reference.location is None or next_camera.reference.location is None:
                    continue
                direction = delta_vector_to_chunk(c.reference.location,
                                                  next_camera.reference.location)
                cos_yaw = direction * j
                yaw = math.degrees(math.acos(cos_yaw)) + 90  # TODO not sure about this offset
                if direction * i > 0:
                    yaw = -yaw
                c.reference.rotation = ps.Vector([yaw, 0, 0])
        group_cameras[-1].reference.rotation = group_cameras[-2].reference.rotation
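# The delta_vector_to_chunk helper used above is not shown in this file.
# A hypothetical reconstruction under these assumptions: it unprojects both
# reference (geographic) locations via the chunk CRS, maps them into the
# chunk frame, and returns the normalized direction so dot products with
# the chunk axes i/j yield cosines for math.acos.
def delta_vector_to_chunk(loc1, loc2):
    chunk = ps.app.document.chunk  # assumption: helper reads the active chunk
    inv = chunk.transform.matrix.inv()
    v1 = inv.mulp(chunk.crs.unproject(loc1))
    v2 = inv.mulp(chunk.crs.unproject(loc2))
    return (v2 - v1).normalized()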
def bbox_to_cs():
    print("Script started...")
    doc = PhotoScan.app.document
    chunk = doc.chunk

    T = chunk.transform.matrix
    v_t = T.mulp(PhotoScan.Vector([0, 0, 0]))
    if chunk.crs:
        m = chunk.crs.localframe(v_t)
    else:
        m = PhotoScan.Matrix().Diag([1, 1, 1, 1])
    m = m * T

    s = math.sqrt(m[0, 0] ** 2 + m[0, 1] ** 2 + m[0, 2] ** 2)  # scale factor
    # s = m.scale()
    R = PhotoScan.Matrix([[m[0, 0], m[0, 1], m[0, 2]],
                          [m[1, 0], m[1, 1], m[1, 2]],
                          [m[2, 0], m[2, 1], m[2, 2]]])
    # R = m.rotation()
    R = R * (1. / s)

    reg = chunk.region
    reg.rot = R.t()
    chunk.region = reg
    print("Script finished!")
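# Optional: instead of running the script once, it can be registered as a
# custom menu entry in the PhotoScan GUI so the bounding-box alignment can
# be re-run on demand. The menu label here is an arbitrary choice.
PhotoScan.app.addMenuItem("Custom/Rotate bounding box to CS", bbox_to_cs)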
def getSVGObject(self):
    photo = I3_Photo()
    photo.add_point(p1)
    photo.add_point(p2)
    photo.label = "test_Label"

    p_bottom_left = PhotoScan.Vector((0, 2000))
    p_bottom_right = PhotoScan.Vector((2000, 2000))
    p_upper_left = PhotoScan.Vector((0, 0))
    p_upper_right = PhotoScan.Vector((2000, 0))

    photo.add_point(I3_Point(measurement_I=p_bottom_left, projection_I=p_bottom_left))
    photo.add_point(I3_Point(measurement_I=p_bottom_right, projection_I=PhotoScan.Vector((2001, 2001))))
    photo.add_point(I3_Point(measurement_I=p_upper_left, projection_I=p_upper_left))
    photo.add_point(I3_Point(measurement_I=p_upper_right, projection_I=p_upper_right))

    class psSensor():
        height = 2000
        width = 2000

    class psCamera():
        sensor = psSensor()

    cam_dummy = psCamera()
    photo.photoScan_camera = cam_dummy
    return SVG_Photo_Representation([photo], 700)
def test_create_ellipsoid_stl(self):
    adju = Peseudo_3D_intersection_adjustment()
    m = PhotoScan.Matrix([[504, 360, 180],
                          [360, 360, 0],
                          [180, 0, 720]])
    # The matrix below supersedes the one above for this test case
    m = PhotoScan.Matrix([[24.66697238419596, 11.102022651894911, 29.082023223173206],
                          [11.10202265189491, 10.052229488742526, 14.941828405336427],
                          [29.082023223173206, 14.941828405336427, 42.78791682803554]])
    eig_valu, eig_vec = adju._get_eigen_vel_vec(m)

    stl_handler = STL_Handler()
    stl_handler.importSTL()
    stl_handler.importSTL("sp_exp_for_test.stl")

    ellipsoid_stl = "solid OpenSCAD_Model\n"
    ellipsoid_stl += stl_handler.create_ellipsoid_stl(eig_vec, eig_valu, [10, 0, 0], 1, False)
    # print(ellipsoid_stl)
    self.assertEqual('vertex 11.997 0.635 -1.716', ellipsoid_stl.splitlines()[3])
    ellipsoid_stl += "endsolid OpenSCAD_Model"

    path = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(path, 'stl_ell.stl'), 'w') as f:
        f.write(ellipsoid_stl)
def setup_camera(self):
    # Imported camera coordinates projection
    self.chunk.crs = self.WGS_84
    # Accuracy for camera position in m
    self.chunk.camera_location_accuracy = PhotoScan.Vector([1, 1, 1])
    # Accuracy for camera orientations in degrees
    self.chunk.camera_rotation_accuracy = PhotoScan.Vector([1, 1, 1])
def camera_calibration(chunk, path_projekt):
    path = path_projekt + 'kalibrierung.xml'
    s = chunk.sensors[0]
    c = PhotoScan.Calibration()
    c.load(path)
    s.user_calib = c
    s.fixed = True
def getPhotoforRasterTest(cls):
    photo = I3_Photo()
    photo.add_point(p1)
    photo.add_point(p2)
    photo.label = "test_Label"

    p_bottom_left = PhotoScan.Vector((0, 2000))
    p_bottom_right = PhotoScan.Vector((2000, 2000))
    p_upper_left = PhotoScan.Vector((0, 0))
    p_upper_right = PhotoScan.Vector((2000, 0))

    photo.add_point(I3_Point(measurement_I=p_bottom_left, projection_I=PhotoScan.Vector((-1, 2001))))
    photo.add_point(I3_Point(measurement_I=p_bottom_right, projection_I=PhotoScan.Vector((2001, 2001))))
    photo.add_point(I3_Point(measurement_I=p_upper_left, projection_I=PhotoScan.Vector((-1, -1))))
    photo.add_point(I3_Point(measurement_I=p_upper_right, projection_I=PhotoScan.Vector((2001, -1))))

    class psSensor():
        height = 2000
        width = 1990

    class psCamera():
        sensor = psSensor()

    cam_dummy = psCamera()
    photo.photoScan_camera = cam_dummy
    return photo
def make_project(project_dir, chunk_dirs):
    'Make a new project and a chunk for each directory.'
    log("Making project " + project_dir)

    # Create new doc
    doc = PhotoScan.Document()  # Operate on a new document for batch processing
    #doc = PhotoScan.app.document # Use the current open document in PhotoScan

    # Go through each chunk directory
    for chunk_dir in chunk_dirs:
        chunk = doc.addChunk()  # Create the chunk
        chunk.label = os.path.basename(chunk_dir)
        photos = os.listdir(chunk_dir)  # Get the photos filenames
        photos = [os.path.join(chunk_dir, p) for p in photos]  # Make them into a full path
        log("Found {} photos in {}".format(len(photos), chunk_dir))
        if not chunk.addPhotos(photos):
            log("ERROR: Failed to add photos: " + str(photos))

    # Save the new project
    project_name = make_project_filename(project_dir, "psz")
    log("Saving: " + project_name)
    if not doc.save(project_name):
        log("ERROR: Failed to save project: " + project_name)
    return doc
def project_features(camera, points, features):
    """
    Project feature values from 3D points onto an image using the camera matrix.

    Requires PhotoScan library.

    Returns:
        projected_features - an array of (image_height, image_width, nfeatures)
        of feature values corresponding to pixels in the image
    """
    import PhotoScan

    image_height = int(camera.meta['File/ImageHeight'])
    image_width = int(camera.meta['File/ImageWidth'])
    _, nfeatures = features.shape

    # Allocate the output array directly; np.zeros_like((h, w)) would only
    # create a 1-D array of two zeros, not an image-shaped buffer.
    projected_features = np.zeros((image_height, image_width, nfeatures))
    for i, P in enumerate(points):
        P = PhotoScan.Vector(tuple(P))
        x, y = camera.project(P)
        # camera.project returns continuous pixel coordinates; convert to
        # integer indices before the bounds check
        x, y = int(x), int(y)
        x_in_image = 0 <= x < image_width
        y_in_image = 0 <= y < image_height
        if x_in_image and y_in_image:
            projected_features[y, x] = features[i]
    return projected_features
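# Hedged usage sketch for project_features (the chunk, feature values, and
# camera choice are assumptions): paint one dummy feature per sparse point
# onto the first camera of the active chunk. Sparse point coordinates are
# already in the chunk's internal frame, which camera.project expects.
import numpy as np
import PhotoScan

chunk = PhotoScan.app.document.chunk
pts = [[p.coord.x, p.coord.y, p.coord.z] for p in chunk.point_cloud.points]
feats = np.ones((len(pts), 1))  # placeholder feature values
img = project_features(chunk.cameras[0], pts, feats)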
def alignphotos():
    print("*** Started...Align Photos *** ", datetime.datetime.utcnow())
    coord_system = PhotoScan.CoordinateSystem('EPSG::4326')
    chunk.crs = coord_system
    #chunk.matchPhotos(accuracy=config['accuracy'], preselection=PhotoScan.Preselection.GenericPreselection, filter_mask=False, keypoint_limit=40000, tiepoint_limit=10000)
    chunk.matchPhotos(accuracy=PhotoScan.Accuracy.LowestAccuracy,
                      preselection=PhotoScan.Preselection.GenericPreselection,
                      filter_mask=False, keypoint_limit=40000, tiepoint_limit=10000)
    chunk.alignCameras()
    if os.path.exists(marker_file):
        print("marker file exists!")
        chunk.importMarkers(marker_file)
    if os.path.exists(reference_file):
        print("reference file exists!")
        chunk.loadReference(reference_file, "csv", delimiter=';')
    chunk.optimizeCameras()
    for camera in chunk.cameras:
        camera.reference.enabled = False
    chunk.updateTransform()
    print("*** Finished - Align Photos ***")
def transform_chunck():
    text = input_field.get("1.0", tk.END)
    try:
        # text = '1.000000 0.000000 0.000000 0.000000\n0.000000 1.000000 0.000000 0.000000\n0.000000 0.000000 1.000000 0.000000\n0.000000 0.000000 0.000000 1.000000\n'
        lines = text.splitlines()
        lines = list(filter(lambda x: len(x) > 2, lines))
        print("Do Transform")
        matrix_list = []
        if len(lines) != 4:
            print("invalid number of rows")
            raise ValueError("invalid number of rows")
        for line in lines:
            line_value = line.split()
            if len(line_value) != 4:
                print("invalid number of columns", line_value)
                raise ValueError("invalid number of columns")
            line_value = [float(x) for x in line_value]
            matrix_list.append(line_value)
        trafo_matrix = PhotoScan.Matrix(matrix_list)
        PhotoScan.app.document.chunk.transform.matrix = trafo_matrix
        print(PhotoScan.app.document.chunk.transform.matrix)
        label.config(text='Transformation successful!')
    except Exception as e:
        print(e)
        label.config(text='The matrix is not valid.\n'
                          'Please use a 4x4 matrix with blanks as separator.')
def export_ortho():
    global path_to_project
    print(path_to_project['ProjectPath'])
    export_filename = os.path.basename(path_to_project['ProjectPath']).replace('.psz', '.tif')
    export_path = os.path.join(export_folder, export_filename)
    try:
        project = PhotoScan.app.document
        project.open(path_to_project['ProjectPath'])
        dx, dy = mosaic.get_resolution(path_to_project['Flight_id'],
                                       path_to_project['Field'],
                                       path_to_project['Camera'])
        if dx is not None and dy is not None:
            status = project.activeChunk.exportOrthophoto(
                export_path, format="tif", color_correction=False,
                blending='average', dx=dx, dy=dy,
                projection=project.activeChunk.projection)
        else:
            status = project.activeChunk.exportOrthophoto(
                export_path, format="tif", color_correction=False,
                blending='average',
                projection=project.activeChunk.projection)
        if status is True:
            app = PhotoScan.Application()
            app.quit()
    except Exception as e:
        print(e)
        return
def reset_view(self, magnification=80):
    doc.chunk = self.chunk
    chunk = self.chunk
    T = chunk.transform.matrix
    viewpoint = PhotoScan.app.viewpoint
    cx = viewpoint.width
    cy = viewpoint.height

    region = chunk.region
    r_center = region.center
    r_rotate = region.rot
    r_size = region.size
    r_vert = list()

    for i in range(8):  # bounding box corners
        r_vert.append(PhotoScan.Vector([
            0.5 * r_size[0] * ((i & 2) - 1),
            r_size[1] * ((i & 1) - 0.5),
            0.25 * r_size[2] * ((i & 4) - 2)
        ]))
        r_vert[i] = r_center + r_rotate * r_vert[i]

    height = T.mulv(r_vert[1] - r_vert[0]).norm()
    width = T.mulv(r_vert[2] - r_vert[0]).norm()

    if width / cx > height / cy:
        scale = cx / width
    else:
        scale = cy / height

    PhotoScan.app.viewpoint.coo = T.mulp(chunk.region.center)
    PhotoScan.app.viewpoint.mag = magnification
def make_project(project_dir, photos_dir):
    'Make a new project and add the photos from the photos_dir to it.'
    log("Making project " + project_dir)
    doc = PhotoScan.Document()  # Operate on a new document for batch processing
    #doc = PhotoScan.app.document # Use the current open document in PhotoScan

    # Add the photos to a chunk
    chunk = doc.addChunk()
    photos_dir = os.path.join(project_dir, photos_dir)
    photos = os.listdir(photos_dir)
    photos = [os.path.join(photos_dir, p) for p in photos]
    log("Found {} photos in {}".format(len(photos), photos_dir))
    if not chunk.addPhotos(photos):
        log("ERROR: Failed to add photos: " + str(photos))

    # Save the new project
    project_name = make_project_filename(project_dir, "psz")
    log("Saving: " + project_name)
    if not doc.save(project_name):
        log("ERROR: Failed to save project: " + project_name)
    return doc
def main():
    doc = PhotoScan.app.document
    chunk = doc.chunk

    T0 = chunk.transform.matrix

    region = chunk.region
    R0 = region.rot
    C0 = region.center
    s0 = region.size

    for chunk in doc.chunks:
        if chunk == doc.chunk:
            continue

        T = chunk.transform.matrix.inv() * T0
        R = PhotoScan.Matrix([[T[0, 0], T[0, 1], T[0, 2]],
                              [T[1, 0], T[1, 1], T[1, 2]],
                              [T[2, 0], T[2, 1], T[2, 2]]])
        scale = R.row(0).norm()
        R = R * (1 / scale)

        region.rot = R * R0
        region.center = T.mulp(C0)
        region.size = s0 * scale

        chunk.region = region

    print("Script finished. Bounding box copied.\n")
def open_project(project_dir):
    log("Opening project " + project_dir)
    project_name = make_project_filename(project_dir, "psz")
    doc = PhotoScan.Document()
    if not doc.open(project_name):
        log("ERROR: Could not open document: " + project_name)
    return doc
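# Both make_project and open_project rely on a make_project_filename helper
# that is not shown in this file. A hypothetical sketch, assuming the
# project file is named after its directory:
def make_project_filename(project_dir, ext):
    base = os.path.basename(os.path.normpath(project_dir))
    return os.path.join(project_dir, base + "." + ext)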
def create_new_project(self):
    if not os.path.exists(path=self.project_file_name + self.PROJECT_TYPE):
        new_project = PhotoScan.Document()
        chunk = new_project.addChunk()
        new_project.save(
            path=self.project_file_name + self.PROJECT_TYPE,
            chunks=[chunk]
        )
def rotY(angle):
    sinAngle = sin(angle)
    cosAngle = cos(angle)
    mat = PhotoScan.Matrix([[cosAngle, 0., sinAngle, 0.],
                            [0., 1., 0., 0.],
                            [-sinAngle, 0., cosAngle, 0.],
                            [0., 0., 0., 1.]])
    # print("matY " + str(mat))
    return mat
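# Hedged companions to rotY, in case rotations about the other axes are
# needed (an assumption; only rotY appears in this file). These are the
# standard right-handed rotation matrices in the same 4x4 homogeneous form.
def rotX(angle):
    s, c = sin(angle), cos(angle)
    return PhotoScan.Matrix([[1., 0., 0., 0.],
                             [0., c, -s, 0.],
                             [0., s, c, 0.],
                             [0., 0., 0., 1.]])

def rotZ(angle):
    s, c = sin(angle), cos(angle)
    return PhotoScan.Matrix([[c, -s, 0., 0.],
                             [s, c, 0., 0.],
                             [0., 0., 1., 0.],
                             [0., 0., 0., 1.]])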
def add_network_tasks_to_queue(chunks, tasks):
    """Adds network tasks to queue."""
    network_tasks = []
    network_client = PhotoScan.NetworkClient()
    network_client.connect(SERVER_IP)

    for task_parameters in tasks:
        new_network_task = PhotoScan.NetworkTask()
        for chunk in chunks:
            new_network_task.frames.append((chunk.key, 0))
        new_network_task.name = task_parameters['name']
        for key in task_parameters:
            if key != 'name':
                new_network_task.params[key] = task_parameters[key]
        network_tasks.append(new_network_task)

    network_save = PhotoScan.app.document.path.replace(SHARED_ROOT, '')
    batch_id = network_client.createBatch(network_save, network_tasks)
    network_client.resumeBatch(batch_id)
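# Hedged usage sketch: each task dict carries a 'name' plus optional
# keyword parameters copied into NetworkTask.params. The task names below
# follow the network-processing naming of the processing steps; treat the
# exact names and the absence of extra parameters as assumptions.
doc = PhotoScan.app.document
tasks = [
    {'name': 'MatchPhotos'},
    {'name': 'AlignCameras'},
]
add_network_tasks_to_queue(doc.chunks, tasks)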
def setCoordinateSystem(chunk, doc):
    '''
    Set the coordinate system
    :return:
    '''
    chunk.crs = PhotoScan.CoordinateSystem("EPSG::4326")
    chunk.updateTransform()
    doc.save()
class TestMyProject(unittest.TestCase):
    pho1 = I3_Photo()
    pho1.sigma_I = PhotoScan.Vector((2, 13))
    pho2 = I3_Photo()
    pho2.sigma_I = PhotoScan.Vector((5, 11))

    project = I3_Project()
    project.photos = [pho1, pho2]

    def test_calcGlobalSigma(self):
        # RMS over both photos: sqrt((2^2 + 5^2) / 2) and sqrt((13^2 + 11^2) / 2)
        rms_x, rms_y = self.project._get_RMS_4_all_photos()
        self.assertAlmostEqual(rms_x, 3.807886553, 6)
        self.assertAlmostEqual(rms_y, 12.04159458, 6)

    def test_createProjectSVG(self):
        pass
def export_model(self):
    """
    Export LAS and OBJ to path using the file name prefix

    #Texture file format is JPG for JPG input images and TIF for all others.
    """
    for chunk in self.chunks:
        #Texture JPG for now as Cloudcompare doesn't like the TIFF format
        ext = PhotoScan.ImageFormatJPEG
        """ext = chunk.cameras[0].label[-3:]
        if ext.upper() == 'JPG':
            ext = PhotoScan.ImageFormatJPEG
        else:
            ext = PhotoScan.ImageFormatTIFF"""

        if self.exp_crs == 0:
            crs = chunk.crs
        else:
            crs = PhotoScan.CoordinateSystem('EPSG::{}'.format(self.exp_crs))

        #write log information
        with open(self.log, 'a+') as logfile:
            start_t = datetime.now()
            logfile.write('Exporting model {} at {} \n'.format(chunk, start_t.strftime('%H:%M:%S')))
            logfile.write(' Export CRS: {}\n'.format(crs))

        #create export file name
        if str(chunk)[8:-4] == 'Chunk':
            name = ''
            num = str(chunk)[-3]
        else:
            name = str(chunk)[8:-2]
            num = ''

        try:
            file = '{}{}_LAS{}.las'.format(self.prefix, name, num)
            chunk.exportPoints(path=os.path.join(self.path, file),
                               format=PhotoScan.PointsFormatLAS,
                               projection=crs)
        except RuntimeError as e:
            if str(e) == 'Null point cloud':
                print('There is no point cloud to export in chunk: {}'.format(chunk))
                continue
            else:
                raise

        try:
            file = '{}{}_OBJ{}.obj'.format(self.prefix, name, num)
            chunk.exportModel(path=os.path.join(self.path, file),
                              texture_format=ext,
                              projection=crs)
        except RuntimeError as e:
            if str(e) == 'Null model':
                print('There is no model to export in chunk: {}'.format(chunk))
                continue
            else:
                raise
def photoscan_alignphotos(images, reference_eo, sequence):
    start_time = time.time()

    doc = PhotoScan.app.document
    chunk = doc.addChunk()
    chunk.addPhotos(images)

    for i in range(len(chunk.cameras)):
        chunk.cameras[i].reference.location = (float(reference_eo[6 * i]),
                                               float(reference_eo[6 * i + 1]),
                                               float(reference_eo[6 * i + 2]))
        chunk.cameras[i].reference.rotation = (float(reference_eo[6 * i + 5]),
                                               float(reference_eo[6 * i + 4]),
                                               float(reference_eo[6 * i + 3]))

    chunk.camera_location_accuracy = PhotoScan.Vector([0.001, 0.001, 0.001])
    chunk.camera_rotation_accuracy = PhotoScan.Vector([0.01, 0.01, 0.01])
    # chunk.cameras[-1].reference.location_accuracy = PhotoScan.Vector([10, 10, 10])
    # chunk.cameras[-1].reference.rotation_accuracy = PhotoScan.Vector([10, 10, 10])
    chunk.cameras[-1].reference.accuracy = PhotoScan.Vector([10, 10, 10])
    chunk.cameras[-1].reference.accuracy_ypr = PhotoScan.Vector([10, 10, 10])

    chunk.matchPhotos(accuracy=PhotoScan.MediumAccuracy)
    chunk.alignCameras()
    # doc.save("test_" + str(int(sequence)+1) + ".psz")

    camera = chunk.cameras[-1]
    if not camera.transform:
        print("There is no transformation matrix")

    estimated_coord = chunk.crs.project(
        chunk.transform.matrix.mulp(camera.center))  # estimated XYZ in coordinate system units
    T = chunk.transform.matrix
    m = chunk.crs.localframe(
        T.mulp(camera.center))  # transformation matrix to the LSE coordinates in the given point
    R = (m * T * camera.transform * PhotoScan.Matrix().Diag([1, -1, -1, 1])).rotation()

    estimated_ypr = PhotoScan.utils.mat2ypr(R)  # estimated orientation angles - yaw, pitch, roll
    estimated_opk = PhotoScan.utils.mat2opk(R)  # estimated orientation angles - omega, phi, kappa

    print(estimated_coord[0])
    print(estimated_coord[1])
    print(estimated_coord[2])
    print(estimated_ypr[0])
    print(estimated_ypr[1])
    print(estimated_ypr[2])
    print(estimated_opk[0])
    print(estimated_opk[1])
    print(estimated_opk[2])
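# Hedged usage sketch for photoscan_alignphotos: reference_eo is one flat
# list with six exterior-orientation values per image (X, Y, Z plus three
# angles; note the angle triple is assigned to the camera rotation in
# reversed order above). All file names and values here are made up.
images = ["IMG_0001.JPG", "IMG_0002.JPG"]
reference_eo = [
    127.0, 37.5, 150.0, 0.0, 0.0, 90.0,  # image 1: X, Y, Z, angles
    127.0, 37.6, 150.0, 0.0, 0.0, 90.0,  # image 2
]
photoscan_alignphotos(images, reference_eo, sequence=0)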
def main():
    # prompting for path to photos
    path_photos = PhotoScan.app.getExistingDirectory("Specify INPUT photo folder(containing all metashape files):")
    path_export = PhotoScan.app.getExistingDirectory("Specify EXPORT folder:")

    # processing parameters
    accuracy = PhotoScan.Accuracy.HighAccuracy  # align photos accuracy
    preselection = PhotoScan.Preselection.GenericPreselection
    keypoints = 40000  # align photos key point limit
    tiepoints = 10000  # align photos tie point limit
    threshold = 0.5

    fold_list = os.listdir(path_photos)
    for folder in fold_list:
        #print("folder name is : "+folder)
        # loading images
        folderPath = path_photos + "\\" + folder
        image_list = os.listdir(folderPath)
        photo_list = list()
        for photo in image_list:
            # The original ("jpg" or "jpeg" or ...) expression only ever
            # tested "jpg"; check the file extension explicitly instead
            if photo.lower().endswith((".jpg", ".jpeg")):
                photo_list.append(os.path.join(folderPath, photo))

        doc = PhotoScan.Document()
        doc.save(path_export + "\\" + folder + ".psx")
        chunk = doc.addChunk()
        chunk.addPhotos(photo_list)

        # align photos
        chunk.matchPhotos(accuracy=accuracy,
                          preselection=preselection,
                          filter_mask=False,
                          keypoint_limit=keypoints,
                          tiepoint_limit=tiepoints)
        chunk.alignCameras()

        # Removing points outside bounding box
        chunk = doc.chunks[-1]
        R = chunk.region.rot      # Bounding box rotation matrix
        C = chunk.region.center   # Bounding box center vector
        size = chunk.region.size
        if not (chunk.point_cloud and chunk.enabled):
            continue
        elif not len(chunk.point_cloud.points):
            continue
        for point in chunk.point_cloud.points:
            if point.valid:
                v = point.coord
                v.size = 3
                v_c = v - C
                v_r = R.t() * v_c
                if abs(v_r.x) > abs(size.x / 2.):
                    point.valid = False
                elif abs(v_r.y) > abs(size.y / 2.):
                    point.valid = False
                elif abs(v_r.z) > abs(size.z / 2.):
                    point.valid = False
                else:
                    continue
        # Points outside the region were removed.

        # Read reprojection error and delete any 0.5 or greater
        f = PhotoScan.PointCloud.Filter()
        f.init(chunk, criterion=PhotoScan.PointCloud.Filter.ReprojectionError)
        f.removePoints(threshold)
        doc.save()
def runNetwork(project_path, argstring, tname='RunScript',
               PSscript='scripts/reef3D/PyToolbox/PSmodel.py'):
    '''
    Run a task over the network
    '''
    client = PhotoScan.NetworkClient()
    task1 = PhotoScan.NetworkTask()
    task1.name = tname
    task1.params['path'] = PSscript  # path to the script to be executed
    task1.params['args'] = argstring  # string of the arguments with space as separator
    client.connect('agisoft-qmgr.aims.gov.au')  # server ip
    batch_id = client.createBatch(project_path, [task1])
    client.resumeBatch(batch_id)
    print("Job started...")
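# Hedged usage sketch: the project path (relative to the network shared
# root) and the argument string are made up for illustration.
runNetwork('reef3D/projects/plot42.psx', 'plot42 high')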
def alignphotos():
    print("*** Started...Align Photos *** ", datetime.datetime.utcnow())
    coord_system = PhotoScan.CoordinateSystem('EPSG::4326')
    chunk.crs = coord_system
    chunk.matchPhotos(accuracy=PhotoScan.Accuracy.LowestAccuracy,
                      preselection=PhotoScan.Preselection.GenericPreselection,
                      filter_mask=False, keypoint_limit=4000, tiepoint_limit=500)
    chunk.alignCameras()
    chunk.optimizeCameras()
    doc.save(doc_name)
    PhotoScan.app.update()
    print("*** Finished - Align Photos ***")
def dbg_test1():
    point2d = PhotoScan.Vector([1448, 1186])
    sensor = camera.sensor
    calibration = sensor.calibration
    # Unproject the pixel into the camera frame, then map it into chunk
    # coordinates to get a second point on the viewing ray
    p_x = camera.transform.mulp(calibration.unproject(point2d))
    print('p_x', p_x)
    print('camera center', camera.center)
    X = chunk.dense_cloud.pickPoint(camera.center, p_x)
    print('X', X)
    chunk.addMarker(point=X)
def center_bbox_xyz():
    """Centers bounding box to XYZ center."""
    chunk = PhotoScan.app.document.chunk
    transform_matrix = chunk.transform.matrix

    if chunk.crs:
        vect_tm = transform_matrix * PhotoScan.Vector([0, 0, 0, 1])
        vect_tm.size = 3
        locfrm = chunk.crs.localframe(vect_tm)
    else:
        locfrm = PhotoScan.Matrix().diag([1, 1, 1, 1])
    locfrm = locfrm * transform_matrix

    sqrt = math.sqrt(locfrm[0, 0]**2 + locfrm[0, 1]**2 + locfrm[0, 2]**2)
    mat = PhotoScan.Matrix([[locfrm[0, 0], locfrm[0, 1], locfrm[0, 2]],
                            [locfrm[1, 0], locfrm[1, 1], locfrm[1, 2]],
                            [locfrm[2, 0], locfrm[2, 1], locfrm[2, 2]]])
    mat = mat * (1. / sqrt)

    reg = chunk.region
    reg.rot = mat.t()
    chunk.region = reg
def get_photos_delta(chunk):
    mid_idx = int(len(chunk.cameras) / 2)
    # Need at least two cameras in the first half to form a delta
    if mid_idx < 2:
        return ps.Vector([0, 0, 0])
    c1 = chunk.cameras[:mid_idx][-1]
    c2 = chunk.cameras[:mid_idx][-2]
    offset = c1.reference.location - c2.reference.location
    for i in range(len(offset)):
        offset[i] = math.fabs(offset[i])
    return offset
def scale_cams(chunk, camdict, thd=[0.5, 0.7], lstring='_LC', d=0.4):
    '''
    thd: min and max overlap desired to select camera pairs for scalebars
         (read from camdict['overlap_threshold'])
    lstring: unique string that identifies the left-hand-side cameras
         (read from camdict['lstring'])
    d: distance between the lens centres of each camera pair
         (read from camdict['cam_dist'])
    '''
    overlap = []
    cams = {'right': [], 'left': []}
    chunk.decimateModel(100000)  # this is to minimise memory load
    for cam in chunk.cameras:
        if cam.transform:
            if camdict['lstring'] in cam.label:
                cams['left'].append(np.r_[cam.key, np.asarray(cam.center)])
            else:
                cams['right'].append(np.r_[cam.key, np.asarray(cam.center)])

    if chunk.crs is None:
        crs = PhotoScan.CoordinateSystem(
            'LOCAL_CS["Local CS",LOCAL_DATUM["Local Datum",0],UNIT["metre",1]]')
        chunk.crs = crs

    # Find the closest camera pair and check that it is a true pair,
    # based on overlap, before keeping the scalebar
    for l in cams['left'][5::10]:
        lcam = l[1:]
        lcam_index = int(l[0])
        rcams = np.asarray(cams['right'])
        rcams = rcams[:, 1:]
        rcam_index = int(cams['right'][closest_pair(lcam, rcams)][0])
        scalebar = chunk.addScalebar(chunk.cameras[lcam_index],
                                     chunk.cameras[rcam_index])
        scalebar.label = chunk.cameras[lcam_index].label + " - " + chunk.cameras[rcam_index].label
        scalebar.reference.distance = camdict['cam_dist']
        PhotoScan.app.update()
        chunk.updateTransform()

        # Fine-tune scalebars based on image-pair overlap: remove scalebars
        # whose images overlap too little or too much, to avoid false pairs
        try:
            thisIOI = co.IOI(lcam_index, rcam_index, chunk)
            overlap = np.r_[overlap, thisIOI]
            if (thisIOI < camdict['overlap_threshold'][0]
                    or thisIOI > camdict['overlap_threshold'][1]):
                chunk.remove([scalebar])
        except Exception as e:
            # most likely, these are cameras which edge falls outside the
            overlap = np.r_[overlap, 0]
            chunk.remove([scalebar])
    return overlap
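# The closest_pair helper used by scale_cams is not shown in this file.
# A hypothetical sketch, assuming it returns the row index of the
# right-hand camera centre nearest to the given left-hand camera centre:
def closest_pair(lcam, rcams):
    return int(np.argmin(np.linalg.norm(rcams - lcam, axis=1)))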