def get_format(self, obj, mesh_name, matrix_list):
    """Assemble the xml_format dict describing *obj*'s transform for export.

    Starts from a static transform taken from the first matrix sample and,
    when more than one sample is present, switches to keyframe animation.
    Extra per-object elements from get_additional_elements() are merged in.
    """
    # scale stays 1.0: any scaling is already baked into the rotation matrix
    fmt = {
        'mesh_name': [mesh_name],
        'scale': [1.0],
    }

    # Base static transform from the first (frame, matrix) sample.
    fmt.update(
        exportutil.getTransform(self.scene, obj, matrix_list[0][1], xml_format='matrix')
    )

    if len(matrix_list) > 1:
        # Animated object: a static 'pos' conflicts with keyframes, so drop it
        # and emit the keyframe list instead.
        del fmt['pos']
        fmt['keyframe'] = tuple(
            exportutil.matrixListToKeyframes(self.scene, obj, matrix_list)
        )

    fmt.update(self.get_additional_elements(obj))
    return fmt
def build_xml_element(self, scene, matrix_list):
    """Build the <camera> XML element for the Indigo scene file.

    Collects camera optics (aperture, sensor, lens distance), white balance,
    position/orientation (or keyframes when animated), depth of field,
    aperture-diffraction settings and lens shift into an xml_format dict,
    then serialises it into the returned element via build_subelements().
    """
    xml = self.Element('camera')

    xml_format = {
        'aperture_radius': [aperture_radius(scene, self)],
        # Blender sensor_width is in mm; Indigo expects metres.
        'sensor_width': [scene.camera.data.sensor_width / 1000.0],
        'lens_sensor_dist': [lens_sensor_dist(scene, self)],
        'aspect_ratio': [aspect_ratio(scene, self)],
        'exposure_duration': 'exposure',
    }

    if self.whitebalance == 'Custom':
        xml_format['white_point'] = {
            'chromaticity_coordinates': {
                'x': [self.whitebalanceX],
                'y': [self.whitebalanceY],
            }
        }
    else:
        # BUGFIX: a stray trailing comma previously made this the 1-tuple
        # ('whitebalance',); every other string-valued entry here (e.g.
        # 'exposure_duration') is a plain property-name string.
        xml_format['white_balance'] = 'whitebalance'

    ws = get_worldscale(scene)

    if scene.camera.data.type == 'ORTHO':
        xml_format['camera_type'] = ['orthographic']
        # Blender seems to use 'ortho_scale' for the sensor width.
        xml_format['sensor_width'] = [scene.camera.data.ortho_scale * ws]

    # Camera basis vectors from the first matrix sample (transposed so that
    # rows are the basis vectors / translation).
    mat = matrix_list[0][1].transposed()
    xml_format['pos']      = [ i*ws for i in mat[3][0:3]]
    xml_format['forwards'] = [-i*ws for i in mat[2][0:3]]
    xml_format['up']       = [ i*ws for i in mat[1][0:3]]

    if len(matrix_list) > 1:
        # Animated camera: static 'pos' conflicts with keyframes.
        del xml_format['pos']
        keyframes = exportutil.matrixListToKeyframes(scene, scene.camera, matrix_list)
        xml_format['keyframe'] = tuple(keyframes)

    # Depth of field / focus.
    if self.autofocus:
        xml_format['autofocus'] = ''  # is empty element
        xml_format['focus_distance'] = [10.0]  # any non-zero value will do
    else:
        if scene.camera.data.dof_object is not None:
            xml_format['focus_distance'] = [
                (scene.camera.location - scene.camera.data.dof_object.location).length * ws
            ]
        elif scene.camera.data.dof_distance > 0:
            xml_format['focus_distance'] = [scene.camera.data.dof_distance * ws]
        else:  # autofocus
            xml_format['autofocus'] = ''  # is empty element
            xml_format['focus_distance'] = [10.0]  # any non-zero value will do

    # Aperture diffraction.
    if self.ad:
        xml_format.update({'aperture_shape': {}})

        if self.ad_obstacle != '':
            ad_obstacle = efutil.filesystem_path(self.ad_obstacle)
            if os.path.exists(ad_obstacle):
                xml_format.update({
                    'obstacle_map': {
                        'path': [efutil.path_relative_to_export(ad_obstacle)]
                    }
                })
            else:
                indigo_log('WARNING: Camera Obstacle Map specified, but image path is not valid')

        if self.ad_type == 'image':
            ad_image = efutil.filesystem_path(self.ad_image)
            if os.path.exists(ad_image):
                xml_format['aperture_shape'].update({
                    'image': {
                        'path': [efutil.path_relative_to_export(ad_image)]
                    }
                })
            else:
                indigo_log('WARNING: Camera Aperture Diffraction type "Image" selected, but image path is not valid')
        elif self.ad_type == 'generated':
            xml_format['aperture_shape'].update({
                'generated': {
                    'num_blades': [self.ad_blades],
                    'start_angle': [self.ad_angle],
                    'blade_offset': [self.ad_offset],
                    'blade_curvature_radius': [self.ad_curvature]
                }
            })
        elif self.ad_type == 'circular':
            xml_format['aperture_shape'][self.ad_type] = {}

    # Lens shift: shift_x/shift_y are fractions of the sensor width (mm),
    # converted to metres; portrait aspect (<1) scales by 1/aspect.
    aspect = aspect_ratio(scene, self)
    if scene.camera.data.shift_x != 0:
        sx = scene.camera.data.shift_x * 0.001 * scene.camera.data.sensor_width
        if aspect < 1.0:
            sx /= aspect
        xml_format['lens_shift_right_distance'] = [sx]

    if scene.camera.data.shift_y != 0:
        sy = scene.camera.data.shift_y * 0.001 * scene.camera.data.sensor_width
        if aspect < 1.0:
            sy /= aspect
        xml_format['lens_shift_up_distance'] = [sy]

    self.build_subelements(scene, xml_format, xml)
    return xml