def get_shader_preset(shader_presets_filepath, template_name):
    """Returns requested Shader Preset data from preset file.

    :param shader_presets_filepath: A file path to SCS shader preset file, can be absolute or relative
    :type shader_presets_filepath: str
    :param template_name: Preset name
    :type template_name: str
    :return: Preset data section; None if the file is missing or no preset matches
    :rtype: SectionData
    """
    # NOTE: Blender stores relative paths with a literal "//" prefix on every
    # platform, so test for that prefix. The previous check used
    # "os.sep + os.sep", which is "\\\\" on Windows and could never match.
    if shader_presets_filepath.startswith("//"):  # IF RELATIVE PATH, MAKE IT ABSOLUTE
        shader_presets_filepath = _path.get_abs_path(shader_presets_filepath)

    preset_section = None
    if os.path.isfile(shader_presets_filepath):
        presets_container = _pix_container.get_data_from_file(shader_presets_filepath, '    ')
        if presets_container:
            for section in presets_container:
                if section.type == "Shader":
                    for prop in section.props:
                        # match the preset by its "PresetName" property
                        if prop[0] == "PresetName" and prop[1] == template_name:
                            preset_section = section
                            break
    else:
        lprint('\nW The file path "%s" is not valid!', (shader_presets_filepath,))
    return preset_section
def get_shader_presets_container(shader_presets_filepath):
    """Returns shader presets data container from given path.

    :param shader_presets_filepath: relative or absolute shader presets filepath
    :type shader_presets_filepath: str
    :return: data container if file is found; None otherwise
    :rtype: io_scs_tools.internals.structure.SectionData
    """
    # Blender-style relative paths begin with "//" -> resolve to absolute first.
    if shader_presets_filepath.startswith("//"):
        shader_presets_filepath = _path.get_abs_path(shader_presets_filepath)

    if not os.path.isfile(shader_presets_filepath):
        lprint('\nW The file path "%s" is not valid!', (shader_presets_filepath,))
        return None

    return _pix_container.get_data_from_file(shader_presets_filepath, '    ')
def update_item_in_file(item_pointer, new_value):
    """Resaves config file with updated given item to a new value.

    The "item_pointer" variable must be in form of 'SectionName.PropertyName',
    example: 'Paths.ProjectPath'.

    :param item_pointer: dotted 'Section.Property' address of the item to update
    :type item_pointer: str
    :param new_value: value to store under the addressed property
    :return: True if the config file was rewritten; False otherwise
    :rtype: bool
    """
    # interrupt if config update is locked
    if _get_scs_globals().config_update_lock:
        return False

    filepath = get_config_filepath()
    ind = '    '
    config_container = _pix.get_data_from_file(filepath, ind)

    # if the config couldn't be read (e.g. permission denied), don't write an
    # empty container back over it - just report failure
    if not config_container:
        return False

    new_settings_container = []
    new_value_changed = False
    item_pointer_split = item_pointer.split('.', 1)
    for section in config_container:
        new_section = _SectionData(section.type)
        for prop in section.props:
            if section.type == item_pointer_split[0] and prop[0] == item_pointer_split[1]:
                new_section.props.append((prop[0], new_value))
                new_value_changed = True
            else:
                new_section.props.append((prop[0], prop[1]))

        # append new property if it is not yet present in its section
        if not new_value_changed and section.type == item_pointer_split[0]:
            new_section.props.append((item_pointer_split[1], new_value))

        new_settings_container.append(new_section)

    write_file(new_settings_container, filepath, ind)
    return True
def get_shader_preset(shader_presets_filepath, template_name):
    """Returns requested Shader Preset data from preset file.

    :param shader_presets_filepath: A file path to SCS shader preset file, can be absolute or relative
    :type shader_presets_filepath: str
    :param template_name: Preset name
    :type template_name: str
    :return: Preset data section; None if the file is missing or no preset matches
    :rtype: SectionData
    """
    # Blender relative paths start with "//" on all platforms; the previous
    # "os.sep + os.sep" test ("\\\\" on Windows) could never match there.
    if shader_presets_filepath.startswith("//"):  # IF RELATIVE PATH, MAKE IT ABSOLUTE
        shader_presets_filepath = _path.get_abs_path(shader_presets_filepath)

    preset_section = None
    if os.path.isfile(shader_presets_filepath):
        presets_container = _pix_container.get_data_from_file(shader_presets_filepath, '    ')
        if presets_container:
            for section in presets_container:
                if section.type == "Shader":
                    for prop in section.props:
                        if prop[0] == "PresetName":
                            if prop[1] == template_name:
                                preset_section = section
                                break
    else:
        # pass values as a tuple, consistent with every other lprint call site
        lprint('\nW The file path "%s" is not valid!', (shader_presets_filepath,))
    return preset_section
def update_item_in_file(item_pointer, new_value):
    """Resaves config file with updated given item to a new value.

    The "item_pointer" variable must be in form of 'SectionName.PropertyName',
    example: 'Paths.ProjectPath'.
    """
    # no config writes while updating is locked
    if _get_scs_globals().config_update_lock:
        return False

    # settings stored inside the blend file -> skip writing, unless the
    # storage place setting itself is the item being changed
    if _get_scs_globals().config_storage_place == "BlendFile" and item_pointer != "Header.ConfigStoragePlace":
        return False

    filepath = get_config_filepath()
    ind = '    '
    config_container = _pix.get_data_from_file(filepath, ind)

    # a None container means creation already failed (permission denied),
    # so another write attempt is pointless
    if config_container is None:
        return False

    section_name, prop_name = item_pointer.split('.', 1)

    updated_sections = []
    value_written = False
    for section in config_container:
        rebuilt = _SectionData(section.type)
        for prop in section.props:
            if section.type == section_name and prop[0] == prop_name:
                rebuilt.props.append((prop[0], new_value))
                value_written = True
            else:
                rebuilt.props.append((prop[0], prop[1]))

        # the property wasn't found in its section -> append it as new
        if not value_written and section.type == section_name:
            rebuilt.props.append((prop_name, new_value))

        updated_sections.append(rebuilt)

    _pix.write_data_to_file(updated_sections, filepath, ind)
    return True
def update_item_in_file(item_pointer, new_value):
    """Resaves config file with updated given item to a new value.

    The "item_pointer" variable must be in form of 'SectionName.PropertyName',
    example: 'Paths.ProjectPath'.
    """
    # interrupt if config update is locked
    if _get_scs_globals().config_update_lock:
        return False

    # interrupt if settings storage place is set to blend file
    # (except when config storage place itself is being updated)
    stored_in_blend = _get_scs_globals().config_storage_place == "BlendFile"
    if stored_in_blend and not item_pointer == "Header.ConfigStoragePlace":
        return False

    filepath = get_config_filepath()
    ind = '    '
    config_container = _pix.get_data_from_file(filepath, ind)

    # container couldn't be read nor created (permission denied) -> give up
    if config_container is None:
        return False

    target_section, target_prop = item_pointer.split('.', 1)
    rebuilt_container = []
    replaced = False

    for section in config_container:
        fresh_section = _SectionData(section.type)
        for prop in section.props:
            is_target = section.type == target_section and prop[0] == target_prop
            if is_target:
                fresh_section.props.append((prop[0], new_value))
                replaced = True
            else:
                fresh_section.props.append((prop[0], prop[1]))

        # property missing from its section -> add it now
        if not replaced and section.type == target_section:
            fresh_section.props.append((target_prop, new_value))

        rebuilt_container.append(fresh_section)

    _pix.write_data_to_file(rebuilt_container, filepath, ind)
    return True
def update_shader_presets_path(scs_shader_presets_inventory, shader_presets_filepath):
    """Deletes and repopulates the list of Shader Preset items in inventory.

    Also updates the corresponding record in the config file.

    :param scs_shader_presets_inventory: inventory collection to repopulate
    :param shader_presets_filepath: Absolute or relative path to the file with Shader Presets
    :type shader_presets_filepath: str
    """
    abs_path = shader_presets_filepath
    if abs_path.startswith("//"):  # Blender relative path
        abs_path = _path.get_abs_path(abs_path)

    # CLEAR INVENTORY
    scs_shader_presets_inventory.clear()

    if not os.path.isfile(abs_path):
        lprint('\nW The file path "%s" is not valid!', (abs_path,))
    else:
        # the default "<none>" entry always comes first
        default_item = scs_shader_presets_inventory.add()
        default_item.name = "<none>"

        container = _pix.get_data_from_file(abs_path, '    ')

        # add one inventory entry per preset found in the file
        if container:
            for section in container:
                if section.type != "Shader":
                    continue
                for prop in section.props:
                    if prop[0] == "PresetName":
                        item = scs_shader_presets_inventory.add()
                        item.name = prop[1]

    # persist the (possibly relative) path into the config file
    update_item_in_file('Paths.ShaderPresetsFilePath', shader_presets_filepath)
def get_shader_presets_container(shader_presets_filepath):
    """Returns shader presets data container from given path.

    :param shader_presets_filepath: relative or absolute shader presets filepath
    :type shader_presets_filepath: str
    :return: data container if file is found; None otherwise
    :rtype: io_scs_tools.internals.structure.SectionData
    """
    presets_container = None

    # resolve Blender "//"-prefixed relative paths to absolute ones
    path = (_path.get_abs_path(shader_presets_filepath)
            if shader_presets_filepath.startswith("//")
            else shader_presets_filepath)

    if os.path.isfile(path):
        presets_container = _pix_container.get_data_from_file(path, '    ')
    else:
        lprint('\nW The file path "%s" is not valid!', (path,))

    return presets_container
def update_shader_presets_path(scs_shader_presets_inventory, shader_presets_filepath):
    """Deletes and repopulates the list of Shader Preset items in inventory.

    Also updates the corresponding record in the config file.

    :param scs_shader_presets_inventory: inventory collection to repopulate
    :param shader_presets_filepath: Absolute or relative path to the file with Shader Presets
    :type shader_presets_filepath: str
    """
    if shader_presets_filepath.startswith("//"):  # RELATIVE PATH
        shader_presets_abs_path = _path.get_abs_path(shader_presets_filepath)
    else:
        shader_presets_abs_path = shader_presets_filepath

    # start over from an empty inventory
    scs_shader_presets_inventory.clear()

    if os.path.isfile(shader_presets_abs_path):

        # first entry is always the "<none>" placeholder preset
        none_entry = scs_shader_presets_inventory.add()
        none_entry.name = "<none>"

        presets_container = _pix.get_data_from_file(shader_presets_abs_path, '    ')
        if presets_container:
            # one entry per "PresetName" property of every Shader section
            found_names = [prop[1]
                           for section in presets_container if section.type == "Shader"
                           for prop in section.props if prop[0] == "PresetName"]
            for found_name in found_names:
                entry = scs_shader_presets_inventory.add()
                entry.name = found_name
    else:
        lprint('\nW The file path "%s" is not valid!', (shader_presets_abs_path,))

    update_item_in_file('Paths.ShaderPresetsFilePath', shader_presets_filepath)
def load(filepath):
    """Entry point for importing a PIC (collision) file.

    Parses the PIX container from *filepath* and creates one collision
    locator empty per "Locator" section, configuring its collider
    (box / sphere / capsule / cylinder / convex) from the parsed data.

    :param filepath: filepath of PIC file
    :type filepath: str
    :return: ({'FINISHED'}, list of created locator objects)
    :rtype: tuple
    """
    scs_globals = _get_scs_globals()

    print("\n************************************")
    print("** SCS PIC Importer **")
    print("** (c)2014 SCS Software **")
    print("************************************\n")

    ind = '    '
    pic_container = _pix_container.get_data_from_file(filepath, ind)

    # LOAD HEADER
    '''
    NOTE: skipped for now as no data needs to be readed
    format_version, source, f_type, f_name, source_filename, author = _get_header(pic_container)
    '''

    # LOAD GLOBALS
    '''
    NOTE: skipped for now as no data needs to be readed
    vertex_count, triangle_count, material_count, piece_count, part_count, locator_count = _get_global(pic_container)
    '''

    # LOAD MATERIALS
    if 0:  # NOTE: NO MATERIALS USED FOR COLLIDERS AT A MOMENT!
        loaded_materials = []
        for section in pic_container:
            if section.type == 'Material':
                material_alias, material_effect = _get_material(section)
                lprint('I Adding a Material Alias: "%s"', material_alias)
                loaded_materials.append(material_alias)

                # PRINT "MATERIAL SETTINGS" TO CONSOLE...
                if 0:
                    import pprint
                    pp = pprint.PrettyPrinter(indent=1)
                    print("=== MATERIAL SETTINGS ==========================")
                    pp.pprint(material_effect)
                    print("==================================================")

    # LOAD PARTS
    parts = []
    for section in pic_container:
        if section.type == "Part":
            (name, pieces, locators) = _get_part(section)
            parts.append({"name": name, "pieces": pieces, "locators": locators})

    # LOAD (CONVEX) PIECES - indexed later by locator_convex_piece
    pieces = []
    for section in pic_container:
        if section.type == 'Piece':
            pieces.append(_get_piece(section))

    # LOAD AND CREATE LOCATORS
    import_scale = scs_globals.import_scale
    locators = []
    for section in pic_container:
        if section.type == 'Locator':
            (locator_name,
             locator_index,
             locator_position,
             locator_rotation,
             locator_alias,
             locator_weight,
             locator_type,
             locator_parameters,
             locator_convex_piece) = _get_locator(section)
            lprint('I Adding a Locator: "%s"', locator_name)
            locator = _object_utils.create_locator_empty(locator_name, locator_position, locator_rotation, (1, 1, 1), 1.0, 'Collision')
            locator.scs_props.scs_part = _get_locator_part(parts, locator_index)
            locator.scs_props.locator_collider_centered = True
            locator.scs_props.locator_collider_mass = locator_weight
            locator.scs_props.locator_collider_type = locator_type
            if locator_type == 'Box':
                # NOTE(review): parameters [1] and [2] are deliberately swapped
                # between Y and Z - presumably SCS y-up vs. Blender z-up; confirm
                # against the PIC format specification.
                locator.scs_props.locator_collider_box_x = locator_parameters[0] * import_scale
                locator.scs_props.locator_collider_box_y = locator_parameters[2] * import_scale
                locator.scs_props.locator_collider_box_z = locator_parameters[1] * import_scale
            elif locator_type in ('Sphere', 'Capsule', 'Cylinder'):
                # parameter [0] is a radius -> doubled into a diameter here
                locator.scs_props.locator_collider_dia = locator_parameters[0] * 2 * import_scale
                locator.scs_props.locator_collider_len = locator_parameters[1] * import_scale
            elif locator_type == 'Convex':
                piece_index, piece_material, verts, faces = pieces[locator_convex_piece]
                if verts and faces:

                    # BOUNDING BOX DATA CREATION AND SPACE CONVERSION
                    min_val = [None, None, None]
                    max_val = [None, None, None]
                    scs_verts = []
                    for vert in verts:
                        scs_vert = _convert_utils.change_to_scs_xyz_coordinates(vert, import_scale)
                        scs_verts.append(scs_vert)
                        min_val, max_val = _math_utils.evaluate_minmax(scs_vert, min_val, max_val)
                    bbox, bbcenter = _math_utils.get_bb(min_val, max_val)

                    # FACE FLIPPING
                    flipped_faces = _mesh_utils.flip_faceverts(faces)

                    # COLLIDER CREATION
                    geom_data = (scs_verts, flipped_faces, bbox, bbcenter)
                    _object_utils.add_collider_convex_locator(geom_data, {}, locator)
            locators.append(locator)

    # WARNING PRINTOUTS
    # if piece_count < 0: Print(dump_level, '\nW More Pieces found than were declared!')
    # if piece_count > 0: Print(dump_level, '\nW Some Pieces not found, but were declared!')

    print("************************************")
    return {'FINISHED'}, locators
def load(filepath):
    """Entry point for importing a PIT file.

    :param filepath: filepath of PIT file
    :type filepath: str
    :return: ({'FINISHED'}, loaded variant records, loaded look records)
    :rtype: tuple
    """
    print("\n************************************")
    print("** SCS PIT Importer **")
    print("** (c)2014 SCS Software **")
    print("************************************\n")

    pit_container = _pix_container.get_data_from_file(filepath, '    ')

    # LOAD HEADER
    '''
    NOTE: skipped for now as no data needs to be readed
    (format_version, source, f_type, f_name, source_filename, author) = _get_header(pit_container, dump_level)
    '''

    # LOAD GLOBALS
    '''
    NOTE: skipped for now as no data needs to be readed
    (look_count, variant_count, part_count, material_count) = _get_global(pit_container, dump_level)
    '''

    # LOAD LOOKS AND VARIANTS
    # collect one (name, data) record per Look/Variant section
    loaded_looks = []
    loaded_variants = []
    for section in pit_container:
        if section.type == 'Look':
            loaded_looks.append(_get_look(section))
        elif section.type == 'Variant':
            loaded_variants.append(_get_variant(section))

    print("************************************")
    return {'FINISHED'}, loaded_variants, loaded_looks
def export(root_object, used_parts, used_materials, scene, filepath):
    """Export the PIT (trait) file for the given SCS root object.

    Gathers look/material and variant/part data from the root object and
    writes it to "<filepath>.pit".

    :param root_object: SCS root object being exported
    :param used_parts: dict of used part names (only its keys are read here)
    :param used_materials: materials used by the model; entries may be Blender
        materials or plain name strings (strings get default settings)
    :param scene: Blender scene providing scs_shader_presets_inventory
    :param filepath: export filepath WITHOUT the ".pit" extension
    :type filepath: str
    :return: result of the PIX container write
    """
    scs_globals = _get_scs_globals()
    output_type = scs_globals.output_type

    file_name = root_object.name

    print("\n************************************")
    print("** SCS PIT Exporter **")
    print("** (c)2014 SCS Software **")
    print("************************************\n")

    # DATA GATHERING
    look_list = []
    variant_list = []

    saved_active_look = root_object.scs_props.active_scs_look
    looks_inventory = root_object.scs_object_look_inventory
    looks_count = len(looks_inventory)
    # always run the gathering loop at least once, even with no looks defined
    if looks_count <= 0:
        looks_count = 1

    for i in range(0, looks_count):

        # apply each look from inventory first
        if len(looks_inventory) > 0:
            root_object.scs_props.active_scs_look = i

            # actually write values to material because Blender might not refresh data yet
            _looks.apply_active_look(root_object)

            curr_look_name = looks_inventory[i].name
        else:  # if no looks create default
            curr_look_name = "default"

        material_dict = {}
        material_list = []
        # get materials data
        for material in used_materials:
            if material is not None:
                # resolve material names into actual Blender materials when possible
                if material in bpy.data.materials:
                    material = bpy.data.materials[material]

                if isinstance(material, str):
                    material_name = str(material + "-_default_settings_")  # DEFAULT MATERIAL
                    material_export_data = _default_material(material_name)
                    material_list.append(material_name)
                else:
                    material_name = material.name
                    material_list.append(material)

                    # SUBSTANCE
                    if material.scs_props.substance != 'None':
                        lprint('D material.name: %r\tmat.scs_props.substance: "%s"', (material.name, str(material.scs_props.substance)))
                        # TODO: Substance Export...

                    # MATERIAL EFFECT
                    effect_name = material.scs_props.mat_effect_name

                    # PRESET SHADERS
                    preset_found = False
                    alias = "NO SHADER"
                    def_cnt = attribute_cnt = texture_cnt = 0
                    def_sections = []
                    attribute_sections = []
                    texture_sections = []
                    active_shader_preset_name = material.scs_props.active_shader_preset_name

                    for preset_i, preset in enumerate(scene.scs_shader_presets_inventory):
                        if preset.name == active_shader_preset_name:

                            # LOAD PRESET
                            shader_presets_abs_path = _path_utils.get_abs_path(scs_globals.shader_presets_filepath)
                            if os.path.isfile(shader_presets_abs_path):
                                presets_container = _pix_container.get_data_from_file(shader_presets_abs_path, '    ')

                                # FIND THE PRESET IN FILE
                                if presets_container:
                                    for section in presets_container:
                                        if section.type == "Shader":
                                            section_properties = _get_properties(section)
                                            if 'PresetName' in section_properties:
                                                preset_name = section_properties['PresetName']
                                                if preset_name == active_shader_preset_name:
                                                    alias = material.name

                                                    # COLLECT ATTRIBUTES AND TEXTURES
                                                    for item in section.sections:

                                                        # DATA EXCHANGE FORMAT ATRIBUTE
                                                        if item.type == "DataExchangeFormat":
                                                            def_data = _SectionData("DataExchangeFormat")
                                                            for rec in item.props:
                                                                def_data.props.append((rec[0], rec[1]))
                                                            def_sections.append(def_data)
                                                            def_cnt += 1

                                                        # ATTRIBUTES
                                                        if item.type == "Attribute":
                                                            attribute_data = _SectionData("Attribute")
                                                            for rec in item.props:
                                                                if rec[0] == "Format":
                                                                    attribute_data.props.append((rec[0], rec[1]))
                                                                elif rec[0] == "Tag":
                                                                    attribute_data.props.append((rec[0], rec[1]))
                                                                elif rec[0] == "Value":
                                                                    # the actual value comes from material.scs_props,
                                                                    # addressed via the preset's "Tag" property
                                                                    format_prop = item.get_prop("Format")[1]
                                                                    tag_prop = item.get_prop("Tag")[1]
                                                                    tag_prop = tag_prop.replace("[", "").replace("]", "")
                                                                    if "aux" in tag_prop:
                                                                        # aux attributes are collections of value items
                                                                        aux_props = getattr(material.scs_props, "shader_attribute_" + tag_prop)
                                                                        value = []
                                                                        for aux_prop in aux_props:
                                                                            value.append(aux_prop.value)
                                                                    else:
                                                                        value = getattr(material.scs_props, "shader_attribute_" + tag_prop, "NO TAG")
                                                                    if format_prop == 'FLOAT':
                                                                        attribute_data.props.append((rec[0], ["&&", (value,)]))
                                                                    else:
                                                                        attribute_data.props.append((rec[0], ["i", tuple(value)]))
                                                            attribute_sections.append(attribute_data)
                                                            attribute_cnt += 1

                                                        # TEXTURES
                                                        elif item.type == "Texture":
                                                            texture_data = _SectionData("Texture")
                                                            for rec in item.props:
                                                                if rec[0] == "Tag":
                                                                    # re-index the tag with the running texture counter
                                                                    tag_prop = rec[1].split(":")[1]
                                                                    tag = str("texture[" + str(texture_cnt) + "]:" + tag_prop)
                                                                    texture_data.props.append((rec[0], tag))
                                                                elif rec[0] == "Value":
                                                                    tag_prop = item.get_prop("Tag")[1].split(":")[1]
                                                                    # create and get path to tobj
                                                                    tobj_rel_path = _get_texture_path_from_material(material, tag_prop, os.path.dirname(filepath))
                                                                    texture_data.props.append((rec[0], tobj_rel_path))
                                                            texture_sections.append(texture_data)
                                                            texture_cnt += 1

                                                    preset_found = True
                                                    break
                            else:
                                lprint('\nW The file path "%s" is not valid!', (shader_presets_abs_path,))

                        if preset_found:
                            break

                    if preset_found:
                        # build the Material section from the collected preset data
                        material_export_data = _SectionData("Material")
                        material_export_data.props.append(("Alias", alias))
                        material_export_data.props.append(("Effect", effect_name))
                        material_export_data.props.append(("Flags", 0))
                        if output_type.startswith('def'):
                            material_export_data.props.append(("DataExchangeFormatCount", def_cnt))
                        material_export_data.props.append(("AttributeCount", attribute_cnt))
                        material_export_data.props.append(("TextureCount", texture_cnt))
                        if output_type.startswith('def'):
                            for def_section in def_sections:
                                material_export_data.sections.append(def_section)
                        for attribute in attribute_sections:
                            material_export_data.sections.append(attribute)
                        for texture in texture_sections:
                            material_export_data.sections.append(texture)

                    elif active_shader_preset_name == "<imported>":
                        # material has no preset; export attributes/textures stored on the material itself
                        material_attributes = material['scs_shader_attributes']['attributes'].to_dict().values()
                        material_textures = material['scs_shader_attributes']['textures'].to_dict().values()

                        material_export_data = _SectionData("Material")
                        material_export_data.props.append(("Alias", material.name))
                        material_export_data.props.append(("Effect", effect_name))
                        material_export_data.props.append(("Flags", 0))
                        material_export_data.props.append(("AttributeCount", len(material_attributes)))
                        material_export_data.props.append(("TextureCount", len(material_textures)))

                        for attribute_dict in material_attributes:
                            attribute_section = _SectionData("Attribute")
                            format_value = ""
                            for attr_prop in sorted(attribute_dict.keys()):
                                # get the format of current attribute (we assume that "Format" attribute is before "Value" attribute in this for loop)
                                if attr_prop == "Format":
                                    format_value = attribute_dict[attr_prop]
                                if attr_prop == "Value" and ("FLOAT" in format_value or "STRING" in format_value):
                                    attribute_section.props.append((attr_prop, ["i", tuple(attribute_dict[attr_prop])]))
                                elif attr_prop == "Tag" and "aux" in attribute_dict[attr_prop]:
                                    attribute_section.props.append((attr_prop, "aux[" + attribute_dict[attr_prop][3:] + "]"))
                                else:
                                    attribute_section.props.append((attr_prop, attribute_dict[attr_prop]))
                            material_export_data.sections.append(attribute_section)

                        for texture_dict in material_textures:
                            texture_section = _SectionData("Texture")
                            tag_id_string = ""
                            for tex_prop in sorted(texture_dict.keys()):
                                if tex_prop == "Tag":
                                    tag_id_string = texture_dict[tex_prop].split(':')[1]
                                if tex_prop == "Value" and tag_id_string != "":
                                    tobj_rel_path = _get_texture_path_from_material(material, tag_id_string, os.path.dirname(filepath))
                                    texture_section.props.append((tex_prop, tobj_rel_path))
                                else:
                                    texture_section.props.append((tex_prop, texture_dict[tex_prop]))
                            material_export_data.sections.append(texture_section)

                    else:  # DEFAULT MATERIAL
                        material_name = str("_" + material_name + "_-_default_settings_")
                        material_export_data = _default_material(material_name)

                material_dict[material_name] = material_export_data

        # create materials sections for looks
        material_sections = _fill_material_sections(material_list, material_dict)
        look_data = {
            "name": curr_look_name,
            "material_sections": material_sections
        }
        look_list.append(look_data)

    # restore look applied before export
    root_object.scs_props.active_scs_look = saved_active_look

    # PARTS AND VARIANTS...
    part_list_cnt = len(used_parts.keys())
    if len(root_object.scs_object_variant_inventory) == 0:
        # If there is no Variant, add the Default one...
        part_list = _fill_part_list(root_object.scs_object_part_inventory, used_parts, all_parts=True)
        variant_list.append((_VARIANT_consts.default_name, part_list), )
    else:
        for variant in root_object.scs_object_variant_inventory:
            part_list = _fill_part_list(variant.parts, used_parts)
            variant_list.append((variant.name, part_list), )

    # DATA CREATION
    header_section = _fill_header_section(file_name, scs_globals.sign_export)
    look_section = _fill_look_sections(look_list)
    variant_section = _fill_variant_sections(variant_list)
    comment_header_section = _fill_comment_header_section(look_list, variant_list)
    global_section = _fill_global_section(len(look_list), len(variant_list), part_list_cnt, len(used_materials))

    # DATA ASSEMBLING
    pit_container = [comment_header_section, header_section, global_section]
    for section in look_section:
        pit_container.append(section)
    for section in variant_section:
        pit_container.append(section)

    # FILE EXPORT
    ind = "    "
    pit_filepath = str(filepath + ".pit")
    result = _pix_container.write_data_to_file(pit_container, pit_filepath, ind)

    return result
def load(filepath, armature, get_only=False):
    """Entry point for importing a PIS (skeleton) file.

    Parses bone definitions and applies them to the given armature in edit
    mode: parenting, head/tail placement and bone roll.

    :param filepath: filepath of PIS file
    :type filepath: str
    :param armature: armature object to build the bones on
    :param get_only: if True only the parsed bones mapping is returned and
        the armature is left untouched (used when importing PIA from panel)
    :type get_only: bool
    :return: parsed bones mapping; or ({'CANCELLED'}, None) when no armature given
    """
    scs_globals = _get_scs_globals()
    import_scale = scs_globals.import_scale
    bone_import_scale = scs_globals.bone_import_scale
    connected_bones = scs_globals.connected_bones

    print("\n************************************")
    print("** SCS PIS Importer **")
    print("** (c)2014 SCS Software **")
    print("************************************\n")

    ind = '    '
    pis_container = _pix_container.get_data_from_file(filepath, ind)

    # LOAD HEADER
    '''
    NOTE: skipped for now as no data needs to be readed
    format_version, source, f_type, f_name, source_filename, author = _get_header(pis_container)
    '''

    # LOAD GLOBALS
    '''
    NOTE: skipped for now as no data needs to be readed
    # bone_count = _get_global(pis_container)
    '''

    # LOAD BONES
    # NOTE(review): usage below implies bones maps name -> [parent_name, matrix];
    # a children list is appended as element [2] further down - confirm in _get_bones.
    bones = _get_bones(pis_container)

    if get_only:
        # only return bones (used when importing PIA from panel)
        return bones

    # PROVIDE AN ARMATURE
    if not armature:
        lprint('\nE No Armature for file "%s"!', (os.path.basename(filepath),))
        return {'CANCELLED'}, None

    bpy.context.scene.objects.active = armature
    bpy.ops.object.mode_set(mode='EDIT')

    # CONNECTED BONES - Add information about all children...
    if connected_bones:
        for bone in bones:
            children = []
            for item in bones:
                if bone == bones[item][0]:
                    children.append(item)
            bones[bone].append(children)

    for bone_i, bone in enumerate(armature.data.bones):

        # SET PARENT
        if bones[bone.name][0] != "":  # if bone has parent...
            armature.data.edit_bones[bone.name].parent = armature.data.edit_bones[bones[bone.name][0]]

        # COMPUTE BONE TRANSFORMATION
        matrix = bones[bone.name][1]
        bone_matrix = _convert_utils.scs_to_blend_matrix() * matrix.transposed()
        axis, angle = _convert_utils.mat3_to_vec_roll(bone_matrix)

        # SET BONE TRANSFORMATION
        # tail is placed along the computed axis so the bone gets a length of
        # bone_import_scale (in import units)
        armature.data.edit_bones[bone.name].head = bone_matrix.to_translation().to_3d() * import_scale
        armature.data.edit_bones[bone.name].tail = (armature.data.edit_bones[bone.name].head +
                                                    Vector(axis).normalized() * bone_import_scale * import_scale)
        armature.data.edit_bones[bone.name].roll = angle

        # CONNECTED BONES
        # NOTE: Doesn't work as expected! Disabled for now in UI.
        # Child bones gets position offset and there is also a problem when translation
        # is animated, for which connected bones doesn't allow.
        if connected_bones:
            if len(bones[bone.name][2]) == 1:
                matrix = bones[bones[bone.name][2][0]][1]
                bone_matrix = _convert_utils.scs_to_blend_matrix() * matrix.transposed()
                armature.data.edit_bones[bone.name].tail = bone_matrix.to_translation().to_3d() * import_scale
                armature.data.edit_bones[bones[bone.name][2][0]].use_connect = True

    bpy.ops.object.mode_set(mode='OBJECT')
    armature.data.show_axes = True
    armature.draw_type = 'WIRE'

    print("************************************")
    return bones
def load(root_object, pia_files, armature, pis_filepath=None, bones=None):
    """Imports SCS animations from given PIA files and applies them to the given armature.

    For every PIA file the skeleton is matched first (either against the given PIS file
    or by searching for a matching skeleton of the armature); non-matching files are rejected.

    :param root_object: SCS root object on which created animations get registered
    :type root_object: bpy.types.Object
    :param pia_files: list of PIA file paths to import
    :type pia_files: list[str]
    :param armature: Blender armature object the animation data is applied to
    :type armature: bpy.types.Object
    :param pis_filepath: file path of skeleton the animations should belong to; None to search per PIA file
    :type pis_filepath: str | None
    :param bones: bones data from an already loaded PIS file; None to load them from the matched skeleton
    :type bones: dict | None
    :return: number of successfully imported animations
    :rtype: int
    """
    scs_globals = _get_scs_globals()

    print("\n************************************")
    print("** SCS PIA Importer **")
    print("** (c)2014 SCS Software **")
    print("************************************\n")

    import_scale = scs_globals.import_scale
    ind = ' '
    imported_count = 0
    for pia_filepath in pia_files:
        # Check if PIA file is for the actual skeleton...
        if pis_filepath and bones:
            skeleton_match = _pix_container.fast_check_for_pia_skeleton(pia_filepath, pis_filepath)
        else:
            # no skeleton given upfront: search for the skeleton this PIA references and load its bones
            skeleton_match, pia_skeleton = _pix_container.utter_check_for_pia_skeleton(pia_filepath, armature)

            if skeleton_match:

                path = os.path.split(pia_filepath)[0]
                pia_skeleton = os.path.join(path, pia_skeleton)
                if os.path.isfile(pia_skeleton):
                    bones = _pis.load(pia_skeleton, armature, get_only=True)
                else:
                    lprint("\nE The filepath %r doesn't exist!", (_path_utils.readable_norm(pia_skeleton),))

            else:
                lprint(str("E Animation doesn't match the skeleton. Animation won't be loaded!\n\t "
                           "Animation file: %r"), (pia_filepath,))

        if skeleton_match:
            lprint('I ++ "%s" IMPORTING animation data...', (os.path.basename(pia_filepath),))
            pia_container = _pix_container.get_data_from_file(pia_filepath, ind)
            if not pia_container:
                lprint('\nE File "%s" is empty!', (_path_utils.readable_norm(pia_filepath),))
                continue

            # LOAD HEADER
            format_version, source, f_type, animation_name, source_filename, author = _get_header(pia_container)

            # only PIA format version 3 of type "Animation" is supported; silently skip anything else
            if format_version != 3 or f_type != "Animation":
                continue

            # LOAD GLOBALS
            skeleton, total_time, bone_channel_count, custom_channel_count = _get_globals(pia_container)

            # CREATE ANIMATION ACTIONS
            anim_action = bpy.data.actions.new(animation_name + "_action")
            anim_action.use_fake_user = True  # keep action alive even when not assigned anywhere
            anim_data = armature.animation_data if armature.animation_data else armature.animation_data_create()
            anim_data.action = anim_action

            # LOAD BONE CHANNELS
            bone_channels = _get_anim_channels(pia_container, section_name="BoneChannel")
            if len(bone_channels) > 0:

                for bone_name in bone_channels:

                    # channels for bones not present in the armature are silently ignored
                    if bone_name in armature.data.bones:
                        '''
                        NOTE: skipped for now as no data needs to be readed
                        stream_count = bone_channels[bone_name][0]
                        keyframe_count = bone_channels[bone_name][1]
                        '''
                        streams = bone_channels[bone_name][2]

                        # CREATE ANIMATION GROUP
                        anim_group = anim_action.groups.new(bone_name)
                        armature.pose.bones[bone_name].rotation_mode = 'XYZ'  # Set rotation mode.

                        # use pose bone scale set on PIS import
                        init_scale = Vector((1, 1, 1))
                        if _BONE_consts.init_scale_key in armature.pose.bones[bone_name]:
                            init_scale = armature.pose.bones[bone_name][_BONE_consts.init_scale_key]

                        # CREATE FCURVES
                        (pos_fcurves,
                         rot_fcurves,
                         sca_fcurves) = _create_fcurves(anim_action, anim_group, str('pose.bones["' + bone_name + '"]'), rot_euler=True)

                        # GET BONE REST POSITION MATRIX
                        bone_rest_matrix_scs = bones[bone_name][1].transposed()
                        parent_bone_name = bones[bone_name][0]
                        if parent_bone_name in bones:
                            parent_bone_rest_matrix_scs = bones[parent_bone_name][1].transposed()
                        else:
                            # root bone: use identity as parent rest transformation
                            parent_bone_rest_matrix_scs = Matrix()
                            parent_bone_rest_matrix_scs.identity()

                        for key_time_i, key_time in enumerate(streams[0]):
                            keyframe = key_time_i + 1

                            # GET BONE ANIMATION MATRIX
                            bone_animation_matrix_scs = streams[1][key_time_i].transposed()

                            # CREATE DELTA MATRIX
                            delta_matrix = _get_delta_matrix(bone_rest_matrix_scs, parent_bone_rest_matrix_scs,
                                                             bone_animation_matrix_scs, import_scale)

                            # DECOMPOSE ANIMATION MATRIX
                            location, rotation, scale = delta_matrix.decompose()

                            # CALCULATE CURRENT SCALE - subtract difference between initial bone scale and current scale from 1
                            # NOTE: if imported PIS had initial bone scale different than 1,
                            # initial scale was saved into pose bones custom properties and
                            # has to be used here as bones after import in Blender always have scale of 1
                            scale = Vector((1 + scale[0] - init_scale[0],
                                            1 + scale[1] - init_scale[1],
                                            1 + scale[2] - init_scale[2]))

                            # NOTE: this scaling rotation switch came from UK variants which had scale -1
                            loc, rot, sca = bone_rest_matrix_scs.decompose()
                            if sca.y < 0:
                                rotation.y *= -1
                            if sca.z < 0:
                                rotation.z *= -1

                            rotation = rotation.to_euler('XYZ')

                            # BUILD TRANSFORMATION CURVES
                            for i in range(0, 3):
                                pos_fcurves[i].keyframe_points.insert(frame=float(keyframe), value=location[i], options={'FAST'})
                                rot_fcurves[i].keyframe_points.insert(frame=float(keyframe), value=rotation[i], options={'FAST'})
                                sca_fcurves[i].keyframe_points.insert(frame=float(keyframe), value=scale[i], options={'FAST'})

                        # SET LINEAR INTERPOLATION FOR ALL CURVES
                        color_mode = 'AUTO_RAINBOW'  # Or better 'AUTO_RGB'?
                        for curve in pos_fcurves:
                            curve.color_mode = color_mode
                            for keyframe in curve.keyframe_points:
                                keyframe.interpolation = 'LINEAR'
                        for curve in rot_fcurves:
                            curve.color_mode = color_mode
                            for keyframe in curve.keyframe_points:
                                keyframe.interpolation = 'LINEAR'
                        for curve in sca_fcurves:
                            curve.color_mode = color_mode
                            for keyframe in curve.keyframe_points:
                                keyframe.interpolation = 'LINEAR'

                        # run euler filter on rotation curves to avoid flips between equivalent euler angles
                        for curve in rot_fcurves:
                            _animation_utils.apply_euler_filter(curve)

            # LOAD CUSTOM CHANNELS (ARMATURE OFFSET ANIMATION)
            custom_channels = _get_anim_channels(pia_container, section_name="CustomChannel")
            if len(custom_channels) > 0:
                for channel_name in custom_channels:
                    # only the 'Prism Movement' channel is supported; it animates the armature object location
                    if channel_name == 'Prism Movement':
                        '''
                        NOTE: skipped for now as no data needs to be readed
                        stream_count = custom_channels[channel_name][0]
                        keyframe_count = custom_channels[channel_name][1]
                        '''
                        streams = custom_channels[channel_name][2]

                        # CREATE ANIMATION GROUP
                        anim_group = anim_action.groups.new('Location')

                        # CREATE FCURVES (object-level location curves, one per axis)
                        fcurve_pos_x = anim_action.fcurves.new('location', 0)
                        fcurve_pos_y = anim_action.fcurves.new('location', 1)
                        fcurve_pos_z = anim_action.fcurves.new('location', 2)
                        fcurve_pos_x.group = anim_group
                        fcurve_pos_y.group = anim_group
                        fcurve_pos_z.group = anim_group
                        pos_fcurves = (fcurve_pos_x, fcurve_pos_y, fcurve_pos_z)

                        # stream stores per-keyframe offsets, so accumulate them into an absolute location
                        location = None
                        for key_time_i, key_time in enumerate(streams[0]):
                            # keyframe = key_time_i * (key_time[0] * 10) ## TODO: Do proper timing...
                            keyframe = key_time_i + 1
                            scs_offset = _convert_utils.change_to_scs_xyz_coordinates(
                                custom_channels[channel_name][2][1][key_time_i], import_scale)
                            offset = Vector(scs_offset)
                            if location is None:
                                location = offset
                            else:
                                location = location + offset

                            # BUILD TRANSLATION CURVES
                            pos_fcurves[0].keyframe_points.insert(frame=float(keyframe), value=location[0], options={'FAST'})
                            pos_fcurves[1].keyframe_points.insert(frame=float(keyframe), value=location[1], options={'FAST'})
                            pos_fcurves[2].keyframe_points.insert(frame=float(keyframe), value=location[2], options={'FAST'})

                        # SET LINEAR INTERPOLATION FOR ALL CURVES
                        for curve in pos_fcurves:
                            for keyframe in curve.keyframe_points:
                                keyframe.interpolation = 'LINEAR'
                    else:
                        lprint('W Unknown channel %r in "%s" file.', (channel_name, os.path.basename(pia_filepath)))

            # CREATE SCS ANIMATION (register action in root object's animation inventory)
            animation = _animation_utils.add_animation_to_root(root_object, animation_name)
            animation.export = True
            animation.action = anim_action.name
            animation.anim_start = anim_action.frame_range[0]
            animation.anim_end = anim_action.frame_range[1]

            if total_time:
                animation.length = total_time

            imported_count += 1
        else:
            lprint('I "%s" file REJECTED', (os.path.basename(pia_filepath),))

    # at the end of batch import make sure to select last animation always
    if imported_count > 0:
        root_object.scs_props.active_scs_animation = len(root_object.scs_object_animation_inventory) - 1

    print("************************************")
    return imported_count
def load_pim_file(context, filepath, terrain_points_trans=None, preview_model=False):
    """Loads the actual PIM file type. This is used also for loading of 'Preview Models'.

    :param context: Blender context used for object creation
    :type context: bpy.types.Context
    :param filepath: File path to be imported
    :type filepath: str
    :param terrain_points_trans: transitional structure with filled terrain points from PIP; or None
    :type terrain_points_trans: io_scs_tools.imp.transition_structs.terrain_points.TerrainPntsTrans | None
    :param preview_model: Load geomety as Preview Model
    :type preview_model: bool
    :return: ({'FINISHED'}, objects, locators, armature, skeleton, materials) on success,
        ({'CANCELLED'}, None, None, [], None, None) on unsupported version,
        or the joined preview model object (or None) when "preview_model" is set
    :rtype: tuple | bpy.types.Object | None
    """
    # NOTE(review): skinned objects are collected below but NOT part of the returned tuple;
    # callers that need them presumably re-derive them — confirm against call sites.

    # create empty terrain points transitional structure if none is passed
    if terrain_points_trans is None:
        terrain_points_trans = TerrainPntsTrans()

    scs_globals = _get_scs_globals()

    lprint("I Reading data from PIM file...")

    ind = ' '
    pim_container = _pix_container.get_data_from_file(filepath, ind)

    lprint("I Assembling data...")

    # LOAD HEADER
    format_version, source, f_type, f_name, source_filename, author = get_header(pim_container)

    # only PIM format version 5 is supported
    if format_version not in (5,):
        lprint('\nE Unknown PIM file version! Version %r is not currently supported by PIM importer.', format_version)
        return {'CANCELLED'}, None, None, [], None, None

    # LOAD GLOBALS
    (vertex_count,
     face_count,
     edge_count,
     material_count,
     piece_count,
     part_count,
     bone_count,
     locator_count,
     skeleton,
     piece_skin_count) = get_global(pim_container)

    # DATA LOADING
    materials_data = {}
    objects_data = {}
    parts_data = {}
    locators_data = {}
    bones = {}
    skin_streams = []
    piece_skin_data = {}

    for section in pim_container:
        if section.type == 'Material':
            if scs_globals.import_pim_file:
                material_i, materials_alias, materials_effect = get_material_properties(section)

                # suport legacy format without index
                if not material_i:
                    material_i = len(materials_data.keys())

                materials_data[material_i] = [materials_alias, materials_effect, ]
        elif section.type == 'Piece':
            if scs_globals.import_pim_file:
                ob_index, ob_material, ob_vertex_cnt, ob_edge_cnt, ob_face_cnt, ob_stream_cnt = get_piece_properties(section)
                piece_name = 'piece_' + str(ob_index)

                (mesh_vertices,
                 mesh_normals,
                 mesh_tangents,
                 mesh_rgb,
                 mesh_rgba,
                 mesh_scalars,
                 mesh_uv,
                 mesh_tuv,
                 mesh_triangles) = _get_piece_streams(section)

                points_to_weld_list = {}
                # welding of duplicated points is only possible when normals are present
                if mesh_normals:
                    if scs_globals.import_use_welding:
                        points_to_weld_list = _mesh_utils.make_points_to_weld_list(mesh_vertices,
                                                                                  mesh_normals,
                                                                                  mesh_rgb,
                                                                                  mesh_rgba,
                                                                                  scs_globals.import_welding_precision)

                objects_data[ob_index] = (context,
                                          piece_name,
                                          ob_material,
                                          mesh_vertices,
                                          mesh_normals,
                                          mesh_tangents,
                                          mesh_rgb,
                                          mesh_rgba,
                                          mesh_scalars,
                                          mesh_uv,
                                          mesh_tuv,
                                          mesh_triangles,
                                          points_to_weld_list, )

                # count down declared pieces; leftover signals a count mismatch (warned at the end)
                piece_count -= 1
        elif section.type == 'Part':
            if scs_globals.import_pim_file:
                part_name, part_piece_count, part_locator_count, part_pieces, part_locators = get_part_properties(section)

                # normalize single-index values to lists
                if part_pieces is not None and isinstance(part_pieces, int):
                    part_pieces = [part_pieces]
                if part_locators is not None and isinstance(part_locators, int):
                    part_locators = [part_locators]
                parts_data[part_name] = (part_pieces, part_locators, )
        elif section.type == 'Locator':
            if scs_globals.import_pim_file:
                loc_index, loc_name, loc_hookup, loc_position, loc_rotation, loc_scale = get_locator_properties(section)

                locators_data[loc_index] = (loc_name,
                                            loc_hookup,
                                            loc_position,
                                            loc_rotation,
                                            loc_scale, )

        # BONES
        elif section.type == 'Bones':
            if scs_globals.import_pis_file:
                bones = get_bones_properties(section, scs_globals.import_pis_file)

        # SKINNING
        elif section.type == 'Skin':  # Always only one skin in current SCS game implementation.
            if scs_globals.import_pim_file and scs_globals.import_pis_file:
                skin_stream_cnt, skin_streams = get_skin_properties(section)
        elif section.type == "PieceSkin":
            if scs_globals.import_pim_file and scs_globals.import_pis_file:
                skin_piece_idx, skin_stream_cnt, skin_piece_streams = get_piece_skin_properties(section)
                piece_skin_data[skin_piece_idx] = skin_piece_streams
                piece_skin_count -= 1

    # CREATE MATERIALS
    if scs_globals.import_pim_file and not preview_model:
        lprint("\nI ------ Creating materials: ------")
        for mat_i in materials_data:
            mat = bpy.data.materials.new(materials_data[mat_i][0])
            mat.scs_props.mat_effect_name = materials_data[mat_i][1]
            # keep the original alias at index 2 and replace index 0 with the
            # actual Blender material name (may differ due to name collisions)
            materials_data[mat_i].append(materials_data[mat_i][0])
            materials_data[mat_i][0] = mat.name
            lprint("I Created material %r...", (mat.name,))
        lprint("I ---------------------------------")

    # PREPARE VERTEX GROUPS FOR SKINNING
    # builds: object_skinning[piece_name][vertex_group_name][vertex_index] = weight
    object_skinning = {}
    if scs_globals.import_pim_file and scs_globals.import_pis_file and bones:
        if skin_streams:  # global skinning section
            for skin_stream in skin_streams:
                for stream_i, stream in enumerate(skin_stream):
                    for data in stream[5]:  # index 5 is data block, see _get_skin_stream
                        for rec in data['clones']:
                            obj = objects_data[rec[0]][1]  # piece name
                            if obj not in object_skinning:
                                object_skinning[obj] = {}
                            vertex = rec[1]
                            for weight in data['weights']:
                                vg = bones[weight[0]]
                                if vg not in object_skinning[obj]:
                                    object_skinning[obj][vg] = {}
                                vw = weight[1]
                                object_skinning[obj][vg][vertex] = vw
        elif piece_skin_data:  # or skinning per piece
            for piece_idx, piece_skin_streams in piece_skin_data.items():
                obj = objects_data[piece_idx][1]  # piece name
                for skin_stream in piece_skin_streams:
                    for stream_i, stream in enumerate(skin_stream):
                        for data in stream[5]:  # index 5 is data block, see _get_skin_stream
                            for vertex_idx in data['vertex_indices']:
                                if obj not in object_skinning:
                                    object_skinning[obj] = {}
                                for weight in data['weights']:
                                    vg = bones[weight[0]]
                                    if vg not in object_skinning[obj]:
                                        object_skinning[obj][vg] = {}
                                    vw = weight[1]
                                    object_skinning[obj][vg][vertex_idx] = vw

    # CREATE OBJECTS
    lprint("\nI ------ Creating mesh objects: -------")
    objects = []
    skinned_objects = []
    for obj_i in objects_data:

        # PARTS - search part first so preview model can possibly ignore objects with prescribed variant
        part_name = None
        for part in parts_data:
            if parts_data[part][0] is not None and obj_i in parts_data[part][0]:
                part_name = part.lower()

        if preview_model:

            # ignore pieces with "coll" parts
            # NOTE(review): part_name may still be None here if the piece belongs to no part;
            # re.match would then raise TypeError — presumably parts always cover all pieces, confirm.
            if match(r'^coll([0-9]?|_.*)$', part_name):
                lprint("I Ignoring piece with part collision part name %r for preview model!", (part_name,))
                continue

            # ignore pieces with shadow and none material effects
            used_mat_effect = materials_data[objects_data[obj_i][2]][1]
            if ".shadowonly" in used_mat_effect or ".fakeshadow" in used_mat_effect or used_mat_effect.startswith("eut2.none"):
                lprint("I Ignoring piece with material having shadow or none effect for preview model!")
                continue

        obj = _create_piece(objects_data[obj_i][0],  # context
                            preview_model,
                            objects_data[obj_i][1],  # piece_name
                            objects_data[obj_i][2],  # ob_material
                            objects_data[obj_i][3],  # mesh_vertices
                            objects_data[obj_i][4],  # mesh_normals
                            objects_data[obj_i][5],  # mesh_tangents
                            objects_data[obj_i][6],  # mesh_rgb
                            objects_data[obj_i][7],  # mesh_rgba
                            objects_data[obj_i][8],  # mesh_scalars
                            object_skinning,
                            objects_data[obj_i][9],  # mesh_uv
                            objects_data[obj_i][10],  # mesh_tuv
                            objects_data[obj_i][11],  # mesh_triangles
                            materials_data,
                            objects_data[obj_i][12],  # points_to_weld_list
                            terrain_points_trans, )

        piece_name = objects_data[obj_i][1]
        if obj:

            # make sure that objects are using Z depth calculation
            # comes handy when we have any kind of transparent materials.
            # Moreover as this property doesn't do any change on
            # none transparent materials we can easily set this to all imported objects
            obj.show_transparent = True

            if piece_name in object_skinning:
                skinned_objects.append(obj)
            else:
                objects.append(obj)

            lprint("I Created object %r (%s/%s)...", (obj.name, obj_i, len(objects_data)))

            # PARTS
            if part_name:
                obj.scs_props.scs_part = part_name
        else:
            lprint("E %r - Object creation FAILED!", piece_name)
    lprint("I -------------------------------------")

    if preview_model:

        # abort loading if no meshes inside imported model
        if len(objects) == 0 and len(skinned_objects) == 0:
            return None

        # get active object for joining meshes into it
        active_object = objects[0] if len(objects) > 0 else skinned_objects[0]

        # join all created pieces into one preview model object via context override
        override = context.copy()
        override["active_object"] = active_object
        override["selected_editable_objects"] = objects + skinned_objects
        bpy.ops.object.join(override)

        return active_object

    # CREATE MODEL LOCATORS
    locators = []
    if scs_globals.import_pim_file and not preview_model:
        lprint("\nI ------ Creating model locators: ------")
        for loc_i in locators_data:
            loc = _object_utils.create_locator_empty(
                locators_data[loc_i][0],  # loc_name
                locators_data[loc_i][2],  # loc_position
                locators_data[loc_i][3],  # loc_rotation
                locators_data[loc_i][4],  # loc_scale
                1.0,  # loc_size
                'Model',  # loc_type
                locators_data[loc_i][1],  # loc_hookup
            )
            locator_name = locators_data[loc_i][0]
            if loc:
                lprint("I Created locator %r...", (locator_name,))
                locators.append(loc)
                # assign the part the locator was declared in
                for part in parts_data:
                    if parts_data[part][1] is not None:
                        if loc_i in parts_data[part][1]:
                            loc.scs_props.scs_part = part.lower()
            else:
                lprint("E %r - Locator creation FAILED!", (locator_name,))
        lprint("I --------------------------------------")

    # CREATE SKELETON (ARMATURE)
    armature = None
    if scs_globals.import_pis_file and bones:
        bpy.ops.object.add(type='ARMATURE')
        bpy.ops.object.editmode_toggle()
        for bone in bones:
            bpy.ops.armature.bone_primitive_add(name=bone)
        bpy.ops.object.editmode_toggle()
        armature = bpy.context.object

        # ADD ARMATURE MODIFIERS TO SKINNED OBJECTS
        for obj in skinned_objects:
            bpy.context.view_layer.objects.active = obj
            bpy.ops.object.modifier_add(type='ARMATURE')
            arm_modifier = None
            for modifier in obj.modifiers:
                if modifier.type == 'ARMATURE':
                    arm_modifier = modifier
                    break
            if arm_modifier:
                arm_modifier.object = armature
            obj.parent = armature

    # WARNING PRINTOUTS
    if piece_count < 0:
        lprint("W More Pieces found than were declared!")
    if piece_count > 0:
        lprint("W Some Pieces not found, but were declared!")
    if piece_skin_count > 0:
        lprint("W More PieceSkins found than were declared!")
    if piece_skin_count < 0:
        lprint("W Some PieceSkins not found, but were declared!")

    return {'FINISHED'}, objects, locators, armature, skeleton, materials_data.values()
def apply_settings():
    """Applies all the settings from the config file to the add-on globals and active scene.

    The config file is read and applied section by section ("Paths", "Import", "Export",
    "GlobalDisplay", "GlobalColors", "Various"), but only after the "Header" section has
    validated the file (FormatVersion 1 and Type "Configuration" — both checks must pass
    before "settings_file_valid" reaches 2). While applying, "config_update_lock" is held
    so that property update callbacks don't write the values straight back to the file.

    :return: True when function finishes (regardless of whether the config file was valid)
    :rtype: bool
    """
    config_container = _pix.get_data_from_file(get_config_filepath(), " ")

    # save file paths in extra variables and apply them on the end
    # to make sure all of the settings are loaded first.
    # This is needed as some libraries reading are driven by other values from config file.
    # For example: "use_infixed"
    scs_project_path = _property_utils.get_default(bpy.types.GlobalSCSProps.scs_project_path)
    shader_presets_filepath = _property_utils.get_default(bpy.types.GlobalSCSProps.shader_presets_filepath)
    trigger_actions_rel_path = _property_utils.get_default(bpy.types.GlobalSCSProps.trigger_actions_rel_path)
    sign_library_rel_path = _property_utils.get_default(bpy.types.GlobalSCSProps.sign_library_rel_path)
    tsem_library_rel_path = _property_utils.get_default(bpy.types.GlobalSCSProps.tsem_library_rel_path)
    traffic_rules_library_rel_path = _property_utils.get_default(bpy.types.GlobalSCSProps.traffic_rules_library_rel_path)
    hookup_library_rel_path = _property_utils.get_default(bpy.types.GlobalSCSProps.hookup_library_rel_path)
    matsubs_library_rel_path = _property_utils.get_default(bpy.types.GlobalSCSProps.matsubs_library_rel_path)

    # prevent "update_item_in_file" from writing back while we are applying values
    _get_scs_globals().config_update_lock = True

    settings_file_valid = 0
    for section in config_container:

        # settings sections are only applied once the header has been fully validated
        if settings_file_valid == 2:
            if section.type == "Paths":
                for prop in section.props:
                    if prop[0] in ("", "#"):
                        pass
                    elif prop[0] == "ProjectPath":
                        scs_project_path = prop[1]
                    elif prop[0] == "ShaderPresetsFilePath":
                        shader_presets_filepath = prop[1]
                    elif prop[0] == "TriggerActionsRelFilePath":
                        trigger_actions_rel_path = prop[1]
                    elif prop[0] == "TriggerActionsUseInfixed":
                        _get_scs_globals().trigger_actions_use_infixed = prop[1]
                    elif prop[0] == "SignRelFilePath":
                        sign_library_rel_path = prop[1]
                    elif prop[0] == "SignUseInfixed":
                        _get_scs_globals().sign_library_use_infixed = prop[1]
                    elif prop[0] == "TSemProfileRelFilePath":
                        tsem_library_rel_path = prop[1]
                    elif prop[0] == "TSemProfileUseInfixed":
                        _get_scs_globals().tsem_library_use_infixed = prop[1]
                    elif prop[0] == "TrafficRulesRelFilePath":
                        traffic_rules_library_rel_path = prop[1]
                    elif prop[0] == "TrafficRulesUseInfixed":
                        _get_scs_globals().traffic_rules_library_use_infixed = prop[1]
                    elif prop[0] == "HookupRelDirPath":
                        hookup_library_rel_path = prop[1]
                    elif prop[0] == "MatSubsRelFilePath":
                        matsubs_library_rel_path = prop[1]
                    else:
                        lprint('W Unrecognised item "%s" has been found in setting file! Skipping...', (str(prop[0]),))
            elif section.type == "Import":
                for prop in section.props:
                    if prop[0] in ("", "#"):
                        pass
                    elif prop[0] == "ImportScale":
                        _get_scs_globals().import_scale = float(prop[1])
                    elif prop[0] == "ImportPimFile":
                        _get_scs_globals().import_pim_file = prop[1]
                    elif prop[0] == "UseWelding":
                        _get_scs_globals().use_welding = prop[1]
                    elif prop[0] == "WeldingPrecision":
                        _get_scs_globals().welding_precision = prop[1]
                    elif prop[0] == "UseNormals":
                        _get_scs_globals().use_normals = prop[1]
                    elif prop[0] == "ImportPitFile":
                        _get_scs_globals().import_pit_file = prop[1]
                    elif prop[0] == "LoadTextures":
                        _get_scs_globals().load_textures = prop[1]
                    elif prop[0] == "ImportPicFile":
                        _get_scs_globals().import_pic_file = prop[1]
                    elif prop[0] == "ImportPipFile":
                        _get_scs_globals().import_pip_file = prop[1]
                    elif prop[0] == "ImportPisFile":
                        _get_scs_globals().import_pis_file = prop[1]
                    elif prop[0] == "ConnectedBones":
                        _get_scs_globals().connected_bones = prop[1]
                    elif prop[0] == "BoneImportScale":
                        _get_scs_globals().bone_import_scale = float(prop[1])
                    elif prop[0] == "ImportPiaFile":
                        _get_scs_globals().import_pia_file = prop[1]
                    elif prop[0] == "IncludeSubdirsForPia":
                        _get_scs_globals().include_subdirs_for_pia = prop[1]
            elif section.type == "Export":
                for prop in section.props:
                    if prop[0] in ("", "#"):
                        pass
                    elif prop[0] == "ContentType":
                        _get_scs_globals().content_type = prop[1]
                    elif prop[0] == "ExportScale":
                        _get_scs_globals().export_scale = float(prop[1])
                    elif prop[0] == "ApplyModifiers":
                        _get_scs_globals().apply_modifiers = prop[1]
                    elif prop[0] == "ExcludeEdgesplit":
                        _get_scs_globals().exclude_edgesplit = prop[1]
                    elif prop[0] == "IncludeEdgesplit":
                        _get_scs_globals().include_edgesplit = prop[1]
                    elif prop[0] == "ActiveUVOnly":
                        _get_scs_globals().active_uv_only = prop[1]
                    elif prop[0] == "ExportVertexGroups":
                        _get_scs_globals().export_vertex_groups = prop[1]
                    elif prop[0] == "ExportVertexColor":
                        _get_scs_globals().export_vertex_color = prop[1]
                    elif prop[0] == "ExportVertexColorType":
                        _get_scs_globals().export_vertex_color_type = str(prop[1])
                    elif prop[0] == "ExportVertexColorType7":
                        _get_scs_globals().export_vertex_color_type_7 = str(prop[1])
                    # elif prop[0] == "ExportAnimFile":
                    #     _get_scs_globals().export_anim_file = prop[1]
                    elif prop[0] == "ExportPimFile":
                        _get_scs_globals().export_pim_file = prop[1]
                    elif prop[0] == "OutputType":
                        _get_scs_globals().output_type = prop[1]
                    elif prop[0] == "ExportPitFile":
                        _get_scs_globals().export_pit_file = prop[1]
                    elif prop[0] == "ExportPicFile":
                        _get_scs_globals().export_pic_file = prop[1]
                    elif prop[0] == "ExportPipFile":
                        _get_scs_globals().export_pip_file = prop[1]
                    elif prop[0] == "SignExport":
                        _get_scs_globals().sign_export = prop[1]
            elif section.type == "GlobalDisplay":
                for prop in section.props:
                    if prop[0] in ("", "#"):
                        pass
                    elif prop[0] == "DisplayLocators":
                        bpy.context.scene.scs_props.display_locators = prop[1]
                    elif prop[0] == "LocatorSize":
                        bpy.context.scene.scs_props.locator_size = float(prop[1])
                    elif prop[0] == "LocatorEmptySize":
                        bpy.context.scene.scs_props.locator_empty_size = float(prop[1])
                    elif prop[0] == "DisplayConnections":
                        bpy.context.scene.scs_props.display_connections = prop[1]
                    elif prop[0] == "CurveSegments":
                        bpy.context.scene.scs_props.curve_segments = prop[1]
                    elif prop[0] == "OptimizedConnsDrawing":
                        bpy.context.scene.scs_props.optimized_connections_drawing = prop[1]
                    elif prop[0] == "DisplayTextInfo":
                        bpy.context.scene.scs_props.display_info = prop[1]
                    else:
                        lprint('W Unrecognised item "%s" has been found in setting file! Skipping...', (str(prop[0]),))
            elif section.type == "GlobalColors":
                for prop in section.props:
                    if prop[0] in ("", "#"):
                        pass
                    elif prop[0] == "PrefabLocatorsWire":
                        bpy.context.scene.scs_props.locator_prefab_wire_color = prop[1]
                    elif prop[0] == "ModelLocatorsWire":
                        bpy.context.scene.scs_props.locator_model_wire_color = prop[1]
                    elif prop[0] == "ColliderLocatorsWire":
                        bpy.context.scene.scs_props.locator_coll_wire_color = prop[1]
                    elif prop[0] == "ColliderLocatorsFace":
                        bpy.context.scene.scs_props.locator_coll_face_color = prop[1]
                    elif prop[0] == "NavigationCurveBase":
                        bpy.context.scene.scs_props.np_connection_base_color = prop[1]
                    elif prop[0] == "MapLineBase":
                        bpy.context.scene.scs_props.mp_connection_base_color = prop[1]
                    elif prop[0] == "TriggerLineBase":
                        bpy.context.scene.scs_props.tp_connection_base_color = prop[1]
                    elif prop[0] == "InfoText":
                        bpy.context.scene.scs_props.info_text_color = prop[1]
                    else:
                        lprint('W Unrecognised item "%s" has been found in setting file! Skipping...', (str(prop[0]),))
            elif section.type == "Various":
                for prop in section.props:
                    if prop[0] in ("", "#"):
                        pass
                    elif prop[0] == "DumpLevel":
                        _get_scs_globals().dump_level = prop[1]
        elif section.type == "Header":
            # header validation: both FormatVersion and Type must match for settings to apply
            for prop in section.props:
                if prop[0] == "FormatVersion":
                    if prop[1] == 1:
                        settings_file_valid += 1
                elif prop[0] == "Type":
                    if prop[1] == "Configuration":
                        settings_file_valid += 1
                elif prop[0] == "DumpLevel":
                    _get_scs_globals().dump_level = prop[1]

    # now as last apply all of the file paths
    _get_scs_globals().scs_project_path = scs_project_path
    _get_scs_globals().shader_presets_filepath = shader_presets_filepath
    _get_scs_globals().trigger_actions_rel_path = trigger_actions_rel_path
    _get_scs_globals().sign_library_rel_path = sign_library_rel_path
    _get_scs_globals().tsem_library_rel_path = tsem_library_rel_path
    _get_scs_globals().traffic_rules_library_rel_path = traffic_rules_library_rel_path
    _get_scs_globals().hookup_library_rel_path = hookup_library_rel_path
    _get_scs_globals().matsubs_library_rel_path = matsubs_library_rel_path

    # release the lock so property updates get persisted again
    _get_scs_globals().config_update_lock = False
    return True
def apply_settings() -> bool:
    """Applies all the settings from the config file to the active scene.

    NOTE(review): sections other than "Header" are processed only once
    ``settings_file_valid`` reaches 2 (both "FormatVersion" and "Type" header
    props matched), so the "Header" section is expected to precede all other
    sections in the config file — sections before a valid header are skipped.

    :return: True when settings were applied
    :rtype: bool
    """
    config_container = _pix.get_data_from_file(get_config_filepath(), " ")

    # lock config updating, so the property writes below don't trigger re-saving of the config file
    _get_scs_globals().config_update_lock = True
    # print(' > apply_settings...')

    # header validity counter; value 2 means header was recognised as valid
    settings_file_valid = 0

    for section in config_container:
        if settings_file_valid == 2:
            if section.type == "Paths":
                for prop in section.props:
                    if prop[0] in ("", "#"):
                        pass  # empty or comment entries
                    elif prop[0] == "ProjectPath":
                        _get_scs_globals().scs_project_path = prop[1]
                    elif prop[0] == "ShaderPresetsFilePath":
                        _get_scs_globals().shader_presets_filepath = prop[1]
                    # elif prop[0] == "CgFXTemplatesFilePath":
                    #     _get_scs_globals().cgfx_templates_filepath = prop[1]
                    # elif prop[0] == "CgFXRelDirPath":
                    #     _get_scs_globals().cgfx_library_rel_path = prop[1]
                    elif prop[0] == "SignRelFilePath":
                        _get_scs_globals().sign_library_rel_path = prop[1]
                    elif prop[0] == "TSemProfileRelFilePath":
                        _get_scs_globals().tsem_library_rel_path = prop[1]
                    elif prop[0] == "TrafficRulesRelFilePath":
                        _get_scs_globals().traffic_rules_library_rel_path = prop[1]
                    elif prop[0] == "HookupRelDirPath":
                        _get_scs_globals().hookup_library_rel_path = prop[1]
                    elif prop[0] == "MatSubsRelFilePath":
                        _get_scs_globals().matsubs_library_rel_path = prop[1]
                    else:
                        lprint('W Unrecognised item "%s" has been found in setting file! Skipping...', (str(prop[0]),))
            elif section.type == "Import":
                for prop in section.props:
                    if prop[0] in ("", "#"):
                        pass
                    elif prop[0] == "ImportScale":
                        # scale is stored as string in config, convert explicitly
                        _get_scs_globals().import_scale = float(prop[1])
                    elif prop[0] == "ImportPimFile":
                        _get_scs_globals().import_pim_file = prop[1]
                    elif prop[0] == "UseWelding":
                        _get_scs_globals().use_welding = prop[1]
                    elif prop[0] == "WeldingPrecision":
                        _get_scs_globals().welding_precision = prop[1]
                    elif prop[0] == "ImportPitFile":
                        _get_scs_globals().import_pit_file = prop[1]
                    elif prop[0] == "LoadTextures":
                        _get_scs_globals().load_textures = prop[1]
                    elif prop[0] == "ImportPicFile":
                        _get_scs_globals().import_pic_file = prop[1]
                    elif prop[0] == "ImportPipFile":
                        _get_scs_globals().import_pip_file = prop[1]
                    elif prop[0] == "ImportPisFile":
                        _get_scs_globals().import_pis_file = prop[1]
                    elif prop[0] == "ConnectedBones":
                        _get_scs_globals().connected_bones = prop[1]
                    elif prop[0] == "BoneImportScale":
                        _get_scs_globals().bone_import_scale = float(prop[1])
                    elif prop[0] == "ImportPiaFile":
                        _get_scs_globals().import_pia_file = prop[1]
                    elif prop[0] == "IncludeSubdirsForPia":
                        _get_scs_globals().include_subdirs_for_pia = prop[1]
                    elif prop[0] == "MeshCreationType":
                        _get_scs_globals().mesh_creation_type = prop[1]
            elif section.type == "Export":
                for prop in section.props:
                    if prop[0] in ("", "#"):
                        pass
                    elif prop[0] == "ContentType":
                        _get_scs_globals().content_type = prop[1]
                    elif prop[0] == "ExportScale":
                        _get_scs_globals().export_scale = float(prop[1])
                    elif prop[0] == "ApplyModifiers":
                        _get_scs_globals().apply_modifiers = prop[1]
                    elif prop[0] == "ExcludeEdgesplit":
                        _get_scs_globals().exclude_edgesplit = prop[1]
                    elif prop[0] == "IncludeEdgesplit":
                        _get_scs_globals().include_edgesplit = prop[1]
                    elif prop[0] == "ActiveUVOnly":
                        _get_scs_globals().active_uv_only = prop[1]
                    elif prop[0] == "ExportVertexGroups":
                        _get_scs_globals().export_vertex_groups = prop[1]
                    elif prop[0] == "ExportVertexColor":
                        _get_scs_globals().export_vertex_color = prop[1]
                    elif prop[0] == "ExportVertexColorType":
                        _get_scs_globals().export_vertex_color_type = str(prop[1])
                    elif prop[0] == "ExportVertexColorType7":
                        _get_scs_globals().export_vertex_color_type_7 = str(prop[1])
                    # elif prop[0] == "ExportAnimFile":
                    #     _get_scs_globals().export_anim_file = prop[1]
                    elif prop[0] == "ExportPimFile":
                        _get_scs_globals().export_pim_file = prop[1]
                    elif prop[0] == "OutputType":
                        _get_scs_globals().output_type = prop[1]
                    elif prop[0] == "ExportPitFile":
                        _get_scs_globals().export_pit_file = prop[1]
                    elif prop[0] == "ExportPicFile":
                        _get_scs_globals().export_pic_file = prop[1]
                    elif prop[0] == "ExportPipFile":
                        _get_scs_globals().export_pip_file = prop[1]
                    elif prop[0] == "SignExport":
                        _get_scs_globals().sign_export = prop[1]
            elif section.type == "GlobalDisplay":
                # display settings live on the scene properties, not on globals
                for prop in section.props:
                    if prop[0] in ("", "#"):
                        pass
                    elif prop[0] == "DisplayLocators":
                        bpy.context.scene.scs_props.display_locators = prop[1]
                    elif prop[0] == "LocatorSize":
                        bpy.context.scene.scs_props.locator_size = float(prop[1])
                    elif prop[0] == "LocatorEmptySize":
                        bpy.context.scene.scs_props.locator_empty_size = float(prop[1])
                    elif prop[0] == "DisplayConnections":
                        bpy.context.scene.scs_props.display_connections = prop[1]
                    elif prop[0] == "CurveSegments":
                        bpy.context.scene.scs_props.curve_segments = prop[1]
                    elif prop[0] == "OptimizedConnsDrawing":
                        bpy.context.scene.scs_props.optimized_connections_drawing = prop[1]
                    elif prop[0] == "DisplayTextInfo":
                        bpy.context.scene.scs_props.display_info = prop[1]
                    else:
                        lprint('W Unrecognised item "%s" has been found in setting file! Skipping...', (str(prop[0]),))
            elif section.type == "GlobalColors":
                for prop in section.props:
                    if prop[0] in ("", "#"):
                        pass
                    elif prop[0] == "PrefabLocatorsWire":
                        bpy.context.scene.scs_props.locator_prefab_wire_color = prop[1]
                    elif prop[0] == "ModelLocatorsWire":
                        bpy.context.scene.scs_props.locator_model_wire_color = prop[1]
                    elif prop[0] == "ColliderLocatorsWire":
                        bpy.context.scene.scs_props.locator_coll_wire_color = prop[1]
                    elif prop[0] == "ColliderLocatorsFace":
                        bpy.context.scene.scs_props.locator_coll_face_color = prop[1]
                    elif prop[0] == "NavigationCurveBase":
                        bpy.context.scene.scs_props.np_connection_base_color = prop[1]
                    elif prop[0] == "MapLineBase":
                        bpy.context.scene.scs_props.mp_connection_base_color = prop[1]
                    elif prop[0] == "TriggerLineBase":
                        bpy.context.scene.scs_props.tp_connection_base_color = prop[1]
                    elif prop[0] == "InfoText":
                        bpy.context.scene.scs_props.info_text_color = prop[1]
                    else:
                        lprint('W Unrecognised item "%s" has been found in setting file! Skipping...', (str(prop[0]),))
            elif section.type == "Various":
                for prop in section.props:
                    # if prop[0] == "#":
                    if prop[0] in ("", "#"):
                        pass
                    elif prop[0] == "DumpLevel":
                        _get_scs_globals().dump_level = prop[1]
        elif section.type == "Header":
            # each matched header prop bumps the validity counter; both must match (== 2)
            for prop in section.props:
                if prop[0] == "FormatVersion":
                    if prop[1] == 1:
                        settings_file_valid += 1
                elif prop[0] == "Type":
                    if prop[1] == "Configuration":
                        settings_file_valid += 1
                elif prop[0] == "DumpLevel":
                    _get_scs_globals().dump_level = prop[1]

    # release lock, so config file can be updated again
    _get_scs_globals().config_update_lock = False
    return True
def load(filepath, pim_mats_info):
    """Entry point for importing a PIT file.

    Reads looks and variants from the PIT container, then applies the shader
    settings of one chosen look to the materials previously created by the PIM
    import (either through a matching shader preset or as a directly imported
    shader marked "<imported>").

    :param filepath: filepath of PIT file
    :type filepath: str
    :param pim_mats_info: list of material info, one material info consists of list:
           [ blend_mat_name, mat_effect, original_mat_alias ]
    :type pim_mats_info: list of list
    :return: tuple of operator result set and list of loaded (variant_name, variant_parts) records
    :rtype: tuple
    """
    print("\n************************************")
    print("** SCS PIT Importer **")
    print("** (c)2014 SCS Software **")
    print("************************************\n")

    ind = ' '
    pit_container = _pix_container.get_data_from_file(filepath, ind)

    # TEST PRINTOUTS
    # ind = ' '
    # for section in pit_container:
    #     print('SEC.: "%s"' % section.type)
    #     for prop in section.props:
    #         print('%sProp: %s' % (ind, prop))
    #     for data in section.data:
    #         print('%sdata: %s' % (ind, data))
    #     for sec in section.sections:
    #         print_section(sec, ind)
    # print('\nTEST - Source: "%s"' % pit_container[0].props[1][1])
    # print('')

    # TEST EXPORT
    # path, file = os.path.splitext(filepath)
    # export_filepath = str(path + '_reex' + file)
    # result = pix_write.write_data(pit_container, export_filepath, ind, dump_level)
    # if result == {'FINISHED'}:
    #     Print(dump_level, '\nI Test export succesful! The new file:\n "%s"', export_filepath)
    # else:
    #     Print(dump_level, '\nE Test export failed! File:\n "%s"', export_filepath)

    # LOAD HEADER
    '''
    NOTE: skipped for now as no data needs to be readed
    (format_version, source, f_type, f_name, source_filename, author) = _get_header(pit_container, dump_level)
    '''

    # LOAD GLOBALS
    '''
    NOTE: skipped for now as no data needs to be readed
    (look_count, variant_count, part_count, material_count) = _get_global(pit_container, dump_level)
    '''

    # LOAD LOOKS AND VARIANTS
    loaded_looks = {}
    looks = []  # ordered look names, so a deterministic fallback look can be picked
    loaded_variants = []
    for section in pit_container:
        if section.type == 'Look':
            look_name, look_mat_settings = _get_look(section)
            loaded_looks[look_name] = look_mat_settings
            looks.append(look_name)
        elif section.type == 'Variant':
            variant_name, variantparts = _get_variant(section)
            variant_record = (variant_name, variantparts)
            # variant_record = (getVariant(section))
            loaded_variants.append(variant_record)
            # loaded_variants.append((getVariant(section)))

    # PICK ONE LOOK DATA BLOCK, NOTE: This is temporal now as we don't have proper support for Looks.
    if "Default" in loaded_looks:
        look_name = "Default"
    else:
        # NOTE(review): assumes at least one Look section exists in the file,
        # otherwise this raises IndexError — verify against PIT format guarantees
        look_name = looks[0]
    # print(" look_name: %r" % look_name)
    look_mat_settings = loaded_looks[look_name]

    # SETUP ALL THE MATERIALS, NOTE: They should be already created by PIM import.
    for mat_info in pim_mats_info:
        mat = bpy.data.materials[mat_info[0]]
        if mat_info[2] in look_mat_settings:
            # ASSIGN IMPORTED SHADER DATA
            material_effect, material_attributes, material_textures, material_section = _get_shader_data(look_mat_settings[mat_info[2]])

            # TRY TO FIND SUITABLE PRESET
            (preset_index, preset_name) = _find_preset(material_effect)

            if preset_index:  # preset name is found within presets shaders
                mat.scs_props.active_shader_preset_name = preset_name
                preset_section = _material_utils.get_shader_preset(_get_scs_globals().shader_presets_filepath, preset_name)
                if preset_section:
                    preset_effect = preset_section.get_prop_value("Effect")
                    mat.scs_props.mat_effect_name = preset_effect

                    # apply default shader settings
                    _material_utils.set_shader_data_to_material(mat, preset_section, preset_effect, is_import=True)

                    # reapply settings from material
                    _material_utils.set_shader_data_to_material(mat, material_section, material_effect, is_import=True, override_back_data=False)

                    lprint("I Using shader preset on material %r.", (mat.name,))
                else:
                    print('''NO "preset_section"! (Shouldn't happen!)''')
            else:
                # import shader directly from material and mark it as imported
                mat.scs_props.active_shader_preset_name = "<imported>"
                _material_utils.set_shader_data_to_material(mat, material_section, material_effect, is_import=True)
                lprint("I Using imported shader on material %r.", (mat.name,))

            # delete not needed data on material
            if "scs_tex_aliases" in mat:
                del mat["scs_tex_aliases"]

    print("************************************")
    return {'FINISHED'}, loaded_variants
def apply_settings() -> bool:
    """Applies all the settings from the config file to the active scene.

    NOTE(review): duplicate of the earlier ``apply_settings`` implementation in
    this file. Non-header sections are processed only once ``settings_file_valid``
    reaches 2 (both "FormatVersion" and "Type" header props matched), so the
    "Header" section must precede all other sections in the config file.

    :return: True when settings were applied
    :rtype: bool
    """
    config_container = _pix.get_data_from_file(get_config_filepath(), " ")

    # lock config updating, so the property writes below don't trigger re-saving of the config file
    _get_scs_globals().config_update_lock = True
    # print(' > apply_settings...')

    # header validity counter; value 2 means header was recognised as valid
    settings_file_valid = 0

    for section in config_container:
        if settings_file_valid == 2:
            if section.type == "Paths":
                for prop in section.props:
                    if prop[0] in ("", "#"):
                        pass  # empty or comment entries
                    elif prop[0] == "ProjectPath":
                        _get_scs_globals().scs_project_path = prop[1]
                    elif prop[0] == "ShaderPresetsFilePath":
                        _get_scs_globals().shader_presets_filepath = prop[1]
                    # elif prop[0] == "CgFXTemplatesFilePath":
                    #     _get_scs_globals().cgfx_templates_filepath = prop[1]
                    # elif prop[0] == "CgFXRelDirPath":
                    #     _get_scs_globals().cgfx_library_rel_path = prop[1]
                    elif prop[0] == "SignRelFilePath":
                        _get_scs_globals().sign_library_rel_path = prop[1]
                    elif prop[0] == "TSemProfileRelFilePath":
                        _get_scs_globals().tsem_library_rel_path = prop[1]
                    elif prop[0] == "TrafficRulesRelFilePath":
                        _get_scs_globals().traffic_rules_library_rel_path = prop[1]
                    elif prop[0] == "HookupRelDirPath":
                        _get_scs_globals().hookup_library_rel_path = prop[1]
                    elif prop[0] == "MatSubsRelFilePath":
                        _get_scs_globals().matsubs_library_rel_path = prop[1]
                    else:
                        lprint('W Unrecognised item "%s" has been found in setting file! Skipping...', (str(prop[0]),))
            elif section.type == "Import":
                for prop in section.props:
                    if prop[0] in ("", "#"):
                        pass
                    elif prop[0] == "ImportScale":
                        # scale is stored as string in config, convert explicitly
                        _get_scs_globals().import_scale = float(prop[1])
                    elif prop[0] == "ImportPimFile":
                        _get_scs_globals().import_pim_file = prop[1]
                    elif prop[0] == "UseWelding":
                        _get_scs_globals().use_welding = prop[1]
                    elif prop[0] == "WeldingPrecision":
                        _get_scs_globals().welding_precision = prop[1]
                    elif prop[0] == "ImportPitFile":
                        _get_scs_globals().import_pit_file = prop[1]
                    elif prop[0] == "LoadTextures":
                        _get_scs_globals().load_textures = prop[1]
                    elif prop[0] == "ImportPicFile":
                        _get_scs_globals().import_pic_file = prop[1]
                    elif prop[0] == "ImportPipFile":
                        _get_scs_globals().import_pip_file = prop[1]
                    elif prop[0] == "ImportPisFile":
                        _get_scs_globals().import_pis_file = prop[1]
                    elif prop[0] == "ConnectedBones":
                        _get_scs_globals().connected_bones = prop[1]
                    elif prop[0] == "BoneImportScale":
                        _get_scs_globals().bone_import_scale = float(prop[1])
                    elif prop[0] == "ImportPiaFile":
                        _get_scs_globals().import_pia_file = prop[1]
                    elif prop[0] == "IncludeSubdirsForPia":
                        _get_scs_globals().include_subdirs_for_pia = prop[1]
                    elif prop[0] == "MeshCreationType":
                        _get_scs_globals().mesh_creation_type = prop[1]
            elif section.type == "Export":
                for prop in section.props:
                    if prop[0] in ("", "#"):
                        pass
                    elif prop[0] == "ContentType":
                        _get_scs_globals().content_type = prop[1]
                    elif prop[0] == "ExportScale":
                        _get_scs_globals().export_scale = float(prop[1])
                    elif prop[0] == "ApplyModifiers":
                        _get_scs_globals().apply_modifiers = prop[1]
                    elif prop[0] == "ExcludeEdgesplit":
                        _get_scs_globals().exclude_edgesplit = prop[1]
                    elif prop[0] == "IncludeEdgesplit":
                        _get_scs_globals().include_edgesplit = prop[1]
                    elif prop[0] == "ActiveUVOnly":
                        _get_scs_globals().active_uv_only = prop[1]
                    elif prop[0] == "ExportVertexGroups":
                        _get_scs_globals().export_vertex_groups = prop[1]
                    elif prop[0] == "ExportVertexColor":
                        _get_scs_globals().export_vertex_color = prop[1]
                    elif prop[0] == "ExportVertexColorType":
                        _get_scs_globals().export_vertex_color_type = str(prop[1])
                    elif prop[0] == "ExportVertexColorType7":
                        _get_scs_globals().export_vertex_color_type_7 = str(prop[1])
                    # elif prop[0] == "ExportAnimFile":
                    #     _get_scs_globals().export_anim_file = prop[1]
                    elif prop[0] == "ExportPimFile":
                        _get_scs_globals().export_pim_file = prop[1]
                    elif prop[0] == "OutputType":
                        _get_scs_globals().output_type = prop[1]
                    elif prop[0] == "ExportPitFile":
                        _get_scs_globals().export_pit_file = prop[1]
                    elif prop[0] == "ExportPicFile":
                        _get_scs_globals().export_pic_file = prop[1]
                    elif prop[0] == "ExportPipFile":
                        _get_scs_globals().export_pip_file = prop[1]
                    elif prop[0] == "SignExport":
                        _get_scs_globals().sign_export = prop[1]
            elif section.type == "GlobalDisplay":
                # display settings live on the scene properties, not on globals
                for prop in section.props:
                    if prop[0] in ("", "#"):
                        pass
                    elif prop[0] == "DisplayLocators":
                        bpy.context.scene.scs_props.display_locators = prop[1]
                    elif prop[0] == "LocatorSize":
                        bpy.context.scene.scs_props.locator_size = float(prop[1])
                    elif prop[0] == "LocatorEmptySize":
                        bpy.context.scene.scs_props.locator_empty_size = float(prop[1])
                    elif prop[0] == "DisplayConnections":
                        bpy.context.scene.scs_props.display_connections = prop[1]
                    elif prop[0] == "CurveSegments":
                        bpy.context.scene.scs_props.curve_segments = prop[1]
                    elif prop[0] == "OptimizedConnsDrawing":
                        bpy.context.scene.scs_props.optimized_connections_drawing = prop[1]
                    elif prop[0] == "DisplayTextInfo":
                        bpy.context.scene.scs_props.display_info = prop[1]
                    else:
                        lprint('W Unrecognised item "%s" has been found in setting file! Skipping...', (str(prop[0]),))
            elif section.type == "GlobalColors":
                for prop in section.props:
                    if prop[0] in ("", "#"):
                        pass
                    elif prop[0] == "PrefabLocatorsWire":
                        bpy.context.scene.scs_props.locator_prefab_wire_color = prop[1]
                    elif prop[0] == "ModelLocatorsWire":
                        bpy.context.scene.scs_props.locator_model_wire_color = prop[1]
                    elif prop[0] == "ColliderLocatorsWire":
                        bpy.context.scene.scs_props.locator_coll_wire_color = prop[1]
                    elif prop[0] == "ColliderLocatorsFace":
                        bpy.context.scene.scs_props.locator_coll_face_color = prop[1]
                    elif prop[0] == "NavigationCurveBase":
                        bpy.context.scene.scs_props.np_connection_base_color = prop[1]
                    elif prop[0] == "MapLineBase":
                        bpy.context.scene.scs_props.mp_connection_base_color = prop[1]
                    elif prop[0] == "TriggerLineBase":
                        bpy.context.scene.scs_props.tp_connection_base_color = prop[1]
                    elif prop[0] == "InfoText":
                        bpy.context.scene.scs_props.info_text_color = prop[1]
                    else:
                        lprint('W Unrecognised item "%s" has been found in setting file! Skipping...', (str(prop[0]),))
            elif section.type == "Various":
                for prop in section.props:
                    # if prop[0] == "#":
                    if prop[0] in ("", "#"):
                        pass
                    elif prop[0] == "DumpLevel":
                        _get_scs_globals().dump_level = prop[1]
        elif section.type == "Header":
            # each matched header prop bumps the validity counter; both must match (== 2)
            for prop in section.props:
                if prop[0] == "FormatVersion":
                    if prop[1] == 1:
                        settings_file_valid += 1
                elif prop[0] == "Type":
                    if prop[1] == "Configuration":
                        settings_file_valid += 1
                elif prop[0] == "DumpLevel":
                    _get_scs_globals().dump_level = prop[1]

    # release lock, so config file can be updated again
    _get_scs_globals().config_update_lock = False
    return True
def apply_settings() -> bool:
    """Applies all the settings to the active scene.

    Later revision of ``apply_settings``: guards against recursive invocation
    via the config update lock, tolerates a missing config container, and
    defers application of all file paths (and dump level) until every other
    setting has been read.

    :return: False when another apply is already running; True otherwise
    :rtype: bool
    """
    scs_globals = _get_scs_globals()

    # avoid recursion if another apply settings is running already
    if scs_globals.config_update_lock:
        return False

    # NOTE: save file paths in extra variables and apply them on the end
    # to make sure all of the settings are loaded first.
    # This is needed as some libraries reading are driven by other values from config file.
    # For example: "use_infixed"
    scs_project_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.scs_project_path, scs_globals)
    shader_presets_filepath = _property_utils.get_by_type(bpy.types.GlobalSCSProps.shader_presets_filepath, scs_globals)
    trigger_actions_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.trigger_actions_rel_path, scs_globals)
    sign_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.sign_library_rel_path, scs_globals)
    tsem_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.tsem_library_rel_path, scs_globals)
    traffic_rules_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.traffic_rules_library_rel_path, scs_globals)
    hookup_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.hookup_library_rel_path, scs_globals)
    matsubs_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.matsubs_library_rel_path, scs_globals)
    conv_hlpr_converters_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.conv_hlpr_converters_path, scs_globals)

    # NOTE: as dump level is written in same section as config type
    # applying it directly might take place before we get information about config type
    # so it has to be saved into variable and applied only if global settings are loaded from config file
    dump_level = scs_globals.dump_level

    scs_globals.config_update_lock = True

    config_container = _pix.get_data_from_file(get_config_filepath(), " ")

    # avoid applying process of config if not present (most probably permission problems on config creation)
    if config_container is not None:

        # header validity counter; value 2 means header was recognised as valid
        settings_file_valid = 0

        for section in config_container:
            if settings_file_valid == 2:
                if section.type == "Paths":
                    for prop in section.props:
                        if prop[0] in ("", "#"):
                            pass  # empty or comment entries
                        elif prop[0] == "ProjectPath":
                            scs_project_path = prop[1]
                        elif prop[0] == "ShaderPresetsFilePath":
                            shader_presets_filepath = prop[1]
                        elif prop[0] == "TriggerActionsRelFilePath":
                            trigger_actions_rel_path = prop[1]
                        elif prop[0] == "TriggerActionsUseInfixed":
                            scs_globals.trigger_actions_use_infixed = prop[1]
                        elif prop[0] == "SignRelFilePath":
                            sign_library_rel_path = prop[1]
                        elif prop[0] == "SignUseInfixed":
                            scs_globals.sign_library_use_infixed = prop[1]
                        elif prop[0] == "TSemProfileRelFilePath":
                            tsem_library_rel_path = prop[1]
                        elif prop[0] == "TSemProfileUseInfixed":
                            scs_globals.tsem_library_use_infixed = prop[1]
                        elif prop[0] == "TrafficRulesRelFilePath":
                            traffic_rules_library_rel_path = prop[1]
                        elif prop[0] == "TrafficRulesUseInfixed":
                            scs_globals.traffic_rules_library_use_infixed = prop[1]
                        elif prop[0] == "HookupRelDirPath":
                            hookup_library_rel_path = prop[1]
                        elif prop[0] == "MatSubsRelFilePath":
                            matsubs_library_rel_path = prop[1]
                        elif prop[0] == "ConvertersPath":
                            conv_hlpr_converters_path = prop[1]
                        else:
                            lprint('W Unrecognised item "%s" has been found in setting file! Skipping...', (str(prop[0]),))
                elif section.type == "Import":
                    for prop in section.props:
                        if prop[0] in ("", "#"):
                            pass
                        elif prop[0] == "ImportScale":
                            # scale is stored as string in config, convert explicitly
                            scs_globals.import_scale = float(prop[1])
                        elif prop[0] == "ImportPimFile":
                            scs_globals.import_pim_file = prop[1]
                        elif prop[0] == "UseWelding":
                            scs_globals.use_welding = prop[1]
                        elif prop[0] == "WeldingPrecision":
                            scs_globals.welding_precision = prop[1]
                        elif prop[0] == "UseNormals":
                            scs_globals.use_normals = prop[1]
                        elif prop[0] == "ImportPitFile":
                            scs_globals.import_pit_file = prop[1]
                        elif prop[0] == "LoadTextures":
                            scs_globals.load_textures = prop[1]
                        elif prop[0] == "ImportPicFile":
                            scs_globals.import_pic_file = prop[1]
                        elif prop[0] == "ImportPipFile":
                            scs_globals.import_pip_file = prop[1]
                        elif prop[0] == "ImportPisFile":
                            scs_globals.import_pis_file = prop[1]
                        elif prop[0] == "ConnectedBones":
                            scs_globals.connected_bones = prop[1]
                        elif prop[0] == "BoneImportScale":
                            scs_globals.bone_import_scale = float(prop[1])
                        elif prop[0] == "ImportPiaFile":
                            scs_globals.import_pia_file = prop[1]
                        elif prop[0] == "IncludeSubdirsForPia":
                            scs_globals.include_subdirs_for_pia = prop[1]
                elif section.type == "Export":
                    for prop in section.props:
                        if prop[0] in ("", "#"):
                            pass
                        elif prop[0] == "ExportScale":
                            scs_globals.export_scale = float(prop[1])
                        elif prop[0] == "ApplyModifiers":
                            scs_globals.apply_modifiers = prop[1]
                        elif prop[0] == "ExcludeEdgesplit":
                            scs_globals.exclude_edgesplit = prop[1]
                        elif prop[0] == "IncludeEdgesplit":
                            scs_globals.include_edgesplit = prop[1]
                        elif prop[0] == "ActiveUVOnly":
                            scs_globals.active_uv_only = prop[1]
                        elif prop[0] == "ExportVertexGroups":
                            scs_globals.export_vertex_groups = prop[1]
                        elif prop[0] == "ExportVertexColor":
                            scs_globals.export_vertex_color = prop[1]
                        elif prop[0] == "ExportVertexColorType":
                            scs_globals.export_vertex_color_type = str(prop[1])
                        elif prop[0] == "ExportVertexColorType7":
                            scs_globals.export_vertex_color_type_7 = str(prop[1])
                        elif prop[0] == "ExportPimFile":
                            scs_globals.export_pim_file = prop[1]
                        elif prop[0] == "OutputType":
                            scs_globals.output_type = prop[1]
                        elif prop[0] == "ExportPitFile":
                            scs_globals.export_pit_file = prop[1]
                        elif prop[0] == "ExportPicFile":
                            scs_globals.export_pic_file = prop[1]
                        elif prop[0] == "ExportPipFile":
                            scs_globals.export_pip_file = prop[1]
                        elif prop[0] == "SignExport":
                            scs_globals.sign_export = prop[1]
                elif section.type == "GlobalDisplay":
                    for prop in section.props:
                        if prop[0] in ("", "#"):
                            pass
                        elif prop[0] == "DisplayLocators":
                            scs_globals.display_locators = prop[1]
                        elif prop[0] == "LocatorSize":
                            scs_globals.locator_size = float(prop[1])
                        elif prop[0] == "LocatorEmptySize":
                            scs_globals.locator_empty_size = float(prop[1])
                        elif prop[0] == "DisplayConnections":
                            scs_globals.display_connections = prop[1]
                        elif prop[0] == "CurveSegments":
                            scs_globals.curve_segments = prop[1]
                        elif prop[0] == "OptimizedConnsDrawing":
                            scs_globals.optimized_connections_drawing = prop[1]
                        elif prop[0] == "DisplayTextInfo":
                            scs_globals.display_info = prop[1]
                        else:
                            lprint('W Unrecognised item "%s" has been found in setting file! Skipping...', (str(prop[0]),))
                elif section.type == "GlobalColors":
                    for prop in section.props:
                        if prop[0] in ("", "#"):
                            pass
                        elif prop[0] == "PrefabLocatorsWire":
                            scs_globals.locator_prefab_wire_color = prop[1]
                        elif prop[0] == "ModelLocatorsWire":
                            scs_globals.locator_model_wire_color = prop[1]
                        elif prop[0] == "ColliderLocatorsWire":
                            scs_globals.locator_coll_wire_color = prop[1]
                        elif prop[0] == "ColliderLocatorsFace":
                            scs_globals.locator_coll_face_color = prop[1]
                        elif prop[0] == "NavigationCurveBase":
                            scs_globals.np_connection_base_color = prop[1]
                        elif prop[0] == "MapLineBase":
                            scs_globals.mp_connection_base_color = prop[1]
                        elif prop[0] == "TriggerLineBase":
                            scs_globals.tp_connection_base_color = prop[1]
                        elif prop[0] == "InfoText":
                            scs_globals.info_text_color = prop[1]
                        else:
                            lprint('W Unrecognised item "%s" has been found in setting file! Skipping...', (str(prop[0]),))
            elif section.type == "Header":
                for prop in section.props:
                    if prop[0] == "FormatVersion":
                        if prop[1] == 1:
                            settings_file_valid += 1
                    elif prop[0] == "Type":
                        if prop[1] == "Configuration":
                            settings_file_valid += 1
                    elif prop[0] == "DumpLevel":
                        dump_level = prop[1]
                    elif prop[0] == "ConfigStoragePlace":
                        scs_globals.config_storage_place = prop[1]

                        # if settings are read directly from blend file,
                        # release update lock and don't search/apply any settings further
                        if prop[1] == "BlendFile":
                            settings_file_valid += 1

        scs_globals.dump_level = dump_level

        # now as last apply all of the file paths
        # NOTE: applying paths is crucial for libraries
        # (they are reloaded/initiated in property update functions)
        scs_globals.scs_project_path = scs_project_path
        scs_globals.shader_presets_filepath = shader_presets_filepath
        scs_globals.trigger_actions_rel_path = trigger_actions_rel_path
        scs_globals.sign_library_rel_path = sign_library_rel_path
        scs_globals.tsem_library_rel_path = tsem_library_rel_path
        scs_globals.traffic_rules_library_rel_path = traffic_rules_library_rel_path
        scs_globals.hookup_library_rel_path = hookup_library_rel_path
        scs_globals.matsubs_library_rel_path = matsubs_library_rel_path
        scs_globals.conv_hlpr_converters_path = conv_hlpr_converters_path

    # release lock, so config file can be updated again
    scs_globals.config_update_lock = False
    return True
def update_shader_presets_path(scs_shader_presets_inventory, shader_presets_filepath):
    """The function deletes and populates again a list of Shader Preset items in inventory.
    It also updates corresponding record in config file.

    For every shader entry it also expands the flavor combinations declared in
    the "Flavors" property and caches one modified section per unique flavor
    combination in the shader presets cache.

    :param scs_shader_presets_inventory: inventory collection to repopulate
        (bpy collection property — TODO confirm exact type)
    :param shader_presets_filepath: Absolute or relative path to the file with Shader Presets
    :type shader_presets_filepath: str
    """
    # print('shader_presets_filepath: %r' % shader_presets_filepath)
    if shader_presets_filepath.startswith("//"):  # RELATIVE PATH
        shader_presets_abs_path = _path_utils.get_abs_path(shader_presets_filepath)
    else:
        shader_presets_abs_path = shader_presets_filepath

    # CLEAR INVENTORY AND CACHE
    scs_shader_presets_inventory.clear()
    _shader_presets_cache.clear()

    if os.path.isfile(shader_presets_abs_path):

        # ADD DEFAULT PRESET ITEM "<none>" INTO INVENTORY
        new_shader_preset = scs_shader_presets_inventory.add()
        new_shader_preset.name = "<none>"

        presets_container = _pix.get_data_from_file(shader_presets_abs_path, ' ')

        # ADD ALL SHADER PRESET ITEMS FROM FILE INTO INVENTORY
        if presets_container:

            # sort sections to shaders and flavors
            shaders = []
            flavors = {}  # keyed by flavor "Type" prop
            for section in presets_container:
                if section.type == "Shader":
                    shaders.append(section)
                elif section.type == "Flavor":
                    flavors[section.get_prop_value("Type")] = section

            for shader in shaders:
                # unique_names accumulates every flavor-combination suffix created so far
                # (starting with "" for the plain, flavorless shader)
                unique_names = []
                shader_flavors = shader.get_prop_value("Flavors")

                # create new preset item
                new_shader_preset = scs_shader_presets_inventory.add()
                new_shader_preset.name = shader.get_prop_value("PresetName")
                new_shader_preset.effect = shader.get_prop_value("Effect")

                unique_names.append("")
                _shader_presets_cache.add_section(new_shader_preset, "", shader)

                if shader_flavors:

                    for j, flavor_types in enumerate(shader_flavors):

                        # create new flavor item
                        flavor_item = new_shader_preset.flavors.add()

                        new_unique_names = []
                        for i, flavor_type in enumerate(flavor_types.split("|")):

                            if flavor_type not in flavors:
                                lprint("D Flavor used by shader preset, but not defined: %s", (flavor_type,))
                                continue

                            # create new flavor variant item
                            flavor_variant = flavor_item.variants.add()
                            flavor_variant.name = flavors[flavor_type].get_prop_value("Name")
                            flavor_variant.preset_name = new_shader_preset.name

                            # modify and save section as string into cache
                            for unique_name in unique_names:

                                section = _shader_presets_cache.get_section(new_shader_preset, unique_name)

                                for flavor_section in flavors[flavor_type].sections:

                                    flavor_section_tag = flavor_section.get_prop_value("Tag")

                                    # check if current flavor section already exists in section,
                                    # then override props and sections directly otherwise add flavor section
                                    for subsection in section.sections:

                                        subsection_tag = subsection.get_prop_value("Tag")
                                        if subsection_tag and subsection_tag == flavor_section_tag:

                                            subsection.props = flavor_section.props
                                            subsection.sections = flavor_section.sections
                                            break

                                    else:
                                        # for/else: no matching tag found — append as new subsection
                                        section.sections.append(flavor_section)

                                new_unique_names.append(unique_name + "." + flavors[flavor_type].get_prop_value("Name"))
                                # NOTE(review): assert has a needed side effect (sets "Effect" prop)
                                # and would be stripped under python -O — flagged for attention
                                assert section.set_prop_value("Effect", new_shader_preset.effect + new_unique_names[-1])
                                _shader_presets_cache.add_section(new_shader_preset, new_unique_names[-1], section)

                        unique_names.extend(new_unique_names)

    update_item_in_file('Paths.ShaderPresetsFilePath', shader_presets_filepath)
def apply_settings():
    """Applies all the settings to the active scene.

    Reads the config file, validates its header ("FormatVersion" 1 and
    "Type" "Configuration" must both be present before any other section is
    honoured) and copies every recognised property onto SCS globals.

    :return: True always (config read failure is not reported)
    :rtype: bool

    NOTE(review): a second ``apply_settings`` definition appears later in this
    file; if both live in the same module, that later definition shadows this
    one at import time — confirm which one is intended to survive.
    """

    config_container = _pix.get_data_from_file(get_config_filepath(), " ")

    # save file paths in extra variables and apply them on the end
    # to make sure all of the settings are loaded first.
    # This is needed as some libraries reading are driven by other values from config file.
    # For example: "use_infixed"
    scs_project_path = _property_utils.get_default(bpy.types.GlobalSCSProps.scs_project_path)
    shader_presets_filepath = _property_utils.get_default(bpy.types.GlobalSCSProps.shader_presets_filepath)
    trigger_actions_rel_path = _property_utils.get_default(bpy.types.GlobalSCSProps.trigger_actions_rel_path)
    sign_library_rel_path = _property_utils.get_default(bpy.types.GlobalSCSProps.sign_library_rel_path)
    tsem_library_rel_path = _property_utils.get_default(bpy.types.GlobalSCSProps.tsem_library_rel_path)
    traffic_rules_library_rel_path = _property_utils.get_default(bpy.types.GlobalSCSProps.traffic_rules_library_rel_path)
    hookup_library_rel_path = _property_utils.get_default(bpy.types.GlobalSCSProps.hookup_library_rel_path)
    matsubs_library_rel_path = _property_utils.get_default(bpy.types.GlobalSCSProps.matsubs_library_rel_path)
    conv_hlpr_converters_path = _property_utils.get_default(bpy.types.GlobalSCSProps.conv_hlpr_converters_path)

    # lock prevents re-entrant config writes while properties are being assigned
    _get_scs_globals().config_update_lock = True

    # header must contribute 2 validity points (FormatVersion + Type) before
    # any settings section is processed
    settings_file_valid = 0
    for section in config_container:
        if settings_file_valid == 2:
            if section.type == "Paths":
                for prop in section.props:
                    if prop[0] in ("", "#"):  # empty lines and comments in config
                        pass
                    elif prop[0] == "ProjectPath":
                        scs_project_path = prop[1]
                    elif prop[0] == "ShaderPresetsFilePath":
                        shader_presets_filepath = prop[1]
                    elif prop[0] == "TriggerActionsRelFilePath":
                        trigger_actions_rel_path = prop[1]
                    elif prop[0] == "TriggerActionsUseInfixed":
                        _get_scs_globals().trigger_actions_use_infixed = prop[1]
                    elif prop[0] == "SignRelFilePath":
                        sign_library_rel_path = prop[1]
                    elif prop[0] == "SignUseInfixed":
                        _get_scs_globals().sign_library_use_infixed = prop[1]
                    elif prop[0] == "TSemProfileRelFilePath":
                        tsem_library_rel_path = prop[1]
                    elif prop[0] == "TSemProfileUseInfixed":
                        _get_scs_globals().tsem_library_use_infixed = prop[1]
                    elif prop[0] == "TrafficRulesRelFilePath":
                        traffic_rules_library_rel_path = prop[1]
                    elif prop[0] == "TrafficRulesUseInfixed":
                        _get_scs_globals().traffic_rules_library_use_infixed = prop[1]
                    elif prop[0] == "HookupRelDirPath":
                        hookup_library_rel_path = prop[1]
                    elif prop[0] == "MatSubsRelFilePath":
                        matsubs_library_rel_path = prop[1]
                    elif prop[0] == "ConvertersPath":
                        conv_hlpr_converters_path = prop[1]
                    else:
                        lprint('W Unrecognised item "%s" has been found in setting file! Skipping...', (str(prop[0]),))
            elif section.type == "Import":
                for prop in section.props:
                    if prop[0] in ("", "#"):
                        pass
                    elif prop[0] == "ImportScale":
                        _get_scs_globals().import_scale = float(prop[1])
                    elif prop[0] == "ImportPimFile":
                        _get_scs_globals().import_pim_file = prop[1]
                    elif prop[0] == "UseWelding":
                        _get_scs_globals().use_welding = prop[1]
                    elif prop[0] == "WeldingPrecision":
                        _get_scs_globals().welding_precision = prop[1]
                    elif prop[0] == "UseNormals":
                        _get_scs_globals().use_normals = prop[1]
                    elif prop[0] == "ImportPitFile":
                        _get_scs_globals().import_pit_file = prop[1]
                    elif prop[0] == "LoadTextures":
                        _get_scs_globals().load_textures = prop[1]
                    elif prop[0] == "ImportPicFile":
                        _get_scs_globals().import_pic_file = prop[1]
                    elif prop[0] == "ImportPipFile":
                        _get_scs_globals().import_pip_file = prop[1]
                    elif prop[0] == "ImportPisFile":
                        _get_scs_globals().import_pis_file = prop[1]
                    elif prop[0] == "ConnectedBones":
                        _get_scs_globals().connected_bones = prop[1]
                    elif prop[0] == "BoneImportScale":
                        _get_scs_globals().bone_import_scale = float(prop[1])
                    elif prop[0] == "ImportPiaFile":
                        _get_scs_globals().import_pia_file = prop[1]
                    elif prop[0] == "IncludeSubdirsForPia":
                        _get_scs_globals().include_subdirs_for_pia = prop[1]
            elif section.type == "Export":
                for prop in section.props:
                    if prop[0] in ("", "#"):
                        pass
                    elif prop[0] == "ExportScale":
                        _get_scs_globals().export_scale = float(prop[1])
                    elif prop[0] == "ApplyModifiers":
                        _get_scs_globals().apply_modifiers = prop[1]
                    elif prop[0] == "ExcludeEdgesplit":
                        _get_scs_globals().exclude_edgesplit = prop[1]
                    elif prop[0] == "IncludeEdgesplit":
                        _get_scs_globals().include_edgesplit = prop[1]
                    elif prop[0] == "ActiveUVOnly":
                        _get_scs_globals().active_uv_only = prop[1]
                    elif prop[0] == "ExportVertexGroups":
                        _get_scs_globals().export_vertex_groups = prop[1]
                    elif prop[0] == "ExportVertexColor":
                        _get_scs_globals().export_vertex_color = prop[1]
                    elif prop[0] == "ExportVertexColorType":
                        _get_scs_globals().export_vertex_color_type = str(prop[1])
                    elif prop[0] == "ExportVertexColorType7":
                        _get_scs_globals().export_vertex_color_type_7 = str(prop[1])
                    # elif prop[0] == "ExportAnimFile":
                    #     _get_scs_globals().export_anim_file = prop[1]
                    elif prop[0] == "ExportPimFile":
                        _get_scs_globals().export_pim_file = prop[1]
                    elif prop[0] == "OutputType":
                        _get_scs_globals().output_type = prop[1]
                    elif prop[0] == "ExportPitFile":
                        _get_scs_globals().export_pit_file = prop[1]
                    elif prop[0] == "ExportPicFile":
                        _get_scs_globals().export_pic_file = prop[1]
                    elif prop[0] == "ExportPipFile":
                        _get_scs_globals().export_pip_file = prop[1]
                    elif prop[0] == "SignExport":
                        _get_scs_globals().sign_export = prop[1]
            elif section.type == "GlobalDisplay":
                for prop in section.props:
                    if prop[0] in ("", "#"):
                        pass
                    elif prop[0] == "DisplayLocators":
                        _get_scs_globals().display_locators = prop[1]
                    elif prop[0] == "LocatorSize":
                        _get_scs_globals().locator_size = float(prop[1])
                    elif prop[0] == "LocatorEmptySize":
                        _get_scs_globals().locator_empty_size = float(prop[1])
                    elif prop[0] == "DisplayConnections":
                        _get_scs_globals().display_connections = prop[1]
                    elif prop[0] == "CurveSegments":
                        _get_scs_globals().curve_segments = prop[1]
                    elif prop[0] == "OptimizedConnsDrawing":
                        _get_scs_globals().optimized_connections_drawing = prop[1]
                    elif prop[0] == "DisplayTextInfo":
                        _get_scs_globals().display_info = prop[1]
                    else:
                        lprint('W Unrecognised item "%s" has been found in setting file! Skipping...', (str(prop[0]),))
            elif section.type == "GlobalColors":
                for prop in section.props:
                    if prop[0] in ("", "#"):
                        pass
                    elif prop[0] == "PrefabLocatorsWire":
                        _get_scs_globals().locator_prefab_wire_color = prop[1]
                    elif prop[0] == "ModelLocatorsWire":
                        _get_scs_globals().locator_model_wire_color = prop[1]
                    elif prop[0] == "ColliderLocatorsWire":
                        _get_scs_globals().locator_coll_wire_color = prop[1]
                    elif prop[0] == "ColliderLocatorsFace":
                        _get_scs_globals().locator_coll_face_color = prop[1]
                    elif prop[0] == "NavigationCurveBase":
                        _get_scs_globals().np_connection_base_color = prop[1]
                    elif prop[0] == "MapLineBase":
                        _get_scs_globals().mp_connection_base_color = prop[1]
                    elif prop[0] == "TriggerLineBase":
                        _get_scs_globals().tp_connection_base_color = prop[1]
                    elif prop[0] == "InfoText":
                        _get_scs_globals().info_text_color = prop[1]
                    else:
                        lprint('W Unrecognised item "%s" has been found in setting file! Skipping...', (str(prop[0]),))
            elif section.type == "Various":
                for prop in section.props:
                    # if prop[0] == "#":
                    if prop[0] in ("", "#"):
                        pass
                    elif prop[0] == "DumpLevel":
                        _get_scs_globals().dump_level = prop[1]
        elif section.type == "Header":
            # header is the only section processed before validity is established;
            # it supplies the two validity points checked above
            for prop in section.props:
                if prop[0] == "FormatVersion":
                    if prop[1] == 1:
                        settings_file_valid += 1
                elif prop[0] == "Type":
                    if prop[1] == "Configuration":
                        settings_file_valid += 1
                elif prop[0] == "DumpLevel":
                    _get_scs_globals().dump_level = prop[1]

    # now as last apply all of the file paths
    _get_scs_globals().scs_project_path = scs_project_path
    _get_scs_globals().shader_presets_filepath = shader_presets_filepath
    _get_scs_globals().trigger_actions_rel_path = trigger_actions_rel_path
    _get_scs_globals().sign_library_rel_path = sign_library_rel_path
    _get_scs_globals().tsem_library_rel_path = tsem_library_rel_path
    _get_scs_globals().traffic_rules_library_rel_path = traffic_rules_library_rel_path
    _get_scs_globals().hookup_library_rel_path = hookup_library_rel_path
    _get_scs_globals().matsubs_library_rel_path = matsubs_library_rel_path
    _get_scs_globals().conv_hlpr_converters_path = conv_hlpr_converters_path

    _get_scs_globals().config_update_lock = False
    return True
def apply_settings():
    """Applies all the settings to the active scene.

    Reads the config file (when present) and copies every recognised property
    onto SCS globals; the "Header" section must contribute 2 validity points
    (FormatVersion 1 + Type "Configuration") before other sections are honoured.

    :return: False when another apply is already running (lock held), True otherwise
    :rtype: bool
    """

    scs_globals = _get_scs_globals()

    # avoid recursion if another apply settings is running already
    if scs_globals.config_update_lock:
        return False

    # NOTE: save file paths in extra variables and apply them on the end
    # to make sure all of the settings are loaded first.
    # This is needed as some libraries reading are driven by other values from config file.
    # For example: "use_infixed"
    scs_project_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.scs_project_path, scs_globals)
    shader_presets_filepath = _property_utils.get_by_type(bpy.types.GlobalSCSProps.shader_presets_filepath, scs_globals)
    trigger_actions_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.trigger_actions_rel_path, scs_globals)
    sign_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.sign_library_rel_path, scs_globals)
    tsem_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.tsem_library_rel_path, scs_globals)
    traffic_rules_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.traffic_rules_library_rel_path, scs_globals)
    hookup_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.hookup_library_rel_path, scs_globals)
    matsubs_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.matsubs_library_rel_path, scs_globals)
    conv_hlpr_converters_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.conv_hlpr_converters_path, scs_globals)

    # NOTE: as dump level is written in same section as config type
    # applying it directly might take place before we get information about config type
    # so it has to be saved into variable and applied only if global settings are loaded from config file
    dump_level = scs_globals.dump_level

    scs_globals.config_update_lock = True

    config_container = _pix.get_data_from_file(get_config_filepath(), " ")

    # avoid applying process of config if not present (most probably permission problems on config creation)
    if config_container is not None:

        settings_file_valid = 0
        for section in config_container:
            if settings_file_valid == 2:
                if section.type == "Paths":
                    for prop in section.props:
                        if prop[0] in ("", "#"):  # empty lines and comments in config
                            pass
                        elif prop[0] == "ProjectPath":
                            scs_project_path = prop[1]
                        elif prop[0] == "ShaderPresetsFilePath":
                            shader_presets_filepath = prop[1]
                        elif prop[0] == "TriggerActionsRelFilePath":
                            trigger_actions_rel_path = prop[1]
                        elif prop[0] == "TriggerActionsUseInfixed":
                            scs_globals.trigger_actions_use_infixed = prop[1]
                        elif prop[0] == "SignRelFilePath":
                            sign_library_rel_path = prop[1]
                        elif prop[0] == "SignUseInfixed":
                            scs_globals.sign_library_use_infixed = prop[1]
                        elif prop[0] == "TSemProfileRelFilePath":
                            tsem_library_rel_path = prop[1]
                        elif prop[0] == "TSemProfileUseInfixed":
                            scs_globals.tsem_library_use_infixed = prop[1]
                        elif prop[0] == "TrafficRulesRelFilePath":
                            traffic_rules_library_rel_path = prop[1]
                        elif prop[0] == "TrafficRulesUseInfixed":
                            scs_globals.traffic_rules_library_use_infixed = prop[1]
                        elif prop[0] == "HookupRelDirPath":
                            hookup_library_rel_path = prop[1]
                        elif prop[0] == "MatSubsRelFilePath":
                            matsubs_library_rel_path = prop[1]
                        elif prop[0] == "ConvertersPath":
                            conv_hlpr_converters_path = prop[1]
                        else:
                            lprint('W Unrecognised item "%s" has been found in setting file! Skipping...', (str(prop[0]),))
                elif section.type == "Import":
                    for prop in section.props:
                        if prop[0] in ("", "#"):
                            pass
                        elif prop[0] == "ImportScale":
                            scs_globals.import_scale = float(prop[1])
                        elif prop[0] == "ImportPimFile":
                            scs_globals.import_pim_file = prop[1]
                        elif prop[0] == "UseWelding":
                            scs_globals.use_welding = prop[1]
                        elif prop[0] == "WeldingPrecision":
                            scs_globals.welding_precision = prop[1]
                        elif prop[0] == "UseNormals":
                            scs_globals.use_normals = prop[1]
                        elif prop[0] == "ImportPitFile":
                            scs_globals.import_pit_file = prop[1]
                        elif prop[0] == "LoadTextures":
                            scs_globals.load_textures = prop[1]
                        elif prop[0] == "ImportPicFile":
                            scs_globals.import_pic_file = prop[1]
                        elif prop[0] == "ImportPipFile":
                            scs_globals.import_pip_file = prop[1]
                        elif prop[0] == "ImportPisFile":
                            scs_globals.import_pis_file = prop[1]
                        elif prop[0] == "ConnectedBones":
                            scs_globals.connected_bones = prop[1]
                        elif prop[0] == "BoneImportScale":
                            scs_globals.bone_import_scale = float(prop[1])
                        elif prop[0] == "ImportPiaFile":
                            scs_globals.import_pia_file = prop[1]
                        elif prop[0] == "IncludeSubdirsForPia":
                            scs_globals.include_subdirs_for_pia = prop[1]
                elif section.type == "Export":
                    for prop in section.props:
                        if prop[0] in ("", "#"):
                            pass
                        elif prop[0] == "ExportScale":
                            scs_globals.export_scale = float(prop[1])
                        elif prop[0] == "ApplyModifiers":
                            scs_globals.apply_modifiers = prop[1]
                        elif prop[0] == "ExcludeEdgesplit":
                            scs_globals.exclude_edgesplit = prop[1]
                        elif prop[0] == "IncludeEdgesplit":
                            scs_globals.include_edgesplit = prop[1]
                        elif prop[0] == "ActiveUVOnly":
                            scs_globals.active_uv_only = prop[1]
                        elif prop[0] == "ExportVertexGroups":
                            scs_globals.export_vertex_groups = prop[1]
                        elif prop[0] == "ExportVertexColor":
                            scs_globals.export_vertex_color = prop[1]
                        elif prop[0] == "ExportVertexColorType":
                            scs_globals.export_vertex_color_type = str(prop[1])
                        elif prop[0] == "ExportVertexColorType7":
                            scs_globals.export_vertex_color_type_7 = str(prop[1])
                        elif prop[0] == "ExportPimFile":
                            scs_globals.export_pim_file = prop[1]
                        elif prop[0] == "OutputType":
                            scs_globals.output_type = prop[1]
                        elif prop[0] == "ExportPitFile":
                            scs_globals.export_pit_file = prop[1]
                        elif prop[0] == "ExportPicFile":
                            scs_globals.export_pic_file = prop[1]
                        elif prop[0] == "ExportPipFile":
                            scs_globals.export_pip_file = prop[1]
                        elif prop[0] == "SignExport":
                            scs_globals.sign_export = prop[1]
                elif section.type == "GlobalDisplay":
                    for prop in section.props:
                        if prop[0] in ("", "#"):
                            pass
                        elif prop[0] == "DisplayLocators":
                            scs_globals.display_locators = prop[1]
                        elif prop[0] == "LocatorSize":
                            scs_globals.locator_size = float(prop[1])
                        elif prop[0] == "LocatorEmptySize":
                            scs_globals.locator_empty_size = float(prop[1])
                        elif prop[0] == "DisplayConnections":
                            scs_globals.display_connections = prop[1]
                        elif prop[0] == "CurveSegments":
                            scs_globals.curve_segments = prop[1]
                        elif prop[0] == "OptimizedConnsDrawing":
                            scs_globals.optimized_connections_drawing = prop[1]
                        elif prop[0] == "DisplayTextInfo":
                            scs_globals.display_info = prop[1]
                        else:
                            lprint('W Unrecognised item "%s" has been found in setting file! Skipping...', (str(prop[0]),))
                elif section.type == "GlobalColors":
                    for prop in section.props:
                        if prop[0] in ("", "#"):
                            pass
                        elif prop[0] == "PrefabLocatorsWire":
                            scs_globals.locator_prefab_wire_color = prop[1]
                        elif prop[0] == "ModelLocatorsWire":
                            scs_globals.locator_model_wire_color = prop[1]
                        elif prop[0] == "ColliderLocatorsWire":
                            scs_globals.locator_coll_wire_color = prop[1]
                        elif prop[0] == "ColliderLocatorsFace":
                            scs_globals.locator_coll_face_color = prop[1]
                        elif prop[0] == "NavigationCurveBase":
                            scs_globals.np_connection_base_color = prop[1]
                        elif prop[0] == "MapLineBase":
                            scs_globals.mp_connection_base_color = prop[1]
                        elif prop[0] == "TriggerLineBase":
                            scs_globals.tp_connection_base_color = prop[1]
                        elif prop[0] == "InfoText":
                            scs_globals.info_text_color = prop[1]
                        else:
                            lprint('W Unrecognised item "%s" has been found in setting file! Skipping...', (str(prop[0]),))
            elif section.type == "Header":
                # header supplies the two validity points checked above
                for prop in section.props:
                    if prop[0] == "FormatVersion":
                        if prop[1] == 1:
                            settings_file_valid += 1
                    elif prop[0] == "Type":
                        if prop[1] == "Configuration":
                            settings_file_valid += 1
                    elif prop[0] == "DumpLevel":
                        dump_level = prop[1]
                    elif prop[0] == "ConfigStoragePlace":
                        scs_globals.config_storage_place = prop[1]

                        # if settings are read directly from blend file,
                        # release update lock and don't search/apply any settings further
                        if prop[1] == "BlendFile":
                            settings_file_valid += 1

        scs_globals.dump_level = dump_level

    # now as last apply all of the file paths
    # NOTE: applying paths is crucial for libraries
    # (they are reloaded/initiated in property update functions)
    scs_globals.scs_project_path = scs_project_path
    scs_globals.shader_presets_filepath = shader_presets_filepath
    scs_globals.trigger_actions_rel_path = trigger_actions_rel_path
    scs_globals.sign_library_rel_path = sign_library_rel_path
    scs_globals.tsem_library_rel_path = tsem_library_rel_path
    scs_globals.traffic_rules_library_rel_path = traffic_rules_library_rel_path
    scs_globals.hookup_library_rel_path = hookup_library_rel_path
    scs_globals.matsubs_library_rel_path = matsubs_library_rel_path
    scs_globals.conv_hlpr_converters_path = conv_hlpr_converters_path

    scs_globals.config_update_lock = False
    return True
def export(root_object, used_parts, used_materials, scene, filepath):
    """Exports the PIT (trait/look/variant) file next to given filepath.

    Iterates all looks of the root object, gathers material data per look
    (preset shaders, imported shaders or defaults), builds part/variant lists
    and writes "<filepath>.pit".

    :param root_object: SCS root object the export runs on (presumably bpy.types.Object — confirm)
    :param used_parts: dict of used part names (only .keys() is read here)
    :param used_materials: iterable of materials or material-name strings used by the model
    :param scene: NOTE(review): unused in this function body
    :param filepath: base output path; ".pit" is appended
    :return: result of the PIX container write (truthiness indicates success)
    """
    scs_globals = _get_scs_globals()
    output_type = scs_globals.output_type

    file_name = root_object.name

    print("\n************************************")
    print("** SCS PIT Exporter **")
    print("** (c)2014 SCS Software **")
    print("************************************\n")

    # DATA GATHERING
    look_list = []
    variant_list = []

    saved_active_look = root_object.scs_props.active_scs_look
    looks_inventory = root_object.scs_object_look_inventory
    looks_count = len(looks_inventory)
    if looks_count <= 0:
        looks_count = 1  # even with no looks one "default" look is exported

    for i in range(0, looks_count):

        # apply each look from inventory first
        if len(looks_inventory) > 0:
            root_object.scs_props.active_scs_look = i

            # actually write values to material because Blender might not refresh data yet
            _looks.apply_active_look(root_object)

            curr_look_name = looks_inventory[i].name
        else:  # if no looks create default
            curr_look_name = "default"

        material_dict = {}
        material_list = []
        # get materials data
        for material in used_materials:
            if material is not None:
                # if material in ("_empty_slot_", "_empty_material_"):
                # NOTE: only workaround until module doesn't gets rewritten
                if material in bpy.data.materials:
                    material = bpy.data.materials[material]

                if isinstance(material, str):
                    material_name = str(material + "-_default_settings_")  # DEFAULT MATERIAL
                    material_export_data = _default_material(material_name)
                    material_list.append(material_name)
                else:
                    # print('material name: %r' % material.name)
                    material_name = material.name
                    material_list.append(material)

                    # SUBSTANCE
                    if material.scs_props.substance != 'None':
                        lprint('D material.name: %r\tmat.scs_props.substance: "%s"', (material.name, str(material.scs_props.substance)))
                        # TODO: Substance Export...

                    # MATERIAL EFFECT
                    # shader_data = material.get("scs_shader_attributes", {})
                    # effect_name = shader_data.get('effect', "NO EFFECT")
                    effect_name = material.scs_props.mat_effect_name

                    # NOTE: an obsolete commented-out block that dumped CgFX shader
                    # export data (attributes/textures) to console used to live here;
                    # CgFX handling was marked OBSOLETE and is not executed.

                    # PRESET SHADERS
                    preset_found = False
                    alias = "NO SHADER"
                    def_cnt = attribute_cnt = texture_cnt = 0
                    def_sections = []
                    attribute_sections = []
                    texture_sections = []
                    active_shader_preset_name = material.scs_props.active_shader_preset_name
                    # print(' active_shader_preset_name: %r' % active_shader_preset_name)
                    for preset_i, preset in enumerate(bpy.data.worlds[0].scs_shader_presets_inventory):
                        # print(' preset[%i]: %r' % (preset_i, preset.name))
                        if preset.name == active_shader_preset_name:
                            # print(' - material %r - %r' % (material.name, preset.name))

                            # LOAD PRESET
                            shader_presets_abs_path = _path_utils.get_abs_path(scs_globals.shader_presets_filepath)
                            # shader_presets_filepath = _get_scs_globals().shader_presets_filepath
                            # print('shader_presets_filepath: %r' % shader_presets_filepath)
                            # if shader_presets_filepath.startswith(str(os.sep + os.sep)):  ## RELATIVE PATH
                            #     shader_presets_abs_path = get_abs_path(shader_presets_filepath)
                            # else:
                            #     shader_presets_abs_path = shader_presets_filepath

                            if os.path.isfile(shader_presets_abs_path):
                                presets_container = _pix_container.get_data_from_file(shader_presets_abs_path, ' ')

                                # FIND THE PRESET IN FILE
                                if presets_container:
                                    for section in presets_container:
                                        if section.type == "Shader":
                                            section_properties = _get_properties(section)
                                            if 'PresetName' in section_properties:
                                                preset_name = section_properties['PresetName']
                                                if preset_name == active_shader_preset_name:
                                                    alias = material.name
                                                    # print(' + preset name: %r' % preset_name)

                                                    # COLLECT ATTRIBUTES AND TEXTURES
                                                    for item in section.sections:

                                                        # DATA EXCHANGE FORMAT ATRIBUTE
                                                        if item.type == "DataExchangeFormat":
                                                            def_data = _SectionData("DataExchangeFormat")
                                                            for rec in item.props:
                                                                def_data.props.append((rec[0], rec[1]))
                                                            def_sections.append(def_data)
                                                            def_cnt += 1

                                                        # ATTRIBUTES
                                                        if item.type == "Attribute":
                                                            # print(' Attribute:')
                                                            attribute_data = _SectionData("Attribute")
                                                            for rec in item.props:
                                                                # print(' rec: %r' % str(rec))
                                                                if rec[0] == "Format":
                                                                    attribute_data.props.append((rec[0], rec[1]))
                                                                elif rec[0] == "Tag":
                                                                    # tag_prop = rec[1].replace("[", "").replace("]", "")
                                                                    # attribute_data.props.append((rec[0], tag_prop))
                                                                    attribute_data.props.append((rec[0], rec[1]))
                                                                elif rec[0] == "Value":
                                                                    # value is looked up on the material by tag name
                                                                    format_prop = item.get_prop("Format")[1]
                                                                    tag_prop = item.get_prop("Tag")[1]
                                                                    tag_prop = tag_prop.replace("[", "").replace("]", "")
                                                                    # print(' format_prop: %r' % str(format_prop))
                                                                    # print(' tag_prop: %r' % str(tag_prop))
                                                                    if "aux" in tag_prop:
                                                                        # aux attributes are collections of value items
                                                                        aux_props = getattr(material.scs_props, "shader_attribute_" + tag_prop)
                                                                        value = []
                                                                        for aux_prop in aux_props:
                                                                            value.append(aux_prop.value)
                                                                    else:
                                                                        value = getattr(material.scs_props, "shader_attribute_" + tag_prop, "NO TAG")
                                                                    # print(' value: %s' % str(value))
                                                                    if format_prop == 'FLOAT':
                                                                        attribute_data.props.append((rec[0], ["&&", (value,)]))
                                                                    else:
                                                                        attribute_data.props.append((rec[0], ["i", tuple(value)]))
                                                            attribute_sections.append(attribute_data)
                                                            attribute_cnt += 1

                                                        # TEXTURES
                                                        elif item.type == "Texture":
                                                            # print(' Texture:')
                                                            texture_data = _SectionData("Texture")
                                                            for rec in item.props:
                                                                # print(' rec: %r' % str(rec))
                                                                if rec[0] == "Tag":
                                                                    tag_prop = rec[1].split(":")[1]
                                                                    tag = str("texture[" + str(texture_cnt) + "]:" + tag_prop)
                                                                    texture_data.props.append((rec[0], tag))
                                                                elif rec[0] == "Value":
                                                                    tag_prop = item.get_prop("Tag")[1].split(":")[1]
                                                                    # print(' tag_prop: %r' % str(tag_prop))

                                                                    # create and get path to tobj
                                                                    tobj_rel_path = _get_texture_path_from_material(material, tag_prop, os.path.dirname(filepath))

                                                                    texture_data.props.append((rec[0], tobj_rel_path))
                                                            texture_sections.append(texture_data)
                                                            texture_cnt += 1

                                                    preset_found = True
                                                    break
                            else:
                                lprint('\nW The file path "%s" is not valid!', (shader_presets_abs_path,))
                        if preset_found:
                            break

                    if preset_found:

                        material_export_data = _SectionData("Material")
                        material_export_data.props.append(("Alias", alias))
                        material_export_data.props.append(("Effect", effect_name))
                        material_export_data.props.append(("Flags", 0))
                        if output_type.startswith('def'):
                            material_export_data.props.append(("DataExchangeFormatCount", def_cnt))
                        material_export_data.props.append(("AttributeCount", attribute_cnt))
                        material_export_data.props.append(("TextureCount", texture_cnt))
                        if output_type.startswith('def'):
                            for def_section in def_sections:
                                material_export_data.sections.append(def_section)
                        for attribute in attribute_sections:
                            material_export_data.sections.append(attribute)
                        for texture in texture_sections:
                            material_export_data.sections.append(texture)

                    elif active_shader_preset_name == "<imported>":

                        # shader data was stored on the material at import time
                        material_attributes = material['scs_shader_attributes']['attributes'].to_dict().values()
                        material_textures = material['scs_shader_attributes']['textures'].to_dict().values()

                        material_export_data = _SectionData("Material")
                        material_export_data.props.append(("Alias", material.name))
                        material_export_data.props.append(("Effect", effect_name))
                        material_export_data.props.append(("Flags", 0))
                        material_export_data.props.append(("AttributeCount", len(material_attributes)))
                        material_export_data.props.append(("TextureCount", len(material_textures)))

                        for attribute_dict in material_attributes:
                            attribute_section = _SectionData("Attribute")

                            format_value = ""
                            for attr_prop in sorted(attribute_dict.keys()):

                                # get the format of current attribute (we assume that "Format" attribute is before "Value" attribute in this for loop)
                                if attr_prop == "Format":
                                    format_value = attribute_dict[attr_prop]

                                if attr_prop == "Value" and ("FLOAT" in format_value or "STRING" in format_value):
                                    attribute_section.props.append((attr_prop, ["i", tuple(attribute_dict[attr_prop])]))
                                elif attr_prop == "Tag" and "aux" in attribute_dict[attr_prop]:
                                    attribute_section.props.append((attr_prop, "aux[" + attribute_dict[attr_prop][3:] + "]"))
                                else:
                                    attribute_section.props.append((attr_prop, attribute_dict[attr_prop]))

                            material_export_data.sections.append(attribute_section)

                        for texture_dict in material_textures:
                            texture_section = _SectionData("Texture")

                            tag_id_string = ""
                            for tex_prop in sorted(texture_dict.keys()):

                                if tex_prop == "Tag":
                                    tag_id_string = texture_dict[tex_prop].split(':')[1]

                                if tex_prop == "Value" and tag_id_string != "":
                                    tobj_rel_path = _get_texture_path_from_material(material, tag_id_string, os.path.dirname(filepath))
                                    texture_section.props.append((tex_prop, tobj_rel_path))
                                else:
                                    texture_section.props.append((tex_prop, texture_dict[tex_prop]))

                            material_export_data.sections.append(texture_section)

                    else:  # DEFAULT MATERIAL
                        material_name = str("_" + material_name + "_-_default_settings_")
                        material_export_data = _default_material(material_name)

                material_dict[material_name] = material_export_data

        # create materials sections for looks
        material_sections = _fill_material_sections(material_list, material_dict)
        look_data = {
            "name": curr_look_name,
            "material_sections": material_sections
        }
        look_list.append(look_data)

    # restore look applied before export
    root_object.scs_props.active_scs_look = saved_active_look

    # PARTS AND VARIANTS...
    part_list_cnt = len(used_parts.keys())
    if len(root_object.scs_object_variant_inventory) == 0:
        # If there is no Variant, add the Default one...
        part_list = _fill_part_list(root_object.scs_object_part_inventory, used_parts, all_parts=True)
        variant_list.append((_VARIANT_consts.default_name, part_list), )
    else:
        for variant in root_object.scs_object_variant_inventory:
            part_list = _fill_part_list(variant.parts, used_parts)
            variant_list.append((variant.name, part_list), )

    # DATA CREATION
    header_section = _fill_header_section(file_name, scs_globals.sign_export)
    look_section = _fill_look_sections(look_list)
    # part_sections = fill_part_section(part_list)
    variant_section = _fill_variant_sections(variant_list)
    comment_header_section = _fill_comment_header_section(look_list, variant_list)
    global_section = _fill_global_section(len(look_list), len(variant_list), part_list_cnt, len(used_materials))

    # DATA ASSEMBLING
    pit_container = [comment_header_section, header_section, global_section]
    for section in look_section:
        pit_container.append(section)
    for section in variant_section:
        pit_container.append(section)

    # FILE EXPORT
    ind = " "
    pit_filepath = str(filepath + ".pit")
    result = _pix_container.write_data_to_file(pit_container, pit_filepath, ind)

    # print("************************************")
    return result
def load(filepath):
    """Imports a PIC (collision) file: creates collision locator empties
    (box/sphere/capsule/cylinder/convex) in the current Blender scene.

    :param filepath: absolute path to the PIC file
    :type filepath: str
    :return: Blender operator result set and the list of created locator objects
    :rtype: tuple[set, list]
    """
    scs_globals = _get_scs_globals()

    print("\n************************************")
    print("** SCS PIC Importer **")
    print("** (c)2014 SCS Software **")
    print("************************************\n")

    ind = ' '
    pic_container = _pix_container.get_data_from_file(filepath, ind)

    # NOTE: commented-out debug printouts and a round-trip test export of the
    # container used to live here; they were never executed.

    # LOAD HEADER
    '''
    NOTE: skipped for now as no data needs to be readed
    format_version, source, f_type, f_name, source_filename, author = _get_header(pic_container)
    '''

    # LOAD GLOBALS
    '''
    NOTE: skipped for now as no data needs to be readed
    vertex_count, triangle_count, material_count, piece_count, part_count, locator_count = _get_global(pic_container)
    '''

    # LOAD MATERIALS
    if 0:  # NOTE: NO MATERIALS USED FOR COLLIDERS AT A MOMENT!
        loaded_materials = []
        for section in pic_container:
            if section.type == 'Material':
                material_alias, material_effect = _get_material(section)
                lprint('I Adding a Material Alias: "%s"', material_alias)
                loaded_materials.append(material_alias)

                # PRINT "MATERIAL SETTINGS" TO CONSOLE...
                if 0:
                    import pprint
                    pp = pprint.PrettyPrinter(indent=1)
                    print("=== MATERIAL SETTINGS ==========================")
                    pp.pprint(material_effect)
                    print("==================================================")

    # LOAD PARTS
    parts = []
    for section in pic_container:
        if section.type == "Part":
            (name, pieces, locators) = _get_part(section)
            parts.append({"name": name, "pieces": pieces, "locators": locators})

    # LOAD (CONVEX) PIECES
    pieces = []
    for section in pic_container:
        if section.type == 'Piece':
            pieces.append(_get_piece(section))

    # LOAD AND CREATE LOCATORS
    import_scale = scs_globals.import_scale
    locators = []
    for section in pic_container:
        if section.type == 'Locator':
            (locator_name,
             locator_index,
             locator_position,
             locator_rotation,
             locator_alias,
             locator_weight,
             locator_type,
             locator_parameters,
             locator_convex_piece) = _get_locator(section)
            lprint('I Adding a Locator: "%s"', locator_name)
            locator = _object_utils.create_locator_empty(locator_name, locator_position, locator_rotation, (1, 1, 1), 1.0, 'Collision')
            locator.scs_props.scs_part = _get_locator_part(parts, locator_index)
            locator.scs_props.locator_collider_centered = True
            locator.scs_props.locator_collider_mass = locator_weight
            locator.scs_props.locator_collider_type = locator_type
            if locator_type == 'Box':
                # NOTE(review): parameters 1 and 2 are swapped between y and z —
                # presumably the SCS/Blender up-axis conversion; confirm.
                locator.scs_props.locator_collider_box_x = locator_parameters[0] * import_scale
                locator.scs_props.locator_collider_box_y = locator_parameters[2] * import_scale
                locator.scs_props.locator_collider_box_z = locator_parameters[1] * import_scale
            elif locator_type in ('Sphere', 'Capsule', 'Cylinder'):
                # parameter 0 is a radius; UI property stores diameter
                locator.scs_props.locator_collider_dia = locator_parameters[0] * 2 * import_scale
                locator.scs_props.locator_collider_len = locator_parameters[1] * import_scale
            elif locator_type == 'Convex':
                piece_index, piece_material, verts, faces = pieces[locator_convex_piece]
                if verts and faces:

                    # BOUNDING BOX DATA CREATION AND SPACE CONVERSION
                    min_val = [None, None, None]
                    max_val = [None, None, None]
                    scs_verts = []
                    for vert in verts:
                        scs_vert = _convert_utils.change_to_scs_xyz_coordinates(vert, import_scale)
                        scs_verts.append(scs_vert)
                        min_val, max_val = _math_utils.evaluate_minmax(scs_vert, min_val, max_val)
                    bbox, bbcenter = _math_utils.get_bb(min_val, max_val)

                    # FACE FLIPPING
                    flipped_faces = _mesh_utils.flip_faceverts(faces)

                    # COLLIDER CREATION
                    geom_data = (scs_verts, flipped_faces, bbox, bbcenter)
                    _object_utils.add_collider_convex_locator(geom_data, {}, locator)

            locators.append(locator)

    # DATA BUILDING

    # WARNING PRINTOUTS
    # if piece_count < 0: Print(dump_level, '\nW More Pieces found than were declared!')
    # if piece_count > 0: Print(dump_level, '\nW Some Pieces not found, but were declared!')
    # if dump_level > 1: print('')

    print("************************************")
    return {'FINISHED'}, locators
def load(filepath, armature, get_only=False):
    """Loads an SCS skeleton (PIS) file and builds its bones on the given armature.

    :param filepath: complete filepath to PIS file
    :type filepath: str
    :param armature: armature object on which bones should be created;
        may be None only when get_only is True
    :type armature: bpy.types.Object | None
    :param get_only: when True only parsed bones data is returned and no armature is touched
        (used when importing PIA from panel)
    :type get_only: bool
    :return: bones dictionary (name -> [parent_name, matrix] with a children-names list
        appended at index 2 when connected bones import is enabled);
        or ({'CANCELLED'}, None) when no armature is provided
    :rtype: dict | tuple
    """
    scs_globals = _get_scs_globals()

    import_scale = scs_globals.import_scale
    bone_import_scale = scs_globals.import_bone_scale
    connected_bones = scs_globals.import_connected_bones

    print("\n************************************")
    print("** SCS PIS Importer **")
    print("** (c)2014 SCS Software **")
    print("************************************\n")

    ind = ' '
    pis_container = _pix_container.get_data_from_file(filepath, ind)

    # NOTE: header and globals sections of the container are skipped for now,
    # as no data needs to be read from them.

    # LOAD BONES
    bones = _get_bones(pis_container)

    if get_only:
        # only return bones (used when importing PIA from panel)
        return bones

    # PROVIDE AN ARMATURE
    if not armature:
        lprint('\nE No Armature for file "%s"!', (os.path.basename(filepath),))
        return {'CANCELLED'}, None

    # bones can only be created/edited while the armature is in edit mode
    bpy.context.view_layer.objects.active = armature
    bpy.ops.object.mode_set(mode='EDIT')

    # CONNECTED BONES - append list of children names to each bone entry (stored at index 2)
    if connected_bones:
        for bone in bones:
            children = [item for item in bones if bone == bones[item][0]]
            bones[bone].append(children)

    for bone in armature.data.bones:
        edit_bone = armature.data.edit_bones[bone.name]

        # SET PARENT
        if bones[bone.name][0] != "":  # if bone has parent...
            edit_bone.parent = armature.data.edit_bones[bones[bone.name][0]]

        # COMPUTE BONE TRANSFORMATION
        matrix = bones[bone.name][1]
        bone_matrix = _convert_utils.scs_to_blend_matrix() @ matrix.transposed()
        axis, angle = _convert_utils.mat3_to_vec_roll(bone_matrix)

        # SET BONE TRANSFORMATION
        edit_bone.head = bone_matrix.to_translation().to_3d() * import_scale
        edit_bone.tail = (edit_bone.head +
                          Vector(axis).normalized() * bone_import_scale * import_scale)
        edit_bone.roll = angle

        # save initial bone scaling to use it in calculation when importing PIA animations
        # NOTE: bones after import always have scale of 1:
        # 1. because edit bones don't have scale, just tail and head
        # 2. because any scaling in pose bones will be overwritten by animation itself
        armature.pose.bones[bone.name][_BONE_consts.init_scale_key] = bone_matrix.to_scale()

        # CONNECTED BONES
        # NOTE: Doesn't work as expected! Disabled for now in UI.
        # Child bones gets position offset and there is also a problem when translation
        # is animated, for which connected bones doesn't allow.
        if connected_bones:
            if len(bones[bone.name][2]) == 1:
                # exactly one child: point this bone's tail at the child's head and connect it
                matrix = bones[bones[bone.name][2][0]][1]
                bone_matrix = _convert_utils.scs_to_blend_matrix() @ matrix.transposed()
                edit_bone.tail = bone_matrix.to_translation().to_3d() * import_scale
                armature.data.edit_bones[bones[bone.name][2][0]].use_connect = True

    bpy.ops.object.mode_set(mode='OBJECT')
    armature.data.show_axes = True
    armature.display_type = 'WIRE'

    print("************************************")
    return bones
def load(root_object, pia_files, armature, pis_filepath=None, bones=None):
    """Imports animations from given PIA files onto existing armature and registers
    them as SCS animations on the root object.

    :param root_object: SCS root object to which created SCS animation entries are added
    :type root_object: bpy.types.Object
    :param pia_files: list of complete filepaths to PIA files that should be imported
    :type pia_files: list[str]
    :param armature: armature object the animation curves are created for
    :type armature: bpy.types.Object
    :param pis_filepath: complete filepath of PIS file to match skeletons against;
        or None to search for a matching skeleton per PIA file
    :type pis_filepath: str | None
    :param bones: bones dictionary from PIS import (name -> [parent_name, matrix]);
        or None to load it from the skeleton found beside the PIA file
    :type bones: dict | None
    :return: number of successfully imported animations
    :rtype: int
    """
    scs_globals = _get_scs_globals()

    print("\n************************************")
    print("** SCS PIA Importer **")
    print("** (c)2014 SCS Software **")
    print("************************************\n")

    import_scale = scs_globals.import_scale
    ind = ' '
    imported_count = 0
    for pia_filepath in pia_files:
        # Check if PIA file is for the actual skeleton...
        if pis_filepath and bones:
            # caller supplied a skeleton: cheap filename-based check is enough
            skeleton_match = _pix_container.fast_check_for_pia_skeleton(pia_filepath, pis_filepath)
        else:
            # no skeleton given: search for one matching the armature and load its bones
            skeleton_match, pia_skeleton = _pix_container.utter_check_for_pia_skeleton(pia_filepath, armature)

            if skeleton_match:

                path = os.path.split(pia_filepath)[0]
                pia_skeleton = os.path.join(path, pia_skeleton)
                if os.path.isfile(pia_skeleton):
                    bones = _pis.load(pia_skeleton, armature, get_only=True)
                else:
                    lprint("\nE The filepath %r doesn't exist!", (pia_skeleton.replace("\\", "/"),))

            else:
                lprint(str("E Animation doesn't match the skeleton. Animation won't be loaded!\n\t "
                           "Animation file: %r"), (pia_filepath,))

        if skeleton_match:

            lprint('I ++ "%s" IMPORTING animation data...', (os.path.basename(pia_filepath),))
            pia_container = _pix_container.get_data_from_file(pia_filepath, ind)
            if not pia_container:
                lprint('\nE File "%s" is empty!', (pia_filepath.replace("\\", "/"),))
                continue

            # (debug printout / test re-export code removed for clarity)

            # LOAD HEADER
            format_version, source, f_type, animation_name, source_filename, author = _get_header(pia_container)
            if format_version != 3 or f_type != "Animation":
                # only PIA format version 3 animation files are supported
                continue

            # LOAD GLOBALS
            skeleton, total_time, bone_channel_count, custom_channel_count = _get_globals(pia_container)

            # CREATE ANIMATION ACTIONS
            anim_action = bpy.data.actions.new(animation_name + "_action")
            anim_action.use_fake_user = True  # keep action alive even with no users
            anim_data = armature.animation_data if armature.animation_data else armature.animation_data_create()
            anim_data.action = anim_action

            # LOAD BONE CHANNELS
            bone_channels = _get_anim_channels(pia_container, section_name="BoneChannel")
            if len(bone_channels) > 0:

                for bone_name in bone_channels:

                    if bone_name in armature.data.bones:
                        '''
                        NOTE: skipped for now as no data needs to be readed
                        stream_count = bone_channels[bone_name][0]
                        keyframe_count = bone_channels[bone_name][1]
                        '''
                        streams = bone_channels[bone_name][2]

                        # CREATE ANIMATION GROUP
                        anim_group = anim_action.groups.new(bone_name)
                        armature.pose.bones[bone_name].rotation_mode = 'XYZ'  # Set rotation mode.

                        # use pose bone scale set on PIS import
                        init_scale = Vector((1, 1, 1))
                        if _BONE_consts.init_scale_key in armature.pose.bones[bone_name]:
                            init_scale = armature.pose.bones[bone_name][_BONE_consts.init_scale_key]

                        # CREATE FCURVES
                        (pos_fcurves,
                         rot_fcurves,
                         sca_fcurves) = _create_fcurves(anim_action, anim_group,
                                                        str('pose.bones["' + bone_name + '"]'),
                                                        rot_euler=True)

                        # GET BONE REST POSITION MATRIX
                        bone_rest_matrix_scs = bones[bone_name][1].transposed()
                        parent_bone_name = bones[bone_name][0]
                        if parent_bone_name in bones:
                            parent_bone_rest_matrix_scs = bones[parent_bone_name][1].transposed()
                        else:
                            # root bone: use identity as parent rest matrix
                            parent_bone_rest_matrix_scs = Matrix()
                            parent_bone_rest_matrix_scs.identity()

                        for key_time_i, key_time in enumerate(streams[0]):
                            # keyframes are numbered from 1 in Blender
                            keyframe = key_time_i + 1

                            # GET BONE ANIMATION MATRIX
                            bone_animation_matrix_scs = streams[1][key_time_i].transposed()

                            # CREATE DELTA MATRIX
                            delta_matrix = _get_delta_matrix(bone_rest_matrix_scs,
                                                             parent_bone_rest_matrix_scs,
                                                             bone_animation_matrix_scs,
                                                             import_scale)

                            # DECOMPOSE ANIMATION MATRIX
                            location, rotation, scale = delta_matrix.decompose()

                            # CALCULATE CURRENT SCALE - subtract difference between initial bone scale and current scale from 1
                            # NOTE: if imported PIS had initial bone scale different than 1,
                            # initial scale was saved into pose bones custom properties and
                            # has to be used here as bones after import in Blender always have scale of 1
                            scale = Vector((1 + scale[0] - init_scale[0],
                                            1 + scale[1] - init_scale[1],
                                            1 + scale[2] - init_scale[2]))

                            # NOTE: this scaling rotation switch came from UK variants which had scale -1
                            loc, rot, sca = bone_rest_matrix_scs.decompose()
                            if sca.y < 0:
                                rotation.y *= -1
                            if sca.z < 0:
                                rotation.z *= -1

                            rotation = rotation.to_euler('XYZ')

                            # BUILD TRANSFORMATION CURVES
                            for i in range(0, 3):
                                pos_fcurves[i].keyframe_points.insert(frame=float(keyframe), value=location[i], options={'FAST'})
                                rot_fcurves[i].keyframe_points.insert(frame=float(keyframe), value=rotation[i], options={'FAST'})
                                sca_fcurves[i].keyframe_points.insert(frame=float(keyframe), value=scale[i], options={'FAST'})

                        # SET LINEAR INTERPOLATION FOR ALL CURVES
                        color_mode = 'AUTO_RAINBOW'  # Or better 'AUTO_RGB'?
                        for curve in pos_fcurves:
                            curve.color_mode = color_mode
                            for keyframe in curve.keyframe_points:
                                keyframe.interpolation = 'LINEAR'
                        for curve in rot_fcurves:
                            curve.color_mode = color_mode
                            for keyframe in curve.keyframe_points:
                                keyframe.interpolation = 'LINEAR'
                        for curve in sca_fcurves:
                            curve.color_mode = color_mode
                            for keyframe in curve.keyframe_points:
                                keyframe.interpolation = 'LINEAR'

                        # remove euler axis flips introduced by per-key decomposition
                        for curve in rot_fcurves:
                            _animation_utils.apply_euler_filter(curve)

            # LOAD CUSTOM CHANNELS (ARMATURE OFFSET ANIMATION)
            custom_channels = _get_anim_channels(pia_container, section_name="CustomChannel")
            if len(custom_channels) > 0:
                for channel_name in custom_channels:
                    # only the 'Prism Movement' channel is understood; it animates armature location
                    if channel_name == 'Prism Movement':
                        '''
                        NOTE: skipped for now as no data needs to be readed
                        stream_count = custom_channels[channel_name][0]
                        keyframe_count = custom_channels[channel_name][1]
                        '''
                        streams = custom_channels[channel_name][2]

                        # CREATE ANIMATION GROUP
                        anim_group = anim_action.groups.new('Location')

                        # CREATE FCURVES (location x/y/z on the armature object itself)
                        fcurve_pos_x = anim_action.fcurves.new('location', 0)
                        fcurve_pos_y = anim_action.fcurves.new('location', 1)
                        fcurve_pos_z = anim_action.fcurves.new('location', 2)
                        fcurve_pos_x.group = anim_group
                        fcurve_pos_y.group = anim_group
                        fcurve_pos_z.group = anim_group
                        pos_fcurves = (fcurve_pos_x, fcurve_pos_y, fcurve_pos_z)

                        location = None
                        for key_time_i, key_time in enumerate(streams[0]):
                            # keyframe = key_time_i * (key_time[0] * 10) ## TODO: Do proper timing...
                            keyframe = key_time_i + 1
                            scs_offset = _convert_utils.change_to_scs_xyz_coordinates(custom_channels[channel_name][2][1][key_time_i], import_scale)
                            offset = Vector(scs_offset)
                            # offsets are deltas: accumulate them into an absolute location
                            if location is None:
                                location = offset
                            else:
                                location = location + offset

                            # BUILD TRANSLATION CURVES
                            pos_fcurves[0].keyframe_points.insert(frame=float(keyframe), value=location[0], options={'FAST'})
                            pos_fcurves[1].keyframe_points.insert(frame=float(keyframe), value=location[1], options={'FAST'})
                            pos_fcurves[2].keyframe_points.insert(frame=float(keyframe), value=location[2], options={'FAST'})

                        # SET LINEAR INTERPOLATION FOR ALL CURVES
                        for curve in pos_fcurves:
                            for keyframe in curve.keyframe_points:
                                keyframe.interpolation = 'LINEAR'
                    else:
                        lprint('W Unknown channel %r in "%s" file.', (channel_name, os.path.basename(pia_filepath)))

            # CREATE SCS ANIMATION
            animation = _animation_utils.add_animation_to_root(root_object, animation_name)
            animation.export = True
            animation.action = anim_action.name
            animation.anim_start = anim_action.frame_range[0]
            animation.anim_end = anim_action.frame_range[1]

            if total_time:
                animation.length = total_time

            imported_count += 1
        else:
            lprint('I "%s" file REJECTED', (os.path.basename(pia_filepath),))

    # at the end of batch import make sure to select last animation always
    if imported_count > 0:
        root_object.scs_props.active_scs_animation = len(root_object.scs_object_animation_inventory) - 1

    print("************************************")
    return imported_count
def load_pim_file(context, filepath, terrain_points_trans=None, preview_model=False):
    """Loads the actual PIM file type. This is used also for loading of 'Preview Models'.

    :param context: Blender context
    :type context: bpy.types.Context
    :param filepath: File path to be imported
    :type filepath: str
    :param preview_model: Load geomety as Preview Model
    :type preview_model: bool
    :param terrain_points_trans: transitional structure with filled terrain points from PIP; or None
    :type terrain_points_trans: io_scs_tools.imp.transition_structs.terrain_points.TerrainPntsTrans | None
    :return: ({'FINISHED'}, objects, locators, armature, skeleton, material names)
        or preview model object when preview_model is True;
        ({'CANCELLED'}, None, None, [], None, None) on unsupported file version
        NOTE(review): skinned objects are collected separately and are NOT part of the
        returned objects list — confirm callers expect that.
    :rtype: tuple | bpy.types.Object
    """
    # create empty terrain points transitional structure if none is passed
    if terrain_points_trans is None:
        terrain_points_trans = TerrainPntsTrans()

    scs_globals = _get_scs_globals()

    ind = ' '
    pim_container = _pix_container.get_data_from_file(filepath, ind)

    # LOAD HEADER
    format_version, source, f_type, f_name, source_filename, author = get_header(pim_container)

    # only format version 1 is supported by this importer
    if format_version not in (1, ):
        lprint('\nE Unknown PIM.EF file version! Version %r is not currently supported by PIM.EF importer.', format_version)
        return {'CANCELLED'}, None, None, [], None, None

    # LOAD GLOBALS
    (vertex_count,
     face_count,
     edge_count,
     material_count,
     piece_count,
     part_count,
     bone_count,
     locator_count,
     skeleton,
     piece_skin_count) = get_global(pim_container)

    # DATA LOADING — collect raw section data keyed by index/name before creating anything
    materials_data = {}
    objects_data = {}
    parts_data = {}
    locators_data = {}
    bones = {}
    skin_streams = []
    piece_skin_data = {}

    for section in pim_container:
        if section.type == 'Material':
            if scs_globals.import_pim_file:
                material_i, materials_alias, materials_effect = get_material_properties(section)
                # support legacy format without index
                if not material_i:
                    material_i = len(materials_data.keys())
                materials_data[material_i] = [
                    materials_alias,
                    materials_effect,
                ]
        elif section.type == 'Piece':
            if scs_globals.import_pim_file:
                ob_index, ob_material, ob_vertex_cnt, ob_edge_cnt, ob_face_cnt, ob_stream_cnt = get_piece_properties(section)
                piece_name = 'piece_' + str(ob_index)
                (mesh_vertices,
                 mesh_normals,
                 mesh_tangents,
                 mesh_rgb,
                 mesh_rgba,
                 mesh_scalars,
                 mesh_uv,
                 mesh_uv_aliases,
                 mesh_tuv,
                 mesh_faces,
                 mesh_face_materials,
                 mesh_edges) = _get_piece_streams(section)
                objects_data[ob_index] = (
                    context,
                    piece_name,
                    mesh_vertices,
                    mesh_normals,
                    mesh_tangents,
                    mesh_rgb,
                    mesh_rgba,
                    mesh_scalars,
                    mesh_uv,
                    mesh_uv_aliases,
                    mesh_tuv,
                    mesh_faces,
                    mesh_face_materials,
                    mesh_edges,
                )
                # count down declared pieces so surplus/missing can be reported at the end
                piece_count -= 1
        elif section.type == 'Part':
            if scs_globals.import_pim_file:
                part_name, part_piece_count, part_locator_count, part_pieces, part_locators = get_part_properties(section)
                # normalize single int entries into lists
                if part_pieces is not None and isinstance(part_pieces, int):
                    part_pieces = [part_pieces]
                if part_locators is not None and isinstance(part_locators, int):
                    part_locators = [part_locators]
                parts_data[part_name] = (
                    part_pieces,
                    part_locators,
                )
        elif section.type == 'Locator':
            if scs_globals.import_pim_file:
                loc_index, loc_name, loc_hookup, loc_position, loc_rotation, loc_scale = get_locator_properties(section)
                locators_data[loc_index] = (
                    loc_name,
                    loc_hookup,
                    loc_position,
                    loc_rotation,
                    loc_scale,
                )

        # BONES
        elif section.type == 'Bones':
            if scs_globals.import_pis_file:
                bones = get_bones_properties(section, scs_globals.import_pis_file)

        # SKINNING
        elif section.type == 'Skin':  # Always only one skin in current SCS game implementation.
            if scs_globals.import_pim_file and scs_globals.import_pis_file:
                skin_stream_cnt, skin_streams = get_skin_properties(section)
        elif section.type == "PieceSkin":
            if scs_globals.import_pim_file and scs_globals.import_pis_file:
                skin_piece_idx, skin_stream_cnt, skin_piece_streams = get_piece_skin_properties(section)
                piece_skin_data[skin_piece_idx] = skin_piece_streams
                piece_skin_count -= 1

    # CREATE MATERIALS
    if scs_globals.import_pim_file and not preview_model:
        lprint('\nI MATERIALS:')
        for mat_i in materials_data:
            mat = bpy.data.materials.new(materials_data[mat_i][0])
            mat.scs_props.mat_effect_name = materials_data[mat_i][1]

            # keep original alias at index 2 and store actual (possibly renamed) Blender name at index 0
            materials_data[mat_i].append(materials_data[mat_i][0])
            materials_data[mat_i][0] = mat.name
            lprint('I Created Material "%s"...', mat.name)

    # PREPARE VERTEX GROUPS FOR SKINNING
    # builds mapping: piece name -> vertex group (bone) name -> vertex index -> weight
    object_skinning = {}
    if scs_globals.import_pim_file and scs_globals.import_pis_file and bones:
        if skin_streams:  # global skinning section
            for skin_stream in skin_streams:
                for stream_i, stream in enumerate(skin_stream):
                    for data in stream[5]:  # index 5 is data block, see _get_skin_stream
                        for rec in data['clones']:
                            obj = objects_data[rec[0]][1]  # piece name
                            if obj not in object_skinning:
                                object_skinning[obj] = {}
                            vertex = rec[1]
                            for weight in data['weights']:
                                vg = bones[weight[0]]
                                if vg not in object_skinning[obj]:
                                    object_skinning[obj][vg] = {}
                                vw = weight[1]
                                object_skinning[obj][vg][vertex] = vw
        elif piece_skin_data:  # or skinning per piece
            for piece_idx, piece_skin_streams in piece_skin_data.items():
                obj = objects_data[piece_idx][1]  # piece name
                for skin_stream in piece_skin_streams:
                    for stream_i, stream in enumerate(skin_stream):
                        for data in stream[5]:  # index 5 is data block, see _get_skin_stream
                            for vertex_idx in data['vertex_indices']:
                                if obj not in object_skinning:
                                    object_skinning[obj] = {}
                                for weight in data['weights']:
                                    vg = bones[weight[0]]
                                    if vg not in object_skinning[obj]:
                                        object_skinning[obj][vg] = {}
                                    vw = weight[1]
                                    object_skinning[obj][vg][vertex_idx] = vw

    # CREATE OBJECTS
    lprint('\nI OBJECTS:')
    objects = []
    skinned_objects = []
    for obj_i in objects_data:
        obj = _create_piece(
            objects_data[obj_i][0],  # context
            objects_data[obj_i][1],  # piece_name
            objects_data[obj_i][2],  # mesh_vertices
            objects_data[obj_i][3],  # mesh_normals
            objects_data[obj_i][4],  # mesh_tangents
            objects_data[obj_i][5],  # mesh_rgb
            objects_data[obj_i][6],  # mesh_rgba
            objects_data[obj_i][7],  # mesh_scalars
            object_skinning,
            objects_data[obj_i][8],  # mesh_uv
            objects_data[obj_i][9],  # mesh_uv_aliases
            objects_data[obj_i][10],  # mesh_tuv
            objects_data[obj_i][11],  # mesh_faces
            objects_data[obj_i][12],  # mesh_face_materials
            objects_data[obj_i][13],  # mesh_edges
            terrain_points_trans,
            materials_data,
        )

        piece_name = objects_data[obj_i][1]
        if obj:

            # make sure that objects are using Z depth calculation
            # comes handy when we have any kind of transparent materials.
            # Moreover as this property doesn't do any change on
            # none transparent materials we can easily set this to all imported objects
            obj.show_transparent = True

            if piece_name in object_skinning:
                skinned_objects.append(obj)
            else:
                objects.append(obj)
            lprint('I Created Object "%s"...', (obj.name, ))

            # PARTS — assign SCS part name to the object it lists
            for part in parts_data:
                if parts_data[part][0] is not None:
                    if obj_i in parts_data[part][0]:
                        obj.scs_props.scs_part = part.lower()
        else:
            lprint('E "%s" - Object creation FAILED!', piece_name)

    if preview_model:

        # preview model: join all created pieces into one object and return it
        bases = []
        # get the bases of newly created objects for override
        # NOTE(review): scene.object_bases is pre-2.80 Blender API — confirm this branch
        # is still exercised / updated for current Blender versions.
        for base in bpy.context.scene.object_bases:
            if base.object in objects:
                bases.append(base)

        override = {
            'window': bpy.context.window,
            'screen': bpy.context.screen,
            'blend_data': bpy.context.blend_data,
            'scene': bpy.context.scene,
            'region': None,
            'area': None,
            'active_object': objects[0],
            'selected_editable_bases': bases
        }
        bpy.ops.object.join(override)

        return objects[0]

    # CREATE MODEL LOCATORS
    locators = []
    if scs_globals.import_pim_file and not preview_model:
        lprint('\nI MODEL LOCATORS:')
        for loc_i in locators_data:
            loc = _object_utils.create_locator_empty(
                locators_data[loc_i][0],  # loc_name
                locators_data[loc_i][2],  # loc_position
                locators_data[loc_i][3],  # loc_rotation
                locators_data[loc_i][4],  # loc_scale
                1.0,  # loc_size
                'Model',  # loc_type
                locators_data[loc_i][1],  # loc_hookup
            )
            locator_name = locators_data[loc_i][0]
            if loc:
                lprint('I Created Locator "%s"...', locator_name)
                locators.append(loc)
                # PARTS — assign SCS part name to the locator it lists
                for part in parts_data:
                    if parts_data[part][1] is not None:
                        if loc_i in parts_data[part][1]:
                            loc.scs_props.scs_part = part.lower()
            else:
                lprint('E "%s" - Locator creation FAILED!', locator_name)

    # CREATE SKELETON (ARMATURE)
    armature = None
    if scs_globals.import_pis_file and bones:
        bpy.ops.object.add(type='ARMATURE', enter_editmode=False)
        bpy.ops.object.editmode_toggle()
        # one placeholder bone per name; actual transforms are set later by PIS import
        for bone in bones:
            bpy.ops.armature.bone_primitive_add(name=bone)
        bpy.ops.object.editmode_toggle()
        armature = bpy.context.object

        # ADD ARMATURE MODIFIERS TO SKINNED OBJECTS
        for obj in skinned_objects:
            bpy.context.view_layer.objects.active = obj
            bpy.ops.object.modifier_add(type='ARMATURE')
            arm_modifier = None
            for modifier in obj.modifiers:
                if modifier.type == 'ARMATURE':
                    arm_modifier = modifier
                    break
            if arm_modifier:
                arm_modifier.object = armature
            obj.parent = armature

    # WARNING PRINTOUTS
    if piece_count < 0:
        lprint('W More Pieces found than were declared!')
    if piece_count > 0:
        lprint('W Some Pieces not found, but were declared!')
    if piece_skin_count > 0:
        lprint("W More PieceSkins found than were declared!")
    if piece_skin_count < 0:
        lprint("W Some PieceSkins not found, but were declared!")

    return {'FINISHED'}, objects, locators, armature, skeleton, materials_data.values()
def load(filepath, terrain_points_trans):
    """Loads given PIP file.

    :param filepath: complete filepath to PIP file
    :type filepath: str
    :param terrain_points_trans: terrain points transitional structure where terrain points shall be saved
    :type terrain_points_trans: io_scs_tools.imp.transition_structs.terrain_points.TerrainPntsTrans
    :return: set of operator result and list of created locators
    :rtype: tuple[set, list[bpy.types.Objects]]
    """
    scs_globals = _get_scs_globals()

    print("\n************************************")
    print("** SCS PIP Importer **")
    print("** (c)2014 SCS Software **")
    print("************************************\n")

    # from bpy_extras.image_utils import load_image  # UNUSED

    # scene = context.scene
    ind = '    '
    pip_container = _pix_container.get_data_from_file(filepath, ind)

    # LOAD HEADER
    '''
    NOTE: skipped for now as no data needs to be readed
    (format_version, source, f_type, f_name, source_filename, author) = _get_header(pip_container)
    '''

    # LOAD GLOBALS
    '''
    NOTE: skipped for now as no data needs to be readed
    (node_count, terrain_point_count, nav_curve_count, sign_count, spawn_point_count,
     traffic_light_count, map_point_count, trigger_point_count, intersection_count) = _get_global(pip_container)
    '''

    # DATA BUILDING
    # intermediate storages, filled from PIP sections first and turned into
    # Blender locator objects afterwards
    nodes_data = {}
    # terrain_points_data = {}
    signs_data = {}
    spawn_points_data = {}
    traffic_lights_data = {}
    nav_curves_data = {}
    map_points_data = []
    trigger_points_data = []

    locators = []

    # node_index = 0
    sign_index = 0
    spawn_index = 0
    tsem_index = 0
    map_index = 0
    trp_index = 0

    for section in pip_container:
        if section.type == 'Node':
            (node_name,
             node_index,
             node_position,
             node_direction,
             node_input_lanes,
             node_output_lanes,
             tp_positions,
             tp_normals,
             tp_variants,) = _get_node_properties(section)

            if node_name is None:
                node_name = str('Node_Locator_' + str(node_index))
            else:
                # make sure duplicated names don't silently overwrite existing entries
                node_name = _name_utils.get_unique(node_name, nodes_data.keys())

            node_direction = _curve_utils.set_direction(node_direction)
            nodes_data[node_name] = (node_index,
                                     node_position,
                                     node_direction,
                                     node_input_lanes,
                                     node_output_lanes,
                                     tp_positions,
                                     tp_normals,
                                     tp_variants)
        elif section.type == 'Sign':
            (sign_name, sign_position, sign_rotation, sign_model, sign_part) = _get_sign_properties(section)

            if sign_name is None:
                sign_name = str('Sign_Locator_' + str(sign_index))
            else:
                sign_name = _name_utils.get_unique(sign_name, signs_data.keys())

            signs_data[sign_name] = (
                sign_index,
                sign_position,
                sign_rotation,
                sign_model,
                sign_part,
            )
            sign_index += 1
        elif section.type == 'SpawnPoint':
            (spawn_name, spawn_position, spawn_rotation, spawn_type) = _get_spawn_properties(section)

            if spawn_name is None:
                # NOTE(review): default prefix says "Sign" although this is a spawn point;
                # kept as-is to preserve naming of previously imported scenes — confirm intent.
                spawn_name = str('Sign_Locator_' + str(spawn_index))
            else:
                spawn_name = _name_utils.get_unique(spawn_name, spawn_points_data.keys())

            spawn_points_data[spawn_name] = (
                spawn_index,
                spawn_position,
                spawn_rotation,
                spawn_type,
            )
            spawn_index += 1
        elif section.type == 'Semaphore':  # former "TrafficLight"
            (tsem_name,
             tsem_position,
             tsem_rotation,
             tsem_type,
             tsem_id,
             tsem_intervals,
             tsem_cycle,
             tsem_profile) = _get_t_light_properties(section)

            if tsem_name is None:
                tsem_name = str('Semaphore_Locator_' + str(tsem_index))
            else:
                tsem_name = _name_utils.get_unique(tsem_name, traffic_lights_data.keys())

            if tsem_id is None:
                tsem_id = -1

            traffic_lights_data[tsem_name] = (
                tsem_position,
                tsem_rotation,
                tsem_type,
                tsem_id,
                tsem_intervals,
                tsem_cycle,
                tsem_profile,
            )
            tsem_index += 1
        elif section.type == 'Curve':
            (cur_name,
             cur_index,
             cur_flags,
             cur_leads_to_nodes,
             cur_traffic_rule,
             cur_sempahore_id,
             cur_next_curves,
             cur_prev_curves,
             cur_length,
             bezier_start_pos,
             bezier_start_dir,
             bezier_start_qua,
             bezier_end_pos,
             bezier_end_dir,
             bezier_end_qua) = _get_curve_properties(section)

            nav_curves_data[cur_index] = (
                cur_name,
                cur_flags,
                cur_leads_to_nodes,  # not used
                cur_traffic_rule,
                cur_sempahore_id,
                cur_next_curves,
                cur_prev_curves,
                cur_length,  # not used
                bezier_start_pos,
                bezier_start_dir,
                bezier_start_qua,
                bezier_end_pos,
                bezier_end_dir,
                bezier_end_qua,
            )
        elif section.type == 'MapPoint':
            (map_indexx,
             map_name,
             map_visual_flags,
             map_nav_flags,
             map_position,
             map_neighbours) = _get_map_point_properties(section)

            if map_indexx is None:
                map_indexx = map_index
            if map_name is None:
                map_name = str("Map_Point_Locator_" + str(map_indexx))

            map_points_data.append((
                map_name,
                map_indexx,
                map_visual_flags,
                map_nav_flags,
                map_position,
                map_neighbours,
            ))
            map_index += 1
        elif section.type == 'TriggerPoint':
            (trp_indexx,
             trp_name,
             trp_trigger_id,
             trp_action,
             trp_range,
             trp_reset_delay,
             trp_flags,
             trp_position,
             trp_neighbours) = _get_trigger_point_properties(section)

            if trp_indexx is None:
                trp_indexx = trp_index
            if trp_name is None:
                trp_name = str('Trigger_Locator_' + str(trp_indexx))
            trp_range = float(trp_range)

            trigger_points_data.append((
                trp_name,
                trp_indexx,
                trp_action,
                trp_range,
                trp_reset_delay,
                trp_flags,
                trp_position,
                trp_neighbours,
            ))
            trp_index += 1

    # print('')

    # CREATE NODES
    for name in nodes_data:
        loc = _create_node_locator(
            name,
            nodes_data[name][0],  # node_index
            nodes_data[name][1],  # node_position
            nodes_data[name][2],  # node_direction
        )

        tp_pos_l = nodes_data[name][5]
        tp_nor_l = nodes_data[name][6]
        tp_var_l = nodes_data[name][7]

        # save terrain points into transitional structure
        if len(tp_var_l) > 0:  # save per variant block
            for var_i, var in enumerate(tp_var_l):
                # each variant entry is (start, count) into the terrain point lists
                for i in range(var[0], var[0] + var[1]):
                    terrain_points_trans.add(var_i, nodes_data[name][0], tp_pos_l[i], tp_nor_l[i])
        else:
            # no variant blocks -> terrain points belong to all variants (-1)
            for i in range(len(tp_pos_l)):
                terrain_points_trans.add(-1, nodes_data[name][0], tp_pos_l[i], tp_nor_l[i])

        if loc:
            _print_locator_result(loc, "Node", name)
            locators.append(loc)

    # CREATE SIGNS
    for name in signs_data:
        # print('signs_data[name]: %s' % str(signs_data[name]))
        loc = _create_sign_locator(name,
                                   signs_data[name][1],
                                   signs_data[name][2],
                                   signs_data[name][3],
                                   signs_data[name][4],
                                   scs_globals.scs_sign_model_inventory)
        if loc:
            _print_locator_result(loc, "Sign", name)
            locators.append(loc)

    # CREATE SPAWN POINTS
    for name in spawn_points_data:
        # print('spawn_points_data[name]: %s' % str(spawn_points_data[name]))
        loc = _create_spawn_locator(
            name,
            spawn_points_data[name][1],
            spawn_points_data[name][2],
            spawn_points_data[name][3],
        )
        if loc:
            _print_locator_result(loc, "Spawn Point", name)
            locators.append(loc)

    # CREATE TRAFFIC LIGHTS
    for name in traffic_lights_data:
        # print('traffic_lights_data[name]: %s' % str(traffic_lights_data[name]))
        loc = _create_traffic_light_locator(
            name,
            traffic_lights_data[name][0],  # tsem_position
            traffic_lights_data[name][1],  # tsem_rotation
            traffic_lights_data[name][2],  # tsem_type
            traffic_lights_data[name][3],  # tsem_id
            traffic_lights_data[name][4],  # tsem_intervals
            traffic_lights_data[name][5],  # tsem_cycle
            traffic_lights_data[name][6],  # tsem_profile
            scs_globals.scs_tsem_profile_inventory)
        if loc:
            _print_locator_result(loc, "Traffic Semaphore", name)
            locators.append(loc)

    # PREPROCESS CURVE DATA AND CREATE LOCATORS AND DICTIONARY OF CONNECTIONS
    # conns_dict maps curve index -> {"start": locator, "end": locator}; two
    # adjacent curves share one locator where they connect.
    conns_dict = {}
    nav_locs_count = 0
    for index in nav_curves_data:

        # assemble variables
        # cur_name = nav_curves_data[index][0]
        cur_flags = nav_curves_data[index][1]
        cur_traffic_rule = nav_curves_data[index][3]
        cur_sempahore_id = nav_curves_data[index][4]
        cur_next_curves = nav_curves_data[index][5]
        cur_prev_curves = nav_curves_data[index][6]

        # either direction vector or quaternion is used for each curve end
        bezier_start_pos = nav_curves_data[index][8]
        bezier_start_dir = bezier_start_qua = bezier_end_dir = bezier_end_qua = None
        if nav_curves_data[index][9]:
            bezier_start_dir = _curve_utils.set_direction(nav_curves_data[index][9])
        else:
            bezier_start_qua = nav_curves_data[index][10]

        bezier_end_pos = nav_curves_data[index][11]
        if nav_curves_data[index][12]:
            bezier_end_dir = _curve_utils.set_direction(nav_curves_data[index][12])
        else:
            bezier_end_qua = nav_curves_data[index][13]

        # check if there is need to create new one or alter the existing
        curve_locators_to_create = {"start": True, "end": True}
        for prev_curve_ind in cur_prev_curves:
            if prev_curve_ind != -1 and prev_curve_ind in conns_dict:
                if "end" in conns_dict[prev_curve_ind]:
                    curve_locators_to_create["start"] = False

                    # properly create entry for current curve
                    if index in conns_dict:
                        conns_dict[index]["start"] = conns_dict[prev_curve_ind]["end"]
                    else:
                        conns_dict[index] = {"start": conns_dict[prev_curve_ind]["end"]}
                    break

        for next_curve_ind in cur_next_curves:
            if next_curve_ind != -1 and next_curve_ind in conns_dict:
                if "start" in conns_dict[next_curve_ind]:
                    curve_locators_to_create["end"] = False

                    # properly create entry for current curve
                    if index in conns_dict:
                        conns_dict[index]["end"] = conns_dict[next_curve_ind]["start"]
                    else:
                        conns_dict[index] = {"end": conns_dict[next_curve_ind]["start"]}
                    break

        # CREATE 2 LOCATORS FOR EACH CURVE IF NEEDED
        for loc_key in curve_locators_to_create:

            nav_locator_data = {}
            nav_locs_count += 1
            if loc_key == "start":
                nav_locator_data['np_name'] = "Nav_Point_" + str(nav_locs_count)
                nav_locator_data['np_pos'] = bezier_start_pos
                nav_locator_data['np_dir'] = bezier_start_dir
                nav_locator_data['np_qua'] = bezier_start_qua
            elif loc_key == "end":
                nav_locator_data['np_name'] = "Nav_Point_" + str(nav_locs_count)
                nav_locator_data['np_pos'] = bezier_end_pos
                nav_locator_data['np_dir'] = bezier_end_dir
                nav_locator_data['np_qua'] = bezier_end_qua

            # decode curve flag bits into locator properties
            nav_locator_data['np_low_probab'] = (cur_flags & _PL_consts.PNCF.LOW_PROBABILITY) != 0
            nav_locator_data['np_add_priority'] = (cur_flags & _PL_consts.PNCF.ADDITIVE_PRIORITY) != 0
            nav_locator_data['np_limit_displace'] = (cur_flags & _PL_consts.PNCF.LIMIT_DISPLACEMENT) != 0
            nav_locator_data['np_allowed_veh'] = cur_flags & _PL_consts.PNCF.ALLOWED_VEHICLES_MASK

            if cur_flags & _PL_consts.PNCF.LEFT_BLINKER != 0:
                nav_locator_data['np_blinker'] = _PL_consts.PNCF.LEFT_BLINKER
            elif cur_flags & _PL_consts.PNCF.FORCE_NO_BLINKER != 0:
                nav_locator_data['np_blinker'] = _PL_consts.PNCF.FORCE_NO_BLINKER
            elif cur_flags & _PL_consts.PNCF.RIGHT_BLINKER != 0:
                nav_locator_data['np_blinker'] = _PL_consts.PNCF.RIGHT_BLINKER
            else:
                nav_locator_data['np_blinker'] = 0

            nav_locator_data['np_prior_modif'] = (cur_flags & _PL_consts.PNCF.PRIORITY_MASK) >> _PL_consts.PNCF.PRIORITY_SHIFT
            nav_locator_data['np_semaphore_id'] = cur_sempahore_id
            nav_locator_data['np_traffic_rule'] = cur_traffic_rule

            # mark locator as node boundary if this curve is listed in some
            # node's input (start) or output (end) lanes
            for node_data in nodes_data.values():
                if loc_key == "start":
                    for lane_index, curve_index in enumerate(node_data[3]):
                        if curve_index == index:
                            nav_locator_data['np_boundary'] = 1 + lane_index
                            nav_locator_data['np_boundary_node'] = node_data[0]
                            break
                else:
                    for lane_index, curve_index in enumerate(node_data[4]):
                        if curve_index == index:
                            nav_locator_data['np_boundary'] = 1 + lane_index + _PL_consts.PREFAB_LANE_COUNT_MAX
                            nav_locator_data['np_boundary_node'] = node_data[0]
                            break

            # locator already exists just set properties
            if curve_locators_to_create[loc_key] is False:
                loc_obj = conns_dict[index][loc_key]
                _set_nav_locator_props(loc_obj, nav_locator_data, loc_key == "start")
                continue

            loc = _create_nav_locator(nav_locator_data)
            _set_nav_locator_props(loc, nav_locator_data, loc_key == "start")
            locators.append(loc)
            if loc:

                # decide which side to update
                if loc_key == "start":
                    related_curves = cur_prev_curves
                    related_end = "end"
                else:
                    related_curves = cur_next_curves
                    related_end = "start"

                # create or update references for current connection
                if index not in conns_dict:
                    conns_dict[index] = {loc_key: loc}
                else:
                    conns_dict[index][loc_key] = loc

                # update references for prev or next connections
                for prev_curve_ind in related_curves:
                    if prev_curve_ind != -1:
                        if prev_curve_ind in conns_dict:
                            if related_end not in conns_dict[prev_curve_ind]:
                                conns_dict[prev_curve_ind][related_end] = loc
                        else:
                            conns_dict[prev_curve_ind] = {related_end: loc}

    # CREATE CONNECTIONS BETWEEN NAVIGATION POINTS
    for connection in conns_dict.values():
        _group_connections_wrapper.create_connection(connection["start"], connection["end"])

    # COLLECT MAP POINT CONNECTIONS
    connections = []
    for map_point in map_points_data:

        # ignore auto generated map points
        if map_point[3] & _PL_consts.MPNF.NAV_BASE != 0:
            continue

        loc_index = map_point[1]
        for loc_neighbour_index in map_point[5]:

            if loc_neighbour_index == -1:
                continue

            if loc_index == loc_neighbour_index:
                continue

            # FIX: the original duplicate check used "continue" inside an inner
            # loop over "connections", which only skipped that inner iteration
            # and never prevented the append -> duplicates got collected and
            # connections were created twice. Test membership directly instead.
            if (loc_index, loc_neighbour_index) in connections or (loc_neighbour_index, loc_index) in connections:
                continue

            connections.append((loc_index, loc_neighbour_index))

    # CREATE MAP POINTS
    mp_locs = {}
    for map_point in map_points_data:
        name = map_point[0]

        # ignore auto generated map points
        if map_point[3] & _PL_consts.MPNF.NAV_BASE != 0:
            continue

        loc = _create_map_locator(
            map_point[0],
            map_point[2],
            map_point[3],
            map_point[4],
        )

        _print_locator_result(loc, "Map Point", name)

        if loc:
            locators.append(loc)
            mp_locs[map_point[1]] = loc

    # APPLY MAP POINT CONNECTIONS
    for connection in connections:

        # safety check if connection indexes really exists
        if connection[0] in mp_locs and connection[1] in mp_locs:
            start_node = mp_locs[connection[0]]
            end_node = mp_locs[connection[1]]
        else:
            lprint('E Map connection out of range: %s', (str(connection),))
            continue

        _group_connections_wrapper.create_connection(start_node, end_node)

    # COLLECT TRIGGER POINT CONNECTIONS
    connections = []
    for tr_point in trigger_points_data:
        loc_index = tr_point[1]
        # print(' name: %s' % tr_point[0])
        if len(tr_point[7]) != 2:
            lprint('W Unexpected number of connections (%i) for Trigger Point "%s"!', (len(tr_point[7]), tr_point[0]))
        for loc_neighbour_index in tr_point[7]:
            connections.append((loc_index, loc_neighbour_index))

    # CREATE TRIGGER POINTS
    tp_locs = {}
    for tr_point in trigger_points_data:
        name = tr_point[0]
        # print('trigger_points_data[%r]: %s' % (name, str(tr_point)))
        loc = _create_trigger_locator(
            tr_point[0],
            tr_point[2],
            tr_point[3],
            tr_point[4],
            tr_point[5],
            tr_point[6],
            scs_globals.scs_trigger_actions_inventory)

        _print_locator_result(loc, "Trigger Point", name)

        if loc:
            locators.append(loc)
            tp_locs[tr_point[1]] = loc

    # APPLY TRIGGER POINT CONNECTIONS
    for connection in connections:

        # safety check if connection indexes really exists
        if connection[0] in tp_locs and connection[1] in tp_locs:
            start_node = tp_locs[connection[0]]
            end_node = tp_locs[connection[1]]
        else:
            # FIX: was "print(...)" with lprint-style argument tuple, which
            # printed the tuple verbatim instead of formatting it and bypassed
            # the add-on's lprint error reporting used by all sibling paths.
            lprint('E Trigger connection out of range: %s', (str(connection),))
            continue

        _group_connections_wrapper.create_connection(start_node, end_node)

    print("************************************")
    return {'FINISHED'}, locators
def load(filepath, terrain_points_trans):
    """Loads given PIP file.

    :param filepath: complete filepath to PIP file
    :type filepath: str
    :param terrain_points_trans: terrain points transitional structure where terrain points shall be saved
    :type terrain_points_trans: io_scs_tools.imp.transition_structs.terrain_points.TerrainPntsTrans
    :return: set of operator result and list of created locators
    :rtype: tuple[set, list[bpy.types.Objects]]
    """
    scs_globals = _get_scs_globals()

    print("\n************************************")
    print("** SCS PIP Importer **")
    print("** (c)2014 SCS Software **")
    print("************************************\n")

    # from bpy_extras.image_utils import load_image  # UNUSED

    # scene = context.scene
    ind = '    '
    pip_container = _pix_container.get_data_from_file(filepath, ind)

    # LOAD HEADER
    '''
    NOTE: skipped for now as no data needs to be readed
    (format_version, source, f_type, f_name, source_filename, author) = _get_header(pip_container)
    '''

    # LOAD GLOBALS
    '''
    NOTE: skipped for now as no data needs to be readed
    (node_count, terrain_point_count, nav_curve_count, sign_count, spawn_point_count,
     traffic_light_count, map_point_count, trigger_point_count, intersection_count) = _get_global(pip_container)
    '''

    # DATA BUILDING
    # intermediate storages, filled from PIP sections first and turned into
    # Blender locator objects afterwards
    # NOTE(review): unlike the other load() variant, locator names are NOT run
    # through a uniqueness helper here, so duplicated names in the file would
    # overwrite earlier dict entries — confirm whether that is intended.
    nodes_data = {}
    # terrain_points_data = {}
    signs_data = {}
    spawn_points_data = {}
    traffic_lights_data = {}
    nav_curves_data = {}
    map_points_data = []
    trigger_points_data = []

    locators = []

    # node_index = 0
    sign_index = 0
    spawn_index = 0
    tsem_index = 0
    map_index = 0
    trp_index = 0

    for section in pip_container:
        if section.type == 'Node':
            (node_name,
             node_index,
             node_position,
             node_direction,
             node_input_lanes,
             node_output_lanes,
             tp_positions,
             tp_normals,
             tp_variants,) = _get_node_properties(section)

            if node_name is None:
                node_name = str('Node_Locator_' + str(node_index))

            node_direction = _curve_utils.set_direction(node_direction)
            nodes_data[node_name] = (
                node_index,
                node_position,
                node_direction,
                node_input_lanes,
                node_output_lanes,
                tp_positions,
                tp_normals,
                tp_variants
            )
        elif section.type == 'Sign':
            (sign_name, sign_position, sign_rotation, sign_model, sign_part) = _get_sign_properties(section)

            if sign_name is None:
                sign_name = str('Sign_Locator_' + str(sign_index))

            signs_data[sign_name] = (
                sign_index,
                sign_position,
                sign_rotation,
                sign_model,
                sign_part,
            )
            sign_index += 1
        elif section.type == 'SpawnPoint':
            (spawn_name, spawn_position, spawn_rotation, spawn_type) = _get_spawn_properties(section)

            if spawn_name is None:
                # NOTE(review): default prefix says "Sign" although this is a spawn point;
                # kept as-is to preserve naming of previously imported scenes — confirm intent.
                spawn_name = str('Sign_Locator_' + str(spawn_index))

            spawn_points_data[spawn_name] = (
                spawn_index,
                spawn_position,
                spawn_rotation,
                spawn_type,
            )
            spawn_index += 1
        elif section.type == 'Semaphore':  # former "TrafficLight"
            (tsem_name,
             tsem_position,
             tsem_rotation,
             tsem_type,
             tsem_id,
             tsem_intervals,
             tsem_cycle,
             tsem_profile) = _get_t_light_properties(section)

            if tsem_name is None:
                tsem_name = str('Semaphore_Locator_' + str(tsem_index))

            if tsem_id is None:
                tsem_id = -1

            traffic_lights_data[tsem_name] = (
                tsem_position,
                tsem_rotation,
                tsem_type,
                tsem_id,
                tsem_intervals,
                tsem_cycle,
                tsem_profile,
            )
            tsem_index += 1
        elif section.type == 'Curve':
            (cur_name,
             cur_index,
             cur_flags,
             cur_leads_to_nodes,
             cur_traffic_rule,
             cur_sempahore_id,
             cur_next_curves,
             cur_prev_curves,
             cur_length,
             bezier_start_pos,
             bezier_start_dir,
             bezier_start_qua,
             bezier_end_pos,
             bezier_end_dir,
             bezier_end_qua) = _get_curve_properties(section)

            nav_curves_data[cur_index] = (
                cur_name,
                cur_flags,
                cur_leads_to_nodes,  # not used
                cur_traffic_rule,
                cur_sempahore_id,
                cur_next_curves,
                cur_prev_curves,
                cur_length,  # not used
                bezier_start_pos,
                bezier_start_dir,
                bezier_start_qua,
                bezier_end_pos,
                bezier_end_dir,
                bezier_end_qua,
            )
        elif section.type == 'MapPoint':
            (map_indexx,
             map_name,
             map_visual_flags,
             map_nav_flags,
             map_position,
             map_neighbours) = _get_map_point_properties(section)

            if map_indexx is None:
                map_indexx = map_index
            if map_name is None:
                map_name = str("Map_Point_Locator_" + str(map_indexx))

            map_points_data.append((
                map_name,
                map_indexx,
                map_visual_flags,
                map_nav_flags,
                map_position,
                map_neighbours,
            ))
            map_index += 1
        elif section.type == 'TriggerPoint':
            (trp_indexx,
             trp_name,
             trp_trigger_id,
             trp_action,
             trp_range,
             trp_reset_delay,
             trp_flags,
             trp_position,
             trp_neighbours) = _get_trigger_point_properties(section)

            if trp_indexx is None:
                trp_indexx = trp_index
            if trp_name is None:
                trp_name = str('Trigger_Locator_' + str(trp_indexx))
            trp_range = float(trp_range)

            trigger_points_data.append((
                trp_name,
                trp_indexx,
                trp_action,
                trp_range,
                trp_reset_delay,
                trp_flags,
                trp_position,
                trp_neighbours,
            ))
            trp_index += 1

    # print('')

    # CREATE NODES
    for name in nodes_data:
        loc = _create_node_locator(
            name,
            nodes_data[name][0],  # node_index
            nodes_data[name][1],  # node_position
            nodes_data[name][2],  # node_direction
        )

        tp_pos_l = nodes_data[name][5]
        tp_nor_l = nodes_data[name][6]
        tp_var_l = nodes_data[name][7]

        # save terrain points into transitional structure
        if len(tp_var_l) > 0:  # save per variant block
            for var_i, var in enumerate(tp_var_l):
                # each variant entry is (start, count) into the terrain point lists
                for i in range(var[0], var[0] + var[1]):
                    terrain_points_trans.add(var_i, nodes_data[name][0], tp_pos_l[i], tp_nor_l[i])
        else:
            # no variant blocks -> terrain points belong to all variants (-1)
            for i in range(len(tp_pos_l)):
                terrain_points_trans.add(-1, nodes_data[name][0], tp_pos_l[i], tp_nor_l[i])

        if loc:
            _print_locator_result(loc, "Node", name)
            locators.append(loc)

    # CREATE SIGNS
    for name in signs_data:
        # print('signs_data[name]: %s' % str(signs_data[name]))
        loc = _create_sign_locator(
            name,
            signs_data[name][1],
            signs_data[name][2],
            signs_data[name][3],
            signs_data[name][4],
            scs_globals.scs_sign_model_inventory
        )
        if loc:
            _print_locator_result(loc, "Sign", name)
            locators.append(loc)

    # CREATE SPAWN POINTS
    for name in spawn_points_data:
        # print('spawn_points_data[name]: %s' % str(spawn_points_data[name]))
        loc = _create_spawn_locator(
            name,
            spawn_points_data[name][1],
            spawn_points_data[name][2],
            spawn_points_data[name][3],
        )
        if loc:
            _print_locator_result(loc, "Spawn Point", name)
            locators.append(loc)

    # CREATE TRAFFIC LIGHTS
    for name in traffic_lights_data:
        # print('traffic_lights_data[name]: %s' % str(traffic_lights_data[name]))
        loc = _create_traffic_light_locator(
            name,
            traffic_lights_data[name][0],  # tsem_position
            traffic_lights_data[name][1],  # tsem_rotation
            traffic_lights_data[name][2],  # tsem_type
            traffic_lights_data[name][3],  # tsem_id
            traffic_lights_data[name][4],  # tsem_intervals
            traffic_lights_data[name][5],  # tsem_cycle
            traffic_lights_data[name][6],  # tsem_profile
            scs_globals.scs_tsem_profile_inventory
        )
        if loc:
            _print_locator_result(loc, "Traffic Semaphore", name)
            locators.append(loc)

    # PREPROCESS CURVE DATA AND CREATE LOCATORS AND DICTIONARY OF CONNECTIONS
    # conns_dict maps curve index -> {"start": locator, "end": locator}; two
    # adjacent curves share one locator where they connect.
    conns_dict = {}
    nav_locs_count = 0
    for index in nav_curves_data:

        # assemble variables
        # cur_name = nav_curves_data[index][0]
        cur_flags = nav_curves_data[index][1]
        cur_traffic_rule = nav_curves_data[index][3]
        cur_sempahore_id = nav_curves_data[index][4]
        cur_next_curves = nav_curves_data[index][5]
        cur_prev_curves = nav_curves_data[index][6]

        # either direction vector or quaternion is used for each curve end
        bezier_start_pos = nav_curves_data[index][8]
        bezier_start_dir = bezier_start_qua = bezier_end_dir = bezier_end_qua = None
        if nav_curves_data[index][9]:
            bezier_start_dir = _curve_utils.set_direction(nav_curves_data[index][9])
        else:
            bezier_start_qua = nav_curves_data[index][10]

        bezier_end_pos = nav_curves_data[index][11]
        if nav_curves_data[index][12]:
            bezier_end_dir = _curve_utils.set_direction(nav_curves_data[index][12])
        else:
            bezier_end_qua = nav_curves_data[index][13]

        # check if there is need to create new one or alter the existing
        curve_locators_to_create = {"start": True, "end": True}
        for prev_curve_ind in cur_prev_curves:
            if prev_curve_ind != -1 and prev_curve_ind in conns_dict:
                if "end" in conns_dict[prev_curve_ind]:
                    curve_locators_to_create["start"] = False

                    # properly create entry for current curve
                    if index in conns_dict:
                        conns_dict[index]["start"] = conns_dict[prev_curve_ind]["end"]
                    else:
                        conns_dict[index] = {
                            "start": conns_dict[prev_curve_ind]["end"]
                        }
                    break

        for next_curve_ind in cur_next_curves:
            if next_curve_ind != -1 and next_curve_ind in conns_dict:
                if "start" in conns_dict[next_curve_ind]:
                    curve_locators_to_create["end"] = False

                    # properly create entry for current curve
                    if index in conns_dict:
                        conns_dict[index]["end"] = conns_dict[next_curve_ind]["start"]
                    else:
                        conns_dict[index] = {
                            "end": conns_dict[next_curve_ind]["start"]
                        }
                    break

        # CREATE 2 LOCATORS FOR EACH CURVE IF NEEDED
        for loc_key in curve_locators_to_create:

            nav_locator_data = {}
            nav_locs_count += 1
            if loc_key == "start":
                nav_locator_data['np_name'] = "Nav_Point_" + str(nav_locs_count)
                nav_locator_data['np_pos'] = bezier_start_pos
                nav_locator_data['np_dir'] = bezier_start_dir
                nav_locator_data['np_qua'] = bezier_start_qua
            elif loc_key == "end":
                nav_locator_data['np_name'] = "Nav_Point_" + str(nav_locs_count)
                nav_locator_data['np_pos'] = bezier_end_pos
                nav_locator_data['np_dir'] = bezier_end_dir
                nav_locator_data['np_qua'] = bezier_end_qua

            # decode curve flag bits into locator properties
            nav_locator_data['np_low_probab'] = (cur_flags & _PL_consts.PNCF.LOW_PROBABILITY) != 0
            nav_locator_data['np_add_priority'] = (cur_flags & _PL_consts.PNCF.ADDITIVE_PRIORITY) != 0
            nav_locator_data['np_limit_displace'] = (cur_flags & _PL_consts.PNCF.LIMIT_DISPLACEMENT) != 0
            nav_locator_data['np_allowed_veh'] = cur_flags & _PL_consts.PNCF.ALLOWED_VEHICLES_MASK

            if cur_flags & _PL_consts.PNCF.LEFT_BLINKER != 0:
                nav_locator_data['np_blinker'] = _PL_consts.PNCF.LEFT_BLINKER
            elif cur_flags & _PL_consts.PNCF.FORCE_NO_BLINKER != 0:
                nav_locator_data['np_blinker'] = _PL_consts.PNCF.FORCE_NO_BLINKER
            elif cur_flags & _PL_consts.PNCF.RIGHT_BLINKER != 0:
                nav_locator_data['np_blinker'] = _PL_consts.PNCF.RIGHT_BLINKER
            else:
                nav_locator_data['np_blinker'] = 0

            nav_locator_data['np_prior_modif'] = (cur_flags & _PL_consts.PNCF.PRIORITY_MASK) >> _PL_consts.PNCF.PRIORITY_SHIFT
            nav_locator_data['np_semaphore_id'] = cur_sempahore_id
            nav_locator_data['np_traffic_rule'] = cur_traffic_rule

            # mark locator as node boundary if this curve is listed in some
            # node's input (start) or output (end) lanes
            for node_data in nodes_data.values():
                if loc_key == "start":
                    for lane_index, curve_index in enumerate(node_data[3]):
                        if curve_index == index:
                            nav_locator_data['np_boundary'] = 1 + lane_index
                            nav_locator_data['np_boundary_node'] = node_data[0]
                            break
                else:
                    for lane_index, curve_index in enumerate(node_data[4]):
                        if curve_index == index:
                            nav_locator_data['np_boundary'] = 1 + lane_index + _PL_consts.PREFAB_LANE_COUNT_MAX
                            nav_locator_data['np_boundary_node'] = node_data[0]
                            break

            # locator already exists just set properties
            if curve_locators_to_create[loc_key] is False:
                loc_obj = conns_dict[index][loc_key]
                _set_nav_locator_props(loc_obj, nav_locator_data, loc_key == "start")
                continue

            loc = _create_nav_locator(nav_locator_data)
            _set_nav_locator_props(loc, nav_locator_data, loc_key == "start")
            locators.append(loc)
            if loc:

                # decide which side to update
                if loc_key == "start":
                    related_curves = cur_prev_curves
                    related_end = "end"
                else:
                    related_curves = cur_next_curves
                    related_end = "start"

                # create or update references for current connection
                if index not in conns_dict:
                    conns_dict[index] = {
                        loc_key: loc
                    }
                else:
                    conns_dict[index][loc_key] = loc

                # update references for prev or next connections
                for prev_curve_ind in related_curves:
                    if prev_curve_ind != -1:
                        if prev_curve_ind in conns_dict:
                            if related_end not in conns_dict[prev_curve_ind]:
                                conns_dict[prev_curve_ind][related_end] = loc
                        else:
                            conns_dict[prev_curve_ind] = {
                                related_end: loc
                            }

    # CREATE CONNECTIONS BETWEEN NAVIGATION POINTS
    for connection in conns_dict.values():
        _group_connections_wrapper.create_connection(connection["start"], connection["end"])

    # COLLECT MAP POINT CONNECTIONS
    connections = []
    for map_point in map_points_data:

        # ignore auto generated map points
        if map_point[3] & _PL_consts.MPNF.NAV_BASE != 0:
            continue

        loc_index = map_point[1]
        for loc_neighbour_index in map_point[5]:

            if loc_neighbour_index == -1:
                continue

            if loc_index == loc_neighbour_index:
                continue

            # FIX: the original duplicate check used "continue" inside an inner
            # loop over "connections", which only skipped that inner iteration
            # and never prevented the append -> duplicates got collected and
            # connections were created twice. Test membership directly instead.
            if (loc_index, loc_neighbour_index) in connections or (loc_neighbour_index, loc_index) in connections:
                continue

            connections.append((loc_index, loc_neighbour_index))

    # CREATE MAP POINTS
    mp_locs = {}
    for map_point in map_points_data:
        name = map_point[0]

        # ignore auto generated map points
        if map_point[3] & _PL_consts.MPNF.NAV_BASE != 0:
            continue

        loc = _create_map_locator(
            map_point[0],
            map_point[2],
            map_point[3],
            map_point[4],
        )

        _print_locator_result(loc, "Map Point", name)

        if loc:
            locators.append(loc)
            mp_locs[map_point[1]] = loc

    # APPLY MAP POINT CONNECTIONS
    for connection in connections:

        # safety check if connection indexes really exists
        if connection[0] in mp_locs and connection[1] in mp_locs:
            start_node = mp_locs[connection[0]]
            end_node = mp_locs[connection[1]]
        else:
            lprint('E Map connection out of range: %s', (str(connection),))
            continue

        _group_connections_wrapper.create_connection(start_node, end_node)

    # COLLECT TRIGGER POINT CONNECTIONS
    connections = []
    for tr_point in trigger_points_data:
        loc_index = tr_point[1]
        # print(' name: %s' % tr_point[0])
        if len(tr_point[7]) != 2:
            lprint('W Unexpected number of connections (%i) for Trigger Point "%s"!', (len(tr_point[7]), tr_point[0]))
        for loc_neighbour_index in tr_point[7]:
            connections.append((loc_index, loc_neighbour_index))

    # CREATE TRIGGER POINTS
    tp_locs = {}
    for tr_point in trigger_points_data:
        name = tr_point[0]
        # print('trigger_points_data[%r]: %s' % (name, str(tr_point)))
        loc = _create_trigger_locator(
            tr_point[0],
            tr_point[2],
            tr_point[3],
            tr_point[4],
            tr_point[5],
            tr_point[6],
            scs_globals.scs_trigger_actions_inventory
        )

        _print_locator_result(loc, "Trigger Point", name)

        if loc:
            locators.append(loc)
            tp_locs[tr_point[1]] = loc

    # APPLY TRIGGER POINT CONNECTIONS
    for connection in connections:

        # safety check if connection indexes really exists
        if connection[0] in tp_locs and connection[1] in tp_locs:
            start_node = tp_locs[connection[0]]
            end_node = tp_locs[connection[1]]
        else:
            # FIX: was "print(...)" with lprint-style argument tuple, which
            # printed the tuple verbatim instead of formatting it and bypassed
            # the add-on's lprint error reporting used by all sibling paths.
            lprint('E Trigger connection out of range: %s', (str(connection),))
            continue

        _group_connections_wrapper.create_connection(start_node, end_node)

    print("************************************")
    return {'FINISHED'}, locators