# -*- coding:utf-8 -*-
__author__ = "Heather"

try:
    import xml.etree.cElementTree as ET
except ImportError:
    import xml.etree.ElementTree as ET

if __name__ == '__main__':
    print("Parsing the local data_demo.xml document")
    # load the file
    tree = ET.parse("data_demo.xml")
    # get the root node and print its tag: data
    root = tree.getroot()
    print(root.tag)
    # iterate over the country elements and print their name attribute
    for child in root:
        print(child.tag, "name:", child.attrib["name"])
    # walk the rank nodes:
    # iter() does a full recursive search for the nodes we are interested in;
    # print each node's tag and text
    print("Searching for target nodes with iter()")
    for rank in root.iter("rank"):
        print(rank.tag, "-", rank.text)
    # another way to walk the rank nodes: findall() and find()
    # note: findall() only searches the direct children of the current node
    print("Searching for target nodes with findall()")
    # use findall() to fetch all country nodes, then iterate over them
    for country in root.findall("country"):
        print(country.tag, "name:", country.attrib["name"], "rank:", country.find("rank").text)
    # tail of a price-selection helper defined above: keep the larger of the two prices
    price_out = max(price, rprice)
    return str(price_out)


# Save XML settings
# (the enclosing script uses cgi, time, xml.etree.ElementTree as ET and xml.dom.minidom)
form = cgi.FieldStorage()
erc.save_xml_set(form)
erc.save()

filename = 'erc.xml'
download_patch = '/var/www/parse_erc/download_file/'
fileXML = download_patch + filename
dt = time.strftime("%Y-%m-%d %H:%M", time.localtime())

elem = ET.parse(fileXML)
root = elem.getroot()

doc = minidom.Document()
price = doc.createElement('price')
doc.appendChild(price)
price.setAttribute('date', time.strftime("%Y-%m-%d %H:%M", time.localtime()))

name = doc.createElement('name')
price.appendChild(name)
text = doc.createTextNode('Интернет-магазин')
name.appendChild(text)

currencies = doc.createElement('currencies')
price.appendChild(currencies)
import os
import numpy as np
import xml.etree.ElementTree as et
from sklearn.model_selection import KFold, train_test_split

# labels that may appear as <object><name> entries in the annotation files
LABELS = ('tumour', 'core', 'necrosis', 'whole0', 'whole1', 'whole2')


def label_flags(annotation_file):
    """Parse one annotation XML file and return {label: 1 if present else -1}."""
    root = et.parse(annotation_file).getroot()
    present = set(obj.find('name').text for obj in root.findall('object'))
    return {label: (1 if label in present else -1) for label in LABELS}


def write_split(fn_plain, fn_by_label, annotation_addr, subject_list):
    """Append every annotation slice of the given subjects to the split files:
    one plain list plus one '<stem> <flag>' list per label."""
    for subject in subject_list:
        # select_from_start() is an external helper that lists the annotation
        # files belonging to one subject
        slice_fns, _ = select_from_start(annotation_addr, subject)
        for slice_fn in slice_fns:
            flags = label_flags(annotation_addr + slice_fn)
            stem = slice_fn[:-4]
            with open(fn_plain, 'a') as f:
                f.write(stem + ' ' + '\n')
            for label in LABELS:
                with open(fn_by_label[label], 'a') as f:
                    f.write(stem + ' ' + str(flags[label]) + '\n')


def spliting(name_list, addr):
    """Write 10-fold cross-validation trainval/test/train/val file lists for every
    modality, with one extra list per label (tumour, core, necrosis, whole0-2)."""
    modalities = ('flair', 't1', 't1ce', 't2')
    for i_mol in modalities:
        split_addr = addr + i_mol + '/ImageSets/Main/'
        annotation_addr = addr + i_mol + '/Annotations/'
        # make sure the ImageSets/Main folder exists for this modality
        if not os.path.exists(split_addr):
            os.makedirs(split_addr)

        cv = KFold(10)
        for FD, (trainval, test) in enumerate(cv.split(name_list)):
            prefix = split_addr + 'cv' + str(FD)
            splits = ('trainval', 'test', 'train', 'val')
            fn = {split: prefix + '_' + split + '.txt' for split in splits}
            fn_label = {split: {label: prefix + '_' + label + '_' + split + '.txt'
                                for label in LABELS}
                        for split in splits}

            trainval_list = np.array(name_list)[trainval]
            test_list = np.array(name_list)[test]

            # writing files for the trainval and test sets
            write_split(fn['trainval'], fn_label['trainval'], annotation_addr, trainval_list)
            write_split(fn['test'], fn_label['test'], annotation_addr, test_list)

            # split trainval further into train (90%) and val (10%)
            train_and_val = train_test_split(range(len(trainval)), test_size=0.1)
            train_list = np.array(trainval_list)[train_and_val[0]]
            val_list = np.array(trainval_list)[train_and_val[1]]
            write_split(fn['train'], fn_label['train'], annotation_addr, train_list)
            write_split(fn['val'], fn_label['val'], annotation_addr, val_list)
    return
import os
import xml.etree.ElementTree as ET

dir_dic = {}
# map each sequence id to its training image directory
for i in os.listdir(imgpath):
    dir_dic[i.split('_')[-1]] = i

if __name__ == '__main__':
    xmlpath = os.path.join(root_dir, 'DETRAC-Train-Annotations-XML')
    annopath = os.path.join(root_dir, 'anno')
    for i in os.listdir(xmlpath):
        annofile = os.path.join(annopath, dir_dic[i.split('.')[0].split('_')[-1]])
        xmlfile = os.path.join(xmlpath, i)
        os.mkdir(annofile)
        tree = ET.parse(xmlfile)
        root = tree.getroot()  # <sequence>
        # ignored regions
        for ignore in root.findall('ignored_region'):
            ignorefile = os.path.join(annofile, 'ignored_region')
            fi = open(ignorefile, 'w')
            fi.write('ignored_region\n')
            fi.write(str(len(ignore.findall('box'))) + '\n')
            for box in ignore.findall('box'):
                str1 = box.get('left') + ' ' + box.get('top') + ' ' + \
                    box.get('width') + ' ' + box.get('height') + '\n'
                fi.write(str1)
            fi.close()
        # each frame generates an annotation file
        for frame in root.findall('frame'):
            framefile = os.path.join(annofile, frame.get('num'))  # assumption: the original (cut off here) names the file after the frame number
def obfuscate(self, obfuscation_info: Obfuscation):
    self.logger.info('Running "{0}" obfuscator'.format(self.__class__.__name__))

    try:
        Xml.register_namespace(
            "android", "http://schemas.android.com/apk/res/android"
        )

        xml_parser = Xml.XMLParser(encoding="utf-8")
        manifest_tree = Xml.parse(
            obfuscation_info.get_manifest_file(), parser=xml_parser
        )
        manifest_root = manifest_tree.getroot()

        self.package_name = manifest_root.get("package")
        if not self.package_name:
            raise Exception(
                "Unable to extract package name from application manifest"
            )

        # Get a mapping between class name and smali file path.
        for smali_file in util.show_list_progress(
            obfuscation_info.get_smali_files(),
            interactive=obfuscation_info.interactive,
            description="Class name to smali file mapping",
        ):
            with open(smali_file, "r", encoding="utf-8") as current_file:
                class_name = None
                for line in current_file:
                    if not class_name:
                        # Every smali file contains a class.
                        class_match = util.class_pattern.match(line)
                        if class_match:
                            self.class_name_to_smali_file[
                                class_match.group("class_name")
                            ] = smali_file
                            break

        self.transform_package_name(manifest_root)

        # Write the changes into the manifest file.
        manifest_tree.write(obfuscation_info.get_manifest_file(), encoding="utf-8")

        xml_files: Set[str] = set(
            os.path.join(root, file_name)
            for root, dir_names, file_names in os.walk(
                obfuscation_info.get_resource_directory()
            )
            for file_name in file_names
            if file_name.endswith(".xml")
            and ("layout" in root or "xml" in root)  # Only res/layout-*/ and res/xml-*/ folders.
        )

        xml_files.add(obfuscation_info.get_manifest_file())

        # TODO: use the following code to rename only the classes declared in
        # application's package.
        #
        # package_smali_files: Set[str] = set(
        #     smali_file
        #     for class_name, smali_file in self.class_name_to_smali_file.items()
        #     if class_name[1:].startswith(self.package_name.replace(".", "/"))
        # )
        #
        # # Rename the classes declared in the application's package.
        # class_rename_transformations = self.rename_class_declarations(
        #     list(package_smali_files), obfuscation_info.interactive
        # )

        # Get user defined ignore package list.
        self.ignore_package_names = obfuscation_info.get_ignore_package_names()

        # Rename all classes declared in smali files.
        class_rename_transformations = self.rename_class_declarations(
            obfuscation_info.get_smali_files(), obfuscation_info.interactive
        )

        # Update renamed classes through all the smali files.
        self.rename_class_usages_in_smali(
            obfuscation_info.get_smali_files(),
            class_rename_transformations,
            obfuscation_info.interactive,
        )

        # Update renamed classes through all the xml files.
        self.rename_class_usages_in_xml(
            list(xml_files),
            class_rename_transformations,
            obfuscation_info.interactive,
        )

    except Exception as e:
        self.logger.error(
            'Error during execution of "{0}" obfuscator: {1}'.format(
                self.__class__.__name__, e
            )
        )
        raise

    finally:
        obfuscation_info.used_obfuscators.append(self.__class__.__name__)
def load_fig_option(path_prj, name_prj):
    """
    This function loads the figure options saved in the xml project file and creates a
    dictionary which is given to the functions that create the figures, so they know the
    options chosen by the user. If the options are not written, it falls back to the
    defaults from create_default_figoption().

    :param path_prj: the path to the xml project file
    :param name_prj: the name of this file
    :return: the dictionary containing the figure options
    """
    fig_dict = create_default_figoption()
    fname = os.path.join(path_prj, name_prj + '.xml')

    if not os.path.isfile(fname) and name_prj != '':  # no project exists
        pass
    elif name_prj == '':
        pass
    elif not os.path.isfile(fname):  # the project is not found
        print('Warning: No project file (.xml) found.\n')
    else:
        doc = ET.parse(fname)
        root = doc.getroot()
        child1 = root.find(".//Figure_Option")
        if child1 is not None:  # modify existing options
            # (xml tag, dictionary key, conversion) for every stored option
            options = [
                (".//Width", 'width', float),
                (".//Height", 'height', float),
                (".//ColorMap1", 'color_map1', str),
                (".//ColorMap2", 'color_map2', str),
                (".//FontSize", 'font_size', int),
                (".//LineWidth", 'line_width', int),
                (".//Grid", 'grid', str),
                (".//TimeStep", 'time_step', str),  # -99 is all
                (".//PlotRawData", 'raw_data', str),
                (".//Format", 'format', str),
                (".//Marker", 'marker', str),
                (".//Resolution", 'resolution', int),
                (".//FishNameType", 'fish_name_type', str),
                (".//TextOutput", 'text_output', str),
                (".//ShapeOutput", 'shape_output', str),
                (".//ParaviewOutput", 'paraview', str),
                (".//LangFig", 'language', int),
                (".//MinHeight", 'min_height_hyd', float),
                (".//FishInfo", 'fish_info', str),
                (".//EraseId", 'erase_id', str),
            ]
            try:
                for tag, key, convert in options:
                    node = root.find(tag)
                    if node is not None:
                        fig_dict[key] = convert(node.text)
            except ValueError:
                print('Error: Figure Options are not of the right type.\n')

    fig_dict['time_step'] = fig_dict['time_step'].split(',')
    try:
        fig_dict['time_step'] = list(map(int, fig_dict['time_step']))
    except ValueError:
        print('Error: Time step could not be read in the options')  # sendLog not read yet

    return fig_dict
def count_tags(filename):
    # YOUR CODE HERE
    tree = ET.parse(filename)
    root = tree.getroot()
    tags = childTags(root)
    return tags
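# count_tags() above relies on a childTags() helper that is not shown in the snippet.
# A minimal sketch of what such a helper could look like (the recursive strategy and
# return format are assumptions, not the original implementation): walk the whole tree
# and count how often each tag occurs.
def childTags(root):
    counts = {}
    for element in root.iter():  # iter() yields the element itself plus all descendants
        counts[element.tag] = counts.get(element.tag, 0) + 1
    return counts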
import os
import xml.etree.ElementTree as ET

node_count = 800
interval = 0.05
total_count = node_count * interval
sumo_path = 'F:/sumo-win32-0.24.0/sumo-0.24.0/'

os.popen(sumo_path + 'bin/netconvert --osm-files target.osm -o input_net.net.xml')
print(sumo_path + 'bin/netconvert --osm-files target.osm -o input_net.net.xml')

node = dict()
while len(node) < node_count:
    print(len(node))
    os.popen('python ' + sumo_path + '/tools/randomTrips.py -n input_net.net.xml -e '
             + str(total_count) + ' -p ' + str(interval))
    os.popen(sumo_path + 'bin/duarouter.exe --trip-files=trips.trips.xml '
             '--net-file=input_net.net.xml --output-file=routes.rou.xml')

    tree = ET.parse('routes.rou.xml')  # load the generated routes
    root = tree.getroot()  # get the root node
    for child in root:
        # print(child.tag, child.attrib)  # vehicle {'depart': '0.00', 'id': '0'}
        idx = int(child.attrib['id'])
        if idx in node:
            continue
        node[idx] = dict()
        for subchild in child:
            # print(subchild.tag, subchild.attrib)  # route {'edges': '29141670#0 ...'}
            node[idx]['route_str'] = subchild.attrib['edges']
            node[idx]['route'] = subchild.attrib['edges'].split(' ')
            node[idx]['from'] = node[idx]['route'][0]
            node[idx]['to'] = node[idx]['route'][len(node[idx]['route']) - 1]
    print(len(node))
def load_hoomdxml(filename, top=None):
    """Load a single conformation from an HOOMD-Blue XML file.

    For more information on this file format, see:
    http://codeblue.umich.edu/hoomd-blue/doc/page_xml_file_format.html
    Notably, all node names and attributes are in all lower case.
    HOOMD-Blue does not contain residue and chain information explicitly.
    For this reason, chains will be found by looping over all the bonds and
    finding what is bonded to what. Each chain consists of exactly one residue.

    Parameters
    ----------
    filename : string
        The path on disk to the XML file
    top : None
        This argument is ignored

    Returns
    -------
    trajectory : md.Trajectory
        The resulting trajectory, as an md.Trajectory object, with corresponding
        Topology.

    Notes
    -----
    This function requires the NetworkX python package.
    """
    from mdtraj.core.trajectory import Trajectory
    from mdtraj.core.topology import Topology

    topology = Topology()
    tree = cElementTree.parse(filename)
    config = tree.getroot().find('configuration')
    position = config.find('position')
    bond = config.find('bond')
    atom_type = config.find('type')  # MDTraj calls this "name"

    box = config.find('box')
    # be generous for case of box attributes
    box.attrib = dict((key.lower(), val) for key, val in box.attrib.items())
    lx = float(box.attrib['lx'])
    ly = float(box.attrib['ly'])
    lz = float(box.attrib['lz'])
    try:
        xy = float(box.attrib['xy'])
        xz = float(box.attrib['xz'])
        yz = float(box.attrib['yz'])
    except (ValueError, KeyError):
        xy = 0.0
        xz = 0.0
        yz = 0.0
    unitcell_vectors = np.array([[[lx, xy * ly, xz * lz],
                                  [0.0, ly, yz * lz],
                                  [0.0, 0.0, lz]]])

    positions, types = [], {}
    for pos in position.text.splitlines()[1:]:
        positions.append((float(pos.split()[0]),
                          float(pos.split()[1]),
                          float(pos.split()[2])))

    for idx, atom_name in enumerate(atom_type.text.splitlines()[1:]):
        types[idx] = str(atom_name.split()[0])
    if len(types) != len(positions):
        raise ValueError('Different number of types and positions in xml file')

    # ignore the bond type
    bonds = [(int(b.split()[1]), int(b.split()[2])) for b in bond.text.splitlines()[1:]]
    chains = _find_chains(bonds)
    ions = [i for i in range(len(types)) if not _in_chain(chains, i)]

    # add chains, bonds and ions (each chain = 1 residue)
    for chain in chains:
        t_chain = topology.add_chain()
        t_residue = topology.add_residue('A', t_chain)
        for atom in chain:
            topology.add_atom(types[atom], 'U', t_residue)
    for ion in ions:
        t_chain = topology.add_chain()
        t_residue = topology.add_residue('A', t_chain)
        topology.add_atom(types[ion], 'U', t_residue)
    for bond in bonds:
        atom1, atom2 = bond[0], bond[1]
        topology.add_bond(topology.atom(atom1), topology.atom(atom2))

    traj = Trajectory(xyz=np.array(positions), topology=topology)
    traj.unitcell_vectors = unitcell_vectors

    return traj
import os
import xml.etree.ElementTree as ET

# build the full path of the file inside the directory
xmlrealpath = os.path.join(xmlsplit[0], "changzhainengli.xml")
print(xmlrealpath)


# walk the xml tree recursively
def traverseXml(element):
    if len(element) > 0:
        for child in element:
            print("child tag", "------", child)
            traverseXml(child)


if __name__ == '__main__':
    try:
        tree = ET.parse(xmlrealpath)
        print("tree type:", type(tree))
        # get the root node
        root = tree.getroot()
        # print("root node:", root, root.attrib)
        # for child in root:
        #     print("child tag", "------", type(child.attrib), child.attrib)
        # print(traverseXml(root))
        for child in root:
            print(child)
            for children in child:
                print(children.tag, children.attrib)
        childlist = root[0].findall("personDebtPayingAbilityEvaluate")
        print(childlist)
    except Exception as e:  # the original handler is cut off; a plain catch-all keeps the snippet runnable
        print(e)
#!/usr/bin/env python3
# by ysan
import xml.etree.cElementTree as ET

tree = ET.parse('xml_test.xml')
root = tree.getroot()
print(root, root.tag)

# walk the whole xml document
for child in root:
    print(child.tag, child.attrib)
    for i in child:
        print(i.tag, i.text)

# only iterate over the year nodes
for node in root.iter('year'):
    print(node.tag, node.text)

# modify
for node in root.iter('year'):
    new_year = int(node.text) + 1
    node.text = str(new_year)
    node.set('update', 'yes')
tree.write('xml_test.xml')

# delete
for country in root.iter('country'):
    rank = int(country.find('rank').text)
    if rank > 40:
        root.remove(country)  # assumed continuation: drop the low-ranked country and write the file back
tree.write('xml_test.xml')
def XML_PMS2aTV(PMS_baseURL, path, options):
    # double check aTV UDID, redo from client IP if needed/possible
    if not 'PlexConnectUDID' in options:
        UDID = getATVFromIP(options['aTVAddress'])
        if UDID:
            options['PlexConnectUDID'] = UDID
    else:
        declareATV(options['PlexConnectUDID'], options['aTVAddress'])  # update with latest info

    # check cmd to work on
    cmd = ''
    if 'PlexConnect' in options:
        cmd = options['PlexConnect']
    dprint(__name__, 1, "PlexConnect Cmd: " + cmd)

    # check aTV language setting
    if not 'aTVLanguage' in options:
        dprint(__name__, 1, "no aTVLanguage - pick en")
        options['aTVLanguage'] = 'en'

    # XML Template selector
    # - PlexConnect command
    # - path
    # - PMS ViewGroup
    XMLtemplate = ''
    PMS = None
    PMSroot = None

    # simple PlexConnect Cmd -> template mappings (no extra logic needed)
    simpleTemplates = {
        'ScrobbleMenu': 'ScrobbleMenu.xml',
        'ScrobbleMenuVideo': 'ScrobbleMenuVideo.xml',
        'ScrobbleMenuTVOnDeck': 'ScrobbleMenuTVOnDeck.xml',
        'ChangeShowArtwork': 'ChangeShowArtwork.xml',
        'ChangeSingleArtwork': 'ChangeSingleArtwork.xml',
        'ChangeSingleArtworkVideo': 'ChangeSingleArtworkVideo.xml',
        'PhotoBrowser': 'Photo_Browser.xml',
        'MoviePreview': 'MoviePreview.xml',
        'HomeVideoPrePlay': 'HomeVideoPrePlay.xml',
        'MoviePrePlay': 'MoviePrePlay.xml',
        'EpisodePrePlay': 'EpisodePrePlay.xml',
        'ChannelPrePlay': 'ChannelPrePlay.xml',
        'ChannelsVideo': 'ChannelsVideo.xml',
        'ByFolder': 'ByFolder.xml',
        'HomeVideoByFolder': 'HomeVideoByFolder.xml',
        'HomeVideoDirectory': 'HomeVideoDirectory.xml',
        'MovieByFolder': 'MovieByFolder.xml',
        'MovieDirectory': 'MovieDirectory.xml',
        'MovieSection': 'MovieSection.xml',
        'HomeVideoSection': 'HomeVideoSection.xml',
        'TVSection': 'TVSection.xml',
        'MovieSecondary': 'MovieSecondary.xml',
        'TVSecondary': 'TVSecondary.xml',
        'PhotoSecondary': 'PhotoSecondary.xml',
        'Directory': 'Directory.xml',
        'DirectoryWithPreview': 'DirectoryWithPreview.xml',
        'DirectoryWithPreviewActors': 'DirectoryWithPreviewActors.xml',
    }
    # settings templates additionally clear the path - no PMS-XML needed
    settingsTemplates = {
        'Settings': 'Settings.xml',
        'SettingsVideoOSD': 'Settings_VideoOSD.xml',
        'SettingsMovies': 'Settings_Movies.xml',
        'SettingsTVShows': 'Settings_TVShows.xml',
        'SettingsHomeVideos': 'Settings_HomeVideos.xml',
        'SettingsChannels': 'Settings_Channels.xml',
    }

    # XML direct request or
    # XMLtemplate defined solely by the PlexConnect Cmd
    if path.endswith(".xml"):
        XMLtemplate = path.lstrip('/')
        path = ''  # clear path - we don't need PMS-XML

    elif cmd == 'Play':
        XMLtemplate = 'PlayVideo.xml'

    elif cmd == 'PlayVideo_ChannelsV1':
        dprint(__name__, 1, "playing Channels XML Version 1: {0}".format(path))
        UDID = options['PlexConnectUDID']
        PMS_uuid = PlexAPI.getPMSFromAddress(UDID, PMS_baseURL)
        auth_token = PlexAPI.getPMSProperty(UDID, PMS_uuid, 'accesstoken')
        path = PlexAPI.getDirectVideoPath(path, auth_token)
        return XML_PlayVideo_ChannelsV1(PMS_baseURL, path)  # direct link, no PMS XML available

    elif cmd == 'PlayTrailer':
        trailerID = options['PlexConnectTrailerID']
        info = urllib2.urlopen("http://youtube.com/get_video_info?video_id=" + trailerID).read()
        parsed = urlparse.parse_qs(info)

        key = 'url_encoded_fmt_stream_map'
        if not key in parsed:
            return XML_Error('PlexConnect', 'Youtube: No Trailer Info available')
        streams = parsed[key][0].split(',')

        url = ''
        for i in range(len(streams)):
            stream = urlparse.parse_qs(streams[i])
            if stream['itag'][0] == '18':
                url = stream['url'][0] + '&signature=' + stream['sig'][0]

        if url == '':
            return XML_Error('PlexConnect', 'Youtube: ATV compatible Trailer not available')

        return XML_PlayVideo_ChannelsV1('', url.replace('&', '&amp;'))

    elif cmd in simpleTemplates:
        XMLtemplate = simpleTemplates[cmd]

    elif cmd in settingsTemplates:
        XMLtemplate = settingsTemplates[cmd]
        path = ''  # clear path - we don't need PMS-XML

    elif cmd.find('SectionPreview') != -1:
        XMLtemplate = cmd + '.xml'

    elif cmd == 'AllMovies':
        XMLtemplate = 'Movie_' + g_ATVSettings.getSetting(
            options['PlexConnectUDID'], 'movieview').replace(' ', '') + '.xml'

    elif cmd == 'AllHomeVideos':
        XMLtemplate = 'HomeVideo_' + g_ATVSettings.getSetting(
            options['PlexConnectUDID'], 'homevideoview').replace(' ', '') + '.xml'

    elif cmd == 'AllShows':
        XMLtemplate = 'Show_' + g_ATVSettings.getSetting(
            options['PlexConnectUDID'], 'showview') + '.xml'

    elif cmd.startswith('SettingsToggle:'):
        opt = cmd[len('SettingsToggle:'):]  # cut command
        parts = opt.split('+')
        g_ATVSettings.toggleSetting(options['PlexConnectUDID'], parts[0].lower())
        XMLtemplate = parts[1] + ".xml"
        dprint(__name__, 2, "ATVSettings->Toggle: {0} in template: {1}", parts[0], parts[1])
        path = ''  # clear path - we don't need PMS-XML

    elif cmd == 'MyPlexLogin':
        dprint(__name__, 2, "MyPlex->Logging In...")
        if not 'PlexConnectCredentials' in options:
            return XML_Error('PlexConnect', 'MyPlex Sign In called without Credentials.')

        parts = options['PlexConnectCredentials'].split(':', 1)
        (username, auth_token) = PlexAPI.MyPlexSignIn(parts[0], parts[1], options)

        UDID = options['PlexConnectUDID']
        g_ATVSettings.setSetting(UDID, 'myplex_user', username)
        g_ATVSettings.setSetting(UDID, 'myplex_auth', auth_token)

        XMLtemplate = 'Settings.xml'
        path = ''  # clear path - we don't need PMS-XML

    elif cmd == 'MyPlexLogout':
        dprint(__name__, 2, "MyPlex->Logging Out...")

        UDID = options['PlexConnectUDID']
        auth_token = g_ATVSettings.getSetting(UDID, 'myplex_auth')
        PlexAPI.MyPlexSignOut(auth_token)

        g_ATVSettings.setSetting(UDID, 'myplex_user', '')
        g_ATVSettings.setSetting(UDID, 'myplex_auth', '')

        XMLtemplate = 'Settings.xml'
        path = ''  # clear path - we don't need PMS-XML

    elif cmd.startswith('Discover'):
        UDID = options['PlexConnectUDID']
        auth_token = g_ATVSettings.getSetting(UDID, 'myplex_auth')
        PlexAPI.discoverPMS(UDID, g_param['CSettings'], auth_token)

        return XML_Error('PlexConnect', 'Discover!')  # not an error - but aTV won't care anyways.

    elif path.startswith('/search?'):
        XMLtemplate = 'Search_Results.xml'

    elif path == '/library/sections':  # from PlexConnect.xml -> for //local, //myplex
        XMLtemplate = 'Library.xml'

    elif path == '/channels/all':
        XMLtemplate = 'Channel_' + g_ATVSettings.getSetting(
            options['PlexConnectUDID'], 'channelview') + '.xml'
        path = ''

    # request PMS XML
    if not path == '':
        if 'PlexConnectUDID' in options:
            UDID = options['PlexConnectUDID']
            PMS_uuid = PlexAPI.getPMSFromAddress(UDID, PMS_baseURL)
            auth_token = PlexAPI.getPMSProperty(UDID, PMS_uuid, 'accesstoken')
        else:
            auth_token = ''

        if PMS_baseURL.startswith('//'):  # //local, //myplex
            UDID = options['PlexConnectUDID']
            type = PMS_baseURL[2:]
            PMS = PlexAPI.getXMLFromMultiplePMS(UDID, path, type, options)
        else:
            PMS = PlexAPI.getXMLFromPMS(PMS_baseURL, path, options, authtoken=auth_token)

        if PMS == False:
            return XML_Error('PlexConnect', 'No Response from Plex Media Server')

        PMSroot = PMS.getroot()

        dprint(__name__, 1, "viewGroup: " + PMSroot.get('viewGroup', 'None'))

    # XMLtemplate defined by PMS XML content
    if path == '':
        pass  # nothing to load

    elif not XMLtemplate == '':
        pass  # template already selected

    elif PMSroot.get('viewGroup', '') == "secondary" and \
            (PMSroot.get('art', '').find('video') != -1 or PMSroot.get('thumb', '').find('video') != -1):
        XMLtemplate = 'HomeVideoSectionTopLevel.xml'

    elif PMSroot.get('viewGroup', '') == "secondary" and \
            (PMSroot.get('art', '').find('movie') != -1 or PMSroot.get('thumb', '').find('movie') != -1):
        XMLtemplate = 'MovieSectionTopLevel.xml'

    elif PMSroot.get('viewGroup', '') == "secondary" and \
            (PMSroot.get('art', '').find('show') != -1 or PMSroot.get('thumb', '').find('show') != -1):
        XMLtemplate = 'TVSectionTopLevel.xml'

    elif PMSroot.get('viewGroup', '') == "secondary" and \
            (PMSroot.get('art', '').find('photo') != -1 or PMSroot.get('thumb', '').find('photo') != -1):
        XMLtemplate = 'PhotoSectionTopLevel.xml'

    elif PMSroot.get('viewGroup', '') == "secondary":
        XMLtemplate = 'Directory.xml'

    elif PMSroot.get('viewGroup', '') == 'show':
        if PMSroot.get('title2') == 'By Folder':  # By Folder View
            XMLtemplate = 'ByFolder.xml'
        else:  # TV Show grid view
            XMLtemplate = 'Show_' + g_ATVSettings.getSetting(
                options['PlexConnectUDID'], 'showview') + '.xml'

    elif PMSroot.get('viewGroup', '') == 'season':
        # TV Season view
        XMLtemplate = 'Season_' + g_ATVSettings.getSetting(
            options['PlexConnectUDID'], 'seasonview') + '.xml'

    elif PMSroot.get('viewGroup', '') == 'movie' and PMSroot.get('thumb', '').find('video') != -1:
        if PMSroot.get('title2') == 'By Folder':  # By Folder View
            XMLtemplate = 'HomeVideoByFolder.xml'
        else:  # Home Video listing
            XMLtemplate = 'HomeVideo_' + g_ATVSettings.getSetting(
                options['PlexConnectUDID'], 'homevideoview').replace(' ', '') + '.xml'

    elif PMSroot.get('viewGroup', '') == 'movie' and PMSroot.get('thumb', '').find('movie') != -1:
        if PMSroot.get('title2') == 'By Folder':  # By Folder View
            XMLtemplate = 'MovieByFolder.xml'
        else:  # Movie listing
            XMLtemplate = 'Movie_' + g_ATVSettings.getSetting(
                options['PlexConnectUDID'], 'homevideoview').replace(' ', '') + '.xml'

    elif PMSroot.get('viewGroup', '') == 'track':
        XMLtemplate = 'Music_Track.xml'

    elif PMSroot.get('viewGroup', '') == 'episode':
        if PMSroot.get('title2') == 'On Deck' or \
           PMSroot.get('title2') == 'Recently Viewed Episodes' or \
           PMSroot.get('title2') == 'Recently Aired' or \
           PMSroot.get('title2') == 'Recently Added':
            # TV On Deck View
            XMLtemplate = 'TV_OnDeck.xml'
        else:
            # TV Episode view
            XMLtemplate = 'Episode.xml'

    elif PMSroot.get('viewGroup', '') == 'photo':
        # Photo listing
        XMLtemplate = 'Photo.xml'

    else:
        XMLtemplate = 'Directory.xml'

    dprint(__name__, 1, "XMLTemplate: " + XMLtemplate)

    # get XMLtemplate
    aTVTree = etree.parse(sys.path[0] + '/assets/templates/' + XMLtemplate)
    aTVroot = aTVTree.getroot()

    # convert PMS XML to aTV XML using provided XMLtemplate
    global g_CommandCollection
    g_CommandCollection = CCommandCollection(options, PMSroot, PMS_baseURL, path)
    XML_ExpandTree(aTVroot, PMSroot, 'main')
    XML_ExpandAllAttrib(aTVroot, PMSroot, 'main')
    del g_CommandCollection

    dprint(__name__, 1, "====== generated aTV-XML ======")
    dprint(__name__, 1, prettyXML(aTVTree))
    dprint(__name__, 1, "====== aTV-XML finished ======")

    return etree.tostring(aTVroot)
def get_elements(filename):
    tree = ET.parse(filename)
    root = tree.getroot()
    elements = [c for c in root]
    return elements
def read(self):
    tree = ElementTree.parse(self.gccxml_file)
    saxifier = etree_saxifier_t(tree, self)
    saxifier.saxify()
def filter_meme_results(meme_dir: str, promoter_sets: List[Motif], anchor: str) -> List[Motif]:
    """Analyse and filter MEME results"""

    def fetch_node_text(root: ElementTree.Element, search_string: str) -> str:
        """Find the text of a node with the given search string label."""
        node = root.find(search_string)
        if node is None or not hasattr(node, "text") or node.text is None:
            raise ValueError("unknown MEME output format")
        return node.text

    for motif in promoter_sets:
        xml_file = os.path.join(meme_dir, motif.pairing_string, "meme.xml")
        root = ElementTree.parse(xml_file).getroot()
        reason = fetch_node_text(root, "model/reason_for_stopping")
        anchor_seq_id = ""

        # no motif found for given e-value cutoff :-(
        if "Stopped because motif E-value > " in reason:
            if cassis.VERBOSE_DEBUG:
                logging.debug("MEME: motif %s; e-value exceeds cutoff", motif.pairing_string)

        # motif(s) found :-)
        elif "Stopped because requested number of motifs (1) found" in reason:
            # find the anchor gene's sequence_id
            training_set = root.findall("training_set/sequence")  # all promoter sequences passed to MEME
            for element in training_set:
                if "__ANCHOR" in element.attrib["name"]:
                    anchor_seq_id = element.attrib["id"]  # e.g. id=sequence_1

            # only accept motifs which occur in the anchor gene's promoter
            # sequences which contributed to the motif
            contributing_sites = root.findall("motifs/motif/contributing_sites/contributing_site")
            if anchor_seq_id in map(lambda site: site.attrib["sequence_id"], contributing_sites):
                # save motif score
                node = root.find("motifs/motif")
                if node is None:
                    raise ValueError("unknown MEME output format")
                motif.score = float(node.attrib["e_value"])  # one motif, didn't ask MEME for more

                # save sequence sites which represent the motif
                motif.seqs = [
                    "".join(map(lambda letter: letter.attrib["letter_id"],
                                site.findall("site/letter_ref")))
                    for site in contributing_sites
                ]
                # write sites to fasta file
                with open(os.path.join(meme_dir, str(motif), "binding_sites.fasta"), "w") as handle:
                    handle.write(">{}__{}\n".format(anchor, str(motif)))
                    handle.write("\n".join(motif.seqs))
                if cassis.VERBOSE_DEBUG:
                    logging.debug("MEME: motif %s; e-value = %s", motif, motif.score)
            else:
                if cassis.VERBOSE_DEBUG:
                    logging.debug("MEME: motif %s; does not occur in anchor gene promoter", motif)

        # unexpected reason, don't know why MEME stopped :-$
        else:
            logging.error("MEME stopped unexpectedly (reason: %s)", reason)

    return [motif for motif in promoter_sets if motif.score is not None]
# Performs albumentations-based augmentation; mainly solves the problem of producing multiple augmented copies.
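# A minimal sketch of the idea described above (not the original script): augment an image
# together with its PASCAL VOC boxes several times with albumentations, producing one copy
# per iteration. The file names, box coordinates and labels below are illustrative only.
import cv2
import albumentations as A

transform = A.Compose(
    [A.HorizontalFlip(p=0.5), A.RandomBrightnessContrast(p=0.5)],
    bbox_params=A.BboxParams(format='pascal_voc', label_fields=['class_labels']),
)

image = cv2.imread('sample.jpg')          # hypothetical input image
bboxes = [[23, 45, 160, 200]]             # xmin, ymin, xmax, ymax (illustrative)
class_labels = ['car']

for copy_index in range(3):               # make three augmented copies
    augmented = transform(image=image, bboxes=bboxes, class_labels=class_labels)
    cv2.imwrite('sample_aug{}.jpg'.format(copy_index), augmented['image'])
    # augmented['bboxes'] holds the transformed boxes to write back into the annotation file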
def save_option_fig(self):
    """
    A function which saves the options for the figures in the xml project file. The options
    for the figures are contained in a dictionary. The idea is to pass this dictionary as an
    argument to all the functions which create figures. In the xml project file, the options
    for the figures are saved under the attribute "Figure_Option".

    If you change things here, it is necessary to start a new project as the old projects
    will not be compatible. For the new version of HABBY, it will be necessary to ensure
    compatibility by adding xml attributes.
    """
    # get default options
    fig_dict = create_default_figoption()

    # get the data and check validity
    # fig_size
    fig_size = self.fig1.text()
    if fig_size:
        fig_size = fig_size.split(',')
        try:
            fig_dict['width'] = np.float(fig_size[0])
            fig_dict['height'] = np.float(fig_size[1])
        except (IndexError, ValueError):
            self.send_log.emit('Error: The size of the figure should be in the format: num1,num2.\n')
    # color map
    c1 = str(self.fig2.currentText())
    if c1:
        fig_dict['color_map1'] = c1
    c2 = str(self.fig3.currentText())
    if c2:
        fig_dict['color_map2'] = c2
    # font size
    font_size = self.fig5.text()
    if font_size:
        try:
            fig_dict['font_size'] = int(font_size)
        except ValueError:
            self.send_log.emit('Error: Font size should be an integer. \n')
    # line width
    line_width = self.fig6.text()
    if line_width:
        try:
            fig_dict['line_width'] = int(line_width)
        except ValueError:
            self.send_log.emit('Error: Line width should be an integer. \n')
    # grid
    if self.fig7a.isChecked() and self.fig7b.isChecked():
        self.send_log.emit('Error: Grid cannot be on and off at the same time. \n')
    if self.fig7a.isChecked():
        fig_dict['grid'] = True
    elif self.fig7b.isChecked():
        fig_dict['grid'] = False
    # time step
    fig_dict['time_step'] = str(self.fig8.text())
    # raw data
    if self.fig9a.isChecked() and self.fig9b.isChecked():
        self.send_log.emit('Error: The option to plot raw output cannot be on and off at the same time. \n')
    if self.fig9a.isChecked():
        fig_dict['raw_data'] = True
    elif self.fig9b.isChecked():
        fig_dict['raw_data'] = False
    # format
    fig_dict['format'] = str(self.fig10.currentIndex())
    # resolution
    try:
        fig_dict['resolution'] = int(self.fig11.text())
    except ValueError:
        self.send_log.emit('Error: the resolution should be an integer. \n')
    if fig_dict['resolution'] < 0:
        self.send_log.emit('Error: The resolution should be higher than zero \n')
        return
    if fig_dict['resolution'] > 2000:
        self.send_log.emit('Warning: The resolution is higher than 2000 dpi. Figures might be very large.\n')
    # fish name type
    fig_dict['fish_name_type'] = int(self.fig12.currentIndex())
    # marker
    if self.out9a.isChecked():
        fig_dict['marker'] = True
    elif self.out9b.isChecked():
        fig_dict['marker'] = False
    # outputs
    if self.out1a.isChecked() and self.out1b.isChecked():
        self.send_log.emit('Error: Text Output cannot be on and off at the same time. \n')
    if self.out1a.isChecked():
        fig_dict['text_output'] = True
    elif self.out1b.isChecked():
        fig_dict['text_output'] = False
    if self.out2a.isChecked() and self.out2b.isChecked():
        self.send_log.emit('Error: Shapefile output cannot be on and off at the same time. \n')
    if self.out2a.isChecked():
        fig_dict['shape_output'] = True
    elif self.out2b.isChecked():
        fig_dict['shape_output'] = False
    if self.out3a.isChecked() and self.out3b.isChecked():
        self.send_log.emit('Error: Paraview cannot be on and off at the same time. \n')
    if self.out3a.isChecked():
        fig_dict['paraview'] = True
    elif self.out3b.isChecked():
        fig_dict['paraview'] = False
    if self.out4a.isChecked():
        fig_dict['fish_info'] = True
    elif self.out4b.isChecked():
        fig_dict['fish_info'] = False
    # other options
    try:
        fig_dict['min_height_hyd'] = float(self.hopt.text())
    except ValueError:
        self.send_log.emit('Error: Minimum Height should be a number')
    if self.out5a.isChecked():
        fig_dict['erase_id'] = True
    elif self.out5b.isChecked():
        fig_dict['erase_id'] = False

    # save the data in the xml project file
    fname = os.path.join(self.path_prj, self.name_prj + '.xml')
    if not os.path.isfile(fname):
        self.msg2.setIcon(QMessageBox.Warning)
        self.msg2.setWindowTitle(self.tr("Image Options Not Saved"))
        self.msg2.setText(self.tr("The project is not saved. Save the project in the General tab before saving data."))
        self.msg2.setStandardButtons(QMessageBox.Ok)
        self.msg2.show()
    else:
        doc = ET.parse(fname)
        root = doc.getroot()
        child1 = root.find(".//Figure_Option")
        if child1 is None:  # no figure option exists yet
            child1 = ET.SubElement(root, 'Figure_Option')
        # (xml tag, fig_dict key) for every option to store
        options = [
            ('Width', 'width'), ('Height', 'height'),
            ('ColorMap1', 'color_map1'), ('ColorMap2', 'color_map2'),
            ('FontSize', 'font_size'), ('LineWidth', 'line_width'),
            ('Grid', 'grid'), ('TimeStep', 'time_step'),  # -99 is all time steps
            ('PlotRawData', 'raw_data'), ('Format', 'format'),
            ('Resolution', 'resolution'), ('FishNameType', 'fish_name_type'),
            ('Marker', 'marker'), ('TextOutput', 'text_output'),
            ('ShapeOutput', 'shape_output'), ('ParaviewOutput', 'paraview'),
            ('LangFig', 'language'), ('MinHeight', 'min_height_hyd'),
            ('FishInfo', 'fish_info'), ('EraseId', 'erase_id'),
        ]
        for tag, key in options:
            node = root.find(".//" + tag)
            # create missing nodes so that older projects stay compatible with new options
            if node is None:
                node = ET.SubElement(child1, tag)
            node.text = str(fig_dict[key])
        doc.write(fname)
        self.send_log.emit('The new options for the figures are saved. \n')
        self.send_log.emit('# Modifications of figure options. \n')
# #################################################################################################
# Author: Talal Najam
# Date  : 21/12/2018
# Github: https://github.com/mistat44
# #################################################################################################

import os
from xml.etree import cElementTree
from nltk.tokenize import word_tokenize, sent_tokenize
# from sentiment_shifter import should_invert

file_name = 'en-sentiment.xml'
full_file = os.path.abspath(os.path.join('data', file_name))
xml = cElementTree.parse(full_file)
xml = xml.getroot()


def sentiment_score(tweet):
    words = word_tokenize(tweet)
    print(words)
    acum_score = 0
    for word in words:
        for w in xml.findall('word'):
            w, pos, p = (w.attrib.get("form"), w.attrib.get("pos"),
                         w.attrib.get("polarity", 0.0))
            if word == str(w):
                acum_score += float(p)
    return acum_score  # the snippet is cut off here; returning the accumulated score is assumed
def read(filepath):
    """Read the MMM file from disk and set keyframes."""
    start = bpy.context.scene.frame_current
    fps = bpy.context.scene.render.fps
    scale_factor = 0.001

    doc = etree.parse(filepath)
    root = doc.getroot()
    tolower(root)

    names = [e.get("name").replace('_joint', '')
             for e in root.findall(".//jointorder/joint")]
    missing = []
    for i in names:
        if i not in bpy.context.object.data.bones.keys():
            print("Could not find joint: {}".format(i))
            names.remove(i)
            missing.append(i)
    print(missing)

    timestamps = [float(e.text.strip())
                  for e in root.findall(".//motionframes/motionframe/timestep")]
    root_positions = [[float(i) * scale_factor for i in e.text.strip().split()]
                      for e in root.findall(".//motionframes/motionframe/rootposition")]
    root_rotations = [[float(i) for i in e.text.strip().split()]
                      for e in root.findall(".//motionframes/motionframe/rootrotation")]
    joint_positions = [[float(i) for i in e.text.strip().split()]
                       for e in root.findall(".//motionframes/motionframe/jointposition")]
    print(len(root_positions), len(root_rotations))

    bpy.ops.object.mode_set(mode='OBJECT')

    # lastFrame = start - 20
    counter = 3  # frame counter for skipping frames, value of 3 ensures that the first frame is used
    frameCounter = start  # count the current frame in blender

    # disable kinematic updates for import
    bpy.context.scene.RobotDesigner.doKinematicUpdate = False

    for [i, [timestamp, root_position, root_rotation, joint_position]] in enumerate(
            itertools.zip_longest(timestamps, root_positions, root_rotations,
                                  joint_positions, fillvalue=[])):
        counter += 1  # increase counter
        if counter != 4:  # process frame only if counter equals 4 => use every 4th frame
            continue  # skip frame
        counter = 0  # reset counter

        # bpy.context.scene.frame_current = start + timestamp * fps * 10
        bpy.context.scene.frame_current = frameCounter  # set current frame in blender
        frameCounter += 1  # increase frameCounter for next frame

        # if bpy.context.scene.frame_current - lastFrame < 12:  # or bpy.context.scene.frame_current > 100:
        #     print('Skipping')
        #     continue
        # lastFrame = bpy.context.scene.frame_current

        print("Frame number: ", bpy.context.scene.frame_current, " of ",
              len(root_positions) / 4 - 1)

        armName = bpy.context.active_object.name
        segment_name = bpy.context.active_object.data.bones[0].name

        bpy.context.active_object.location = Vector(root_position)
        bpy.context.active_object.rotation_euler = Euler(root_rotation, "XYZ")
        bpy.ops.anim.keyframe_insert(type="Location")
        bpy.ops.anim.keyframe_insert(type="Rotation")

        for [x, value] in enumerate(joint_position):
            if x < len(names):
                bpy.ops.RobotDesigner.select_segment(segment_name=names[x])
                try:
                    bpy.context.active_bone.RobotDesigner.theta.value = value / pi * 180.0
                except KeyError:
                    print("Error updating {}".format(names[x]))
                # print(names[x], value/pi*180, bpy.context.active_bone.RobotDesigner.theta.value)

        bpy.ops.object.mode_set(mode='POSE')
        bpy.ops.pose.select_all(action='SELECT')
        armatures.updateKinematics(armName, segment_name)
        bpy.ops.anim.keyframe_insert(type='Rotation')
        bpy.ops.object.mode_set(mode='OBJECT')
        # armatures.updateKinematics(armName, segment_name)

    bpy.context.scene.RobotDesigner.doKinematicUpdate = True
def main():
    sys.path.append(sys.argv[1])
    sys.argv.remove(sys.argv[1])

    # Now we can import all the stuff..
    from sonLib.bioio import getBasicOptionParser
    from sonLib.bioio import parseBasicOptions
    from sonLib.bioio import logger
    from sonLib.bioio import addLoggingFileHandler, redirectLoggerStreamHandlers
    from sonLib.bioio import setLogLevel
    from sonLib.bioio import getTotalCpuTime, getTotalCpuTimeAndMemoryUsage
    from sonLib.bioio import getTempDirectory
    from sonLib.bioio import makeSubDir
    from jobTree.src.job import Job
    from jobTree.src.master import getEnvironmentFileName, getConfigFileName, listChildDirs, getTempStatsFile, setupJobAfterFailure
    from sonLib.bioio import system

    # Input args
    jobTreePath = sys.argv[1]
    jobFile = sys.argv[2]

    # Load the environment for the job
    fileHandle = open(getEnvironmentFileName(jobTreePath), 'r')
    environment = cPickle.load(fileHandle)
    fileHandle.close()
    for i in environment:
        if i not in ("TMPDIR", "TMP", "HOSTNAME", "HOSTTYPE"):
            os.environ[i] = environment[i]
    # sys.path is used by __import__ to find modules
    if "PYTHONPATH" in environment:
        for e in environment["PYTHONPATH"].split(':'):
            if e != '':
                sys.path.append(e)
    #os.environ = environment
    #os.putenv(key, value)

    # Setup the temporary directories: dir to put all the temp files in.
    localSlaveTempDir = getTempDirectory()
    localTempDir = makeSubDir(os.path.join(localSlaveTempDir, "localTempDir"))

    # Setup the logging. This is mildly tricky because we don't just want to
    # redirect stdout and stderr for this Python process; we want to redirect it
    # for this process and all children. Consequently, we can't just replace
    # sys.stdout and sys.stderr; we need to mess with the underlying OS-level
    # file descriptors. See <http://stackoverflow.com/a/11632982/402891>

    # When we start, standard input is file descriptor 0, standard output is
    # file descriptor 1, and standard error is file descriptor 2.
    # What file do we want to point FDs 1 and 2 to?
    tempSlaveLogFile = os.path.join(localSlaveTempDir, "slave_log.txt")

    # Save the original stdout and stderr (by opening new file descriptors to the same files)
    origStdOut = os.dup(1)
    origStdErr = os.dup(2)

    # Open the file to send stdout/stderr to.
    logDescriptor = os.open(tempSlaveLogFile, os.O_WRONLY | os.O_CREAT | os.O_APPEND)

    # Replace standard output and standard error with descriptors for the log file
    os.dup2(logDescriptor, 1)
    os.dup2(logDescriptor, 2)

    # Since we only opened the file once, all the descriptors duped from the
    # original will share offset information, and won't clobber each others'
    # writes. See <http://stackoverflow.com/a/5284108/402891>. This shouldn't
    # matter, since O_APPEND seeks to the end of the file before every write, but
    # maybe there's something odd going on...

    # Close the descriptor we used to open the file
    os.close(logDescriptor)

    for handler in list(logger.handlers):  # Remove old handlers
        logger.removeHandler(handler)

    # Add the new handler. The sys.stderr stream has been redirected by swapping
    # the file descriptor out from under it.
    logger.addHandler(logging.StreamHandler(sys.stderr))

    # Put a message at the top of the log, just to make sure it's working.
    print "---JOBTREE SLAVE OUTPUT LOG---"
    sys.stdout.flush()

    # Log the number of open file descriptors so we can tell if we're leaking them.
    logger.debug("Next available file descriptor: {}".format(nextOpenDescriptor()))

    # Parse input files
    config = ET.parse(getConfigFileName(jobTreePath)).getroot()
    setLogLevel(config.attrib["log_level"])
    job = Job.read(jobFile)
    job.messages = []  # This is the only way to stop messages logging twice, as they are read only in the master
    job.children = []  # Similarly, this is where old children are flushed out.
    job.write()  # Update status, to avoid reissuing children after running a follow on below.
    if os.path.exists(job.getLogFileName()):  # This cleans the old log file
        os.remove(job.getLogFileName())
    logger.info("Parsed arguments and set up logging")

    # Try loop for slave logging

    # Setup the stats, if requested
    if config.attrib.has_key("stats"):
        startTime = time.time()
        startClock = getTotalCpuTime()
        stats = ET.Element("slave")
    else:
        stats = None

    # The max time
    maxTime = float(config.attrib["job_time"])
    assert maxTime > 0.0
    assert maxTime < sys.maxint

    # Slave log file trapped from here on in
    slaveFailed = False
    try:
        # The next job
        def globalTempDirName(job, depth):
            return job.getGlobalTempDirName() + str(depth)

        command, memoryAvailable, cpuAvailable, depth = job.followOnCommands[-1]
        defaultMemory = int(config.attrib["default_memory"])
        defaultCpu = int(config.attrib["default_cpu"])
        assert len(job.children) == 0

        startTime = time.time()
        while True:
            job.followOnCommands.pop()

            # Global temp dir
            globalTempDir = makeSubDir(globalTempDirName(job, depth))
            i = 1
            while os.path.isdir(globalTempDirName(job, depth + i)):
                system("rm -rf %s" % globalTempDirName(job, depth + i))
                i += 1

            # Old children, not yet deleted.
            # These may exist because of the lazy cleanup we do.
            for childDir in listChildDirs(job.jobDir):
                logger.debug("Cleaning up old child %s" % childDir)
                system("rm -rf %s" % childDir)

            # Run the job
            if command != "":  # Not a stub
                if command[:11] == "scriptTree ":
                    # Run the target
                    loadStack(command).execute(job=job, stats=stats,
                                               localTempDir=localTempDir, globalTempDir=globalTempDir,
                                               memoryAvailable=memoryAvailable, cpuAvailable=cpuAvailable,
                                               defaultMemory=defaultMemory, defaultCpu=defaultCpu,
                                               depth=depth)
                else:  # Is another command
                    system(command)

            # Cleanup/reset a successful job/checkpoint
            job.remainingRetryCount = int(config.attrib["try_count"])
            system("rm -rf %s/*" % (localTempDir))
            job.update(depth=depth, tryCount=job.remainingRetryCount)

            # Establish if we can run another job
            if time.time() - startTime > maxTime:
                logger.info("We are breaking because the maximum time the job should run for has been exceeded")
                break

            # Deal with children
            if len(job.children) >= 1:  # We are going to have to return to the parent
                logger.info("No more jobs can run in series by this slave, its got %i children" % len(job.children))
                break

            if len(job.followOnCommands) == 0:
                logger.info("No more jobs can run by this slave as we have exhausted the follow ons")
                break

            # Get the next job and see if we have enough cpu and memory to run it..
            command, memory, cpu, depth = job.followOnCommands[-1]
            if memory > memoryAvailable:
                logger.info("We need more memory for the next job, so finishing")
                break
            if cpu > cpuAvailable:
                logger.info("We need more cpus for the next job, so finishing")
                break

            logger.info("Starting the next job")

        # Finish up the stats
        if stats != None:
            totalCpuTime, totalMemoryUsage = getTotalCpuTimeAndMemoryUsage()
            stats.attrib["time"] = str(time.time() - startTime)
            stats.attrib["clock"] = str(totalCpuTime - startClock)
            stats.attrib["memory"] = str(totalMemoryUsage)
            tempStatsFile = getTempStatsFile(jobTreePath)
            fileHandle = open(tempStatsFile + ".new", "w")
            ET.ElementTree(stats).write(fileHandle)
            fileHandle.close()
            os.rename(tempStatsFile + ".new", tempStatsFile)  # This operation is atomic

        logger.info("Finished running the chain of jobs on this node, we ran for a total of %f seconds" % (time.time() - startTime))

    # Where slave goes wrong
    except:  # Case that something goes wrong in slave
        traceback.print_exc()
        logger.critical("Exiting the slave because of a failed job on host %s", socket.gethostname())
        job = Job.read(jobFile)
        setupJobAfterFailure(job, config)
        job.write()
        slaveFailed = True

    # Cleanup: close the slave logging
    # Flush at the Python level
    sys.stdout.flush()
    sys.stderr.flush()
    # Flush at the OS level
    os.fsync(1)
    os.fsync(2)

    # Close redirected stdout and replace with the original standard output.
    os.dup2(origStdOut, 1)
    # Close redirected stderr and replace with the original standard error.
    os.dup2(origStdErr, 2)

    # sys.stdout and sys.stderr don't need to be modified at all. We don't need
    # to call redirectLoggerStreamHandlers since they still log to sys.stderr

    # Close our extra handles to the original standard output and standard error
    # streams, so we don't leak file handles.
    os.close(origStdOut)
    os.close(origStdErr)

    # Now our file handles are in exactly the state they were in before.

    # Copy back the log file to the global dir, if needed
    if slaveFailed:
        truncateFile(tempSlaveLogFile)
        system("mv %s %s" % (tempSlaveLogFile, job.getLogFileName()))

    # Remove the temp dir
    system("rm -rf %s" % localSlaveTempDir)

    # This must happen after the log file is done with, else there is no place to put the log
    if (not slaveFailed) and len(job.followOnCommands) == 0 and len(job.children) == 0 and len(job.messages) == 0:
        # Cleanup global files at the end of the chain
        job.delete()
def edit_bbox(obj_to_edit, action): ''' action = `delete` `change_class:new_class_index` `resize_bbox:new_x_left:new_y_top:new_x_right:new_y_bottom` ''' if 'change_class' in action: new_class_index = int(action.split(':')[1]) elif 'resize_bbox' in action: new_x_left = max(0, int(action.split(':')[1])) new_y_top = max(0, int(action.split(':')[2])) new_x_right = min(width, int(action.split(':')[3])) new_y_bottom = min(height, int(action.split(':')[4])) # 1. initialize bboxes_to_edit_dict # (we use a dict since a single label can be associated with multiple ones in videos) bboxes_to_edit_dict = {} current_img_path = IMAGE_PATH_LIST[img_index] bboxes_to_edit_dict[current_img_path] = obj_to_edit # 2. add elements to bboxes_to_edit_dict ''' If the bbox is in the json file then it was used by the video Tracker, hence, we must also edit the next predicted bboxes associated to the same `anchor_id`. ''' # if `current_img_path` is a frame from a video is_from_video, video_name = is_frame_from_video(current_img_path) if is_from_video: # get json file corresponding to that video json_file_path = '{}.json'.format(os.path.join(TRACKER_DIR, video_name)) file_exists, json_file_data = get_json_file_data(json_file_path) # if json file exists if file_exists: # match obj_to_edit with the corresponding json object frame_data_dict = json_file_data['frame_data_dict'] json_object_list = get_json_file_object_list( current_img_path, frame_data_dict) obj_matched = get_json_object_dict(obj_to_edit, json_object_list) # if match found if obj_matched is not None: # get this object's anchor_id anchor_id = obj_matched['anchor_id'] frame_path_list = get_next_frame_path_list( video_name, current_img_path) frame_path_list.insert(0, current_img_path) if 'change_class' in action: # add also the previous frames prev_path_list = get_prev_frame_path_list( video_name, current_img_path) frame_path_list = prev_path_list + frame_path_list # update json file if contain the same anchor_id for frame_path in frame_path_list: json_object_list = get_json_file_object_list( frame_path, frame_data_dict) json_obj = get_json_file_object_by_id( json_object_list, anchor_id) if json_obj is not None: bboxes_to_edit_dict[frame_path] = [ json_obj['class_index'], json_obj['bbox']['xmin'], json_obj['bbox']['ymin'], json_obj['bbox']['xmax'], json_obj['bbox']['ymax'] ] # edit json file if 'delete' in action: json_object_list.remove(json_obj) elif 'change_class' in action: json_obj['class_index'] = new_class_index elif 'resize_bbox' in action: json_obj['bbox']['xmin'] = new_x_left json_obj['bbox']['ymin'] = new_y_top json_obj['bbox']['xmax'] = new_x_right json_obj['bbox']['ymax'] = new_y_bottom else: break # save the edited data with open(json_file_path, 'w') as outfile: json.dump(json_file_data, outfile, sort_keys=True, indent=4) # 3. 
loop through bboxes_to_edit_dict and edit the corresponding annotation files for path in bboxes_to_edit_dict: obj_to_edit = bboxes_to_edit_dict[path] class_index, xmin, ymin, xmax, ymax = map(int, obj_to_edit) for ann_path in get_annotation_paths(path, annotation_formats): if '.txt' in ann_path: # edit YOLO file with open(ann_path, 'r') as old_file: lines = old_file.readlines() yolo_line = yolo_format( class_index, (xmin, ymin), (xmax, ymax), width, height) # TODO: height and width ought to be stored with open(ann_path, 'w') as new_file: for line in lines: if line != yolo_line + '\n': new_file.write(line) elif 'change_class' in action: new_yolo_line = yolo_format( new_class_index, (xmin, ymin), (xmax, ymax), width, height) new_file.write(new_yolo_line + '\n') elif 'resize_bbox' in action: new_yolo_line = yolo_format( class_index, (new_x_left, new_y_top), (new_x_right, new_y_bottom), width, height) new_file.write(new_yolo_line + '\n') elif '.xml' in ann_path: # edit PASCAL VOC file tree = ET.parse(ann_path) annotation = tree.getroot() for obj in annotation.findall('object'): class_name_xml, class_index_xml, xmin_xml, ymin_xml, xmax_xml, ymax_xml = get_xml_object_data( obj) if (class_index == class_index_xml and xmin == xmin_xml and ymin == ymin_xml and xmax == xmax_xml and ymax == ymax_xml): if 'delete' in action: annotation.remove(obj) elif 'change_class' in action: # edit object class name object_class = obj.find('name') object_class.text = CLASS_LIST[new_class_index] elif 'resize_bbox' in action: object_bbox = obj.find('bndbox') object_bbox.find('xmin').text = str(new_x_left) object_bbox.find('ymin').text = str(new_y_top) object_bbox.find('xmax').text = str(new_x_right) object_bbox.find('ymax').text = str(new_y_bottom) break xml_str = ET.tostring(annotation) write_xml(xml_str, ann_path)
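# Minimal sketch of the PASCAL VOC edit performed above, run on a made-up in-memory
# annotation instead of a file on disk: locate the matching <object>, rewrite its
# <bndbox> children, and serialize the tree again. The box coordinates are invented.
import xml.etree.ElementTree as ET

voc_xml = """<annotation>
  <object><name>dog</name>
    <bndbox><xmin>10</xmin><ymin>20</ymin><xmax>110</xmax><ymax>220</ymax></bndbox>
  </object>
</annotation>"""

annotation = ET.fromstring(voc_xml)
for obj in annotation.findall('object'):
    bbox = obj.find('bndbox')
    if bbox.find('xmin').text == '10':    # match the box we want to edit
        bbox.find('xmin').text = str(15)  # resize_bbox: new left edge
        bbox.find('ymax').text = str(200) # resize_bbox: new bottom edge
        break

print(ET.tostring(annotation, encoding='unicode'))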
# import xml.etree.ElementTree as ET
import xml.etree.cElementTree as ET

tree = ET.parse("country.xml")
# <xml.etree.ElementTree.ElementTree object at 0x0029FD10>
print(tree)
root = tree.getroot()
print(root)
print(root.tag, ":", root.attrib)

# Walk the second level of the XML document
for child in root:
    # tag name and attributes of each second-level node
    print(child.tag, ":", child.attrib)
    # Walk the third level of the XML document
    for children in child:
        # tag name and attributes of each third-level node
        print(children.tag, ":", children.attrib)

print(root[0][1].text)

# Filter out all "neighbor" tags
for neighbor in root.iter("neighbor"):
    print(neighbor.tag, ":", neighbor.attrib)

# Iterate over all "country" tags
for country in root.findall("country"):
    # find the first "rank" tag under the country tag
    rank = country.find("rank").text
    # get the "name" attribute of the country tag
    name = country.get("name")
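# Self-contained variant of the walkthrough above: the same iter()/findall()/find()/get()
# calls, but run on an inline document instead of a local country.xml, so it can be
# executed as-is. The country data below is a made-up example.
import xml.etree.ElementTree as ET

data = """<data>
  <country name="Liechtenstein"><rank>1</rank><neighbor name="Austria" direction="E"/></country>
  <country name="Singapore"><rank>4</rank><neighbor name="Malaysia" direction="N"/></country>
</data>"""

root = ET.fromstring(data)
for neighbor in root.iter("neighbor"):       # every neighbor element, at any depth
    print(neighbor.tag, ":", neighbor.attrib)
for country in root.findall("country"):      # direct country children only
    rank = country.find("rank").text         # text of the first rank child
    name = country.get("name")               # value of the name attribute
    print(name, rank)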
def parse(file):
    tree = ET.parse(file)
    return XMLTree(tree.getroot())
def update_library(self, ep_obj=None, host=None, username=None, password=None, plex_server_token=None, force=True): """Handles updating the Plex Media Server host via HTTP API Plex Media Server currently only supports updating the whole video library and not a specific path. Returns: Returns None for no issue, else a string of host with connection issues """ if sickbeard.USE_PLEX and sickbeard.PLEX_UPDATE_LIBRARY: if not sickbeard.PLEX_SERVER_HOST: logger.log( u'PLEX: No Plex Media Server host specified, check your settings', logger.DEBUG) return False if not host: host = sickbeard.PLEX_SERVER_HOST if not username: username = sickbeard.PLEX_USERNAME if not password: password = sickbeard.PLEX_PASSWORD if not plex_server_token: plex_server_token = sickbeard.PLEX_SERVER_TOKEN # if username and password were provided, fetch the auth token from plex.tv token_arg = '' if plex_server_token: token_arg = '?X-Plex-Token=' + plex_server_token elif username and password: logger.log( u'PLEX: fetching plex.tv credentials for user: '******'https://plex.tv/users/sign_in.xml', data='') authheader = 'Basic %s' % base64.encodestring( '%s:%s' % (username, password))[:-1] req.add_header('Authorization', authheader) req.add_header('X-Plex-Device-Name', 'SickRage') req.add_header('X-Plex-Product', 'SickRage Notifier') req.add_header('X-Plex-Client-Identifier', sickbeard.common.USER_AGENT) req.add_header('X-Plex-Version', '1.0') try: response = urllib2.urlopen(req) auth_tree = etree.parse(response) token = auth_tree.findall( './/authentication-token')[0].text token_arg = '?X-Plex-Token=' + token except urllib2.URLError as e: logger.log( u'PLEX: Error fetching credentials from from plex.tv for user %s: %s' % (username, ex(e)), logger.DEBUG) except (ValueError, IndexError) as e: logger.log( u'PLEX: Error parsing plex.tv response: ' + ex(e), logger.DEBUG) file_location = '' if None is ep_obj else ep_obj.location host_list = [x.strip() for x in host.split(',')] hosts_all = {} hosts_match = {} hosts_failed = [] for cur_host in host_list: url = 'http://%s/library/sections%s' % (cur_host, token_arg) try: xml_tree = etree.parse(urllib.urlopen(url)) media_container = xml_tree.getroot() except IOError, e: logger.log( u'PLEX: Error while trying to contact Plex Media Server: ' + ex(e), logger.WARNING) hosts_failed.append(cur_host) continue except Exception as e: if 'invalid token' in str(e): logger.log( u'PLEX: Please set TOKEN in Plex settings: ', logger.ERROR) else: logger.log( u'PLEX: Error while trying to contact Plex Media Server: ' + ex(e), logger.ERROR) continue sections = media_container.findall('.//Directory') if not sections: logger.log( u'PLEX: Plex Media Server not running on: ' + cur_host, logger.DEBUG) hosts_failed.append(cur_host) continue for section in sections: if 'show' == section.attrib['type']: keyed_host = [(str(section.attrib['key']), cur_host)] hosts_all.update(keyed_host) if not file_location: continue for section_location in section.findall('.//Location'): section_path = re.sub( r'[/\\]+', '/', section_location.attrib['path'].lower()) section_path = re.sub(r'^(.{,2})[/\\]', '', section_path) location_path = re.sub(r'[/\\]+', '/', file_location.lower()) location_path = re.sub(r'^(.{,2})[/\\]', '', location_path) if section_path in location_path: hosts_match.update(keyed_host)
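# Offline sketch of the section parsing above: collect the keys of Directory entries of
# type "show" from a /library/sections-style MediaContainer reply. The XML below is a
# made-up example response, not one captured from a real Plex Media Server, and no
# network access or authentication is involved.
import xml.etree.ElementTree as ET

response = """<MediaContainer>
  <Directory type="show" key="2" title="TV Shows">
    <Location id="4" path="/data/tv"/>
  </Directory>
  <Directory type="movie" key="1" title="Movies"/>
</MediaContainer>"""

media_container = ET.fromstring(response)
show_section_keys = [section.attrib['key']
                     for section in media_container.findall('.//Directory')
                     if 'show' == section.attrib['type']]
print(show_section_keys)  # -> ['2']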
def main(): logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG) if len(sys.argv) < 4: sys.stderr.write("transform_benchmark_to_pcidss.py PCI_DSS.json " "SOURCE_XCCDF DESTINATION_XCCDF\n") sys.exit(1) id_tree = None with open(sys.argv[1], "r") as f: id_tree = json.load(f) benchmark = ElementTree.parse(sys.argv[2]) rules = [] for rule in \ benchmark.findall(".//{%s}Rule" % (XCCDF_NAMESPACE)): rules.append(rule) rule_usage_map = {} # only PCI-DSS related rules in that list, to speed-up processing filtered_rules = [] for rule in rules: for ref in rule.findall("./{%s}reference" % (XCCDF_NAMESPACE)): if ref.get("href") == REMOTE_URL: filtered_rules.append(rule) break values = [] for value in \ benchmark.findall(".//{%s}Value" % (XCCDF_NAMESPACE)): values.append(value) parent_map = dict((c, p) for p in benchmark.getiterator() for c in p) for rule in \ benchmark.findall(".//{%s}Rule" % (XCCDF_NAMESPACE)): parent_map[rule].remove(rule) for value in \ benchmark.findall(".//{%s}Value" % (XCCDF_NAMESPACE)): parent_map[value].remove(value) for group in \ benchmark.findall(".//{%s}Group" % (XCCDF_NAMESPACE)): parent_map[group].remove(group) root_element = benchmark.getroot() for id_, desc, children in id_tree: element = \ construct_xccdf_group(id_, desc, children, filtered_rules, rule_usage_map) root_element.append(element) if len(values) > 0: group = ElementTree.Element("{%s}Group" % (XCCDF_NAMESPACE)) group.set("id", "xccdf_org.ssgproject.content_group_values") group.set("selected", "true") title = ElementTree.Element("{%s}title" % (XCCDF_NAMESPACE)) title.text = "Values" group.append(title) description = ElementTree.Element("{%s}description" % (XCCDF_NAMESPACE)) description.text = "Group of values used in PCI-DSS profile" group.append(description) for value in values: copied_value = copy.deepcopy(value) group.append(copied_value) root_element.append(group) unused_rules = [] for rule in rules: if rule.get("id") not in rule_usage_map: # this rule wasn't added yet, it would be lost unless we added it # to a special non-PCI-DSS group unused_rules.append(rule) for ref in rule.findall("./{%s}reference" % (XCCDF_NAMESPACE)): if ref.get("href") == REMOTE_URL: logging.error( "Rule '%s' references PCI-DSS '%s' but doesn't match " "any Group ID in our requirement tree. Perhaps it's " "referencing something we don't consider applicable on " "the Operating System level?", rule.get("id"), ref.text) if len(unused_rules) > 0: logging.warning("%i rules don't reference PCI-DSS!" 
% (len(unused_rules))) group = ElementTree.Element("{%s}Group" % (XCCDF_NAMESPACE)) group.set("id", "xccdf_org.ssgproject.content_group_non-pci-dss") group.set("selected", "true") title = ElementTree.Element("{%s}title" % (XCCDF_NAMESPACE)) title.text = "Non PCI-DSS" group.append(title) description = ElementTree.Element("{%s}description" % (XCCDF_NAMESPACE)) description.text = "Rules that are not part of PCI-DSS" group.append(description) for rule in unused_rules: copied_rule = copy.deepcopy(rule) group.append(copied_rule) root_element.append(group) # change the Benchmark ID to avoid validation issues root_element.set( "id", root_element.get("id").replace("_benchmark_", "_benchmark_PCIDSS-")) for title_element in \ root_element.findall("./{%s}title" % (XCCDF_NAMESPACE)): title_element.text += " (PCI-DSS centric)" # filter out all profiles except PCI-DSS for profile in \ benchmark.findall("./{%s}Profile" % (XCCDF_NAMESPACE)): if profile.get("id").endswith("pci-dss"): # change the profile ID to avoid validation issues profile.set( "id", profile.get("id").replace("pci-dss", "pci-dss_centric")) else: root_element.remove(profile) continue # filter out old group selectors from the PCI-DSS profile for select in profile.findall("./{%s}select" % (XCCDF_NAMESPACE)): if select.get("idref").startswith( "xccdf_org.ssgproject.content_group_"): # we will remove all group selectors, all PCI-DSS groups are # selected by default so we don't need any in the final # PCI-DSS Benchmark profile.remove(select) benchmark.write(sys.argv[3])
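# Sketch of the namespaced build-and-append pattern used throughout the script above:
# create a Group with a title, attach it to the Benchmark root, and look it up again with
# the "{namespace}tag" syntax. The namespace URI shown is the XCCDF 1.2 one and the ids
# are placeholders; the real script defines its own XCCDF_NAMESPACE and id scheme.
import xml.etree.ElementTree as ElementTree

XCCDF_NAMESPACE = "http://checklists.nist.gov/xccdf/1.2"

benchmark_root = ElementTree.Element("{%s}Benchmark" % (XCCDF_NAMESPACE))
group = ElementTree.Element("{%s}Group" % (XCCDF_NAMESPACE))
group.set("id", "xccdf_org.example.content_group_demo")
group.set("selected", "true")
title = ElementTree.Element("{%s}title" % (XCCDF_NAMESPACE))
title.text = "Demo group"
group.append(title)
benchmark_root.append(group)

print(benchmark_root.findall(".//{%s}Group" % (XCCDF_NAMESPACE)))  # -> [<Element ...>]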
# Mapping from traffic-light class names to class indices
class_dict = {
    "Traffic Light": "0",
    "Red": "1",
    "Green": "2",
    "Yellow": "3",
    "Straight": "4",
    "Right": "5",
    "Left": "6"
}

files = listdir(path_root)
inputtxt = open("test.txt", 'r')  # opened here but not used in this excerpt

for i in files:
    # print(i[0:-4])
    # print(lines)
    # print(lines[-2][0:-4])
    tree = ET.parse(path_root + i[0:-4] + '.xml')
    root = tree.getroot()
    save = open(txt_path + i[0:-4] + ".txt", 'w')
    line = ''
    for child in root.findall('object'):
        name = child.find('name').text
        bndbox = child.find('bndbox')
        xmin = bndbox.find('xmin').text
        ymin = bndbox.find('ymin').text
        xmax = bndbox.find('xmax').text
        ymax = bndbox.find('ymax').text
        # skip boxes whose height is 5 px or less
        if int(ymax) - int(ymin) <= 5:
            continue
        line += class_dict[name] + ' ' + xmin + ' ' + ymin + ' ' + xmax + ' ' + ymax + '\n'
        for child2 in child.findall('status'):
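# Self-contained sketch of the per-object conversion above: map the class name to an
# index and emit one "index xmin ymin xmax ymax" line per box, skipping boxes whose
# height is 5 px or less. The class map and annotation below are made-up examples.
import xml.etree.ElementTree as ET

class_map = {"Red": "1", "Green": "2"}
xml_text = """<annotation>
  <object><name>Red</name>
    <bndbox><xmin>10</xmin><ymin>20</ymin><xmax>30</xmax><ymax>60</ymax></bndbox>
  </object>
</annotation>"""

root = ET.fromstring(xml_text)
line = ''
for child in root.findall('object'):
    name = child.find('name').text
    bndbox = child.find('bndbox')
    xmin, ymin = bndbox.find('xmin').text, bndbox.find('ymin').text
    xmax, ymax = bndbox.find('xmax').text, bndbox.find('ymax').text
    if int(ymax) - int(ymin) <= 5:
        continue
    line += class_map[name] + ' ' + xmin + ' ' + ymin + ' ' + xmax + ' ' + ymax + '\n'
print(line, end='')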
renamed 'bikeStations.xml'. That database table should be empty
(to avoid an exception due to inserting a row with a duplicate key).
"""
import logging
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s - %(message)s')

import psycopg2
import sys

conn = psycopg2.connect(dbname='gisdb', user='******', password='******')
cur = conn.cursor()

import xml.etree.cElementTree as ET
tree = ET.parse('bikeStations.xml')

# These are the children of interest
childTags = ['name', 'terminalName', 'lat', 'long', 'nbBikes', 'nbEmptyDocks']
falseStatusTags = ['locked', 'temporary']
trueStatusTags = ['installed', 'public']

stations = tree.findall('station')
for station in stations:
    print("STATION")
    stationName = station.find('name').text
    stationId = station.find('terminalName').text
    lat = station.find('lat').text
    lon = station.find('long').text
    fullDocks = station.find('nbBikes').text
    emptyDocks = station.find('nbEmptyDocks').text
    dockQty = int(fullDocks) + int(emptyDocks)
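# Sketch of the per-station field extraction above, run on an inline document; the
# psycopg2 insert is left out so the example runs without a PostgreSQL database. The
# station values are invented.
import xml.etree.ElementTree as ET

xml_text = """<stations>
  <station>
    <name>Main St</name><terminalName>001</terminalName>
    <lat>38.9</lat><long>-77.0</long>
    <nbBikes>7</nbBikes><nbEmptyDocks>4</nbEmptyDocks>
  </station>
</stations>"""

child_tags = ['name', 'terminalName', 'lat', 'long', 'nbBikes', 'nbEmptyDocks']
root = ET.fromstring(xml_text)
for station in root.findall('station'):
    fields = {tag: station.find(tag).text for tag in child_tags}
    fields['dockQty'] = int(fields['nbBikes']) + int(fields['nbEmptyDocks'])
    print(fields)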
def parse_abstracts_xml(abstracts_xmlfilename, csv_file): """ Method for getting structured list containing all the abstracts from XML. Every abstract in the list is an object of Abstract class. It contains 4 main components: 1. Track of abstract 2. Title 3. List of authors. Every author is an object of Person class 4. Abstract itself (content) """ tree_abstracts = ET.parse(abstracts_xmlfilename) root_abstracts = tree_abstracts.getroot() doc_abstracts = LXML_ET.parse(abstracts_xmlfilename) count_abstracts = doc_abstracts.xpath('count(//abstract)') track = "" title = "" content = "" flag = False authors = [] abstracts_list = [] unknown_affiliations = [] affiliation_standarts = create_dict_standarts(csv_file) print("1. Parsing all abstracts from XML") for i in range(1, int(count_abstracts) + 1): for child in root_abstracts[i]: if child.tag == "Title": title = child.text.strip() continue if child.tag == "Content": content = child.text.strip() continue if child.tag == "PrimaryAuthor" or child.tag == "Co-Author": # Bringing different affiliations to the same standard affiliation = str(child[3].text).strip() # If affiliation is in standards - bring affiliation to standard if affiliation in affiliation_standarts: affiliation = affiliation_standarts[affiliation] else: unknown_affiliations.append(affiliation) primary_author = Person(first_name=str(child[0].text), family_name=str(child[1].text), email=str(child[2].text), affiliation=affiliation, is_primary_author=True if child.tag == "PrimaryAuthor" else False) authors.append(primary_author) continue if child.tag == "Track" and not flag: track = child.text flag = True continue abstract = Abstract(title, content, authors, track) abstracts_list.append(abstract) authors = [] flag = False # Print unknown affiliations unknown_affiliations = list(set(unknown_affiliations)) print("2. The following affiliations are unknown. Please add them to CSV file with standards.") for affiliation in unknown_affiliations: print(affiliation) print("==============================================") return abstracts_list
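# Sketch of the tag-dispatch loop above on an inline <abstract> element: plain
# ElementTree replaces the lxml xpath count, and tuples stand in for the external
# Person/Abstract classes. The abstract content and element layout are invented, so
# treat this only as an illustration of dispatching on child.tag.
import xml.etree.ElementTree as ET

xml_text = """<abstracts>
  <abstract>
    <Track>Detectors</Track>
    <Title> A sample title </Title>
    <Content> A sample abstract. </Content>
    <PrimaryAuthor>
      <FirstName>Ada</FirstName><FamilyName>Lovelace</FamilyName>
      <Email>ada@example.org</Email><Affiliation>Example University</Affiliation>
    </PrimaryAuthor>
  </abstract>
</abstracts>"""

root = ET.fromstring(xml_text)
print("abstract count:", len(root.findall(".//abstract")))
for abstract in root.findall("abstract"):
    title = content = track = ""
    authors = []
    for child in abstract:
        if child.tag == "Title":
            title = child.text.strip()
        elif child.tag == "Content":
            content = child.text.strip()
        elif child.tag in ("PrimaryAuthor", "Co-Author"):
            authors.append((child[0].text, child[1].text, child[3].text))
        elif child.tag == "Track":
            track = child.text
    print(track, "|", title, "|", authors)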
def execute(self, context): zip_name = self.filepath #file paths ans zip extraction dirs zip_path = os.path.abspath(zip_name) zip_dir = os.path.dirname(zip_path) html_path = os.path.join(zip_dir, 'html') xml_path = os.path.join(html_path, 'xml') zip2_path = os.path.join(html_path, os.path.basename(zip_path)) #remove old files shutil.rmtree(xml_path, True) #unzip files with ZipFile(zip_path, 'r') as zip_file: zip_file.extractall(html_path) with ZipFile(zip2_path, 'r') as zip_file: zip_file.extractall(xml_path) #clear scene bpy.data.scenes["Scene"].unit_settings.scale_length = 1.0 bpy.ops.object.mode_set(mode='OBJECT') bpy.ops.object.select_all(action='SELECT') bpy.ops.object.delete(use_global=False) #clear images imgs = bpy.data.images for image in imgs: image.user_clear() #clear materials for material in bpy.data.materials: material.user_clear() bpy.data.materials.remove(material) #clear textures textures = bpy.data.textures for tex in textures: tex.user_clear() bpy.data.textures.remove(tex) #read xml and files xmlPath = os.path.join(xml_path, 'Home.xml') xmlRoot = ElementTree.parse(xmlPath).getroot() #read house filename = os.path.join(xml_path, xmlRoot.get('structure')) bpy.ops.import_scene.obj(filepath=filename) obs = bpy.context.selected_editable_objects[:] bpy.context.scene.objects.active = obs[0] bpy.ops.object.join() obs[0].name = xmlRoot.get('name') obs[0].dimensions = obs[0].dimensions * scale obs[0].location = (0.0, 0.0, 0.0) bpy.ops.object.shade_flat() bpy.context.active_object.layers[0] = True bpy.context.active_object.layers[1] = False bpy.context.active_object.layers[2] = False bpy.context.active_object.layers[3] = False Level = namedtuple("Level", "id elev ft") levels = [] for element in xmlRoot: objectName = element.tag if objectName == 'level': levels.append( Level(id=element.get('id'), elev=float(element.get('elevation')), ft=float(element.get('floorThickness')))) if objectName == 'furnitureGroup': for furniture in element: xmlRoot.append(furniture) #if objectName in ('doorOrWindow','pieceOfFurniture'): if 'model' in element.keys(): print(objectName) filename = os.path.join(xml_path, unquote(element.get('model'))) dimX = float(element.get('width')) dimY = float(element.get('height')) dimZ = float(element.get('depth')) locX = float(element.get('x')) * scale locY = -float(element.get('y')) * scale lve = 0.0 if 'level' in element.keys(): for lv in levels: if lv.id == element.get('level'): lve = (lv.elev) * scale if 'elevation' in element.keys(): locZ = (dimY * scale / 2.0) + ( float(element.get('elevation')) * scale) + lve else: locZ = (dimY * scale / 2.0) + lve bpy.ops.import_scene.obj(filepath=filename) obs = bpy.context.selected_editable_objects[:] bpy.context.scene.objects.active = obs[0] bpy.ops.object.join() obs[0].name = element.get('name') bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS') if objectName in ('doorOrWindow'): bpy.context.active_object.layers[1] = True bpy.context.active_object.layers[2] = False else: bpy.context.active_object.layers[2] = True bpy.context.active_object.layers[1] = False bpy.context.active_object.layers[0] = False bpy.context.active_object.layers[3] = False if 'modelMirrored' in element.keys(): if element.get('modelMirrored') == 'true': bpy.ops.transform.mirror( constraint_axis=(True, False, False), constraint_orientation='GLOBAL', proportional='DISABLED') if 'modelRotation' in element.keys(): value = element.get('modelRotation') va = value.split() mat_rot = mathutils.Matrix() mat_rot[0][0] = float(va[0]) mat_rot[0][1] = 
float(va[1]) mat_rot[0][2] = float(va[2]) mat_rot[1][0] = float(va[3]) mat_rot[1][1] = float(va[4]) mat_rot[1][2] = float(va[5]) mat_rot[2][0] = float(va[6]) mat_rot[2][1] = float(va[7]) mat_rot[2][2] = float(va[8]) ob = bpy.context.object ob.matrix_world = mat_rot bpy.ops.object.transform_apply(location=False, rotation=True, scale=False) ob.rotation_euler = (math.pi / 2, 0.0, 0.0) #if 'backFaceShown' in element.keys(): #TODO #object position and rotation obs[0].dimensions = (dimX * scale, dimY * scale, dimZ * scale) bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS') obs[0].location = (locX, locY, locZ) bpy.ops.object.transform_apply(location=False, rotation=True, scale=True) if 'angle' in element.keys(): angle = element.get('angle') obs[0].rotation_euler[2] = -float(angle) if 'color' in element.keys(): color = element.get('color') r = int(color[2:4], 16) / 255.0 g = int(color[4:6], 16) / 255.0 b = int(color[6:8], 16) / 255.0 bcolor = [r, g, b] for material in bpy.context.active_object.data.materials: material.diffuse_color = bcolor #search for texture or materials for prop in element: if prop.tag == 'texture': image = prop.get('image') for material in bpy.context.active_object.data.materials: img = bpy.data.images.load( os.path.join(xml_path, image)) tex = bpy.data.textures.new(image, type='IMAGE') tex.image = img mtex = material.texture_slots.add() mtex.texture = tex if prop.tag == 'material': mname = prop.get('name') if 'color' in prop.keys(): color = prop.get('color') r = int(color[2:4], 16) / 255.0 g = int(color[4:6], 16) / 255.0 b = int(color[6:8], 16) / 255.0 bcolor = [r, g, b] for material in bpy.context.active_object.data.materials: if mname in material.name: material.diffuse_color = bcolor #face texture of material for texture in prop: if texture.tag == 'texture': image = texture.get('image') for material in bpy.context.active_object.data.materials: if mname in material.name: img = bpy.data.images.load( os.path.join(xml_path, image)) tex = bpy.data.textures.new( image, type='IMAGE') tex.image = img mtex = material.texture_slots.add() mtex.texture = tex if objectName in ('light'): owner = bpy.context.active_object bpy.ops.object.lamp_add(type='POINT', location=(0.0, 0.0, 0.0)) bpy.context.active_object.data.energy = 200.0 * scale bpy.context.active_object.data.shadow_method = 'RAY_SHADOW' bpy.context.active_object.data.color = (0.9, 0.9, 0.9) bpy.context.active_object.parent = owner bpy.context.active_object.layers[3] = True bpy.context.active_object.layers[0] = False bpy.context.active_object.layers[1] = False bpy.context.active_object.layers[2] = False #insert camera if objectName in ('observerCamera'): if element.get('attribute') == 'observerCamera': locX = float(element.get('x')) * scale locY = -float(element.get('y')) * scale locZ = float(element.get('z')) * scale yaw = float(element.get('yaw')) pitch = float(element.get('pitch')) bpy.ops.object.camera_add( location=(locX, locY, locZ), rotation=((-pitch / 8.0) + (-math.pi / 2.0), math.pi, 0)) bpy.ops.mesh.primitive_cube_add( location=(locX, locY, locZ - (170.0 * scale / 2.0)), rotation=(0.0, 0.0, -yaw)) obs = bpy.context.selected_editable_objects[:] bpy.context.scene.objects.active = obs[0] obs[0].name = 'player' obs[0].dimensions = (40 * scale, 20 * scale, 170.0 * scale) bpy.data.objects["Camera"].parent = bpy.data.objects[ "player"] bpy.data.objects["Camera"].location = (0.0, -30.0 * scale, 22 * scale) bpy.data.objects["player"].game.physics_type = 'CHARACTER' 
bpy.data.objects["player"].game.use_collision_bounds = True bpy.data.objects["player"].game.step_height = 0.8 #add logic blocks obj = bpy.data.objects["player"] cam = bpy.data.objects["Camera"] #foward bpy.ops.logic.sensor_add(type="KEYBOARD", object="player") bpy.ops.logic.controller_add(type="LOGIC_AND", object="player") bpy.ops.logic.actuator_add(type="MOTION", object="player") obj.game.sensors[-1].link(obj.game.controllers[-1]) obj.game.actuators[-1].link(obj.game.controllers[-1]) obj.game.sensors[-1].name = "w" obj.game.sensors[-1].key = "W" obj.game.actuators[-1].offset_location[1] = -speed #backward bpy.ops.logic.sensor_add(type="KEYBOARD", object="player") bpy.ops.logic.controller_add(type="LOGIC_AND", object="player") bpy.ops.logic.actuator_add(type="MOTION", object="player") obj.game.sensors[-1].link(obj.game.controllers[-1]) obj.game.actuators[-1].link(obj.game.controllers[-1]) obj.game.sensors[-1].name = "s" obj.game.sensors[-1].key = "S" obj.game.actuators[-1].offset_location[1] = speed #left bpy.ops.logic.sensor_add(type="KEYBOARD", object="player") bpy.ops.logic.controller_add(type="LOGIC_AND", object="player") bpy.ops.logic.actuator_add(type="MOTION", object="player") obj.game.sensors[-1].link(obj.game.controllers[-1]) obj.game.actuators[-1].link(obj.game.controllers[-1]) obj.game.sensors[-1].name = "a" obj.game.sensors[-1].key = "A" obj.game.actuators[-1].offset_location[0] = speed #right bpy.ops.logic.sensor_add(type="KEYBOARD", object="player") bpy.ops.logic.controller_add(type="LOGIC_AND", object="player") bpy.ops.logic.actuator_add(type="MOTION", object="player") obj.game.sensors[-1].link(obj.game.controllers[-1]) obj.game.actuators[-1].link(obj.game.controllers[-1]) obj.game.sensors[-1].name = "d" obj.game.sensors[-1].key = "D" obj.game.actuators[-1].offset_location[0] = -speed #jump bpy.ops.logic.sensor_add(type="KEYBOARD", object="player") bpy.ops.logic.controller_add(type="LOGIC_AND", object="player") bpy.ops.logic.actuator_add(type="MOTION", object="player") obj.game.sensors[-1].link(obj.game.controllers[-1]) obj.game.actuators[-1].link(obj.game.controllers[-1]) obj.game.sensors[-1].name = "space" obj.game.sensors[-1].key = "SPACE" obj.game.actuators[-1].mode = 'OBJECT_CHARACTER' obj.game.actuators[-1].use_character_jump = True #mouse view bpy.ops.logic.sensor_add(type="MOUSE", object="player") bpy.ops.logic.controller_add(type="LOGIC_AND", object="player") bpy.ops.logic.actuator_add(type="MOUSE", object="player") obj.game.sensors[-1].link(obj.game.controllers[-1]) obj.game.actuators[-1].link(obj.game.controllers[-1]) obj.game.sensors[-1].mouse_event = 'MOVEMENT' obj.game.actuators[-1].mode = 'LOOK' obj.game.actuators[-1].sensitivity_y = 0.0 bpy.ops.logic.actuator_add(type="MOUSE", object="Camera") cam.game.actuators[-1].link(obj.game.controllers[-1]) cam.game.actuators[-1].mode = 'LOOK' cam.game.actuators[-1].sensitivity_x = 0.0 #Iterate over all members of the material struct and disable apha (to solve texture errors in blender game engine) for item in bpy.data.materials: if item.alpha == 1.0: item.use_transparency = False item.use_transparent_shadows = True else: item.raytrace_mirror.use = True item.raytrace_mirror.reflect_factor = 0.1 item.diffuse_intensity = 0.01 #better collision detection bpy.data.scenes["Scene"].game_settings.physics_step_sub = 5.0 #world settings bpy.data.worlds["World"].light_settings.use_ambient_occlusion = True bpy.data.worlds["World"].light_settings.ao_factor = 0.01 bpy.data.worlds["World"].light_settings.use_environment_light = True 
bpy.data.worlds["World"].light_settings.environment_energy = 0.01 bpy.data.scenes["Scene"].unit_settings.system = 'METRIC' bpy.data.scenes["Scene"].unit_settings.scale_length = 0.01 / scale bpy.data.scenes["Scene"].layers[0] = True bpy.data.scenes["Scene"].layers[1] = True bpy.data.scenes["Scene"].layers[2] = True bpy.data.scenes["Scene"].layers[3] = True return {'FINISHED'}
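# Sketch of the colour decoding used by the importer above: the attribute is a hex string
# whose first two digits are skipped (alpha) and whose next three pairs are read as red,
# green and blue, scaled to [0, 1]. The sample value is made up.
def hex_colour_to_rgb(colour):
    r = int(colour[2:4], 16) / 255.0
    g = int(colour[4:6], 16) / 255.0
    b = int(colour[6:8], 16) / 255.0
    return [r, g, b]

print(hex_colour_to_rgb("FF336699"))  # -> [0.2, 0.4, 0.6]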
def loadVSS(fn): '''Decode a Vicon Skeleton file (VST format). VSK is labeling skeleton. VSS is solving skeleton.''' import xml.etree.cElementTree as ET import numpy as np dom = ET.parse(fn) parameters = dom.findall('Parameters')[0] params = dict([(p.get('NAME'),p.get('VALUE')) for p in parameters]) sticks = dom.findall('MarkerSet')[0].find('Sticks') sticksPairs = [(x.get('MARKER1'),x.get('MARKER2')) for x in sticks] sticksColour= [np.fromstring(x.get('RGB1', '255 255 255'), dtype=np.uint8, sep=' ') for x in sticks] hasTargetSet = True try: markers = dom.findall('TargetSet')[0].find('Targets') except: markers = dom.findall('MarkerSet')[0].find('Markers'); hasTargetSet = False markerOffsets = [x.get('POSITION').split() for x in markers] def ev(x,params): for k,v in params.items(): x = x.replace(k,v) return float(x) # eval(x) markerOffsets = [[ev(x,params) for x in mp] for mp in markerOffsets] markerColour= [np.fromstring(col, dtype=np.uint8, sep=' ') for col in \ [x.get('MARKER', x.get('RGB')) for x in dom.findall('MarkerSet')[0].find('Markers')]] colouredMarkers = [x.get('MARKER', x.get('NAME')) for x in dom.findall('MarkerSet')[0].find('Markers')] markerNames = [x.get('MARKER', x.get('NAME')) for x in markers] markerWeights = [float(x.get('WEIGHT')) if hasTargetSet else 1.0 for x in markers] markerParents = [x.get('SEGMENT') for x in markers] skeleton = dom.findall('Skeleton')[0] # skeleton is defined as a tree of Segments # Segment contains Joint and Segment # Joint is JointDummy(0)/JointHinge(1)/JointHardySpicer(2)/JointBall(3)/JointFree(6), containing JointTemplate def ap(skeleton, parent, skel): for seg in skeleton: if seg.tag == 'Segment': skel.append([seg.get('NAME'),parent,seg.attrib]) ap(seg, len(skel)-1, skel) else: skel[parent].extend([seg.tag,seg.attrib,{} if len(seg) == 0 else seg[0].attrib]) return skel # recursively parse the skeleton root = ap(skeleton, -1, []) assert(len(markerParents) == len(markerOffsets)) def cqToR(rs, R): '''Given a compressed quaternion, form a 3x3 rotation matrix.''' angle = np.dot(rs,rs)**0.5 scale = (np.sin(angle*0.5)/angle if angle > 1e-8 else 0.5) q = np.array([rs[0]*scale,rs[1]*scale,rs[2]*scale,np.cos(angle*0.5)], dtype=np.float32) q = np.outer(q, q)*2 R[:3,:3] = [ [1.0-q[1, 1]-q[2, 2], q[0, 1]-q[2, 3], q[0, 2]+q[1, 3]], [ q[0, 1]+q[2, 3], 1.0-q[0, 0]-q[2, 2], q[1, 2]-q[0, 3]], [ q[0, 2]-q[1, 3], q[1, 2]+q[0, 3], 1.0-q[0, 0]-q[1, 1]]] def float3(x): return np.array(map(lambda x:ev(x,params), x.split()),dtype=np.float32) def mats(x): preT = x.get('PRE-POSITION', '0 0 0') postT = x.get('POST-POSITION', '0 0 0') preR = x.get('PRE-ORIENTATION', '0 0 0') postR = x.get('POST-ORIENTATION', '0 0 0') pre = np.zeros((3,4),dtype=np.float32) post = np.zeros((3,4),dtype=np.float32) pre[:,3] = float3(preT) post[:,3] = float3(postT) cqToR(float3(preR), pre[:3,:3]) cqToR(float3(postR), post[:3,:3]) return pre,post name = fn.rpartition('/')[2].partition('.')[0] numBones = len(root) jointNames = [r[0] for r in root] markerParents = np.array([jointNames.index(mp) for mp in markerParents],dtype=np.int32) jointNames[0] = 'root' # !!!! WARNING !!!! jointParents = [r[1] for r in root] jointData = [mats(r[4]) for r in root] jointTypes = [r[3] for r in root] # JointDummy(0)/JointHinge(1)/JointHardySpicer(2)/JointBall(3)/JointFree(6) #jointTemplates = [mats(r[5]) for r in root] # JointTemplate ... 
contains the same data as jointTypes jointAxes = [r[4].get('AXIS',r[4].get('AXIS-PAIR',r[4].get('EULER-ORDER','XYZ'))) for r in root] # order jointTs = [r[4].get('T',None) for r in root] Gs = np.zeros((numBones,3,4),dtype=np.float32) # GLOBAL mats Ls = np.zeros((numBones,3,4),dtype=np.float32) # LOCAL mats Bs = np.zeros((numBones,3),dtype=np.float32) # BONES for ji,pi in enumerate(jointParents): if pi == -1: Ls[ji] = jointData[ji][0] else: np.dot(jointData[pi][1][:,:3],jointData[ji][0],out=Ls[ji]); Ls[ji,:,3] += jointData[pi][1][:,3] dofNames = [] jointChans = [] # tx=0,ty,tz,rx,ry,rz jointChanSplits = [0] # TODO: locked channels for ji,(jt,T) in enumerate(zip(jointTypes,jointTs)): jointChanSplits.append(len(jointChans)) if jt == 'JointDummy': assert(T is None) elif jt == 'JointHinge': assert(T == '* ') jointChans.append(jointAxes[ji].split().index('1')+3) elif jt == 'JointHardySpicer': assert(T == '* * ') ja = jointAxes[ji].split() jointChans.append(ja.index('1',3)) jointChans.append(ja.index('1')+3) elif jt == 'JointBall': assert(T == '* * * ') ja = jointAxes[ji] jointChans.append(ord(ja[0])-ord('X')+3) jointChans.append(ord(ja[1])-ord('X')+3) jointChans.append(ord(ja[2])-ord('X')+3) elif jt == 'JointFree': assert(T == '* * * * * * ' or T is None) # version 1 of the file apparently doesn't fill this! ja = jointAxes[ji] jointChans.append(0) jointChans.append(1) jointChans.append(2) jointChanSplits[-1] = len(jointChans) jointChans.append(ord(ja[0])-ord('X')+3) jointChans.append(ord(ja[1])-ord('X')+3) jointChans.append(ord(ja[2])-ord('X')+3) for jc in jointChans[jointChanSplits[-2]:]: dofNames.append(jointNames[ji]+':'+'tx ty tz rx ry rz'.split()[jc]) jointChanSplits.append(len(jointChans)) numDofs = len(dofNames) # fill Gs chanValues = np.zeros(numDofs,dtype=np.float32) rootMat = np.eye(3, 4, dtype=np.float32) # fill Bs; TODO add dummy joints to store the extra bones (where multiple joints have the same parent) for ji,pi in enumerate(jointParents): if pi != -1: Bs[pi] = Ls[ji,:,3] Bs[np.where(Bs*Bs<0.01)] = 0 # zero out bones < 0.1mm # TODO: compare skeleton with ASF exported version skel_dict = { 'markerOffsets' : np.array(markerOffsets, dtype=np.float32), 'markerParents' : markerParents, 'markerNames' : markerNames, 'markerNamesUnq' : colouredMarkers, 'markerColour' : markerColour, 'markerWeights' : np.array(markerWeights,dtype=np.float32), 'numMarkers' : len(markerNames), 'sticks' : sticksPairs, 'sticksColour' : sticksColour, 'name' : str(name), 'numJoints' : int(numBones), 'jointNames' : jointNames, # list of strings 'jointIndex' : dict([(k,v) for v,k in enumerate(jointNames)]), # dict of string:int 'jointParents' : np.array(jointParents,dtype=np.int32), 'jointChans' : np.array(jointChans,dtype=np.int32), # 0 to 5 : tx,ty,tz,rx,ry,rz 'jointChanSplits': np.array(jointChanSplits,dtype=np.int32), 'chanNames' : dofNames, # list of strings 'chanValues' : np.zeros(numDofs,dtype=np.float32), 'numChans' : int(numDofs), 'Bs' : np.array(Bs, dtype=np.float32), 'Ls' : np.array(Ls, dtype=np.float32), 'Gs' : np.array(Gs, dtype=np.float32), 'rootMat' : rootMat, } Character.pose_skeleton(skel_dict['Gs'], skel_dict) return skel_dict
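# Sketch of the recursive Segment walk (the ap() helper above) on a tiny invented
# VSS-like skeleton: each entry records the segment name and the index of its parent,
# with -1 marking the root.
import xml.etree.ElementTree as ET

skeleton_xml = """<Skeleton>
  <Segment NAME="Hips">
    <Segment NAME="Spine">
      <Segment NAME="Head"/>
    </Segment>
    <Segment NAME="LeftUpLeg"/>
  </Segment>
</Skeleton>"""

def walk(node, parent, skel):
    for seg in node:
        if seg.tag == 'Segment':
            skel.append((seg.get('NAME'), parent))
            walk(seg, len(skel) - 1, skel)
    return skel

skeleton = ET.fromstring(skeleton_xml)
print(walk(skeleton, -1, []))
# -> [('Hips', -1), ('Spine', 0), ('Head', 1), ('LeftUpLeg', 0)]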