def display_youtube_video_title(sender, channel, message):
    '''Check a message for YouTube links and report title/uploader info.

    For every YouTube watch URL found in *message*, fetch the video's GData
    feed, extract the title and uploader, and send one combined summary line
    back to *channel*.
    '''
    results = re.finditer(r'http://(www\.)?youtube\.com/watch\?.*?v=(?P<id>[-_\w]{11})', message)
    output = u''
    for result in results:
        try:
            handle = urllib2.urlopen('http://gdata.youtube.com/feeds/api/videos/%s?v=2' % result.group('id'))
        except urllib2.HTTPError as error:
            # Report the failure and skip this link; previously execution fell
            # through with handle == None and crashed on info.parse(handle).
            send_message(u'%s' % error)
            continue
        info = ElementTree()
        try:
            try:
                info.parse(handle)
            except ExpatError:
                send_message(u'Error: Malformed XML.')
                continue
        finally:
            # Close the connection on every path, not only after a parse error.
            handle.close()
        title = info.findtext('{%s}title' % ATOM)
        uploader = info.findtext('{%s}author/{%s}name' % (ATOM, ATOM))
        if title is not None and uploader is not None:
            output += u"'%s' uploaded by %s\n" % (title, uploader)
    if output != u'':
        # strip last \n in output
        output = output[:-1]
        # send message back to channel
        send_message(channel, output)
def GetGPSData(self):
    """Query the server's GPS test endpoint and log the reported state."""
    self.WriteLog("Get gps Data ----- Start ")
    # Pull the raw XML answer from the GPS test endpoint.
    connection = urllib.urlopen(const.SERVER_URL + const.TEST_TYPE_GPS)
    payload = connection.read()
    tree = ElementTree(XML(payload))
    tree.getroot()
    state_value = tree.findtext("p-answer/gps/state/value")
    state_description = tree.findtext("p-answer/gps/state/percentage")
    self.WriteLog(const.STR_SHAP)
    self.WriteLog("value : " + state_value)
    self.WriteLog("description: " + state_description)
    self.WriteLog(const.STR_SHAP)
    connection.close()
    self.WriteLog("Get gps Data ----- End ")
def pkgs_to_version_dict(pkg_list: List[str]) -> Dict[str, str]:
    """
    Look up the declared version of every package in *pkg_list*.

    The result maps each package name to the ``<version>`` text found in
    its package manifest.

    :param pkg_list: List of package names
    :return: Version dictionary
    :raises Exception: In case a package file is missing or can't be parsed
    """
    versions: Dict[str, str] = {}
    manifest_manager = rospkg.rospack.ManifestManager(rospkg.common.PACKAGE_FILE)
    for pkg in pkg_list:
        manifest_path = os.path.join(
            manifest_manager.get_path(pkg), rospkg.common.PACKAGE_FILE)
        try:
            tree = ElementTree(None, manifest_path)
            versions[pkg] = tree.findtext("version")
        except Exception as e:
            # Log with full context before re-raising so the caller sees why.
            logging.getLogger(__name__).error(
                f"Error during parsing of '{rospkg.common.PACKAGE_FILE}' of package '{pkg}': {e}"
            )
            raise
    return versions
def test_image():
    """Draw each annotation's bounding boxes on its image and preview it scaled to fit."""
    anno_dir = "output/annotations/"
    for anno_file in os.listdir(anno_dir):
        tree = ElementTree()
        tree.parse(os.path.join(anno_dir, anno_file))
        # Decode via np.fromfile so non-ASCII paths work on Windows too.
        image = cv.imdecode(
            np.fromfile(os.path.join("output", tree.findtext("path")), dtype=np.uint8), -1)
        boxes = []
        for obj in tree.iter(tag="object"):
            bnd = obj.find("bndbox")
            boxes.append([
                int(bnd.findtext("xmin")),
                int(bnd.findtext("ymin")),
                int(bnd.findtext("xmax")),
                int(bnd.findtext("ymax")),
                obj.findtext("name"),
            ])
        for b in boxes:
            cv.rectangle(image, (b[0], b[1]), (b[2], b[3]), (0, 255, 0), 4)
            cv.putText(image, b[4], (b[0], b[1] - 5), cv.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
        # Fit inside a 1080x720 window while preserving aspect ratio.
        scale = min(1080.0 / image.shape[1], 720.0 / image.shape[0])
        resized = cv.resize(image, None, fx=scale, fy=scale, interpolation=cv.INTER_AREA)
        cv.imshow("resized", resized)
        cv.waitKey()
        cv.destroyAllWindows()
def parse_file(filename):
    """Parse a config-definition XML file into its parts.

    Returns a dict with keys ``classname``, ``include``, ``enums``,
    ``values``, ``filename`` and ``globals``.
    """
    tree = ElementTree()
    tree.parse(filename)
    #config_element = tree.find("config")
    # The root element's ``name`` attribute names the generated class.
    classname = tree.getroot().attrib["name"]
    include = tree.findtext("include")
    enums = {}
    for enum in tree.findall("enums/enum"):
        # NOTE(review): Element.getiterator() was removed in Python 3.9, and a
        # bare map() only yields a list on Python 2 — this variant is
        # Python-2 era (see the list()-wrapped twin of this function).
        elements = map(lambda f: f.attrib["name"], enum.getiterator("element"))
        enums[enum.attrib["name"]] = elements
    values = []
    for v_elem in tree.findall('values/value'):
        # Each <value> carries name/type/default attributes; the element text
        # serves as its comment.
        v = {}
        v['name'] = v_elem.attrib['name']
        v['type'] = v_elem.attrib['type']
        v['default'] = v_elem.attrib['default']
        v['comment'] = v_elem.text
        values.append(v)
    global_vars = []
    for g_elem in tree.findall('global'):
        global_vars.append(g_elem.attrib['name'])
    return {"classname":classname, "include":include, "enums":enums, "values":values, "filename":filename, "globals":global_vars}
def parse_file(filename):
    """Parse a config-definition XML file into its parts.

    :param filename: path to the XML file; its root element must carry a
        ``name`` attribute naming the generated class.
    :return: dict with keys ``classname``, ``include``, ``enums``,
        ``values``, ``filename`` and ``globals``.
    """
    tree = ElementTree()
    tree.parse(filename)
    classname = tree.getroot().attrib["name"]
    include = tree.findtext("include")

    enums = {}
    for enum in tree.findall("enums/enum"):
        # Element.getiterator() was removed in Python 3.9; iter() is the
        # supported equivalent.
        enums[enum.attrib["name"]] = [e.attrib["name"] for e in enum.iter("element")]

    values = []
    for v_elem in tree.findall('values/value'):
        # Each <value> carries name/type/default attributes; the element text
        # serves as its comment.
        values.append({
            'name': v_elem.attrib['name'],
            'type': v_elem.attrib['type'],
            'default': v_elem.attrib['default'],
            'comment': v_elem.text,
        })

    global_vars = [g_elem.attrib['name'] for g_elem in tree.findall('global')]

    return {"classname": classname, "include": include, "enums": enums,
            "values": values, "filename": filename, "globals": global_vars}
def get_package_name(path):
    """
    Get the name of the ROS package that contains *path*. This is
    determined by finding the nearest parent ``manifest.xml`` or
    ``package.xml`` file. This routine may not traverse package setups
    that rely on internal symlinks within the package itself.

    :param path: filesystem path
    :return: Package name or ``None`` if package cannot be found, ``str``
    """
    # NOTE: the realpath is going to create issues with symlinks, most likely.
    parent = os.path.dirname(os.path.realpath(path))
    # Climb towards the filesystem root until a manifest is found or we
    # can climb no further (parent == path at the root).
    while (not os.path.exists(os.path.join(path, MANIFEST_FILE))
           and not os.path.exists(os.path.join(path, PACKAGE_FILE))
           and parent != path):
        path, parent = parent, os.path.dirname(parent)
    if os.path.exists(os.path.join(path, MANIFEST_FILE)):
        # Dry (rosbuild) package: the directory name is the package name.
        return os.path.basename(os.path.abspath(path))
    if os.path.exists(os.path.join(path, PACKAGE_FILE)):
        # Wet (catkin) package: read the declared <name> from package.xml.
        manifest = ElementTree(None, os.path.join(path, PACKAGE_FILE))
        return manifest.findtext("name")
    return None
def search(self, keyword, page=1, pagesize=50):
    """Search the ArtStor SRU gateway for *keyword*, with day-long caching.

    Returns a dict ``{'records': [...], 'hits': total}`` or ``None`` when
    the keyword is empty, the gateway is unreachable, or nothing matched.
    Results are cached in HitCount for one day.
    """
    if not keyword:
        return None
    # One cache row per (keyword, page, pagesize) combination.
    cached, created = HitCount.current_objects.get_or_create(
        source=self.get_source_id(),
        query='%s [%s:%s]' % (keyword, page, pagesize),
        defaults=dict(
            hits=0,
            valid_until=datetime.datetime.now() + datetime.timedelta(1)
        )
    )
    if not created and cached.results:
        # Cache hit: return the previously stored JSON payload.
        return simplejson.loads(cached.results)
    opener = urllib2.build_opener(
        urllib2.HTTPCookieProcessor(cookielib.CookieJar()),
        SmartRedirectHandler()
    )
    # SRU searchRetrieve request; startRecord is 1-based.
    url = '%s?query="%s"&operation=searchRetrieve&version=1.1&' \
        'maximumRecords=%s&startRecord=%s' % (
            settings.ARTSTOR_GATEWAY,
            urllib.quote(keyword),
            pagesize,
            (page - 1) * pagesize + 1,
        )
    # NOTE(review): this sets a process-wide socket timeout, not a
    # per-request one.
    socket.setdefaulttimeout(self.timeout)
    try:
        response = opener.open(urllib2.Request(url))
    except urllib2.URLError:
        return None
    try:
        results = ElementTree(file=response)
        total = results.findtext(
            '{http://www.loc.gov/zing/srw/}numberOfRecords') or 0
    except ExpatError:
        total = 0
    if not total:
        return None
    result = dict(records=[], hits=total)
    for image in results.findall('//{info:srw/schema/1/dc-v1.1}dc'):
        # NOTE(review): ``url`` and ``tn`` are only (re)assigned when the
        # matching identifier prefixes are present — a record without them
        # reuses the previous record's values (or the request URL).
        for ids in image.findall(
                '{http://purl.org/dc/elements/1.1/}identifier'):
            if ids.text.startswith('URL'):
                url = ids.text[len('URL:'):]
            elif ids.text.startswith('THUMBNAIL'):
                tn = ids.text[len('THUMBNAIL:'):]
        title = image.findtext('{http://purl.org/dc/elements/1.1/}title')
        result['records'].append(dict(
            thumb_url=tn, title=title, record_url=url))
    # Store the fresh result compactly in the cache row created above.
    cached.results = simplejson.dumps(result, separators=(',', ':'))
    cached.save()
    return result
def findtext(
    tree: ElementTree.ElementTree,
    query: str,
    namespaces: T.Dict[str, str] = SENTINEL1_NAMESPACES,
) -> str:
    """Return the text matched by *query*, raising instead of returning ``None``."""
    found = tree.findtext(query, namespaces=namespaces)
    if found is None:
        raise ValueError(f"query={query} returned None")
    return found
def get_version(path):
    """Read the ``<version>`` text from the stack.xml inside *path*.

    :param path: folder expected to contain a stack.xml
    :raises RuntimeError: when stack.xml is missing or cannot be parsed
    """
    stack_path = os.path.join(path, 'stack.xml')
    if not os.path.exists(stack_path):
        raise RuntimeError('Could not find stack.xml in current folder.')
    try:
        tree = ElementTree(None, stack_path)
        return tree.findtext('version')
    except Exception as e:
        # Wrap any parse failure in a RuntimeError for the caller.
        raise RuntimeError('Could not extract version from stack.xml:\n%s' % e)
def get_status_stats(firewall_ip, api_key, jobid):
    """Poll the firewall API for a stats-dump export job's status and progress.

    :param firewall_ip: address of the firewall
    :param api_key: API key used to authenticate
    :param jobid: id of the previously started stats-dump job
    :return: ``(status, progress)`` tuple of strings
    """
    payload = {
        'type': 'export',
        'category': 'stats-dump',
        'action': 'status',
        'job-id': jobid,
        'key': api_key
    }
    endpoint = 'https://%s/api/' % (firewall_ip)
    # The firewall uses a self-signed certificate, hence verify=False.
    reply = requests.post(endpoint, data=payload, verify=False)
    tree = ElementTree(fromstring(reply.text))
    return (tree.findtext('./result/job/status'),
            tree.findtext('./result/job/progress'))
def main():
    """Entry point for the ``rosversion`` command-line tool.

    ``rosversion -d`` prints the ROS distribution name; ``rosversion <pkg>``
    prints the version of the given stack/package.
    """
    parser = argparse.ArgumentParser(
        description='rosversion -d: Output the version of the given package\n'
        'rosversion package: Output the ROS distribution name',
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-s', '--skip-newline', action='store_true', help='Skip trailing newline')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('package', nargs='?', help="The ROS package name (e.g. 'roscpp')")
    group.add_argument('-d', '--distro', action='store_true', help='Output the ROS distribution name')
    args = parser.parse_args()

    printer = print_without_newline if args.skip_newline else print

    if args.distro:
        if 'ROS_DISTRO' in os.environ:
            distro_name = os.environ['ROS_DISTRO']
        else:
            distro_name = get_distro_name_from_roscore()
        if not distro_name:
            distro_name = '<unknown>'
        printer(distro_name)
        sys.exit(0)

    rosstack = RosStack()
    # Initialize so a missing/unparsable package.xml falls through to
    # '<unversioned>' instead of raising NameError on the final check.
    version = None
    try:
        version = rosstack.get_stack_version(args.package)
    except ResourceNotFound:
        try:
            # hack to make it work with wet packages
            mm = ManifestManager(PACKAGE_FILE)
            path = mm.get_path(args.package)
            package_manifest = os.path.join(path, 'package.xml')
            if os.path.exists(package_manifest):
                from xml.etree.ElementTree import ElementTree
                try:
                    root = ElementTree(None, package_manifest)
                    version = root.findtext('version')
                except Exception:
                    pass
        except ResourceNotFound:
            print('Cannot locate [%s]' % args.package)
            sys.exit(1)
    if version is None:
        version = '<unversioned>'
    printer(version)
def search(self, keyword, page=1, pagesize=50):
    """Search the ArtStor SRU gateway for *keyword*, with day-long caching.

    Returns a dict ``{'records': [...], 'hits': total}`` or ``None`` when
    the keyword is empty, the gateway is unreachable, or nothing matched.
    """
    if not keyword:
        return None
    # One cache row per (keyword, page, pagesize) combination.
    cached, created = HitCount.current_objects.get_or_create(
        source=self.get_source_id(),
        query='%s [%s:%s]' % (keyword, page, pagesize),
        defaults=dict(hits=0,
                      valid_until=datetime.datetime.now() +
                      datetime.timedelta(1)))
    if not created and cached.results:
        # Cache hit: return the previously stored JSON payload.
        return simplejson.loads(cached.results)
    opener = urllib2.build_opener(
        urllib2.HTTPCookieProcessor(cookielib.CookieJar()),
        SmartRedirectHandler())
    # SRU searchRetrieve request; startRecord is 1-based.
    url = '%s?query="%s"&operation=searchRetrieve&version=1.1&maximumRecords=%s&startRecord=%s' % (
        settings.ARTSTOR_GATEWAY,
        urllib.quote(keyword),
        pagesize,
        (page - 1) * pagesize + 1,
    )
    # NOTE(review): sets a process-wide socket timeout, not per-request.
    socket.setdefaulttimeout(self.timeout)
    try:
        response = opener.open(urllib2.Request(url))
    except urllib2.URLError:
        return None
    try:
        results = ElementTree(file=response)
        total = results.findtext(
            '{http://www.loc.gov/zing/srw/}numberOfRecords') or 0
    except ExpatError:
        total = 0
    if not total:
        return None
    # NOTE(review): ``pages`` is computed but never used.
    pages = int(math.ceil(float(total) / pagesize))
    result = dict(records=[], hits=total)
    for image in results.findall('//{info:srw/schema/1/dc-v1.1}dc'):
        # NOTE(review): ``url``/``tn``/``id`` are only (re)assigned when the
        # matching identifier prefixes are present — a record without them
        # reuses the previous record's values.
        for ids in image.findall(
                '{http://purl.org/dc/elements/1.1/}identifier'):
            if ids.text.startswith('URL'):
                url = ids.text[len('URL:'):]
            elif ids.text.startswith('THUMBNAIL'):
                tn = ids.text[len('THUMBNAIL:'):]
            else:
                id = ids.text
        title = image.findtext('{http://purl.org/dc/elements/1.1/}title')
        result['records'].append(
            dict(thumb_url=tn, title=title, record_url=url))
    # Store the fresh result compactly in the cache row created above.
    cached.results = simplejson.dumps(result, separators=(',', ':'))
    cached.save()
    return result
def main():
    """Entry point for the ``rosversion`` command-line tool.

    ``rosversion -d`` prints the ROS distribution name; ``rosversion <pkg>``
    prints the version of the given stack/package.
    """
    parser = argparse.ArgumentParser(
        description='rosversion -d: Output the version of the given package\n'
        'rosversion package: Output the ROS distribution name',
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        '-s', '--skip-newline', action='store_true',
        help='Skip trailing newline')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument(
        'package', nargs='?', help="The ROS package name (e.g. 'roscpp')")
    group.add_argument(
        '-d', '--distro', action='store_true',
        help='Output the ROS distribution name')
    args = parser.parse_args()

    printer = print_without_newline if args.skip_newline else print

    if args.distro:
        if 'ROS_DISTRO' in os.environ:
            distro_name = os.environ['ROS_DISTRO']
        else:
            distro_name = get_distro_name_from_roscore()
        if not distro_name:
            distro_name = '<unknown>'
        printer(distro_name)
        sys.exit(0)

    rosstack = RosStack()
    # Initialize so a missing/unparsable package.xml falls through to
    # '<unversioned>' instead of raising NameError on the final check.
    version = None
    try:
        version = rosstack.get_stack_version(args.package)
    except ResourceNotFound:
        try:
            # hack to make it work with wet packages
            mm = ManifestManager(PACKAGE_FILE)
            path = mm.get_path(args.package)
            package_manifest = os.path.join(path, 'package.xml')
            if os.path.exists(package_manifest):
                from xml.etree.ElementTree import ElementTree
                try:
                    root = ElementTree(None, package_manifest)
                    version = root.findtext('version')
                except Exception:
                    pass
        except ResourceNotFound:
            print('Cannot locate [%s]' % args.package)
            sys.exit(1)
    if version is None:
        version = '<unversioned>'
    printer(version)
def list_by_path(manifest_name, path, cache):
    """
    List ROS stacks or packages within the specified path.

    The cache will be updated with the resource->path mappings.
    list_by_path() does NOT returned cached results -- it only updates the cache.

    :param manifest_name: MANIFEST_FILE or STACK_FILE, ``str``
    :param path: path to list resources in, ``str``
    :param cache: path cache to update. Maps resource name to directory path, ``{str: str}``
    :returns: complete list of resources in ROS environment, ``[str]``
    """
    resources = []
    path = os.path.abspath(path)
    # Local alias; hoists the attribute lookup out of the walk loop.
    basename = os.path.basename
    for d, dirs, files in os.walk(path, topdown=True, followlinks=True):
        if PACKAGE_FILE in files:
            # parse package.xml and decide if it matches the search criteria
            root = ElementTree(None, os.path.join(d, PACKAGE_FILE))
            is_metapackage = root.find("./export/metapackage") is not None
            # A package.xml directory matches when we are listing stacks and
            # it is a metapackage, listing packages and it is not, or listing
            # package.xml files directly.
            if (
                (manifest_name == STACK_FILE and is_metapackage)
                or (manifest_name == MANIFEST_FILE and not is_metapackage)
                or manifest_name == PACKAGE_FILE
            ):
                resource_name = root.findtext("name")
                if resource_name not in resources:
                    resources.append(resource_name)
                    if cache is not None:
                        cache[resource_name] = d
                # Do not descend into a package directory.
                del dirs[:]
                continue  # leaf
        if manifest_name in files:
            # Dry (rosbuild) resource: the directory name is the resource name.
            resource_name = basename(d)
            if resource_name not in resources:
                resources.append(resource_name)
                if cache is not None:
                    cache[resource_name] = d
            del dirs[:]
            continue  # leaf
        elif MANIFEST_FILE in files or PACKAGE_FILE in files:
            # noop if manifest_name==MANIFEST_FILE, but a good
            # optimization for stacks.
            del dirs[:]
            continue  # leaf
        elif "rospack_nosubdirs" in files or "CATKIN_IGNORE" in files:
            # Marker files that explicitly stop the descent.
            del dirs[:]
            continue  # leaf
        # remove hidden dirs (esp. .svn/.git)
        [dirs.remove(di) for di in dirs if di[0] == "."]
    return resources
def search(query, params, off, num_results_wanted): off = int(off) # query and params both come from sidebar, so should have exactly one. if not query and not params: return Result(0, off), get_empty_parameters() elif query and params: print "artstor 34, shouldn't have reached here... Have both query (%s) and params (%s)" % ( query, params) raise NotImplementedError elif query: query_terms = Query_Language(identifier).searcher_translator(query) else: query_terms = parse_parameters(params) print "artstor 48 query_terms %s" % query_terms # return empty result if no search terms (submitting an empty query breaks artstor) if len(query_terms) is 0: return Result(0, 0), get_empty_parameters() """ Caching of results, uncomment and fix if supported in other searchers TODO cached, created = HitCount.current_objects.get_or_create( source=self.get_source_id(), query='%s [%s:%s]' % (keyword, page, pagesize), defaults=dict(hits=0, valid_until=datetime.datetime.now() + datetime.timedelta(1))) if not created and cached.results: return simplejson.loads(cached.results) """ pagesize = num_results_wanted url = _get_url(query_terms, pagesize, off) html_page = _get_html_page(url) try: results = ElementTree(file=html_page) print "artstor 61 \n\tfile %s\n\tresults %s\n" % (html_page, results) num_results = int( results.findtext( '{http://www.loc.gov/zing/srw/}numberOfRecords')) or 0 except ExpatError: # XML parsing error num_results = 0 if not num_results: # other type of error or no results found return Result(0, off), _build_returnable_parameters(query_terms) #pages = int(math.ceil(float(total) / pagesize)) result = Result(num_results, num_results + off) image_divs = results.findall('.//{info:srw/schema/1/dc-v1.1}dc') print "artstor 77 query_terms %s" % query_terms for div in image_divs: (url, thumb, image_identifier, title) = _get_image(div) result.addImage(ResultImage(url, thumb, title, image_identifier)) # TODO cope with full image not actually giving result (timeout error) return 
result, _build_returnable_parameters(query_terms)
def search(query, params, off, num_results_wanted): off = int(off) # query and params both come from sidebar, so should have exactly one. if not query and not params: return Result(0, off), get_empty_parameters() elif query and params: print "artstor 34, shouldn't have reached here... Have both query (%s) and params (%s)" %(query, params) raise NotImplementedError elif query: query_terms = Query_Language(identifier).searcher_translator(query) else: query_terms = parse_parameters(params) print "artstor 48 query_terms %s" %query_terms # return empty result if no search terms (submitting an empty query breaks artstor) if len(query_terms) is 0: return Result(0, 0), get_empty_parameters() """ Caching of results, uncomment and fix if supported in other searchers TODO cached, created = HitCount.current_objects.get_or_create( source=self.get_source_id(), query='%s [%s:%s]' % (keyword, page, pagesize), defaults=dict(hits=0, valid_until=datetime.datetime.now() + datetime.timedelta(1))) if not created and cached.results: return simplejson.loads(cached.results) """ pagesize = num_results_wanted url = _get_url(query_terms, pagesize, off) html_page = _get_html_page(url) try: results = ElementTree(file=html_page) print "artstor 61 \n\tfile %s\n\tresults %s\n" %(html_page, results) num_results = int(results.findtext('{http://www.loc.gov/zing/srw/}numberOfRecords')) or 0 except ExpatError: # XML parsing error num_results = 0 if not num_results: # other type of error or no results found return Result(0, off), _build_returnable_parameters(query_terms) #pages = int(math.ceil(float(total) / pagesize)) result = Result(num_results, num_results+off) image_divs = results.findall('.//{info:srw/schema/1/dc-v1.1}dc') print "artstor 77 query_terms %s" %query_terms for div in image_divs: (url, thumb, image_identifier, title) = _get_image(div) result.addImage(ResultImage(url, thumb, title, image_identifier)) # TODO cope with full image not actually giving result (timeout error) return result, 
_build_returnable_parameters(query_terms)
class City():
    """A city's weather data, parsed from an XML data URL."""

    # note that we are grabbing a different data URL than in the city list
    def __init__(self, dataurl):
        self.tree = ElementTree()
        try:
            urlhandle = urllib.urlopen(dataurl)
        except IOError:
            print("[Error] Unable to open the data url: " + dataurl)
            sys.exit(1)
        self.tree.parse(urlhandle)

    def getQuantity(self, path):
        """Get the quantity contained at the XML XPath."""
        return self.tree.findtext(path)

    def getAttribute(self, path, attribute):
        """Get the attribute of the element at XPath path."""
        element = self.tree.find(path)
        # BUG FIX: membership and lookup must go through element.attrib —
        # the old code tested containment on the Element itself and then
        # indexed it with the literal string 'attribute' (a TypeError).
        if element is not None and attribute in element.attrib:
            return element.attrib[attribute]
        return None

    def getAvailableQuantities(self):
        """Get a list of all the available quantities in the form of
        their XML XPaths.
        """
        pathlist = []
        self._getAllXPaths(pathlist, "", self.tree.getroot())
        return pathlist

    # This nasty little function recursively traverses an element tree to
    # get all the available XPaths; you have to pass in the pathlist you
    # want to contain the list.
    def _getAllXPaths(self, pathlist, path, element):
        # getchildren() was removed in Python 3.9; list(element) is the
        # portable equivalent.
        children = list(element)
        if not children:
            sys.stdout.write(path + "/" + element.tag + "\n")
            pathlist.append(path + "/" + element.tag)
        else:
            for child in children:
                self._getAllXPaths(pathlist, path + "/" + element.tag, child)
def _generate_nexus_url(self):
    """ Generates nexus URL for artifact download """
    pom = ElementTree(file=os.path.join(self._cwd, "pom.xml"))
    if self.project_version is None or self.project_version == '':
        # Fall back to the version declared in the POM.
        self.project_version = pom.findtext(
            "{http://maven.apache.org/POM/4.0.0}version")
    return '{0}/{1}/{2}-{1}.jar'.format(
        self._generate_base_nexus_url(pom),
        self.project_version,
        self._jar_name)
def _add_homebrew_meta_xml_metadata(rom: ROM, metadata: 'Metadata', meta_xml: ElementTree.ElementTree):
    """Populate *metadata* (and *rom*) from a homebrew meta.xml document."""
    banner_title = meta_xml.findtext('name')
    if banner_title:
        rom.ignore_name = True
        metadata.add_alternate_name(banner_title, 'Banner Title')
    metadata.developer = metadata.publisher = meta_xml.findtext('coder')
    metadata.specific_info['Version'] = meta_xml.findtext('version')
    homepage = meta_xml.findtext('url')
    if homepage:
        metadata.documents['Homepage'] = homepage
    release_date_text = meta_xml.findtext('release_date')
    if release_date_text:
        # YYYYMMDD... — slice out year/month/day.
        metadata.release_date = Date(
            release_date_text[0:4], release_date_text[4:6], release_date_text[6:8])
    for element_tag, description_key in (
            ('short_description', 'Short Description'),
            ('long_description', 'Long Description')):
        description = meta_xml.findtext(element_tag)
        if description:
            metadata.descriptions[description_key] = description
    # Makes me wonder if it's feasible to include an option to get categories not from folders…
    metadata.specific_info['Homebrew Category'] = meta_xml.findtext('category') or 'None'
def GetSrvID(self, testtype=const.TEST_TYPE_CONDIS):
    """Request the server's ID for the given test type, log it and return it."""
    self.WriteLog("GetSrvID ----- Start ")
    # ConnectionHTTP performs the request; index 2 holds the XML body.
    answer = self.ConnectionHTTP(testtype)
    tree = ElementTree(XML(answer[2]))
    tree.getroot()
    server_id = tree.findtext("p-answer/id")
    self.WriteLog(const.STR_SHAP)
    self.WriteLog("Server ID : " + server_id)
    self.WriteLog(const.STR_SHAP)
    self.WriteLog("GetSrvID ----- END ")
    return server_id
def get_tsf(firewall_ip, api_key):
    """Start a tech-support-file export on the firewall and return the job id.

    Exits the process when the firewall does not report success.
    """
    payload = {'type': 'export', 'category': 'tech-support', 'key': api_key}
    endpoint = 'https://%s/api/' % (firewall_ip)
    # The firewall uses a self-signed certificate, hence verify=False.
    reply = requests.post(endpoint, data=payload, verify=False)
    tree = ElementTree(fromstring(reply.text))
    if tree.getroot().attrib["status"] == "success":
        print("TSF Generated")
    else:
        print("TSF failed")
        sys.exit()
    return tree.findtext('./result/job')
def get_ros_package_version(self, stack_name):
    """Return the version of *stack_name*, or None if it cannot be determined.

    copied from https://github.com/ros-infrastructure/rospkg/blob/master/scripts/rosversion
    """
    try:
        # BUG FIX: the looked-up version was previously discarded — the
        # function fell through to ``return None`` even on success.
        return self.rosstack.get_stack_version(stack_name)
    except rospkg.ResourceNotFound:
        try:
            # hack to make it work with wet packages
            path = self.mm.get_path(stack_name)
            package_manifest = os.path.join(path, 'package.xml')
            if os.path.exists(package_manifest):
                from xml.etree.ElementTree import ElementTree
                try:
                    root = ElementTree(None, package_manifest)
                    return root.findtext('version')
                except Exception:
                    pass
        except rospkg.ResourceNotFound:
            return None
    return None
def _generate_nexus_url(self):
    """ Generates nexus URL for artifact download """
    pom = ElementTree(file=os.path.join(self._cwd, "pom.xml"))
    if self.project_version is None or self.project_version == '':
        # Fall back to the version declared in the POM.
        self.project_version = pom.findtext("{http://maven.apache.org/POM/4.0.0}version")
    artifact = '{0}-{1}'.format(self._jar_name, self.project_version)
    if self._classifier is not None:
        artifact += '-{0}'.format(self._classifier)
    return '/'.join([self._generate_base_nexus_url(pom),
                     self.project_version,
                     artifact + '.jar'])
def get_version_from_package_name(package):
    """Return the version of *package* (stack or wet package).

    Prints an error and exits with status 1 when the package cannot be
    located at all; returns None when found but unversioned/unparsable.
    """
    rosstack = RosStack()
    # Initialize so a missing/unparsable package.xml returns None instead of
    # raising NameError on the final return.
    version = None
    try:
        version = rosstack.get_stack_version(package)
    except ResourceNotFound:
        try:
            # hack to make it work with wet packages
            mm = ManifestManager(PACKAGE_FILE)
            path = mm.get_path(package)
            package_manifest = os.path.join(path, 'package.xml')
            if os.path.exists(package_manifest):
                from xml.etree.ElementTree import ElementTree
                try:
                    root = ElementTree(None, package_manifest)
                    version = root.findtext('version')
                except Exception:
                    pass
        except ResourceNotFound:
            print('Cannot locate [%s]' % package)
            sys.exit(1)
    return version
def get_comments_meta(self, start_id=0, journal=None):
    """Fetch comment metadata and the poster usermap from the export API.

    This is plain api call. You should cache this.
    See http://www.livejournal.com/developer/exporting.bml.

    :param start_id: first comment id to fetch metadata from
    :param journal: optional journal to act as (``authas``)
    :return: dict with keys ``maxid``, ``comments`` (id -> attrib dict)
        and ``usermaps`` (poster id -> user name)
    """
    session = self.get_session()
    headers = {
        'User-Agent': self.user_agent,
        'Cookie': 'ljsession=' + session
    }
    url = 'http://%s/export_comments.bml'\
        '?get=comment_meta&startid=%d' %\
        (self.server_name, start_id)
    if journal:
        url += '&authas=%s' % journal
    logging.debug('sending get_comments: %s (%s)' % (url, str(headers)))
    request = urllib.request.Request(url, None, headers)
    response = urllib.request.urlopen(request)
    result = {}
    tree = ElementTree()
    tree.parse(response)
    result['maxid'] = tree.findtext('maxid')
    comments = tree.find('comments')
    result['comments'] = {}
    # BUG FIX: Element.getiterator() was removed in Python 3.9; iter() is
    # the supported equivalent.
    for comment in comments.iter('comment'):
        attrib = comment.attrib.copy()
        comment_id = int(attrib['id'])
        del attrib['id']
        try:
            attrib['posterid'] = int(attrib['posterid'])
        except KeyError:
            # Anonymous comments carry no posterid.
            pass
        result['comments'][comment_id] = attrib
    usermaps = tree.find('usermaps')
    result['usermaps'] = {}
    for usermap in usermaps.iter('usermap'):
        result['usermaps'][int(usermap.attrib['id'])] = usermap.attrib['user']
    return result
def get_comments_meta(self, start_id=0, journal=None):
    """Fetch comment metadata and the poster usermap from the export API.

    This is plain api call. You should cache this.
    See http://www.livejournal.com/developer/exporting.bml.

    :param start_id: first comment id to fetch metadata from
    :param journal: optional journal to act as (``authas``)
    :return: dict with keys ``maxid``, ``comments`` (id -> attrib dict)
        and ``usermaps`` (poster id -> user name)
    """
    session = self.get_session()
    headers = {'User-Agent': self.user_agent,
               'Cookie': 'ljsession=' + session}
    url = 'http://%s/export_comments.bml'\
        '?get=comment_meta&startid=%d' %\
        (self.server_name, start_id)
    if journal:
        url += '&authas=%s' % journal
    logging.debug('sending get_comments: %s (%s)' % (
        url, str(headers)))
    request = urllib.request.Request(url, None, headers)
    response = urllib.request.urlopen(request)
    result = {}
    tree = ElementTree()
    tree.parse(response)
    result['maxid'] = tree.findtext('maxid')
    comments = tree.find('comments')
    result['comments'] = {}
    # BUG FIX: Element.getiterator() was removed in Python 3.9; iter() is
    # the supported equivalent.
    for comment in comments.iter('comment'):
        attrib = comment.attrib.copy()
        comment_id = int(attrib['id'])
        del attrib['id']
        try:
            attrib['posterid'] = int(attrib['posterid'])
        except KeyError:
            # Anonymous comments carry no posterid.
            pass
        result['comments'][comment_id] = attrib
    usermaps = tree.find('usermaps')
    result['usermaps'] = {}
    for usermap in usermaps.iter('usermap'):
        result['usermaps'][int(usermap.attrib['id'])] = usermap.attrib['user']
    return result
def __init__(self, args):
    """Build the feed model dict from the MythTV services XML at ``args.service_url``."""
    service_url = args.service_url
    tree = ElementTree()
    try:
        tree.parse(urllib.request.urlopen(service_url))
    except urllib.error.URLError as exc:
        sys.stderr.write("Unknown URL %s\n" % service_url)
        #sys.stderr.write("%s\n" % exc.reason)
        sys.exit(exc.reason.errno)
    # Publication date comes from the service's <AsOf> timestamp,
    # re-rendered as an RFC 2822 date.
    as_of_dt = datetime.strptime(
        tree.findtext("AsOf"), MYTHTV_SERVICES_DATETIME_FORMAT)
    pub_date = utils.formatdate(as_of_dt.timestamp(), localtime=True)
    created_on = utils.formatdate(localtime=True)
    # Use web2py naming convention
    self.model_dict = dict(
        title=args.feed_title,
        link=args.feed_url,
        description=args.feed_description,
        pub_date=pub_date,
        created_on=created_on,
        language=args.feed_language,
        entries=self.__items__(args, tree),
    )
def convert(xml_path, txt_path):
    """Convert a directory of VOC-style XML annotations into one text file.

    For each XML file (processed in sorted order) the output contains the
    image filename, the object count, then one line per object of the form
    ``x y w h 0 0 0 0 <truncated> 0``.

    :param xml_path: directory containing the annotation XML files
    :param txt_path: path of the text file to write
    """
    file_list = sorted(os.listdir(xml_path))
    with open(txt_path, 'w') as file_txt:
        for filename in file_list:
            xml = ElementTree()
            # BUG FIX: join paths instead of concatenating strings, which
            # silently required xml_path to end with a separator.
            xml.parse(os.path.join(xml_path, filename))
            xml_content = [xml.findtext('filename'),
                           str(len(xml.findall('object')))]
            for obj in xml.findall('object'):  # don't shadow builtin ``object``
                x1 = obj.findtext('bndbox/xmin')
                y1 = obj.findtext('bndbox/ymin')
                x2 = obj.findtext('bndbox/xmax')
                y2 = obj.findtext('bndbox/ymax')
                w = str(int(x2) - int(x1))
                h = str(int(y2) - int(y1))
                fields = [x1, y1, w, h,
                          '0', '0', '0', '0',
                          obj.findtext('truncated'), '0']
                # Trailing space kept for compatibility with the old format.
                xml_content.append(' '.join(fields) + ' ')
            xml_content.append('')
            file_txt.write('\n'.join(xml_content))
# --- command-line / configuration bootstrap --------------------------------
args = parser.parse_args()
# Prefer an explicitly supplied config file over the one named in local_config.
if args.ConfigFile:
    global_config_file = args.ConfigFile
else:
    global_config_file = local_config["common"]["global-config-file"]
# Global settings: they are provided as an external XML file.
global_config = ElementTree()
global_config.parse(global_config_file)
# Date range: taken from the command line ("START END") when given,
# otherwise from the XML config.
if args.DatePeriod:
    DATE_START, DATE_END = args.DatePeriod.split(" ")
else:
    DATE_START = global_config.findtext("DataRange/Start")
    DATE_END = global_config.findtext("DataRange/End")
if not DATE_START or not DATE_END:
    # Russian user-facing message: "No date specified".
    raise Exception("Не указана дата")
# local constants
HOST = local_config["common"]["host"]
DEBUG = local_config["common"]["on-debug-mode"]
URL = "https://" + HOST
ROOT_URL = URL
STATE_FILE_PATH = ROOT_DIR + local_config["common"]["state-file-path"]
# globals constants
USE_PROXY = bool(int(global_config.findtext("Proxy/Use")))
DOWNLOAD_ROOT_DIR = global_config.findtext("ReportsDir") + "ODU"
TEMP_ROOT_DIR = global_config.findtext("TempDir") + "ODU"
def _add_meta_xml_metadata(metadata: 'Metadata', meta_xml: ElementTree.ElementTree):
	"""Populate *metadata* from an official Wii U title's meta.xml (product
	code, publisher, dates, save sizes, region flags, age ratings and
	per-language titles)."""
	#version = 33 for digital stuff, sometimes 32 otherwise?, content_platform = WUP, ext_dev_urcc = some kiosk related thingo
	#logo_type = 2 on third party stuff?, app_launch_type = 1 on parental controls/H&S/Wii U Chat and 0 on everything else?, invisible_flag = maybe just for keeping stuff out of the daily log?, no_managed_flag, no_event_log, no_icon_database, launching_flag, install_flag, closing_msg, group_id, boss_id, os_version, app_size, common_boss_size, account_boss_size, save_no_rollback, join_game_id, join_game_mode_mask, bg_daemon_enable, olv_accesskey, wood_tin, e_manual = I guess it's 1 if it has a manual, e_manual_version, eula_version, direct_boot, reserved_flag{0-7}, add_on_unique_id{0-31} = DLC probs?
	product_code = meta_xml.findtext('product_code')
	if product_code:
		metadata.product_code = product_code
		# 7th character of the product code identifies the VC platform, if any.
		try:
			metadata.specific_info['Virtual Console Platform'] = WiiUVirtualConsolePlatform(metadata.product_code[6])
		except ValueError:
			pass
		# Last four characters double as the GameTDB lookup id.
		gametdb_id = product_code[-4:]
		add_info_from_tdb(_tdb, metadata, gametdb_id)
	company_code = meta_xml.findtext('company_code')
	if company_code:
		if company_code in _nintendo_licensee_codes:
			metadata.publisher = _nintendo_licensee_codes[company_code]
		elif len(company_code) == 4 and company_code.startswith('00'):
			# 4-char codes are the 2-char licensee code zero-padded on the left.
			if company_code[2:] in _nintendo_licensee_codes:
				metadata.publisher = _nintendo_licensee_codes[company_code[2:]]
	if product_code and company_code:
		_add_cover(metadata, product_code[-4:], company_code[2:])
	mastering_date_text = meta_xml.findtext('mastering_date') #Usually blank? Sometimes exists though
	if mastering_date_text:
		try:
			mastering_datetime = datetime.fromisoformat(mastering_date_text[:10])
			mastering_date = Date(mastering_datetime.year, mastering_datetime.month, mastering_datetime.day)
			metadata.specific_info['Mastering Date'] = mastering_date
			# Treat the mastering date as a guess for the release date.
			guessed_date = Date(mastering_date.year, mastering_date.month, mastering_date.day, True)
			if guessed_date.is_better_than(metadata.release_date):
				metadata.release_date = guessed_date
		except ValueError:
			#print(mastering_date_text)
			pass
	#Maybe we can use these to figure out if it creates a save file or not…
	# Save sizes are hexadecimal strings in the XML.
	metadata.specific_info['Common Save Size'] = int(meta_xml.findtext('common_save_size') or '0', 16)
	metadata.specific_info['Account Save Size'] = int(meta_xml.findtext('account_save_size') or '0', 16)
	metadata.specific_info['Title ID'] = meta_xml.findtext('title_id')
	version = meta_xml.findtext('title_version')
	if version:
		metadata.specific_info['Version'] = 'v' + version
	region = meta_xml.findtext('region')
	region_codes = set()
	if region:
		# Region is a hex bitmask over the WiiU3DSRegionCode flag values.
		try:
			region_flags = int(region, 16)
			for region_code in WiiU3DSRegionCode:
				if region_code in (WiiU3DSRegionCode.RegionFree, WiiU3DSRegionCode.WiiURegionFree):
					continue
				if region_code.value & region_flags:
					region_codes.add(region_code)
			metadata.specific_info['Region Code'] = region_codes
		except ValueError:
			metadata.specific_info['Region Code'] = '0x' + region
	#Tempted to reuse wii.parse_ratings, but I might not because it's just a bit different
	# pc_* elements hold per-agency age ratings; top two bits flag
	# "unrated"/"pending"-style values, low 5 bits are the age itself.
	rating_tags = {tag: int(tag.text) for tag in meta_xml.iter() if tag.tag.startswith('pc_') and tag.text}
	ratings = {tag.tag: rating & 0b0001_1111 for tag, rating in rating_tags.items() if (rating & 0b1000_0000) == 0 and (rating & 0b0100_0000) == 0}
	if ratings:
		# Use the most common rating across agencies; fall back to the
		# strictest one when there is no unique mode.
		try:
			rating = statistics.mode(ratings.values())
		except statistics.StatisticsError:
			rating = max(ratings.values())
		metadata.specific_info['Age Rating'] = rating
		if 'pc_cero' in ratings:
			metadata.specific_info['CERO Rating'] = ratings['pc_cero']
		if 'pc_esrb' in ratings:
			metadata.specific_info['ESRB Rating'] = ratings['pc_esrb']
		if 'pc_usk' in ratings:
			metadata.specific_info['USK Rating'] = ratings['pc_usk']
		if 'pc_pegi_gen' in ratings:
			metadata.specific_info['PEGI Rating'] = ratings['pc_pegi_gen']
		#There are more but that will do
	# #These may not be accurate at all?
	# metadata.specific_info['Uses-Nunchuk'] = meta_xml.findtext('ext_dev_nunchaku') != '0'
	# metadata.specific_info['Uses-Classic-Controller'] = meta_xml.findtext('ext_dev_classic') != '0'
	# metadata.specific_info['Uses-Balance-Board'] = meta_xml.findtext('ext_dev_board') != '0' #maybe?
	# metadata.specific_info['Uses-USB-Keyboard'] = meta_xml.findtext('ext_dev_usb_keyboard') != '0'
	# uses_etc = meta_xml.findtext('ext_dev_etc') != '0' #???
	# if uses_etc:
	# 	metadata.specific_info['Uses-Etc'] = meta_xml.findtext('ext_dev_etc_name')
	#drc = meta_xml.findtext('drc_use') != '0'
	#network = meta_xml.findtext('network_use') != '0'
	#online_account = meta_xml.findtext('online_account_use') != '0'
	# Collect localised short names / long names / publishers per language.
	short_names = {}
	long_names = {}
	publishers = {}
	for lang_code, lang_name in _languages.items():
		short_name = meta_xml.findtext('shortname_' + lang_code)
		if short_name:
			short_names[lang_name] = short_name
		long_name = meta_xml.findtext('longname_' + lang_code)
		if long_name:
			long_names[lang_name] = long_name.replace('\n', ': ') #Newlines seem to be used here to separate subtitles
		publisher = meta_xml.findtext('publisher_' + lang_code)
		if publisher:
			publishers[lang_name] = publisher
	add_info_from_local_titles(metadata, short_names, long_names, publishers, region_codes)

def _add_homebrew_meta_xml_metadata(rom: ROM, metadata: 'Metadata', meta_xml: ElementTree.ElementTree):
	"""Populate *metadata* from a homebrew app's meta.xml (name, coder,
	version, homepage, release date, descriptions, category)."""
	name = meta_xml.findtext('name')
	if name:
		rom.ignore_name = True
		metadata.add_alternate_name(name, 'Banner Title')
	metadata.developer = metadata.publisher = meta_xml.findtext('coder')
	metadata.specific_info['Version'] = meta_xml.findtext('version')
	url = meta_xml.findtext('url')
	if url:
		metadata.documents['Homepage'] = url
	release_date_text = meta_xml.findtext('release_date')
	if release_date_text:
		# Date is formatted YYYYMMDD (possibly with extra trailing data).
		metadata.release_date = Date(release_date_text[0:4], release_date_text[4:6], release_date_text[6:8])
	short_description = meta_xml.findtext('short_description')
	if short_description:
		metadata.descriptions['Short Description'] = short_description
	long_description = meta_xml.findtext('long_description')
	if long_description:
		metadata.descriptions['Long Description'] = long_description
	metadata.specific_info['Homebrew Category'] = meta_xml.findtext('category') or 'None' #Makes me wonder if it's feasible to include an option to get categories not from folders…

def _add_rpx_metadata(rom: ROM, metadata: 'Metadata'):
	"""Add metadata for a bare .rpx homebrew executable from the meta.xml and
	icon.png expected alongside it."""
	#The .rpx itself is not interesting and basically just a spicy ELF
	#This is going to assume we are looking at a homebrew folder
	try:
		#info.json has the same info? But it's not always there
		_add_homebrew_meta_xml_metadata(rom, metadata, ElementTree.parse(rom.path.with_name('meta.xml')))
		# Drop the last category if it merely repeats the containing folder name.
		if metadata.categories[-1] == rom.path.parent.name:
			metadata.categories = metadata.categories[:-1]
	except FileNotFoundError:
		pass
	homebrew_banner_path = rom.path.with_name('icon.png')
	if homebrew_banner_path.is_file():
		metadata.images['Banner'] = homebrew_banner_path

def add_folder_metadata(rom: FolderROM, metadata: 'Metadata'):
	"""Add metadata for an extracted Wii U title folder (content/meta layout):
	engine detection, boot/icon images and the meta.xml contents."""
	content_dir = rom.get_subfolder('content')
	meta_dir = rom.get_subfolder('meta')
	assert content_dir and meta_dir, 'It should be impossible for content_dir or meta_dir to be none, otherwise this would not have even been detected as a folder'
	metadata.specific_info['Executable Name'] = rom.relevant_files['rpx'].name
	#TODO: Move this over to engine_detect
	if rom.path.joinpath('code', 'UnityEngine_dll.rpl').is_file():
		#Unity games on Wii U just have a "Data" folder under content with no executable (because it's over here in code), so our usual detection won't work; not sure about other cross platform engines
		metadata.specific_info['Engine'] = 'Unity'
	# NOTE(review): is_dir() on appinfo.xml/config.xml/index.html looks like it
	# should be is_file() — confirm against real NWF dumps before changing.
	if content_dir.joinpath('assets').is_dir() and all(content_dir.joinpath('app', file).is_dir() for file in ('appinfo.xml', 'config.xml', 'index.html')):
		metadata.specific_info['Engine'] = 'Nintendo Web Framework'
	engine = try_and_detect_engine_from_folder(content_dir, metadata)
	if engine:
		metadata.specific_info['Engine'] = engine
	#Seemingly this can actually sometimes be all lowercase? I should make this check case insensitive but I don't really care too much
	icon_path = meta_dir.joinpath('iconTex.tga')
	if icon_path.is_file():
		metadata.images['Icon'] = icon_path
	boot_drc_path = meta_dir.joinpath('bootDrcTex.tga') #Image displayed on the gamepad while loading
	if boot_drc_path.is_file():
		metadata.images['Gamepad Boot Image'] = boot_drc_path
	boot_tv_path = meta_dir.joinpath('bootTvTex.tga') #Generally just bootDrcTex but higher resolution (and for the TV)
	if boot_tv_path.is_file():
		metadata.images['TV Boot Image'] = boot_tv_path
	boot_logo_path = meta_dir.joinpath('bootLogoTex.tga')
	if boot_logo_path.is_file():
		metadata.images['Boot Logo'] = boot_logo_path
	#There is also a Manual.bfma in here, bootMovie.h264 and bootSound.btsnd, and some ratings images like "CERO_ja.jpg" and "PEGI_en.jpg" except they're 1 byte so I dunno
	meta_xml_path = meta_dir.joinpath('meta.xml')
	try:
		meta_xml = ElementTree.parse(meta_xml_path)
		_add_meta_xml_metadata(metadata, meta_xml)
	except FileNotFoundError:
		pass
	# The shared GBA/PC Engine VC code is disambiguated by the emulator name.
	if metadata.specific_info.get('Virtual Console Platform') == WiiUVirtualConsolePlatform.GBAOrPCEngine:
		metadata.specific_info['Virtual Console Platform'] = WiiUVirtualConsolePlatform.GBA if rom.name == 'm2engage' else WiiUVirtualConsolePlatform.PCEngine

def add_wii_u_custom_info(game: 'ROMGame'):
	"""Entry point: dispatch to folder or .rpx metadata handling."""
	if game.rom.is_folder:
		add_folder_metadata(cast(FolderROM, game.rom), game.metadata)
	if game.rom.extension == 'rpx':
		_add_rpx_metadata(game.rom, game.metadata)
def run(self):
    """Run the augmentation: for every XML annotation in ``self.anno_dir``,
    load the matching source image, apply eight geometric transforms
    (identity, 90/180/270 rotations, horizontal flip, and flip+rotations)
    and save each resulting image/annotation pair (suffixes ``_0``..``_7``)
    into the output folders.
    """
    if not os.path.exists(self.output_img_dir):
        os.mkdir(self.output_img_dir)
    if not os.path.exists(self.output_anno_dir):
        os.mkdir(self.output_anno_dir)
    for anno_file in os.listdir(self.anno_dir):
        # One broken annotation must not abort the whole batch.
        try:
            anno_name, anno_ext = anno_file.split(".")
            if anno_ext != "xml":
                continue
            log.info(anno_file)
            label = "label_%s" % anno_name.split("_")[0]
            anno_tree = ElementTree()
            anno_tree.parse(os.path.join(self.anno_dir, anno_file))
            img_folder = anno_tree.findtext("folder")
            img_fullname = anno_tree.findtext("filename")
            img_name, img_ext = img_fullname.split(".")
            # img_path = os.path.join(self.img_dir, img_folder, img_fullname)
            img_path = os.path.join(self.img_dir, img_fullname)
            # imdecode via np.fromfile handles non-ASCII paths that cv.imread cannot.
            src_img = cv.imdecode(np.fromfile(img_path, dtype=np.uint8), -1)
            # Boxes as [xmin, ymin, xmax, ymax, class-name].
            src_box = list()
            for obj in anno_tree.iter(tag="object"):
                box_node = obj.find("bndbox")
                src_box.append([
                    int(box_node.findtext("xmin")),
                    int(box_node.findtext("ymin")),
                    int(box_node.findtext("xmax")),
                    int(box_node.findtext("ymax")),
                    obj.findtext("name")
                ])
            # _0: untouched original.
            self.save_file(src_img, src_box, anno_tree, "%s_0.%s" % (img_name, img_ext), "%s_0.%s" % (anno_name, anno_ext))
            # _1.._3: rotations of the original.
            dst_img, dst_box = ImageUtils.rotation(src_img, 90, src_box)
            self.save_file(dst_img, dst_box, anno_tree, "%s_1.%s" % (img_name, img_ext), "%s_1.%s" % (anno_name, anno_ext))
            dst_img, dst_box = ImageUtils.rotation(src_img, 180, src_box)
            self.save_file(dst_img, dst_box, anno_tree, "%s_2.%s" % (img_name, img_ext), "%s_2.%s" % (anno_name, anno_ext))
            dst_img, dst_box = ImageUtils.rotation(src_img, 270, src_box)
            self.save_file(dst_img, dst_box, anno_tree, "%s_3.%s" % (img_name, img_ext), "%s_3.%s" % (anno_name, anno_ext))
            # _4: flipped copy; _5.._7: rotations of the flipped copy.
            flip_img, flip_box = ImageUtils.flip(src_img, 1, src_box)
            self.save_file(flip_img, flip_box, anno_tree, "%s_4.%s" % (img_name, img_ext), "%s_4.%s" % (anno_name, anno_ext))
            dst_img, dst_box = ImageUtils.rotation(flip_img, 90, flip_box)
            self.save_file(dst_img, dst_box, anno_tree, "%s_5.%s" % (img_name, img_ext), "%s_5.%s" % (anno_name, anno_ext))
            dst_img, dst_box = ImageUtils.rotation(flip_img, 180, flip_box)
            self.save_file(dst_img, dst_box, anno_tree, "%s_6.%s" % (img_name, img_ext), "%s_6.%s" % (anno_name, anno_ext))
            dst_img, dst_box = ImageUtils.rotation(flip_img, 270, flip_box)
            self.save_file(dst_img, dst_box, anno_tree, "%s_7.%s" % (img_name, img_ext), "%s_7.%s" % (anno_name, anno_ext))
        except Exception as e:
            log.error(e)
def __init__(self, projectPath=None):
    """Inspect *projectPath* and read its Maven coordinates if a pom.xml exists.

    Sets ``artifactId``/``groupId``/``version`` (defaulting to the folder
    name / "unknown"), the detected ``buildSystem`` ("maven" or "sbt") and
    the list of declared ``dependencies``.

    :param projectPath: path to the project folder.  NOTE(review): despite
        the ``None`` default, a real path is required —
        ``os.path.basename(None)`` would raise; confirm callers.
    """
    ns = "http://maven.apache.org/POM/4.0.0"

    def q(tag):
        # Qualify a POM tag name with the Maven XML namespace.
        return "{%s}%s" % (ns, tag)

    ## Executor sync
    self.finishedSemaphore = threading.Semaphore()
    ## Default project IDs (folder name until the pom says otherwise)
    self.artifactId = os.path.basename(projectPath)
    self.groupId = os.path.basename(projectPath)
    self.version = "unknown"
    self.projectPath = projectPath
    self.dependencies = []
    ## Maven project?
    pom_path = os.path.join(projectPath, "pom.xml")
    if os.path.isfile(pom_path):
        self.buildSystem = "maven"
        ## Read in XML
        doc = ElementTree(file=pom_path)
        ## Parent infos.  Compare against None explicitly: an Element with no
        ## children is falsy, so `if parentElement:` would wrongly skip an
        ## empty <parent/> (and Element truthiness is deprecated).
        parentElement = doc.find(q("parent"))
        if parentElement is not None:
            self.parent = {
                "artifactId": parentElement.findtext(q("artifactId")),
                "groupId": parentElement.findtext(q("groupId")),
                "version": parentElement.findtext(q("version")),
            }
        ## Local infos (findtext returns None for missing elements)
        self.artifactId = doc.findtext(q("artifactId"))
        self.groupId = doc.findtext(q("groupId"))
        self.version = doc.findtext(q("version"))
        ## Inherit groupId/version from the parent when not set locally
        if hasattr(self, "parent"):
            if self.groupId is None:
                self.groupId = self.parent["groupId"]
            if self.version is None:
                self.version = self.parent["version"]
        ## Collect declared dependencies with an XPath lookup
        for dependency in doc.findall("%s/%s" % (q("dependencies"), q("dependency"))):
            self.dependencies.append({
                "artifactId": dependency.findtext(q("artifactId")),
                "groupId": dependency.findtext(q("groupId")),
                "version": dependency.findtext(q("version")),
            })
    else:
        self.buildSystem = "sbt"
def retrieveShowMetadata(self, folder):
    """Read show name and indexer id from the NFO-style metadata file in *folder*.

    :param folder: show directory expected to contain
        ``self._show_metadata_filename``
    :return: ``(indexer_id, name, indexer)`` on success,
        ``(None, None, None)`` otherwise
    """
    empty_return = (None, None, None)

    metadata_path = os.path.join(folder, self._show_metadata_filename)

    if not os.path.isdir(folder) or not os.path.isfile(metadata_path):
        sickrage.app.log.debug("Can't load the metadata file from " + metadata_path + ", it doesn't exist")
        return empty_return

    # Logging is best-effort only; never let it abort metadata loading.
    try:
        sickrage.app.log.debug("Loading show info from sickrage.metadata file in {}".format(folder))
    except:
        pass

    try:
        with io.open(metadata_path, 'rb') as xmlFileObj:
            showXML = ElementTree(file=xmlFileObj)

            if showXML.findtext('title') is None or (
                    showXML.findtext('tvdbid') is None and showXML.findtext('id') is None):
                sickrage.app.log.info(
                    "Invalid info in tvshow.nfo (missing name or id): {} {} {}".format(showXML.findtext('title'),
                                                                                       showXML.findtext('tvdbid'),
                                                                                       showXML.findtext('id')))
                return empty_return

            name = showXML.findtext('title')

            # Prefer <tvdbid>, fall back to <id>, and validate exactly once
            # with try_int.  (A previous duplicated block re-parsed the id
            # with a bare int() call, which raised on an empty-but-present
            # <tvdbid> element and discarded an otherwise valid <id>.)
            indexer_id_text = showXML.findtext('tvdbid') or showXML.findtext('id')
            if indexer_id_text:
                indexer_id = try_int(indexer_id_text, None)
                if indexer_id is None or indexer_id < 1:
                    sickrage.app.log.debug(
                        "Invalid Indexer ID (" + str(indexer_id) + "), not using metadata file")
                    return empty_return
            else:
                sickrage.app.log.debug(
                    "Empty <id> or <tvdbid> field in NFO, unable to find a ID, not using metadata file")
                return empty_return

            # Only one indexer is supported here.
            indexer = 1

            # Reject stale TVRage episode-guide URLs pointing at this id.
            epg_url_text = showXML.findtext('episodeguide/url')
            if epg_url_text:
                epg_url = epg_url_text.lower()
                if str(indexer_id) in epg_url and 'tvrage' in epg_url:
                    sickrage.app.log.warning("Invalid Indexer ID (" + str(
                        indexer_id) + "), not using metadata file because it has TVRage info")
                    return empty_return
    except Exception as e:
        sickrage.app.log.warning(
            "There was an error parsing your existing metadata file: '" + metadata_path + "' error: {}".format(e))
        return empty_return

    return indexer_id, name, indexer
#!/usr/bin/env python import os from setuptools import setup import sys from xml.etree.ElementTree import ElementTree try: root = ElementTree(None, 'stack.xml') version = root.findtext('version') except Exception, e: print >> sys.stderr, 'Could not extract version from your stack.xml:\n%s' % e sys.exit(-1) sys.path.insert(0, 'src') PKG = 'ros_comm' gen = ['msg', 'srv'] packages = [] package_dir = {} if 'CATKIN_BINARY_DIR' in os.environ: build_d = os.environ['CATKIN_BINARY_DIR'] for t in gen: p = os.path.join(build_d, 'gen', 'py', PKG, t) if os.path.isdir(p): # e.g. std_msgs.msg = build/gen/py/std_msgs/msg package_dir["%s.%s"%(PKG, t)] = p packages.append("%s.%s"%(PKG, t)) setup(name = PKG, version = version,
class LCTWindow(Gtk.Window):
    """Main GTK window for the Lazy Combat Tracker: lets the user pick a log
    folder, starts/stops the background Parser thread, and shows progress in
    a status bar and tray icon."""

    def __init__(self):
        """Build the window, load persisted settings, and lay out the widgets."""
        # parserrunning — whether the Parser thread is currently active.
        self.parserrunning = False
        # LogFolder — selected log directory (restored by LoadSettings).
        self.LogFolder = ""
        self.settings = ElementTree()
        self.LoadSettings()
        Gtk.Window.__init__(self, title="Lazy Combat Tracker")
        self.set_keep_above(True)
        self.connect("delete-event", Gtk.main_quit)
        self.VBox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
        self.add(self.VBox)
        self.HBox = Gtk.Box(spacing=6)
        if self.LogFolder == "":
            self.LogLable = Gtk.Label("No log file selected!")
        else:
            self.LogLable = Gtk.Label(self.GetServer())
        self.HBox.pack_start(self.LogLable, True, True, 0)
        self.button = Gtk.Button(label="Change")
        self.button.connect("clicked", self.on_file_select)
        self.HBox.pack_start(self.button, True, True, 0)
        self.runButton = Gtk.Button(label="Start Parser!")
        self.VBox.pack_start(self.HBox, True, True, 0)
        self.runButton.connect("clicked", self.toggle_parser)
        self.VBox.pack_start(self.runButton, True, True, 0)
        self.Statusbar = Gtk.ProgressBar()
        self.Statusbar.set_text("Please select a log file directory.")
        self.Statusbar.set_show_text(True)
        self.Statusbar.set_pulse_step(0.01)
        self.VBox.pack_start(self.Statusbar, True, True, 0)
        #StatusIcon
        self.StatusIcon = Gtk.StatusIcon()
        # Icon files may not exist yet; the tray icon is optional.
        try:
            self.StatusIcon.set_from_file("LCT.png")
            self.StatusIcon.set_visible(True)
        except:
            pass

    def GetServer(self):
        """Return the last component of LogFolder (used as the server name)."""
        server = self.LogFolder.split('/')
        server = server[len(server) - 1]
        return server

    def toggle_parser(self, widget):
        """Start the Parser thread if stopped, or stop it if running, and
        update the button/status bar/tray icon accordingly."""
        if (self.parserrunning):
            self.Statusbar.set_text("Stopping parser...")
            Gtk.main_iteration()  # let the UI repaint before the blocking stop
            self.parser_thread.stop()
            self.parserrunning = False
            self.Statusbar.set_text("Done!")
            self.Statusbar.set_fraction(0.0)
            try:
                self.StatusIcon.set_from_file("LCT.png")
            except:
                pass
            self.runButton.set_label("Start Parser!")
        else:
            self.Statusbar.set_text("Scanning log file, please wait...")
            Gtk.main_iteration()
            self.parser_thread = Parser(self.LogFolder, self.CopyToClipBoard, self.Statusbar, self.StatusIcon)
            self.parser_thread.name = 0
            self.parser_thread.start()
            self.parserrunning = True
            self.Statusbar.set_text("Parser is running...")
            self.Statusbar.set_fraction(1.0)
            self.runButton.set_label("Stop Parser!")

    def on_file_select(self, widget):
        """Show a folder chooser; persist and display the chosen log folder."""
        dialog = Gtk.FileChooserDialog(
            "Please choose a folder", self, Gtk.FileChooserAction.SELECT_FOLDER,
            (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, "Select", Gtk.ResponseType.OK))
        dialog.set_default_size(800, 400)
        response = dialog.run()
        if response == Gtk.ResponseType.OK:
            logging.info("Open clicked")
            logging.info("File selected: " + dialog.get_filename())
            self.LogFolder = dialog.get_filename()
            self.SaveSettings()
            self.LogLable.set_text(self.GetServer())
            self.Statusbar.set_text("Log directory set!")
        elif response == Gtk.ResponseType.CANCEL:
            logging.info("Cancel clicked")
        dialog.destroy()

    def CopyToClipBoard(self, text):
        """Copy *text* to the system clipboard from the GTK main loop."""
        self.clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
        # idle_add defers to the main loop — this may be called from the parser thread.
        GObject.idle_add(self.clipboard.set_text, text, -1)

    def LoadSettings(self):
        """Load (or create) ~/.LCT/Settings.xml and download missing icons."""
        try:
            os.chdir(os.path.expanduser("~/.LCT"))
        except:
            os.makedirs(os.path.expanduser("~/.LCT"))
            os.chdir(os.path.expanduser("~/.LCT"))
        try:
            self.settings.parse(os.path.expanduser("~/.LCT/Settings.xml"))
        except:
            # First run: write a minimal settings file, then re-parse it.
            # NOTE(review): `file` shadows the builtin; left unchanged here.
            file = open(os.path.expanduser("~/.LCT/Settings.xml"), 'w+')
            file.write('<LCT><LOGFOLDER></LOGFOLDER></LCT>')
            file.close()
            self.settings.parse(os.path.expanduser("~/.LCT/Settings.xml"))
        if not exists("LCT.png"):
            print("Downloading: LCT.png")
            DownLoad("http://www.lejoni.com/lct/LCT.png", "LCT.png")
        if not exists("LCT-running.png"):
            print("Downloading: LCT-running.png")
            DownLoad("http://www.lejoni.com/lct/LCT-running.png", "LCT-running.png")
        if not exists("LCT-fighting.png"):
            print("Downloading: LCT-fighting.png")
            DownLoad("http://www.lejoni.com/lct/LCT-fighting.png", "LCT-fighting.png")
        self.LogFolder = self.settings.findtext("LOGFOLDER")

    def SaveSettings(self):
        """Write the current LogFolder back to ~/.LCT/Settings.xml."""
        self.settings.find("LOGFOLDER").text = self.LogFolder
        self.settings.write(os.path.expanduser("~/.LCT/Settings.xml"))
def xml_to_abc(filename):
    """Convert a MusicXML (.xml) or compressed MusicXML (.mxl) file to ABC notation.

    The conversion is attempted with both L:1/8 and L:1/16 unit note lengths;
    the shorter result (with a small penalty per '/' character) is returned.
    Python 2 code: relies on print statements, unicode and StringIO.
    """
    global default_len
    if os.path.splitext(filename)[1].lower() == '.mxl':
        root = ElementTree().parse(mxl_to_xml(filename))
    else:
        root = ElementTree().parse(filename)
    debug = False
    results_for_different_L_fields = []
    for L in [Fraction(1, 8), Fraction(1, 16)]:
        default_len = L
        tune_fields = []
        parts = []
        # Title fields from work/movement titles and <credit> blocks.
        if root.findtext('work/work-title', ''):
            tune_fields.append(Field('T', root.findtext('work/work-title', '').strip()))
        if root.findtext('movement-title', ''):
            tune_fields.append(Field('T', root.findtext('movement-title', '').strip()))
        for credit in root.findall('credit'):
            credits = ''.join(e.text or '' for e in credit.findall('credit-words'))
            # ran into issues with Unicode strings, so except TypeError to avoid processing these strings
            try:
                credits = credits.translate(None, '\r\n')
                if credits.strip():
                    tune_fields.append(Field('T', credits))
            except TypeError:
                pass
        # Composer -> S fields, lyricist -> Z fields.
        for creator in root.findall('identification/creator'):
            if creator.get('type') == 'composer':
                for line in creator.text.split('\n'):
                    tune_fields.append(Field('S', line.strip()))
            elif creator.get('type') == 'lyricist':
                text = creator.text
                for line in text.split('\n'):
                    tune_fields.append(Field('Z', line.strip()))
        num_parts = len(list(root.findall('part')))
        for part_no, part in enumerate(root.findall('part')):
            # accidentals: key-signature state; measure_accidentals: per-measure state.
            accidentals = defaultdict(lambda: 0)
            measure_accidentals = accidentals
            cur_divisions = 768
            bar_number = 0
            bar_offset = 0
            lyric_numbers = sorted(set(lyric.get('number', '1') for lyric in part.findall('*/note/lyric')))
            lyrics = dict((lyric_number, []) for lyric_number in lyric_numbers)
            voice_names = sorted([voice.text for voice in part.findall('measure/note/voice')])
            if voice_names:
                first_voice = voice_names[0]
            else:
                first_voice = None
            voices = defaultdict(list)
            num_staves = int(root.findtext('part/measure/attributes/staves', '1'))
            for measure in part.findall('measure'):
                bar_number += 1
                bar_offset = 0
                last_measure_accidentals = measure_accidentals
                measure_accidentals = accidentals.copy()
                notes_with_accidentals_in_this_measure = set()
                for measure_element in measure.getchildren():
                    # fields
                    if measure_element.tag == 'attributes':
                        attributes = measure_element
                        for element in attributes.getiterator():
                            if element.tag == 'key':
                                # determine modal scale
                                mode, base_note_distance = key_mode_map.get(element.findtext('mode'), '')
                                # insert new key in all voices
                                fifths = int(element.findtext('fifths'))
                                key_name = get_key_name(fifths, base_note_distance) + mode
                                field = Field('K', key_name)
                                if voices:
                                    for voice in voices.values():
                                        voice.append(field)
                                elif part_no == 0:
                                    tune_fields.append(field)
                                # update accidentals
                                accidentals = get_accidentals(fifths)
                                measure_accidentals = accidentals.copy()
                            elif element.tag == 'time':
                                metre = '%s/%s' % (element.findtext('beats'), element.findtext('beat-type'))
                                field = Field('M', metre)
                                if voices:
                                    for voice in voices.values():
                                        voice.append(field)
                                elif part_no == 0:
                                    tune_fields.append(field)
                            elif element.tag == 'divisions':
                                cur_divisions = int(element.text)
                    # notes
                    elif measure_element.tag == 'note':
                        note = measure_element
                        voice = note.findtext('voice')
                        # name
                        if note.get('print-object') == 'no':
                            note_name = 'x'
                        elif note.find('rest') is not None:
                            note_name = 'z'
                        else:
                            if note.findtext('pitch/step') != None and note.findtext('pitch/octave') != None:
                                note_name = note.findtext('pitch/step') + note.findtext('pitch/octave')
                            else:
                                note_name = "?"
                        # duration and whether it's a grace note
                        is_grace_note = note.find('grace') is not None
                        grace_slash = False
                        if is_grace_note:
                            duration = name2duration[note.findtext('type')]
                            grace_slash = note.find('grace').get('slash') == 'yes'
                        else:
                            duration = Fraction(int(note.findtext('duration')), cur_divisions) / 4
                            duration.reduce()
                        # find any time-modifications (due to tuplets) and rescale displayed duration accordingly
                        actual_notes = note.findtext('time-modification/actual-notes')
                        normal_notes = note.findtext('time-modification/normal-notes')
                        if actual_notes and normal_notes:
                            # if time modification
                            time_modification = Fraction(int(actual_notes), int(normal_notes))
                        else:
                            time_modification = Fraction(1, 1)
                        displayed_duration = duration * time_modification
                        # create Note object
                        n = Note(note_name, duration, displayed_duration, bar_number, bar_offset, is_grace_note, grace_slash)
                        # tuplet
                        tuplet = note.find('notations/tuplet')
                        if tuplet is not None:
                            if tuplet.get('type') == 'start':
                                actual_notes, normal_notes = int(actual_notes), int(normal_notes)
                                if actual_notes == 3 and normal_notes == 2:
                                    n.tuplet_begin = '(3'
                                elif actual_notes == 5:
                                    n.tuplet_begin = '(5'
                                else:
                                    raise Exception('unrecognized tuplet: %d/%d' % (actual_notes, normal_notes))
                                #n.tuplet_begin = '(%d' % int(Fraction(actual_notes, normal_notes) * 2) # TODO: make more generally applicable
                            elif tuplet.get('type') == 'stop':
                                n.tuplet_end = True
                        # accidental
                        if not note_name in 'zx':
                            # unless rest or invisible rest
                            alter = int(note.findtext('pitch/alter', '0'))
                            try:
                                if alter != measure_accidentals[note_name] or note.find('accidental') is not None:
                                    n.accidental = {-2: '__', -1: '_', 0: '=', 1: '^', 2: '^^'}[alter]
                                    measure_accidentals[note_name] = alter
                                    notes_with_accidentals_in_this_measure.add(note_name)
                            except KeyError:
                                print "Error processing accidental. Skipping..."
                        # tie
                        tie = note.find('tie')
                        if tie is not None and tie.get('type') == 'start':
                            n.tie = '-'
                        # slurs
                        for slur in note.findall('notations/slur'):
                            if slur.get('type') == 'start':
                                n.slur_begin = n.slur_begin + '('
                            if slur.get('type') == 'stop':
                                n.slur_end = n.slur_end + ')'
                        # ornaments
                        for key, value in note_ornamentation_map.items():
                            if note.find(key) is not None:
                                n.ornaments = n.ornaments + value
                        # fingering
                        fingering = note.find('notations/technical/fingering')
                        if fingering is not None:
                            n.ornaments = '!%s!' % fingering.text + n.ornaments
                        # spacing due to beam ends or long notes
                        if not is_grace_note:
                            beams = [beam for beam in note.findall('beam') if beam.text in ['begin', 'continue', 'end']]
                            all_beams_end_here = beams and not [b for b in beams if b.text != 'end']
                            if all_beams_end_here or duration >= Fraction(1, 4) or (duration < Fraction(1, 4) and len(beams)==0):
                                n.trailing_space = ' '
                            ##if duration < Fraction(1, 4) and len(beams)==0:
                            ##    n.trailing_space = ' !%s!' % len(beams)
                        # chord
                        if note.find('chord') is not None:
                            n.trailing_space = ''
                            # beam detection only works for first chord note, so erase any incorrectly generated space
                            last = voices[voice].pop()
                            if not isinstance(last, Chord):
                                last = Chord([last])
                            last.add(n)
                            n = last
                        # lyrics
                        elif voice == first_voice and not is_grace_note and note_name not in 'zx':
                            for lyric_number in lyric_numbers:
                                lyrics_text = '*'
                                # skip this note unless we find a lyric element that matches the current lyrics number
                                for lyric in note.findall('lyric'):
                                    if lyric_number == lyric.get('number', '1'):
                                        # match found, so get the lyrics text (replace spaces and elision elements by '~')
                                        lyrics_text = ''
                                        for lyr_element in lyric:
                                            if lyr_element.tag == 'elision':
                                                lyrics_text = lyrics_text + '~'
                                            elif lyr_element.tag == 'text':
                                                lyrics_text = lyrics_text + (lyr_element.text or '').replace('-', r'\-').replace(' ', '~')
                                                # escape '-' characters
                                        # add - and _ characters
                                        if lyric.findtext('syllabic') in ['begin', 'middle']:
                                            lyrics_text = lyrics_text + '-'
                                        if lyric.find('extend') is not None:
                                            lyrics_text = lyrics_text.replace('*', '') + '_'
                                # if the current element is silence and the last element was '_', then discard the silence since the '_' covers this
                                if lyrics_text == '*' and lyrics[lyric_number] and lyrics[lyric_number][-1].endswith('_'):
                                    lyrics[lyric_number].append('')
                                    # adding '' ensures that the if condition is not true next time around
                                else:
                                    lyrics[lyric_number].append(lyrics_text)
                        # add note/chord to its voice
                        voices[voice].append(n)
                        if not is_grace_note:
                            bar_offset += n.duration
                    # backup
                    elif measure_element.tag == 'backup':
                        duration = Fraction(int(measure_element.findtext('duration')), cur_divisions) / 4
                        duration.reduce()
                        bar_offset -= duration
                    elif measure_element.tag == 'barline':
                        for voice_name, voice in voices.items():
                            barline = measure_element
                            location = barline.get('location')
                            bar_style = barline.findtext('bar-style')
                            if bar_style == 'light-light':
                                s = '|-|'
                            elif bar_style == 'light-heavy':
                                s = '|]'
                            else:
                                s = '|'
                            repeat = barline.find('repeat')
                            if repeat is not None:
                                if repeat.get('direction') == 'forward':
                                    s = '|:'
                                else:
                                    s = ':|'
                            # handle segno, coda, fermata
                            ornament = None
                            if barline.find('segno'):
                                ornament = 'S'
                            elif barline.find('coda'):
                                ornament = 'O'
                            elif barline.find('fermata'):
                                ornament = 'H'
                            if ornament:
                                for voice in voices.values():
                                    voice.append(ornament)
                            ending = barline.find('ending')
                            if ending is not None:
                                if ending.get('type') == 'start':
                                    if part_no == 0 and voice_name == first_voice:
                                        # only read endings for first part since this is the way ABC handles it
                                        text = ending.text or ending.get('number')
                                        if text is None:
                                            text = ''
                                        elif text.strip() in '1 2 3 4 5 6 1. 2. 3. 4. 5. 6.':
                                            text = text.replace('.', '')
                                            # delete any trailing dot after the ending number
                                        else:
                                            text = '"%s"' % text
                                        s = s + '[' + text
                                else:
                                    s = s + '|-|'
                            voice.append(s)
                            debug_print(s)
                    elif measure_element.tag == 'direction':
                        direction = measure_element
                        s = None
                        if direction.find('direction-type/coda') is not None:
                            s = 'O'
                        elif direction.find('direction-type/segno') is not None:
                            s = 'S'
                        elif direction.find('direction-type/words') is not None:
                            words = direction.find('direction-type/words')
                            offset = words.get('default-y')
                            text = direction.findtext('direction-type/words', '').strip()
                            ##if text.lower() == 'fine':
                            ##    s = '!fine!'
                            ##elif text.upper() == 'D.C.':
                            ##    s = '!D.C.!'
                            ##elif text.upper() == 'D.S.':
                            ##    s = '!D.S.!'
                            if text == '$':
                                # Sibelius sometimes seems to use this for segno
                                s = 'S'
                            elif offset and int(offset) < 0:
                                s = '"_%s"' % text
                            else:
                                s = '"^%s"' % text
                            s = s.replace('\n', ' ')
                        if s:
                            voices[first_voice].append(s)
                            #for voice in voices.values():
                            #    voice.append(s)
                    elif measure_element.tag == 'harmony':
                        voices['1'].append(xml_harmony_to_abc(measure_element))
                    elif measure_element.tag == 'print':
                        print_element = measure_element
                        if print_element is not None and print_element.get('new-system') == 'yes':
                            for voice in voices.values():
                                voice[-1] = voice[-1] + '\n'
                            for lyrics_number in lyrics:
                                lyrics[lyrics_number].append('\n')
                # close the measure with a plain bar line in every voice
                for voice in voices.values():
                    voice.append('|')
                debug_print('|')
            # post-process every voice (chords, tuplets, grace notes, bar lines...)
            for voice in voices:
                fix_chords(voices[voice])
                fix_tuplets(voices[voice])
                voices[voice] = introduce_grace_starts_and_ends(voices[voice])
                voices[voice] = fix_barlines(voices[voice])
                fix_slurs_before_repeat_ends(voices[voice])
                introduce_broken_rythms(voices[voice])
                pass
            for voice_name, voice in sorted(voices.items()):
                if not voice_name and not voice:
                    continue
                s = ''.join(map(unicode, voice))
                # normalize the various intermediate bar-line spellings
                s = s.replace('|-|', '||').replace(':|||:', '::').replace('||:', '|:').replace(':||', ':|').replace('||[', '|[').replace('||[', '|[').replace('|-|', '||').replace('|||', '||').replace('|||', '||').replace(']|', ']').replace('|]|', '|]').strip()
                if s.endswith('||'):
                    s = s[0:-2] + '|]'
                if num_parts > 1:
                    if voice_name:
                        voice_name = part.get('id') + '_' + str(voice_name)
                    else:
                        voice_name = part.get('id')
                # if this is the first voice, then pair up each line of note output with the lyrics lines (if there are any)
                if voice_name == first_voice:
                    result = []
                    notes_lines = s.split('\n')
                    lines_for_each_lyrics = [' '.join(lyrics[lyrics_number]).split('\n') for (lyrics_number, lyrics_parts) in sorted(lyrics.items())]
                    for line_no in range(len((notes_lines))):
                        result.append(notes_lines[line_no])
                        for lines in lines_for_each_lyrics:
                            if re.search(r'[^-*_ ]', lines[line_no]):
                                # if line is not empty
                                result.append('w: %s' % lines[line_no])
                    s = '\n'.join(result)
                parts.append(('V:%s' % str(voice_name), s))
        # X field: reuse a number embedded in the file name when plausible.
        file_numbers = [int(x) for x in re.findall(r'(\d+)', filename)]
        if file_numbers and 0 <= file_numbers[-1] <= 100000:
            tune_fields.insert(0, Field('X', str(file_numbers[-1])))
        else:
            tune_fields.insert(0, Field('X', '1'))
        tune_fields.append(Field('L', str(default_len)))
        ##tune_fields.append(Field('R', ''))
        ##tune_fields.append(Field('O', ''))
        output = StringIO.StringIO(u'')
        # emit all header fields, keeping K last as ABC requires
        for f in tune_fields:
            if f.field_name != 'K':
                output.write(unicode(f).replace('[', '').replace(']', '') + '\n')
        for f in tune_fields:
            if f.field_name == 'K':
                output.write(unicode(f).replace('[', '').replace(']', '') + '\n')
        if not [f for f in tune_fields if f.field_name == 'K']:
            output.write('K:C\n')
        for pname, p in parts:
            if len(parts) > 1 and pname:
                output.write(pname + '\n')
            lines = p.split('\n')
            for line in lines:
                output.write(line.strip() + '\n')
        results_for_different_L_fields.append(output.getvalue())
    # use the L-field that gives the shortest output (but add some extra penalty for using many '/' characters)
    len_and_texts = [(len(s) + s.count('/')*0.15, s) for s in results_for_different_L_fields]
    len_and_texts.sort()
    return len_and_texts[0][1]
def xml_to_abc(filename):
    """Convert a MusicXML file (plain ``.xml`` or compressed ``.mxl``) to ABC notation.

    The conversion is run twice, once with a default note length (L:) of 1/8
    and once with 1/16, and the variant that yields the shortest ABC text
    (with a small penalty per '/' character) is returned.

    filename -- path to the MusicXML/.mxl file; any trailing number in the
                name is reused as the ABC X: (tune number) field.
    Returns the generated ABC tune as a single (unicode) string.
    Raises Exception for tuplet ratios other than 3:2 and 5:x.
    NOTE: Python 2 code (uses ``unicode`` and ``StringIO``).
    """
    global default_len
    # .mxl is a zip container; mxl_to_xml presumably extracts the XML payload
    if os.path.splitext(filename)[1].lower() == '.mxl':
        root = ElementTree().parse(mxl_to_xml(filename))
    else:
        root = ElementTree().parse(filename)
    debug = False
    results_for_different_L_fields = []
    # Files converted from Noteworthy Composer (.nwc format) may have incorrect beams.
    # These files do not use begin/continue/end but rather just begin/continue for beams.
    encoding_software = root.find('identification/encoding/software')
    noteworthy_composer_mode = encoding_software is not None and encoding_software.text == 'Noteworthy Composer'
    if noteworthy_composer_mode:
        process_early_directions(root)
    # try both candidate default note lengths and keep the shorter output
    for L in [Fraction(1, 8), Fraction(1, 16)]:
        default_len = L
        tune_fields = []   # ABC header fields (X:, T:, M:, K:, ...)
        parts = []         # list of (voice header, voice body) pairs
        # titles: work-title, movement-title and any <credit> text all become T: fields
        if root.findtext('work/work-title', ''):
            tune_fields.append(
                Field('T', root.findtext('work/work-title', '').strip()))
        if root.findtext('movement-title', ''):
            tune_fields.append(
                Field('T', root.findtext('movement-title', '').strip()))
        for credit in root.findall('credit'):
            credits = ''.join(e.text or '' for e in credit.findall('credit-words'))
            # Python 2 str.translate(None, chars) deletes the given characters
            credits = credits.translate(None, '\r\n')
            if credits.strip():
                tune_fields.append(Field('T', credits))
        # composer -> S: field, lyricist/transcriber -> Z: field (one per line)
        for creator in root.findall('identification/creator'):
            if creator.text:
                if creator.get('type') == 'composer':
                    for line in creator.text.split('\n'):
                        tune_fields.append(Field('S', line.strip()))
                elif creator.get('type') in ('lyricist', 'transcriber'):
                    text = creator.text
                    for line in text.split('\n'):
                        tune_fields.append(Field('Z', line.strip()))
        if noteworthy_composer_mode:
            # no line breaks in xml from NoteWorthy Composer so use continueall
            tune_fields.append(Field('I', 'continueall'))
        num_parts = len(list(root.findall('part')))
        for part_no, part in enumerate(root.findall('part')):
            accidentals = defaultdict(lambda: 0)   # accidentals implied by the key signature
            measure_accidentals = accidentals      # accidentals active within the current measure
            cur_divisions = 768                    # MusicXML divisions per quarter note (default until <divisions> seen)
            bar_number = 0
            bar_offset = 0                         # position within the current bar, in whole-note units
            # collect the set of lyric verse numbers used anywhere in this part
            lyric_numbers = sorted(
                set(
                    lyric.get('number', '1')
                    for lyric in part.findall('*/note/lyric')))
            lyrics = dict((lyric_number, []) for lyric_number in lyric_numbers)
            voice_names = sorted(
                [voice.text for voice in part.findall('measure/note/voice')])
            if voice_names:
                first_voice = voice_names[0]
            else:
                first_voice = None
            voices = defaultdict(list)   # voice id -> list of tokens (Note/Chord/Field/str)
            num_staves = int(
                root.findtext('part/measure/attributes/staves', '1'))
            for measure in part.findall('measure'):
                bar_number += 1
                bar_offset = 0
                last_measure_accidentals = measure_accidentals
                measure_accidentals = accidentals.copy()
                notes_with_accidentals_in_this_measure = set()
                for measure_element in measure.getchildren():
                    # fields
                    if measure_element.tag == 'attributes':
                        attributes = measure_element
                        for element in attributes.getiterator():
                            if element.tag == 'key':
                                # determine modal scale
                                # NOTE(review): the '' default would fail to unpack into two
                                # values -- presumably key_mode_map always contains the mode; confirm
                                mode, base_note_distance = key_mode_map.get(
                                    element.findtext('mode') or 'major', '')
                                # insert new key in all voices
                                fifths = int(element.findtext('fifths'))
                                key_name = get_key_name(
                                    fifths, base_note_distance) + mode
                                field = Field('K', key_name)
                                if voices:
                                    for voice in voices.values():
                                        voice.append(field)
                                elif part_no == 0:
                                    tune_fields.append(field)
                                # update accidentals
                                accidentals = get_accidentals(fifths)
                                measure_accidentals = accidentals.copy()
                            elif element.tag == 'time':
                                metre = '%s/%s' % (element.findtext(
                                    'beats'), element.findtext('beat-type'))
                                field = Field('M', metre)
                                if voices:
                                    for voice in voices.values():
                                        voice.append(field)
                                elif part_no == 0:
                                    tune_fields.append(field)
                            elif element.tag == 'divisions':
                                cur_divisions = int(element.text)
                    # tempo in BPM
                    elif measure_element.tag == 'sound':
                        if 'tempo' in measure_element.attrib:
                            tune_fields.append(
                                Field('Q', measure_element.attrib['tempo']))
                    # notes
                    elif measure_element.tag == 'note':
                        note = measure_element
                        voice = note.findtext('voice')
                        # name ('x' = invisible rest, 'z' = rest, else pitch step + octave)
                        if note.get('print-object') == 'no':
                            note_name = 'x'
                        elif note.find('rest') is not None:
                            note_name = 'z'
                        else:
                            note_name = note.findtext(
                                'pitch/step') + note.findtext('pitch/octave')
                        # duration and whether it's a grace note
                        is_grace_note = note.find('grace') is not None
                        grace_slash = False
                        if is_grace_note:
                            duration = name2duration[note.findtext('type')]
                            grace_slash = note.find('grace').get(
                                'slash') == 'yes'
                        else:
                            # /4 converts quarter-note units to whole-note units
                            duration = Fraction(int(note.findtext('duration')),
                                                cur_divisions) / 4
                            duration.reduce()
                        # find any time-modifications (due to tuplets) and rescale displayed duration accordingly
                        actual_notes = note.findtext(
                            'time-modification/actual-notes')
                        normal_notes = note.findtext(
                            'time-modification/normal-notes')
                        if actual_notes and normal_notes:  # if time modification
                            time_modification = Fraction(
                                int(actual_notes), int(normal_notes))
                        else:
                            time_modification = Fraction(1, 1)
                        displayed_duration = duration * time_modification
                        # create Note object
                        n = Note(note_name, duration, displayed_duration,
                                 bar_number, bar_offset, is_grace_note,
                                 grace_slash)
                        # tuplet
                        tuplet = note.find('notations/tuplet')
                        if tuplet is not None:
                            if tuplet.get('type') == 'start':
                                actual_notes, normal_notes = int(
                                    actual_notes), int(normal_notes)
                                if actual_notes == 3 and normal_notes == 2:
                                    n.tuplet_begin = '(3'
                                elif actual_notes == 5:
                                    n.tuplet_begin = '(5'
                                else:
                                    raise Exception(
                                        'unrecognized tuplet: %d/%d' %
                                        (actual_notes, normal_notes))
                                #n.tuplet_begin = '(%d' % int(Fraction(actual_notes, normal_notes) * 2)  # TODO: make more generally applicable
                            elif tuplet.get('type') == 'stop':
                                n.tuplet_end = True
                        # accidental
                        if not note_name in 'zx':  # unless rest or invisible rest
                            alter = int(note.findtext('pitch/alter', '0'))
                            if alter != measure_accidentals[
                                    note_name] or note.find(
                                        'accidental') is not None:
                                n.accidental = {
                                    -2: '__',
                                    -1: '_',
                                    0: '=',
                                    1: '^',
                                    2: '^^'
                                }[alter]
                                measure_accidentals[note_name] = alter
                                notes_with_accidentals_in_this_measure.add(
                                    note_name)
                        # tie
                        tie = note.find('tie')
                        if tie is not None and tie.get('type') == 'start':
                            n.tie = '-'
                        # slurs
                        for slur in note.findall('notations/slur'):
                            if slur.get('type') == 'start':
                                n.slur_begin = n.slur_begin + '('
                            if slur.get('type') == 'stop':
                                n.slur_end = n.slur_end + ')'
                        # ornaments
                        for key, value in note_ornamentation_map.items():
                            if note.find(key) is not None:
                                n.ornaments = n.ornaments + value
                        # fingering
                        fingering = note.find('notations/technical/fingering')
                        if fingering is not None:
                            n.ornaments = '!%s!' % fingering.text + n.ornaments
                        # string
                        string = note.find('notations/technical/string')
                        if string is not None and string.text:
                            # add as a text annotation
                            if string.attrib.get('placement', 'above') == 'above':
                                n.before_string = '"^%s"' % string.text.strip(
                                ) + n.before_string
                            else:
                                n.before_string = '"_%s"' % string.text.strip(
                                ) + n.before_string
                        # spacing due to beam ends or long notes
                        if not is_grace_note:
                            beams = [
                                beam for beam in note.findall('beam')
                                if beam.text in ['begin', 'continue', 'end']
                            ]
                            all_beams_end_here = beams and not [
                                b for b in beams if b.text != 'end'
                            ]
                            if all_beams_end_here or duration >= Fraction(
                                    1, 4) or (duration < Fraction(1, 4)
                                              and len(beams) == 0):
                                n.trailing_space = ' '
                            ##if duration < Fraction(1, 4) and len(beams)==0:
                            ##    n.trailing_space = ' !%s!' % len(beams)
                        # chord
                        if note.find('chord') is not None:
                            n.trailing_space = ''  # beam detection only works for first chord note, so erase any incorrectly generated space
                            # merge this note into the previous token, promoting it to a Chord if needed
                            last = voices[voice].pop()
                            if not isinstance(last, Chord):
                                last = Chord([last])
                            last.add(n)
                            n = last
                        # lyrics
                        elif voice == first_voice and not is_grace_note and note_name not in 'zx':
                            for lyric_number in lyric_numbers:
                                lyrics_text = '*'  # skip this note unless we find a lyric element that matches the current lyrics number
                                for lyric in note.findall('lyric'):
                                    if lyric_number == lyric.get('number', '1'):
                                        # match found, so get the lyrics text (replace spaces and elision elements by '~')
                                        lyrics_text = ''
                                        for lyr_element in lyric:
                                            if lyr_element.tag == 'elision':
                                                lyrics_text = lyrics_text + '~'
                                            elif lyr_element.tag == 'text':
                                                lyrics_text = lyrics_text + (
                                                    lyr_element.text or ''
                                                ).replace('-', r'\-').replace(
                                                    ' ', '~'
                                                )  # escape '-' characters
                                        # add - and _ characters
                                        if lyric.findtext('syllabic') in [
                                                'begin', 'middle'
                                        ]:
                                            lyrics_text = lyrics_text + '-'
                                        if lyric.find('extend') is not None:
                                            lyrics_text = lyrics_text.replace(
                                                '*', '') + '_'
                                # if the current element is silence and the last element was '_', then discard the silence since the '_' covers this
                                if lyrics_text == '*' and lyrics[
                                        lyric_number] and lyrics[lyric_number][
                                            -1].endswith('_'):
                                    lyrics[lyric_number].append(
                                        ''
                                    )  # adding '' ensures that the if condition is not true next time around
                                else:
                                    lyrics[lyric_number].append(lyrics_text)
                        # add note/chord to its voice
                        voices[voice].append(n)
                        if not is_grace_note:
                            bar_offset += n.duration
                    # backup
                    elif measure_element.tag == 'backup':
                        duration = Fraction(
                            int(measure_element.findtext('duration')),
                            cur_divisions) / 4
                        duration.reduce()
                        bar_offset -= duration
                    elif measure_element.tag == 'barline':
                        for voice_name, voice in voices.items():
                            barline = measure_element
                            location = barline.get('location')
                            bar_style = barline.findtext('bar-style')
                            # '|-|' is a temporary marker collapsed to '||' later
                            if bar_style == 'light-light':
                                s = '|-|'
                            elif bar_style == 'light-heavy':
                                s = '|]'
                            else:
                                s = '|'
                            repeat = barline.find('repeat')
                            if repeat is not None:
                                if repeat.get('direction') == 'forward':
                                    s = '|:'
                                else:
                                    s = ':|'
                            # handle segno, coda, fermata
                            # NOTE(review): Element truthiness is False for childless elements;
                            # `if barline.find('segno'):` may miss empty <segno/> tags -- confirm
                            ornament = None
                            if barline.find('segno'):
                                ornament = 'S'
                            elif barline.find('coda'):
                                ornament = 'O'
                            elif barline.find('fermata'):
                                ornament = 'H'
                            if ornament:
                                # NOTE(review): this inner loop rebinds the outer `voice`
                                # variable, so the voice.append(s) below targets the last
                                # voice iterated here -- looks unintended; confirm
                                for voice in voices.values():
                                    voice.append(ornament)
                            ending = barline.find('ending')
                            if ending is not None:
                                if ending.get('type') == 'start':
                                    if part_no == 0 and voice_name == first_voice:
                                        # only read endings for first part since this is the way ABC handles it
                                        text = ending.text or ending.get(
                                            'number')
                                        if text is None:
                                            text = ''
                                        elif text.strip(
                                        ) in '1 2 3 4 5 6 1. 2. 3. 4. 5. 6.':
                                            text = text.replace(
                                                '.', ''
                                            )  # delete any trailing dot after the ending number
                                        else:
                                            text = '"%s"' % text
                                        s = s + '[' + text
                                else:
                                    s = s + '|-|'
                            voice.append(s)
                            debug_print(s)
                    elif measure_element.tag == 'direction':
                        direction = measure_element
                        s = None
                        if direction.find('direction-type/coda') is not None:
                            s = 'O'
                        elif direction.find(
                                'direction-type/segno') is not None:
                            s = 'S'
                        elif direction.find(
                                'direction-type/words') is not None:
                            words = direction.find('direction-type/words')
                            offset = words.get('default-y')
                            text = direction.findtext('direction-type/words',
                                                      '').strip()
                            # if this is just a numbering of the voice and it comes before key signature and metre info, then ignore it
                            if not voices and text.replace(
                                    '"', '') == str(part_no + 1):
                                text = ''
                            text = text.replace(
                                '"', r'\u0022')  # add escape code for " characters
                            ##if text.lower() == 'fine':
                            ##    s = '!fine!'
                            ##elif text.upper() == 'D.C.':
                            ##    s = '!D.C.!'
                            ##elif text.upper() == 'D.S.':
                            ##    s = '!D.S.!'
                            # up/down instruction encoded as a text direction
                            if noteworthy_composer_mode and text == 'u':
                                s = 'v'
                            elif noteworthy_composer_mode and text == 'd':
                                s = 'u'
                            # chord encoded as text
                            elif chord_pattern.match(
                                    text) and noteworthy_composer_mode:
                                s = '"%s"' % text
                            # Sibelius sometimes seems to use this for segno
                            elif text == '$':
                                s = 'S'
                            elif offset and int(offset) < 0:
                                s = '"_%s"' % text   # below the staff -> "_text"
                            else:
                                s = '"^%s"' % text   # above the staff -> "^text"
                            s = s.replace('\n', ' ')
                        if s:
                            #if s.startswith('"'):
                            voices[first_voice].append(s)
                            #else:
                            #    voices[voice].append(n)
                    elif measure_element.tag == 'harmony':
                        voices['1'].append(xml_harmony_to_abc(measure_element))
                    elif measure_element.tag == 'print':
                        print_element = measure_element
                        # a system break in the score becomes a line break in the ABC output
                        if print_element is not None and print_element.get(
                                'new-system') == 'yes':
                            for voice in voices.values():
                                voice[-1] = voice[-1] + '\n'
                            for lyrics_number in lyrics:
                                lyrics[lyrics_number].append('\n')
                # every measure ends with a plain bar line
                for voice in voices.values():
                    voice.append('|')
                debug_print('|')
            # post-process each voice's token stream
            for voice in voices:
                fix_chords(voices[voice])
                fix_tuplets(voices[voice])
                voices[voice] = introduce_grace_starts_and_ends(voices[voice])
                voices[voice] = fix_barlines(voices[voice])
                fix_slurs_before_repeat_ends(voices[voice])
                introduce_broken_rythms(voices[voice])
                if noteworthy_composer_mode:
                    # beams in the XML aren't reliable in this case, so make it so that beams are always broken at quarters (might not be optimal for all metres)
                    reset_whitespace(
                        voices[voice],
                        introduce_new_lines=not any(lyrics.values()))
            for voice_name, voice in sorted(voices.items()):
                if not voice_name and not voice:
                    continue
                s = ''.join(map(unicode, voice))
                # collapse the temporary '|-|' markers and clean up doubled bar lines
                s = s.replace('|-|', '||').replace(':|||:', '::').replace(
                    '||:', '|:').replace(':||', ':|').replace('||[', '|[').replace(
                        '||[', '|[').replace('|-|', '||').replace(
                            '|||', '||').replace('|||', '||').replace(
                                ']|', ']').replace('|]|', '|]').strip()
                if s.endswith('||'):
                    s = s[0:-2] + '|]'
                if s.startswith('"^"'):
                    s = s[3:]   # drop an empty leading annotation
                if num_parts > 1:
                    if voice_name:
                        voice_name = part.get('id') + '_' + str(voice_name)
                    else:
                        voice_name = part.get('id')
                # if this is the first voice, then pair up each line of note output with the lyrics lines (if there are any)
                if voice_name == first_voice:
                    result = []
                    notes_lines = s.split('\n')
                    lines_for_each_lyrics = [
                        ' '.join(lyrics[lyrics_number]).split('\n')
                        for (lyrics_number,
                             lyrics_parts) in sorted(lyrics.items())
                    ]
                    for line_no in range(len((notes_lines))):
                        result.append(notes_lines[line_no])
                        for lines in lines_for_each_lyrics:
                            if re.search(
                                    r'[^-*_ ]',
                                    lines[line_no]):  # if line is not empty
                                result.append('w: %s' % lines[line_no])
                    s = '\n'.join(result)
                parts.append(('V:%s' % str(voice_name), s))
        # X: field -- reuse a trailing number in the filename when plausible
        file_numbers = [int(x) for x in re.findall(r'(\d+)', filename)]
        if file_numbers and 0 <= file_numbers[-1] <= 100000:
            tune_fields.insert(0, Field('X', str(file_numbers[-1])))
        else:
            tune_fields.insert(0, Field('X', '1'))
        tune_fields.append(Field('L', str(default_len)))
        ##tune_fields.append(Field('R', ''))
        ##tune_fields.append(Field('O', ''))
        output = StringIO.StringIO(u'')
        # write all non-K fields first; K: must come last in the ABC header
        for f in tune_fields:
            if f.field_name != 'K':
                output.write(
                    unicode(f).replace('[', '').replace(']', '') + '\n')
        for f in tune_fields:
            if f.field_name == 'K':
                output.write(
                    unicode(f).replace('[', '').replace(']', '') + '\n')
        if not [f for f in tune_fields if f.field_name == 'K']:
            output.write('K:C\n')
        for pname, p in parts:
            if len(parts) > 1 and pname:
                output.write(pname + '\n')
            lines = p.split('\n')
            for line in lines:
                output.write(line.strip() + '\n')
        results_for_different_L_fields.append(output.getvalue())
    # use the L-field that gives the shortest output (but add some extra penalty for using many '/' characters)
    len_and_texts = [(len(s) + s.count('/') * 0.15, s)
                     for s in results_for_different_L_fields]
    len_and_texts.sort()
    return len_and_texts[0][1]
def GetEngData(self):
    """Fetch the engineering-data test page from the device's HTTP server
    and dump every known settings/field-test value to the log.

    The XML payload (element 2 of the ConnectionHTTP() result) is parsed
    with ElementTree and each element below p-answer/engdata is logged as
    "<label><value>", framed by const.STR_SHAP separator lines.

    Fix vs. original: every value is now passed through str() before
    concatenation, so a missing XML element (findtext() returning None)
    is logged as "None" instead of raising TypeError -- previously only
    some of the ~45 log lines were wrapped in str().
    """
    self.WriteLog("Get gps Data ----- Start ".replace("gps", "engdata") if False else "Get engdata ----- Start ")
    readsource = self.ConnectionHTTP(testtype=const.TEST_TYPE_ENGDATA)
    # readsource[2] holds the XML body -- presumably (status, reason, body); confirm against ConnectionHTTP
    domtree = ElementTree(XML(readsource[2]))
    domtree.getroot()
    settings = "p-answer/engdata/settings/"
    field_test = "p-answer/engdata/field_test/"
    # (log label, XML path) pairs, in the original logging order;
    # labels are kept byte-for-byte identical to the original log output
    fields = [
        ("mobile IP : ", settings + "mobile_ip"),
        ("access_overload_class : ", settings + "access_overload_class"),
        ("preferred_operating_mode : ", settings + "preferred_operating_mode"),
        ("slot_cycle_index : ", field_test + "slot_cycle_index"),
        ("current_nam : ", field_test + "current_nam"),
        ("auto_nam : ", field_test + "auto_nam"),
        ("spc_change_enabled : ", field_test + "spc_change_enabled"),
        ("directory_number : ", field_test + "directory_number"),
        ("access_overload_class : ", field_test + "access_overload_class"),
        ("mcc : ", field_test + "mcc"),
        ("mnc : ", field_test + "mnc"),
        ("channel_primary_a : ", field_test + "channel_primary_a"),
        ("channel_primary_b : ", field_test + "channel_primary_b"),
        ("channel_secondary_a : ", field_test + "channel_secondary_a"),
        ("channel_secondary_b : ", field_test + "channel_secondary_b"),
        ("home_sid_table : ", field_test + "home_sid_table"),
        ("terminated_reg_home_sid> : ", field_test + "terminated_reg_home_sid"),
        ("terminated_reg_foreign_sid : ", field_test + "terminated_reg_foreign_sid"),
        ("terminated_reg_foreign_nid : ", field_test + "terminated_reg_foreign_nid"),
        ("system_preffered_mode : ", field_test + "system_preffered_mode"),
        ("prl_version_number : ", field_test + "prl_version_number"),
        ("dns_primary : ", field_test + "dns_primary"),
        ("dns_secondary : ", field_test + "dns_secondary"),
        ("packet_dial_string : ", field_test + "packet_dial_string"),
        ("mdr_mode : ", field_test + "mdr_mode"),
        ("data_scrm : ", field_test + "data_scrm"),
        ("mip_ha_spi_value : ", field_test + "mip_ha_spi_value"),
        ("mip_reverse_tunneling : ", field_test + "mip_reverse_tunneling"),
        ("mip_home : ", field_test + "mip_home"),
        ("mip_primary_ha_address : ", field_test + "mip_primary_ha_address"),
        ("mip_secondary_ha_address : ", field_test + "mip_secondary_ha_address"),
        ("mip_behavior : ", field_test + "mip_behavior"),
        ("mip_pre_registration_timeout : ", field_test + "mip_pre_registration_timeout"),
        ("mip_registration_retries : ", field_test + "mip_registration_retries"),
        ("nid : ", field_test + "nid"),
        ("fer : ", field_test + "fer"),
        ("rssi : ", field_test + "rssi"),
        ("channel : ", field_test + "channel"),
        ("latitude : ", field_test + "latitude"),
        ("longitude : ", field_test + "longitude"),
        ("band_class : ", field_test + "band_class"),
        ("p_rev : ", field_test + "p_rev"),
        ("packet_zone_id : ", field_test + "packet_zone_id"),
        ("dormant_state : ", field_test + "dormant_state"),
        ("subnet_mask : ", field_test + "subnet_mask"),
        ("color_code : ", field_test + "color_code"),
        ("uati024 : ", field_test + "uati024"),
    ]
    self.WriteLog(const.STR_SHAP)
    for label, path in fields:
        # str() guards against findtext() returning None for missing elements
        self.WriteLog(label + str(domtree.findtext(path)))
    self.WriteLog(const.STR_SHAP)
    self.WriteLog("Get engdata ----- End ")
def GetSRVConnState(self, testtype=const.TEST_TYPE_CONDIS):
    """Query the server connection-state test page and log the details.

    :param testtype: URL path suffix of the test page (defaults to the
        connect/disconnect page, const.TEST_TYPE_CONDIS).
    :return: text of p-answer/condata/state/value, or None if missing.

    Fixes vs. original:
    - every logged value is passed through str(), so a missing XML element
      (findtext() returning None) no longer raises TypeError;
    - the roaming-list-version line gets its own label instead of
      duplicating the "Roam Indicator Description" label.
    """
    self.WriteLog("GetSRVConnState ----- Start ")
    readsource = self.ConnectionHTTP(testtype)
    # readsource[2] holds the XML body -- presumably (status, reason, body); confirm against ConnectionHTTP
    domtree = ElementTree(XML(readsource[2]))
    domtree.getroot()
    srvstate = domtree.findtext("p-answer/condata/state/value")

    serving = "p-answer/condata/network/serving/"
    address = "p-answer/condata/connection/address/"
    SEP = None  # sentinel entry: emit a '+++...' separator line
    fields = [
        ("Server State : ", "p-answer/condata/state/value"),
        ("Description : ", "p-answer/condata/state/description"),
        SEP,
        ("Network Serving Nmae : ", serving + "name"),
        ("Network Serving Type : ", serving + "type"),
        ("Network Serving ID : ", serving + "id"),
        ("Network Serving Server : ", serving + "server"),
        ("Netwrok Serving Server Type : ", serving + "servertype"),
        ("Network Serving Encryption : ", serving + "encryption"),
        SEP,
        ("Network Serving Roam Type : ", serving + "roam/type"),
        ("Network Serving Roam Indicator : ", serving + "roam/indicator"),
        ("Network Serving Roam Indicator Description : ", serving + "roam/indicatordescription"),
        # label fixed: was a copy-paste duplicate of the line above
        ("Network Serving Roam Roaming List Version : ", serving + "roam/roaminglistversion"),
        ("Network Serving Home Type : ", "p-answer/condata/network/home/type"),
        ("Network Serving Home ID : ", "p-answer/condata/network/home/id"),
        SEP,
        ("Network Actions : ", "p-answer/condata/actions"),
        SEP,
        ("Network Connection IP4 IP : ", address + "ipv4/ip"),
        ("Network Connection IP4 Subnet : ", address + "ipv4/subnet"),
        ("Network connection IP4 Gateway : ", address + "ipv4/gateway"),
        ("Network Connection IP4 DNS : ", address + "ipv4/dns"),
        SEP,
        ("Network Connection IP6 IP : ", address + "ipv6/ip"),
        ("Network Connection IP6 Subnet : ", address + "ipv6/subnet"),
        ("Network connection IP6 Gateway : ", address + "ipv6/gateway"),
        ("Network Connection IP6 DNS : ", address + "ipv6/dns"),
    ]
    self.WriteLog(const.STR_SHAP)
    for entry in fields:
        if entry is SEP:
            self.WriteLog("+++++++++++++++++++++++++++++++++++++++++")
        else:
            label, path = entry
            # str() guards against findtext() returning None for missing elements
            self.WriteLog(label + str(domtree.findtext(path)))
    self.WriteLog(const.STR_SHAP)
    self.WriteLog("GetSRVConnState ----- END ")
    return srvstate
class LCTWindow(Gtk.Window):
    """Main GTK window for the "Lazy Combat Tracker" log parser.

    Shows the selected log folder, a button to change it, a start/stop
    button for the background Parser thread, and a progress bar.
    Settings (the log folder) are persisted to ~/.LCT/Settings.xml.
    """

    def __init__(self):
        """Build the UI, load persisted settings, and set up the status icon."""
        self.parserrunning = False   # True while the Parser thread is active
        self.LogFolder = ""          # path of the selected log directory
        self.settings = ElementTree()
        self.LoadSettings()
        Gtk.Window.__init__(self, title="Lazy Combat Tracker")
        self.set_keep_above(True)
        self.connect("delete-event", Gtk.main_quit)
        self.VBox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
        self.add(self.VBox)
        self.HBox = Gtk.Box(spacing=6)
        # label shows either a prompt or the last path component of LogFolder
        if self.LogFolder == "":
            self.LogLable = Gtk.Label("No log file selected!")
        else:
            self.LogLable = Gtk.Label(self.GetServer())
        self.HBox.pack_start(self.LogLable, True, True, 0)
        self.button = Gtk.Button(label="Change")
        self.button.connect("clicked", self.on_file_select)
        self.HBox.pack_start(self.button, True, True, 0)
        self.runButton = Gtk.Button(label="Start Parser!")
        self.VBox.pack_start(self.HBox, True, True, 0)
        self.runButton.connect("clicked", self.toggle_parser)
        self.VBox.pack_start(self.runButton, True, True, 0)
        self.Statusbar = Gtk.ProgressBar()
        self.Statusbar.set_text("Please select a log file directory.")
        self.Statusbar.set_show_text(True)
        self.Statusbar.set_pulse_step(0.01)
        self.VBox.pack_start(self.Statusbar, True, True, 0)
        #StatusIcon
        self.StatusIcon = Gtk.StatusIcon()
        try:
            self.StatusIcon.set_from_file("LCT.png")
            self.StatusIcon.set_visible(True)
        except:
            # NOTE(review): bare except silently ignores a missing/bad icon
            # file (best-effort); consider narrowing to GLib.Error
            pass

    def GetServer(self):
        """Return the last path component of LogFolder (the server name)."""
        server = self.LogFolder.split('/')
        server = server[len(server)-1]
        return server

    def toggle_parser(self, widget):
        """Start or stop the background Parser thread (runButton handler)."""
        if (self.parserrunning):
            self.Statusbar.set_text("Stopping parser...")
            Gtk.main_iteration()  # let the status text repaint before blocking
            self.parser_thread.stop()
            self.parserrunning = False
            self.Statusbar.set_text("Done!")
            self.Statusbar.set_fraction(0.0)
            try:
                self.StatusIcon.set_from_file("LCT.png")
            except:
                pass  # best-effort icon update
            self.runButton.set_label("Start Parser!")
        else:
            self.Statusbar.set_text("Scanning log file, please wait...")
            Gtk.main_iteration()  # let the status text repaint before blocking
            # Parser is a project-defined thread class -- presumably
            # (folder, clipboard callback, progress bar, status icon); confirm
            self.parser_thread = Parser(self.LogFolder, self.CopyToClipBoard, self.Statusbar, self.StatusIcon)
            self.parser_thread.name = 0
            self.parser_thread.start()
            self.parserrunning = True
            self.Statusbar.set_text("Parser is running...")
            self.Statusbar.set_fraction(1.0)
            self.runButton.set_label("Stop Parser!")

    def on_file_select(self, widget):
        """Let the user pick a log folder; persist and display the choice."""
        dialog = Gtk.FileChooserDialog("Please choose a folder", self,
            Gtk.FileChooserAction.SELECT_FOLDER,
            (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
             "Select", Gtk.ResponseType.OK))
        dialog.set_default_size(800, 400)
        response = dialog.run()
        if response == Gtk.ResponseType.OK:
            logging.info("Open clicked")
            logging.info("File selected: " + dialog.get_filename())
            self.LogFolder = dialog.get_filename()
            self.SaveSettings()
            self.LogLable.set_text(self.GetServer())
            self.Statusbar.set_text("Log directory set!")
        elif response == Gtk.ResponseType.CANCEL:
            logging.info("Cancel clicked")
        dialog.destroy()

    def CopyToClipBoard(self, text):
        """Copy text to the clipboard; scheduled via idle_add so it runs on
        the GTK main loop (this is called from the parser thread)."""
        self.clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
        GObject.idle_add(self.clipboard.set_text, text, -1)

    def LoadSettings(self):
        """Load ~/.LCT/Settings.xml (creating dir/file on first run) and
        download the status-icon images if they are missing.

        Side effect: changes the process working directory to ~/.LCT so the
        icon files can be referenced by bare name elsewhere in the class.
        """
        try:
            os.chdir(os.path.expanduser("~/.LCT"))
        except:
            # first run: settings directory does not exist yet
            os.makedirs(os.path.expanduser("~/.LCT"))
            os.chdir(os.path.expanduser("~/.LCT"))
        try:
            self.settings.parse(os.path.expanduser("~/.LCT/Settings.xml"))
        except:
            # missing/corrupt settings: write a minimal skeleton and re-parse
            # NOTE(review): `file` shadows the Python 2 builtin of that name
            file = open(os.path.expanduser("~/.LCT/Settings.xml"), 'w+')
            file.write('<LCT><LOGFOLDER></LOGFOLDER></LCT>')
            file.close()
            self.settings.parse(os.path.expanduser("~/.LCT/Settings.xml"))
        # fetch the tray icons on first run (DownLoad is a project helper)
        if not exists("LCT.png"):
            print("Downloading: LCT.png")
            DownLoad("http://www.lejoni.com/lct/LCT.png", "LCT.png")
        if not exists("LCT-running.png"):
            print("Downloading: LCT-running.png")
            DownLoad("http://www.lejoni.com/lct/LCT-running.png", "LCT-running.png")
        if not exists("LCT-fighting.png"):
            print("Downloading: LCT-fighting.png")
            DownLoad("http://www.lejoni.com/lct/LCT-fighting.png", "LCT-fighting.png")
        self.LogFolder = self.settings.findtext("LOGFOLDER")

    def SaveSettings(self):
        """Write the current LogFolder back to ~/.LCT/Settings.xml."""
        self.settings.find("LOGFOLDER").text = self.LogFolder
        self.settings.write(os.path.expanduser("~/.LCT/Settings.xml"))
class MainWindow(wx.Frame,InetFTP.InetFTP):
    """wxPython (Python 2) front-end for a device stability test tool:
    drives connect/disconnect, SMS, FTP and web tests against a device's
    HTTP test server and logs everything to the LogView widget and a
    timestamped log file."""

    def _init_ctrls(self, prnt):
        # generated method, don't edit
        wx.Frame.__init__(self, id=wxID_MAINWINDOW, name=u'MainWindow',
              parent=prnt, pos=wx.Point(63, 191), size=wx.Size(1297, 581),
              style=wx.RAISED_BORDER | wx.FRAME_TOOL_WINDOW | wx.DEFAULT_FRAME_STYLE,
              title=u'Sparkle Stability tool V')
        self.SetClientSize(wx.Size(1289, 554))
        self.Bind(wx.EVT_CLOSE, self.OnMainWindowClose)
        self.staticText1 = wx.StaticText(id=wxID_MAINWINDOWSTATICTEXT1,
              label=u'Connect/Disconnect Test : Loop counter', name='staticText1',
              parent=self, pos=wx.Point(200, 16), size=wx.Size(230, 14), style=0)
        self.conCounter = wx.TextCtrl(id=wxID_MAINWINDOWCONCOUNTER,
              name=u'conCounter', parent=self, pos=wx.Point(456, 8),
              size=wx.Size(58, 22), style=0, value=u'10')
        # Tooltip text is Korean for "repeat count".
        self.conCounter.SetToolTipString(u'\ubc18\ubcf5\ud69f\uc218')
        self.conCounter.SetMaxLength(100000)
        self.conCounter.SetCursor(wx.STANDARD_CURSOR)
        self.conCounter.SetHelpText(u'')
        self.conCounter.SetConstraints(LayoutAnchors(self.conCounter, True,
              True, False, False))
        self.ConDis = wx.Button(id=wxID_MAINWINDOWCONDIS, label=u'TEST START',
              name=u'ConDis', parent=self, pos=wx.Point(528, 8),
              size=wx.Size(88, 24), style=0)
        self.ConDis.Bind(wx.EVT_BUTTON, self.OnConDisButton,
              id=wxID_MAINWINDOWCONDIS)
        self.ToolClose = wx.lib.buttons.GenButton(id=wxID_MAINWINDOWTOOLCLOSE,
              label=u'Tool Close', name=u'ToolClose', parent=self,
              pos=wx.Point(8, 512), size=wx.Size(616, 26), style=0)
        self.ToolClose.Bind(wx.EVT_BUTTON, self.OnToolCloseButton,
              id=wxID_MAINWINDOWTOOLCLOSE)
        self.staticBox1 = wx.StaticBox(id=wxID_MAINWINDOWSTATICBOX1,
              label=u'SMS TEST Cases', name='staticBox1', parent=self,
              pos=wx.Point(8, 48), size=wx.Size(616, 80), style=0)
        self.smsCounter = wx.TextCtrl(id=wxID_MAINWINDOWSMSCOUNTER,
              name=u'smsCounter', parent=self, pos=wx.Point(456, 80),
              size=wx.Size(57, 22), style=0, value=u'10')
        self.smsCounter.SetToolTipString(u'\ubc18\ubcf5\ud69f\uc218')
        self.smsCounter.SetMaxLength(100000)
        self.smsTest = wx.Button(id=wxID_MAINWINDOWSMSTEST, label=u'SMS TEST',
              name=u'smsTest', parent=self, pos=wx.Point(528, 80),
              size=wx.Size(83, 24), style=0)
        self.smsTest.Bind(wx.EVT_BUTTON, self.OnSmsTestButton,
              id=wxID_MAINWINDOWSMSTEST)
        self.SMSCase1 = wx.CheckBox(id=wxID_MAINWINDOWSMSCASE1,
              label='Disconnect_160byte', name='chkSMSCase1', parent=self,
              pos=wx.Point(26, 86), size=wx.Size(152, 14), style=0)
        self.SMSCase2 = wx.CheckBox(id=wxID_MAINWINDOWSMSCASE2,
              label='Disconnect_0-160byte', name=u'SMSCase2', parent=self,
              pos=wx.Point(25, 107), size=wx.Size(152, 14), style=0)
        self.SMSCase3 = wx.CheckBox(id=wxID_MAINWINDOWSMSCASE3,
              label='Connect_160byte', name=u'SMSCase3', parent=self,
              pos=wx.Point(206, 88), size=wx.Size(144, 14), style=0)
        self.SMSCase4 = wx.CheckBox(id=wxID_MAINWINDOWSMSCASE4,
              label='Disconnect_0-160byte', name=u'SMSCase4', parent=self,
              pos=wx.Point(206, 106), size=wx.Size(152, 14), style=0)
        self.staticBox2 = wx.StaticBox(id=wxID_MAINWINDOWSTATICBOX2,
              label=u'FTP TEST', name='staticBox2', parent=self,
              pos=wx.Point(8, 137), size=wx.Size(616, 113), style=0)
        self.staticText2 = wx.StaticText(id=wxID_MAINWINDOWSTATICTEXT2,
              label=u'Server Name', name='staticText2', parent=self,
              pos=wx.Point(24, 154), size=wx.Size(88, 14), style=0)
        self.textCtrl1 = wx.TextCtrl(id=wxID_MAINWINDOWTEXTCTRL1,
              name='textCtrl1', parent=self, pos=wx.Point(120, 154),
              size=wx.Size(288, 22), style=0, value=u'ftp.pantechwireless.com')
        self.textCtrl2 = wx.TextCtrl(id=wxID_MAINWINDOWTEXTCTRL2,
              name='textCtrl2', parent=self, pos=wx.Point(456, 177),
              size=wx.Size(68, 22), style=0, value=u'10')
        self.textCtrl2.SetToolTipString(u'\ubc18\ubcf5\ud69f\uc218')
        self.textCtrl2.SetMaxLength(100000)
        self.staticText3 = wx.StaticText(id=wxID_MAINWINDOWSTATICTEXT3,
              label=u'Test Directory', name='staticText3', parent=self,
              pos=wx.Point(22, 185), size=wx.Size(78, 14), style=0)
        self.textCtrl3 = wx.TextCtrl(id=wxID_MAINWINDOWTEXTCTRL3,
              name='textCtrl3', parent=self, pos=wx.Point(122, 184),
              size=wx.Size(100, 22), style=0, value=u'RAY_QE')
        self.staticText4 = wx.StaticText(id=wxID_MAINWINDOWSTATICTEXT4,
              label=u'ID', name='staticText4', parent=self,
              pos=wx.Point(29, 217), size=wx.Size(12, 14), style=0)
        self.textCtrl4 = wx.TextCtrl(id=wxID_MAINWINDOWTEXTCTRL4,
              name='textCtrl4', parent=self, pos=wx.Point(123, 217),
              size=wx.Size(197, 22), style=0, value=u'*****@*****.**')
        self.staticText5 = wx.StaticText(id=wxID_MAINWINDOWSTATICTEXT5,
              label=u'Passwrod', name='staticText5', parent=self,
              pos=wx.Point(330, 221), size=wx.Size(51, 14), style=0)
        self.textCtrl5 = wx.TextCtrl(id=wxID_MAINWINDOWTEXTCTRL5,
              name='textCtrl5', parent=self, pos=wx.Point(389, 216),
              size=wx.Size(100, 22), style=0, value=u'test123')
        self.button1 = wx.Button(id=wxID_MAINWINDOWBUTTON1, label=u'FTP TEST',
              name='button1', parent=self, pos=wx.Point(536, 161),
              size=wx.Size(75, 72), style=0)
        self.staticBox3 = wx.StaticBox(id=wxID_MAINWINDOWSTATICBOX3,
              label=u'WEB Surffing TEST', name='staticBox3', parent=self,
              pos=wx.Point(8, 258), size=wx.Size(616, 70), style=0)
        self.staticText6 = wx.StaticText(id=wxID_MAINWINDOWSTATICTEXT6,
              label=u'TEST URL', name='staticText6', parent=self,
              pos=wx.Point(24, 284), size=wx.Size(55, 14), style=0)
        self.textCtrl6 = wx.TextCtrl(id=wxID_MAINWINDOWTEXTCTRL6,
              name='textCtrl6', parent=self, pos=wx.Point(93, 282),
              size=wx.Size(288, 22), style=0, value='textCtrl6')
        self.staticBox4 = wx.StaticBox(id=wxID_MAINWINDOWSTATICBOX4,
              label=u'TOTAL TEST', name='staticBox4', parent=self,
              pos=wx.Point(8, 336), size=wx.Size(616, 64), style=0)
        self.staticBox5 = wx.StaticBox(id=wxID_MAINWINDOWSTATICBOX5,
              label=u'SETT TEST', name='staticBox5', parent=self,
              pos=wx.Point(8, 408), size=wx.Size(616, 88), style=0)
        self.button2 = wx.Button(id=wxID_MAINWINDOWBUTTON2, label=u'WEB TEST',
              name='button2', parent=self, pos=wx.Point(536, 288),
              size=wx.Size(75, 24), style=0)
        self.button3 = wx.Button(id=wxID_MAINWINDOWBUTTON3, label=u'TEST',
              name='button3', parent=self, pos=wx.Point(536, 360),
              size=wx.Size(75, 24), style=0)
        # Label text is Korean ("waiting for lab reply").
        self.staticText7 = wx.StaticText(id=wxID_MAINWINDOWSTATICTEXT7,
              label=u'\uc5f0\uad6c\uc18c \ud68c\uc2e0 \ub300\uae30\uc911',
              name='staticText7', parent=self, pos=wx.Point(248, 448),
              size=wx.Size(104, 14), style=0)
        self.LogView = wx.TextCtrl(id=wxID_MAINWINDOWLOGVIEW, name=u'LogView',
              parent=self, pos=wx.Point(640, 8), size=wx.Size(640, 536),
              style=wx.TE_MULTILINE | wx.HSCROLL | wx.VSCROLL | wx.TE_READONLY,
              value=u'')
        self.LogView.SetToolTipString(u'LogView')
        self.staticText8 = wx.StaticText(id=wxID_MAINWINDOWSTATICTEXT8,
              label=u'Input MIN', name='staticText8', parent=self,
              pos=wx.Point(399, 287), size=wx.Size(55, 14), style=0)
        self.textCtrl7 = wx.TextCtrl(id=wxID_MAINWINDOWTEXTCTRL7,
              name='textCtrl7', parent=self, pos=wx.Point(464, 288),
              size=wx.Size(60, 22), style=0, value='textCtrl7')
        self.textCtrl7.SetMaxLength(100000)
        self.staticText9 = wx.StaticText(id=wxID_MAINWINDOWSTATICTEXT9,
              label=u'Loop counter', name='staticText9', parent=self,
              pos=wx.Point(370, 184), size=wx.Size(74, 14), style=0)
        self.textCtrl8 = wx.TextCtrl(id=wxID_MAINWINDOWTEXTCTRL8,
              name='textCtrl8', parent=self, pos=wx.Point(424, 360),
              size=wx.Size(100, 22), style=0, value='textCtrl8')
        self.staticText10 = wx.StaticText(id=wxID_MAINWINDOWSTATICTEXT10,
              label='staticText10', name='staticText10', parent=self,
              pos=wx.Point(342, 365), size=wx.Size(69, 16), style=0)
        self.staticText11 = wx.StaticText(id=wxID_MAINWINDOWSTATICTEXT11,
              label=u'Phone Number : ', name='staticText11', parent=self,
              pos=wx.Point(33, 67), size=wx.Size(94, 14), style=0)
        self.PhoneNumber = wx.TextCtrl(id=wxID_MAINWINDOWPHONENUMBER,
              name=u'PhoneNumber', parent=self, pos=wx.Point(128, 61),
              size=wx.Size(240, 24), style=0, value=u'01038250940')

    def __init__(self, parent):
        # Build the widgets, then open the append-mode log file whose name is
        # the current timestamp prefix.
        self._init_ctrls(parent)
        self.srtprelog = self.SetPreLog()
        try:
            self.oFile = open(self.srtprelog+".log", "a")
        except IOError:
            # NOTE(review): returning a value from __init__ is ignored by
            # Python; on failure self.oFile stays unset and WriteLog/__del__
            # will raise AttributeError later.
            wx.MessageBox("cannot file open")
            return False

    def OnConDisLeftUp(self, event):
        event.Skip()
        pass

    def OnMainWindowClose(self, event):
        event.Skip()
        pass

    def OnToolCloseButton(self, event):
        self.Destroy()
        event.Skip()

    def OnConDisButton(self, event):
        """Run the connect/disconnect loop test: toggle the device's data
        connection `conCounter` times, aborting on any non-toggleable
        server state. Returns True on full completion, False on abort."""
        self.WriteLog("Connect Disconnect : Start ")
        n = 1
        counter = self.conCounter.GetValue()
        while n <= int(counter):
            self.srvstate = self.GetSRVConnState()
            if self.srvstate == const.SRV_STATE_IDLE:
                # Idle -> request connect.
                self.vbool = self.SrvStateChange(self.srvstate)
                if not self.vbool :
                    self.WriteLog("server state change : fail")
                    return False
            elif self.srvstate == const.SRV_STATE_CONNECTED or self.srvstate == const.SRV_STATE_DORMANT :
                # Connected/dormant -> request disconnect.
                self.vbool = self.SrvStateChange(self.srvstate)
                if not self.vbool :
                    self.WriteLog("server state change : fail")
                    return False
            elif self.srvstate == const.SRV_STATE_INACTIVE:
                self.WriteLog("Server state : %s device Check" %self.srvstate)
                return False
            elif self.srvstate == const.SRV_STATE_SEARCHING:
                self.WriteLog("Server state : %s device Check" %self.srvstate)
                return False
            elif self.srvstate == const.SRV_STATE_UNAVAILABLE:
                self.WriteLog("Server state : %s device Check" %self.srvstate)
                return False
            elif self.srvstate == const.SRV_STATE_UPDATING:
                self.WriteLog("Server state : %s device Check" %self.srvstate)
                return False
            self.WriteLog("Connedt Disconnect :::: " + str(n))
            n = n +1
        self.WriteLog("Connect Disconnect : End")
        return True

    def WriteLog(self,logString):
        """Prefix *logString* with a timestamp, append it to the LogView
        widget and the open log file. Returns False on file write error."""
        self.prelog = self.SetPreLog()
        self.setlogstring = self.prelog + logString + "\n"
        self.LogView.AppendText(self.setlogstring)
        try:
            self.oFile.write(self.setlogstring)
        except IOError:
            return False
        return True

    def SetPreLog(self):
        """Return a timestamp prefix built from the current local time.
        NOTE(review): fields are not zero-padded, so the prefix is not
        fixed-width or lexicographically sortable."""
        self.tempTime = time.localtime(time.time())
        self.prelog = "%d%d%d%d%d%d " %(self.tempTime.tm_year,self.tempTime.tm_mon,self.tempTime.tm_mday,self.tempTime.tm_hour,self.tempTime.tm_min,self.tempTime.tm_sec)
        return self.prelog

    def __del__(self):
        # NOTE(review): self.oFile / self.conHTTP may never have been set
        # (failed __init__, conHTTP only assigned elsewhere), so this can
        # raise AttributeError during interpreter teardown.
        self.oFile.close()
        self.conHTTP.close()
        pass

    def GetSRVConnState(self, testtype=const.TEST_TYPE_CONDIS):
        """Fetch the device test server's connection-data XML page, log all
        reported fields, and return the connection state value text."""
        self.WriteLog("GetSRVConnState ----- Start ")
        ### Earlier httplib-based implementation, kept for reference:
        # try:
        #     self.conHTTP = httplib.HTTPConnection(const.SERVER_URL)
        #     self.conHTTP.request("GET", testtype)
        #     self.consource = self.conHTTP.getresponse()
        # except httplib.socket.timeout:
        #     self.WriteLog("GetSRVConnState server timeout error : Server Check")
        #     return False
        # except httplib.CannotSendRequest:
        #     self.WriteLog("GetSRVconnStae Request Error : cannot connect server")
        #     return Fasle
        ###
        consource = urllib.urlopen(const.SERVER_URL+testtype)
        readsource = consource.read()
        domtree = ElementTree(XML(readsource))
        domtree.getroot()
        srvstate = domtree.findtext("p-answer/condata/state/value")
        srvstatedes = domtree.findtext("p-answer/condata/state/description")
        netsrvingName = domtree.findtext("p-answer/condata/network/serving/name")
        netsrvingType = domtree.findtext("p-answer/condata/network/serving/type")
        netsrvingID = domtree.findtext("p-answer/condata/network/serving/id")
        netsrvingServer = domtree.findtext("p-answer/condata/network/serving/server")
        netsrvingServertype = domtree.findtext("p-answer/condata/network/serving/servertype")
        netsrvingEncruption = domtree.findtext("p-answer/condata/network/serving/encryption")
        netroamType = domtree.findtext("p-answer/condata/network/serving/roam/type")
        netroamIndicator = domtree.findtext("p-answer/condata/network/serving/roam/indicator")
        netroamIndicatordescription = domtree.findtext("p-answer/condata/network/serving/roam/indicatordescription")
        netroamRoaminglistversion = domtree.findtext("p-answer/condata/network/serving/roam/roaminglistversion")
        netHomeType = domtree.findtext("p-answer/condata/network/home/type")
        netHomeID = domtree.findtext("p-answer/condata/network/home/id")
        Actionsss = domtree.findtext("p-answer/condata/actions")
        connectionAddrIPV4IP = domtree.findtext("p-answer/condata/connection/address/ipv4/ip")
        connectionAddrIPV4SUB = domtree.findtext("p-answer/condata/connection/address/ipv4/subnet")
        connectionAddrIPV4GWY = domtree.findtext("p-answer/condata/connection/address/ipv4/gateway")
        connectionAddrIPV4DNS = domtree.findtext("p-answer/condata/connection/address/ipv4/dns")
        connectionAddrIPV6IP = domtree.findtext("p-answer/condata/connection/address/ipv6/ip")
        connectionAddrIPV6SUB = domtree.findtext("p-answer/condata/connection/address/ipv6/subnet")
        connectionAddrIPV6GWY = domtree.findtext("p-answer/condata/connection/address/ipv6/gateway")
        connectionAddrIPV6DNS = domtree.findtext("p-answer/condata/connection/address/ipv6/dns")
        # NOTE(review): the string concatenations below raise TypeError when a
        # findtext above returned None (element missing); only some values are
        # wrapped in str().
        self.WriteLog(const.STR_SHAP)
        self.WriteLog("Server State : " + srvstate)
        self.WriteLog("Description : "+ srvstatedes)
        self.WriteLog("+++++++++++++++++++++++++++++++++++++++++")
        self.WriteLog("Network Serving Nmae : " + netsrvingName)
        self.WriteLog("Network Serving Type : " + netsrvingType)
        self.WriteLog("Network Serving ID : " + netsrvingID)
        self.WriteLog("Network Serving Server : " + netsrvingServer)
        self.WriteLog("Netwrok Serving Server Type : " + netsrvingServertype)
        self.WriteLog("Network Serving Encryption : " + netsrvingEncruption)
        self.WriteLog("+++++++++++++++++++++++++++++++++++++++++")
        self.WriteLog("Network Serving Roam Type : " + str(netroamType))
        self.WriteLog("Network Serving Roam Indicator : " + str(netroamIndicator))
        self.WriteLog("Network Serving Roam Indicator Description : " + str(netroamIndicatordescription))
        self.WriteLog("Network Serving Roam Indicator Description : " + str(netroamRoaminglistversion))
        self.WriteLog("Network Serving Home Type : " + str(netHomeType))
        self.WriteLog("Network Serving Home ID : " + str(netHomeID))
        self.WriteLog("+++++++++++++++++++++++++++++++++++++++++")
        self.WriteLog("Network Actions : " + Actionsss)
        self.WriteLog("+++++++++++++++++++++++++++++++++++++++++")
        self.WriteLog("Network Connection IP4 IP : " + str(connectionAddrIPV4IP))
        self.WriteLog("Network Connection IP4 Subnet : " + str(connectionAddrIPV4SUB))
        self.WriteLog("Network connection IP4 Gateway : " + str(connectionAddrIPV4GWY))
        self.WriteLog("Network Connection IP4 DNS : " + str(connectionAddrIPV4DNS))
        self.WriteLog("+++++++++++++++++++++++++++++++++++++++++")
        self.WriteLog("Network Connection IP6 IP : " + str(connectionAddrIPV6IP))
        self.WriteLog("Network Connection IP6 Subnet : " + str(connectionAddrIPV6SUB))
        self.WriteLog("Network connection IP6 Gateway : " + str(connectionAddrIPV6GWY))
        self.WriteLog("Network Connection IP6 DNS : " + str(connectionAddrIPV6DNS))
        self.WriteLog(const.STR_SHAP)
        consource.close()
        self.WriteLog("GetSRVConnState ----- END ")
        return srvstate

    def GetSrvID(self,testtype=const.TEST_TYPE_CONDIS):
        """Fetch the current server session id (p-answer/id) from the test
        server and return its text."""
        self.WriteLog("GetSrvID ----- Start ")
        self.consource = urllib.urlopen(const.SERVER_URL+testtype)
        self.readsource = self.consource.read()
        self.WriteLog(self.readsource)
        self.domtree = ElementTree(XML(self.readsource))
        self.domtree.getroot()
        self.emel = self.domtree.findtext("p-answer/id")
        self.WriteLog(const.STR_SHAP)
        self.WriteLog("Server ID : " + self.emel)
        self.WriteLog(const.STR_SHAP)
        self.consource.close()
        self.WriteLog("GetSrvID ----- END ")
        return self.emel

    def ActSrvURL(self,actiontype,testtype=const.TEST_TYPE_CONDIS, phnumber="",smsbody=""):
        """Build (and log) the action URL for a connect/disconnect or SMS
        request; returns the URL string."""
        self.WriteLog("ActSrvURL ----- Start ")
        srvid = self.GetSrvID()
        wx.MessageBox(testtype)
        if testtype == const.TEST_TYPE_CONDIS:
            srvAction = const.SERVER_URL + const.TEST_TYPE_CONDIS + "?id=%s&action=%s" %(srvid,actiontype)
        elif testtype == const.TEST_TYPE_MSG:
            #end url : 192.168.7.2:4330/messaging?action=send& amp;serviceid=0&to=&body=message body
            # NOTE(review): this does not match the documented URL above —
            # "servvice" should be "serviceid" and "to" is missing its "=",
            # so the phone number is fused into the parameter name.
            srvAction = const.SERVER_URL + const.TEST_TYPE_MSG + "?action="+ actiontype +"&servvice=0&to"+ phnumber + "&body=" + smsbody
        self.WriteLog(const.STR_SHAP)
        self.WriteLog("Action URL : " + srvAction)
        self.WriteLog(const.STR_SHAP)
        self.WriteLog("ActSrvURL ----- END ")
        return srvAction

    def SrvStateChange(self,srvState):
        """Request the opposite connection state (idle->connect,
        connected->disconnect) and poll until the server settles."""
        self.WriteLog("SrvStateChange ----- Start ")
        if const.SRV_STATE_IDLE == srvState:
            ActURL = self.ActSrvURL(const.CONDATA_ACT_CONNECT)
        elif const.SRV_STATE_CONNECTED == srvState:
            ActURL = self.ActSrvURL(const.CONDATA_ACT_DISCONNECT)
        self.WriteLog("SRv STATE change server url :: " + ActURL)
        consource = urllib.urlopen(ActURL)
        srvstate = self.GetSRVConnState()
        time.sleep(1)
        while True:
            if (const.SRV_STATE_IDLE == srvstate) or (const.SRV_STATE_CONNECTED == srvstate):
                self.WriteLog("SrvStateChange ----- END ")
                # NOTE(review): closes self.consource (the stale handle left
                # over from GetSrvID) instead of the local `consource` opened
                # above, which is leaked.
                self.consource.close()
                return True
            else:
                # NOTE(review): the poll updates self.srvstate, but the loop
                # condition reads the local `srvstate`, which never changes —
                # this branch loops forever once entered.
                self.WriteLog("Server State Checking ::: %s" %srvstate)
                time.sleep(1)
                self.srvstate = self.GetSRVConnState()

    def SrvCHK(self, server=const.SERVER_URL,page=const.TEST_TYPE_CONDIS):
        """Probe the test server by reconnecting self.conHTTP; returns True
        when the connection succeeds, False on HTTP/socket failure."""
        self.WriteLog("SrvCHK ----- START ")
        while True:
            try:
                self.conHTTP.connect()
            # NOTE(review): Python 2 parses this clause as
            # "except httplib.HTTPException as socket.error" — the caught
            # exception is ASSIGNED to the socket.error attribute and
            # socket.error itself is never caught. It should read
            # "except (httplib.HTTPException, socket.error):".
            except httplib.HTTPException, socket.error :
                self.WriteLog("%s Error Check the device PLZ" %server)
                self.WriteLog("SrvCHK ----- Fail ")
                self.conHTTP.close()
                return False
            self.WriteLog("SrvCHK ----- END:: True ")
            self.conHTTP.close()
            return True
class City():
    """Weather data for one city, loaded from a "siteData" XML feed.

    The document is fetched and parsed once in __init__; all accessors
    query the cached ElementTree via XPath-style paths.
    """

    # note that we are are grabbing a different
    # data URL then in city list
    def __init__(self, dataurl):
        # Fetch and parse the remote XML once; exits the process if the
        # URL cannot be opened (this is CLI-tool behavior).
        self.tree = ElementTree()
        try:
            urlhandle = urllib.request.urlopen(dataurl)
        except IOError:
            print(("[Error] Unable to open the data url: " + dataurl))
            sys.exit(1)
        self.tree.parse(urlhandle)

    def get_quantity(self, path):
        """Get the quatity contained at the XML XPath"""
        return self.tree.findtext(path)

    def get_attribute(self, path, attribute):
        """Get the attribute of the element at XPath path"""
        element = self.tree.find(path)
        if element is not None and attribute in element.attrib:
            return element.attrib[attribute]
        return None

    def get_available_quantities(self):
        """Get a list of all the available quatities in the form
        of their XML XPaths
        """
        pathlist = []
        # we are getting the full XPath with the attribute strings
        # this output is pretty long so maybe it would be wise
        # to also have an option to get the XPath without the attributes
        # self._get_all_xpaths(pathlist,"",self.tree.getroot())
        self._get_all_xpaths_with_attributes(pathlist, "", self.tree.getroot())
        return pathlist

    # This nasty little function recursively traverses
    # an element tree to get all the available XPaths
    # you have to pass in the pathlist you want to contain
    # the list
    def _get_all_xpaths(self, pathlist, path, element):
        # FIX: Element.getchildren() was deprecated and removed in
        # Python 3.9; list(element) is the supported equivalent.
        children = list(element)
        if not children:
            pathlist.append(path + "/" + element.tag)
        else:
            for child in children:
                self._get_all_xpaths(pathlist, path + "/" + element.tag, child)

    def _make_attribute_list(self, attrib):
        # Render an attrib dict as XPath predicates: [@k='v'][@k2='v2']...
        xpathattrib = ""
        for attribute, value in list(attrib.items()):
            xpathattrib = xpathattrib + "[@" + attribute + "='" + value + "']"
        return xpathattrib

    # This nasty little function recursively traverses
    # an element tree to get all the available XPaths
    # you have to pass in the pathlist you want to contain
    # the list
    def _get_all_xpaths_with_attributes(self, pathlist, path, element):
        # FIX: getchildren() removed in Python 3.9 — use list(element).
        children = list(element)
        if not children:
            xpathattrib = self._make_attribute_list(element.attrib)
            if path == "":
                pathlist.append(element.tag + xpathattrib)
            else:
                pathlist.append(path + "/" + element.tag + xpathattrib)
        else:
            xpathattrib = self._make_attribute_list(element.attrib)
            for child in children:
                # skip the root tag
                if element.tag == "siteData":
                    self._get_all_xpaths_with_attributes(pathlist, path, child)
                else:
                    # we avoid the opening / since we start below the root
                    if path == "":
                        self._get_all_xpaths_with_attributes(
                            pathlist, element.tag + xpathattrib, child)
                    else:
                        self._get_all_xpaths_with_attributes(
                            pathlist, path + "/" + element.tag + xpathattrib, child)

    # This function will break is thre is any change in the city weather
    # XML format
    def get_available_forecast_names(self):
        """Return the textForecastName of every forecast period."""
        forecasts = self.tree.findall('forecastGroup/forecast/period')
        forecastnames = []
        for forecast in forecasts:
            forecastnames.append(forecast.get("textForecastName"))
        return forecastnames

    # This function will break is thre is any change in the city weather
    # XML format
    def get_available_forecast_periods(self):
        """Return the text content of every forecast period element."""
        forecasts = self.tree.findall('forecastGroup/forecast/period')
        forecastnames = []
        for forecast in forecasts:
            forecastnames.append(forecast.text)
        return forecastnames
# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'genmsg'
copyright = u'2011, Willow Garage'

# Derive |version| and |release| from the catkin package manifest so the
# built documentation always agrees with the packaged version number.
_manifest_path = os.path.join('..', 'package.xml')
try:
    root = ElementTree(None, _manifest_path)
    version = root.findtext('version')
except Exception as e:
    raise RuntimeError('Could not extract version from package.xml:\n%s' % e)

# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
class City():
    """Weather data for one city, loaded from a "siteData" XML feed.

    The document is fetched and parsed once in __init__; all accessors
    query the cached ElementTree via XPath-style paths.
    """

    # note that we are are grabbing a different
    # data URL then in city list
    def __init__(self, dataurl):
        self.tree = ElementTree()
        try:
            urlhandle = urllib.urlopen(dataurl)
        except IOError:
            # Single-argument print() form behaves identically on
            # Python 2 and 3.
            print("[Error] Unable to open the data url: " + dataurl)
            sys.exit(1)
        self.tree.parse(urlhandle)

    def get_quantity(self, path):
        """Get the quatity contained at the XML XPath"""
        return self.tree.findtext(path)

    # Name keeps its historical typo ("atrribute") for caller compatibility.
    def get_atrribute(self, path, attribute):
        """Get the atribute of the element at XPath path.

        FIX: the previous implementation tested ``attribute in element``
        (child membership, not attributes) and indexed ``element['attribute']``
        (a TypeError on Element), and crashed when find() returned None.
        Attributes live in element.attrib.
        """
        element = self.tree.find(path)
        if element is not None and attribute in element.attrib:
            return element.attrib[attribute]
        return None

    def get_available_quantities(self):
        """Get a list of all the available quatities in the form
        of their XML XPaths
        """
        pathlist = []
        # we are getting the full XPath with the attribute strings
        # this output is pretty long so maybe it would be wise
        # to also have an option to get the XPath without the attributes
        # self._get_all_xpaths(pathlist,"",self.tree.getroot())
        self._get_all_xpaths_with_attributes(pathlist, "", self.tree.getroot())
        return pathlist

    # This nasty little function recursively traverses
    # an element tree to get all the available XPaths
    # you have to pass in the pathlist you want to contain
    # the list
    def _get_all_xpaths(self, pathlist, path, element):
        # FIX: list(element) instead of getchildren(), which was removed
        # in Python 3.9 (identical behavior on Python 2.7).
        children = list(element)
        if not children:
            pathlist.append(path + "/" + element.tag)
        else:
            for child in children:
                self._get_all_xpaths(pathlist, path + "/" + element.tag, child)

    def _make_attribute_list(self, attrib):
        # Render an attrib dict as XPath predicates: [@k='v'][@k2='v2']...
        # items() works on both Python 2 and 3 (iteritems() does not).
        xpathattrib = ""
        for attribute, value in attrib.items():
            xpathattrib = xpathattrib + "[@" + attribute + "='" + value + "']"
        return xpathattrib

    # This nasty little function recursively traverses
    # an element tree to get all the available XPaths
    # you have to pass in the pathlist you want to contain
    # the list
    def _get_all_xpaths_with_attributes(self, pathlist, path, element):
        children = list(element)  # getchildren() removed in Python 3.9
        if not children:
            xpathattrib = self._make_attribute_list(element.attrib)
            if path == "":
                pathlist.append(element.tag + xpathattrib)
            else:
                pathlist.append(path + "/" + element.tag + xpathattrib)
        else:
            xpathattrib = self._make_attribute_list(element.attrib)
            for child in children:
                # skip the root tag
                if element.tag == "siteData":
                    self._get_all_xpaths_with_attributes(pathlist, path, child)
                else:
                    # we avoid the opening / since we start below the root
                    if path == "":
                        self._get_all_xpaths_with_attributes(
                            pathlist, element.tag + xpathattrib, child)
                    else:
                        self._get_all_xpaths_with_attributes(
                            pathlist, path + "/" + element.tag + xpathattrib, child)

    # This function will break is thre is any change in the city weather
    # XML format
    def get_available_forecast_names(self):
        """Return the textForecastName of every forecast period."""
        forecasts = self.tree.findall('forecastGroup/forecast/period')
        forecastnames = []
        for forecast in forecasts:
            forecastnames.append(forecast.get("textForecastName"))
        return forecastnames

    # This function will break is thre is any change in the city weather
    # XML format
    def get_available_forecast_periods(self):
        """Return the text content of every forecast period element."""
        forecasts = self.tree.findall('forecastGroup/forecast/period')
        forecastnames = []
        for forecast in forecasts:
            forecastnames.append(forecast.text)
        return forecastnames
def search(query, params, off, num_results_wanted):
    """Run a keyword search against the ArtStor SRU endpoint.

    :param query: free-text sidebar query (mutually exclusive with params)
    :param params: pre-parsed parameter string (mutually exclusive with query)
    :param off: result offset (stringable int)
    :param num_results_wanted: page size to request
    :return: (Result, parameters-dict) tuple; an empty Result on any failure
    :raises NotImplementedError: if both query and params are supplied
    """
    off = int(off)
    # query and params both come from sidebar, so should have exactly one.
    if not query and not params:
        return Result(0, off), get_empty_parameters()
    elif query and params:
        raise NotImplementedError
    elif query:
        query_terms = Query_Language(identifier).searcher_translator(query)
    else:
        query_terms = parse_parameters(params)
    for key in query_terms:
        query_terms[key] = list_to_str(query_terms[key])
    # Disable modifiers and adv search for now:
    # ArtStor cannot process advanced search / modifiers, so drop every
    # field-specific term — only the plain keyword (the "" key) survives.
    # TODO: work out if artstor can process adv search and modifiers and
    # add the Adv sidebar if possible.
    del_list = []
    for key in query_terms:
        if not key == "":
            del_list.append(key)
    for key in del_list:
        if key in query_terms:
            del query_terms[key]
    if "query_string" in query_terms:
        del query_terms["query_string"]
    # return empty result if no search terms (submitting an empty query breaks artstor)
    # FIX: was "len(query_terms) is 0" — identity comparison against an int
    # literal relies on CPython small-int caching; use equality.
    if len(query_terms) == 0:
        return Result(0, 0), get_empty_parameters()
    # Caching of results could be reinstated here if other searchers grow
    # support for it (see HitCount.current_objects in the project history).
    pagesize = num_results_wanted
    url = _get_url(query_terms, pagesize, off)
    try:
        html_page = _get_html_page(url)
        if not html_page:
            print("ArtStor did not get any data from server, make sure MDID can reach the server through the firewall")
            return Result(0, off), _build_returnable_parameters(query_terms)
        results = ElementTree(file=html_page)
        num_results = int(results.findtext('{http://www.loc.gov/zing/srw/}numberOfRecords')) or 0
    # FIX: was a bare "except:" which also swallowed KeyboardInterrupt /
    # SystemExit; Exception still covers the XML parsing errors meant here.
    except Exception:
        # XML parsing error
        print("ArtStor XML parsing error")
        num_results = 0
    if not num_results:
        # other type of error or no results found
        return Result(0, off), _build_returnable_parameters(query_terms)
    #pages = int(math.ceil(float(total) / pagesize))
    result = Result(num_results, off+50)
    image_divs = results.findall('.//{info:srw/schema/1/dc-v1.1}dc')
    for div in image_divs:
        (url, thumb, image_identifier, title) = _get_image(div)
        result.addImage(ResultImage(url, thumb, title, image_identifier))
    # TODO cope with full image not actually giving result (timeout error)
    return result, _build_returnable_parameters(query_terms)
outputFileName = sys.argv[9] #example - T1tttt.json fjrFileNames = glob.glob(fjrdir + '/*fjr*xml') #location of your fjr files. May need to change the 'fjr*xml' to match whatever pattern you fjr follow. array = [] id = 1 array = [] for fjrFileName in fjrFileNames: print fjrFileName tree = ElementTree() tree.parse(fjrFileName) lfn = tree.findtext("LFN").strip() #cksum = tree.findtext("Checksums").strip().rstrip("}").lstrip("{") #if not len(cksum) == 0: # cksum = cksum.split(":")[1].strip("'") #else: # cksum = "NotSet" size = tree.findtext("Size").strip() events = tree.findtext("TotalEvents").strip() runs = tree.findall("Runs/Run") rundict = {} for run in runs: runId = run.get("ID") lumis = run.findall("LumiSection") lumilist = [] for lumi in lumis: lumilist.append(lumi.get("ID"))
def retrieveShowMetadata(self, folder):
    """
    Used only when mass adding Existing Shows, using previously generated
    Show metadata to reduce the need to query TVDB.

    :param folder: show directory expected to contain the metadata file
        named by self._show_metadata_filename (e.g. tvshow.nfo)
    :return: (indexer_id, name, indexer) on success, (None, None, None)
        whenever the file is missing, unparseable, or lacks a usable id
    """
    empty_return = (None, None, None)

    metadata_path = os.path.join(folder, self._show_metadata_filename)

    if not os.path.isdir(folder) or not os.path.isfile(metadata_path):
        sickrage.srLogger.debug("Can't load the metadata file from " + metadata_path + ", it doesn't exist")
        return empty_return

    # Best-effort log line; never let logging failures abort the scan.
    try:
        sickrage.srLogger.debug("Loading show info from metadata file in {}".format(folder))
    except:
        pass

    try:
        with io.open(metadata_path, 'rb') as xmlFileObj:
            showXML = ElementTree(file=xmlFileObj)

            # A usable NFO needs a title and at least one id element.
            if showXML.findtext('title') is None or (
                    showXML.findtext('tvdbid') is None and showXML.findtext('id') is None):
                sickrage.srLogger.info("Invalid info in tvshow.nfo (missing name or id): {0:s} {1:s} {2:s}".format(
                    showXML.findtext('title'), showXML.findtext('tvdbid'), showXML.findtext('id')))
                return empty_return

            name = showXML.findtext('title')

            # Prefer the explicit <tvdbid>, fall back to the generic <id>.
            if showXML.findtext('tvdbid') is not None:
                indexer_id = int(showXML.findtext('tvdbid'))
            elif showXML.findtext('id') is not None:
                indexer_id = int(showXML.findtext('id'))
            else:
                sickrage.srLogger.warning("Empty <id> or <tvdbid> field in NFO, unable to find a ID")
                return empty_return

            if indexer_id is None:
                sickrage.srLogger.warning("Invalid Indexer ID (" + str(indexer_id) + "), not using metadata file")
                return empty_return

            indexer = None
            # Infer the indexer from the episode-guide URL, and refuse
            # stale TVRage metadata outright.
            if showXML.find('episodeguide/url') is not None:
                epg_url = showXML.findtext('episodeguide/url').lower()
                if str(indexer_id) in epg_url:
                    if 'thetvdb.com' in epg_url:
                        indexer = 1
                    elif 'tvrage' in epg_url:
                        sickrage.srLogger.debug("Invalid Indexer ID (" + str(
                            indexer_id) + "), not using metadata file because it has TVRage info")
                        return empty_return

    except Exception as e:
        # FIX: was "e.message", which does not exist on Python 3 exceptions
        # (raises AttributeError inside this handler); format the exception
        # itself, matching the sibling implementation.
        sickrage.srLogger.warning(
            "There was an error parsing your existing metadata file: '" + metadata_path + "' error: {}".format(e))
        return empty_return

    return indexer_id, name, indexer
def get_module_name(pom_file, namespace=None):
    """Return the <artifactId> text of a Maven POM file.

    :param pom_file: path (or file object) of the pom.xml to parse
    :param namespace: POM XML namespace URI; defaults to the module-level
        ``maven_pom_xml_namespace`` (kept as the default for backward
        compatibility, parameterized so callers/tests can decouple from
        the module global)
    :return: artifactId text, or None if the element is absent
    """
    if namespace is None:
        namespace = maven_pom_xml_namespace
    tree = ElementTree()
    tree.parse(pom_file)
    return tree.findtext("./{%s}artifactId" % namespace)
def retrieveShowMetadata(self, folder):
    """
    Used only when mass adding Existing Shows, using previously generated
    Show metadata to reduce the need to query TVDB.

    :param folder: show directory expected to contain the metadata file
        named by self._show_metadata_filename (e.g. tvshow.nfo)
    :return: (indexer_id, name, indexer) on success, (None, None, None)
        whenever the file is missing, unparseable, or lacks a usable id
    """
    empty_return = (None, None, None)

    metadata_path = os.path.join(folder, self._show_metadata_filename)

    if not os.path.isdir(folder) or not os.path.isfile(metadata_path):
        sickrage.LOGGER.debug("Can't load the metadata file from " + metadata_path + ", it doesn't exist")
        return empty_return

    # Best-effort log line; never let logging failures abort the scan.
    try:
        sickrage.LOGGER.debug(
                "Loading show info from metadata file in {}".format(folder))
    except:
        pass

    try:
        with io.open(metadata_path, 'rb') as xmlFileObj:
            showXML = ElementTree(file=xmlFileObj)

            # A usable NFO needs a title and at least one id element.
            if showXML.findtext('title') is None or (
                    showXML.findtext('tvdbid') is None and showXML.findtext('id') is None):
                sickrage.LOGGER.info(
                        "Invalid info in tvshow.nfo (missing name or id): {0:s} {1:s} {2:s}"
                                .format(showXML.findtext('title'), showXML.findtext('tvdbid'),
                                        showXML.findtext('id')))
                return empty_return

            name = showXML.findtext('title')

            # Prefer the explicit <tvdbid>, fall back to the generic <id>.
            if showXML.findtext('tvdbid') is not None:
                indexer_id = int(showXML.findtext('tvdbid'))
            elif showXML.findtext('id') is not None:
                indexer_id = int(showXML.findtext('id'))
            else:
                sickrage.LOGGER.warning(
                        "Empty <id> or <tvdbid> field in NFO, unable to find a ID")
                return empty_return

            if indexer_id is None:
                sickrage.LOGGER.warning("Invalid Indexer ID (" + str(indexer_id) + "), not using metadata file")
                return empty_return

            indexer = None
            # Infer the indexer from the episode-guide URL; refuse stale
            # TVRage metadata outright.
            if showXML.find('episodeguide/url') is not None:
                epg_url = showXML.findtext('episodeguide/url').lower()
                if str(indexer_id) in epg_url:
                    if 'thetvdb.com' in epg_url:
                        indexer = 1
                    elif 'tvrage' in epg_url:
                        sickrage.LOGGER.debug(
                                "Invalid Indexer ID (" + str(indexer_id) +
                                "), not using metadata file because it has TVRage info"
                        )
                        return empty_return

    except Exception as e:
        sickrage.LOGGER.warning(
                "There was an error parsing your existing metadata file: '" + metadata_path + "' error: {}".format(e))
        return empty_return

    return indexer_id, name, indexer
def get_module_name(pom_file):
    """Read *pom_file* (a Maven pom.xml) and return its <artifactId> text."""
    # ElementTree(file=...) parses in the constructor — equivalent to
    # creating an empty tree and calling parse().
    pom_tree = ElementTree(file=pom_file)
    artifact_path = "./{%s}artifactId" % maven_pom_xml_namespace
    return pom_tree.findtext(artifact_path)
# The encoding of source files.
# source_encoding = 'utf-8-sig'

# Document that serves as the toctree root.
master_doc = "index"

# Project identity shown throughout the generated docs.
project = u"genmsg"
copyright = u"2011, Willow Garage"

# |version| / |release| are read out of the catkin package manifest so the
# built documentation always matches the packaged release number.
try:
    root = ElementTree(None, os.path.join("..", "package.xml"))
    version = root.findtext("version")
except Exception as e:
    raise RuntimeError("Could not extract version from package.xml:\n%s" % e)

# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'