Ejemplo n.º 1
0
    def check_theme(self, view):
        """Make sure the active color scheme defines the dartlint scopes.

        Loads the linter colors/icons from the plugin settings, publishes
        them in the module-level GUTTER_Icon / ULINE_Color tables, and
        checks the current .tmTheme for the required dartlint scopes.
        If any are missing, an amended copy of the theme is written to
        'Packages/User/<theme> DL.tmTheme' and set as the active scheme.
        """
        # Get some settings
        self.settings = sublime.load_settings(
            'Dart - Plugin Settings.sublime-settings')
        error_color = self.settings.get('dart_linter_underline_color_error')
        warn_color = self.settings.get('dart_linter_underline_color_warning')
        info_color = self.settings.get('dart_linter_underline_color_info')
        error_icon = self.settings.get('dart_linter_gutter_icon_error')
        warn_icon = self.settings.get('dart_linter_gutter_icon_warning')
        info_icon = self.settings.get('dart_linter_gutter_icon_info')
        # Set the icons and colors in the file scope
        GUTTER_Icon.update({
            'dartlint_ERROR': error_icon,
            'dartlint_WARNING': warn_icon,
            'dartlint_INFO': info_icon
        })
        ULINE_Color.update({
            'dartlint.mark.error': error_color,
            'dartlint.mark.warning': warn_color,
            'dartlint.mark.info': info_color,
            'dartlint.mark.gutter': 'not used'
        })

        # Get the current theme
        system_prefs = sublime.load_settings('Preferences.sublime-settings')
        theme = system_prefs.get('color_scheme')
        theme_xml = sublime.load_resource(theme)
        append_scopes = []

        # Collect the required scopes that are missing from the theme.
        # BUG FIX: this used `is -1`, an identity test against an int
        # literal that only worked through CPython's small-int caching;
        # `== -1` is the correct check for str.find()'s failure value.
        for scope in SCOPES_Dartlint:
            if theme_xml.find(scope) == -1:
                append_scopes.append(scope)
                print('%s not found in theme' % scope)

        if not append_scopes:
            # Every scope is already defined - nothing to do.
            return

        # Add the missing style elements to the theme plist.
        plist = ElementTree.XML(theme_xml)
        styles = plist.find('./dict/array')
        for s2append in append_scopes:
            styles.append(
                ElementTree.fromstring(
                    SCOPES_Dartlint[s2append]['style'].format(
                        ULINE_Color[s2append])))

        # Write back to 'Packages/User/<theme> DL.tmTheme'.
        original_name = os.path.splitext(os.path.basename(theme))[0]
        new_name = original_name + ' DL'
        theme_path = os.path.join(sublime.packages_path(), 'User',
                                  new_name + '.tmTheme')
        with open(theme_path, 'w', encoding='utf8') as f:
            f.write(THEME_Head.format('UTF-8'))
            f.write(ElementTree.tostring(plist, encoding='unicode'))

        # Point the user preferences at the amended scheme.
        path = os.path.join('User', os.path.basename(theme_path))
        prep_path = FormRelativePath(path)
        if prep_path is not False:
            system_prefs.set('color_scheme', prep_path)
            sublime.save_settings('Preferences.sublime-settings')
            print('Created: %s' % prep_path)
Ejemplo n.º 2
0
def start(target, users, passwds, ports, timeout_sec, thread_number, num, total, log_in_file, time_sleep, language,
          verbose_level, socks_proxy, retries, methods_args, scan_id,
          scan_cmd):  # Main function
    """Reverse-IP lookup through viewdns.info for a single target.

    Fetches https://viewdns.info/reverseip/ for *target* (optionally via a
    SOCKS proxy), scrapes the result table for co-hosted domains, logs each
    one through __log_into_file, and returns 1 on connection failure.
    """
    # BUG FIX: the original chained `!=` tests with `or`, which is a
    # tautology (always true), and the last clause compared the
    # target_type *function object* against 'SINGLE_IPv6'. The scan is
    # meant to run only for supported target types.
    if target_type(target) in ('SINGLE_IPv4', 'DOMAIN', 'HTTP', 'SINGLE_IPv6'):
        time.sleep(time_sleep)
        if socks_proxy is not None:
            # socks5://user:pass@host:port or socks4://host:port
            socks_version = socks.SOCKS5 if socks_proxy.startswith(
                'socks5://') else socks.SOCKS4
            socks_proxy = socks_proxy.rsplit('://')[1]
            if '@' in socks_proxy:
                socks_username = socks_proxy.rsplit(':')[0]
                socks_password = socks_proxy.rsplit(':')[1].rsplit('@')[0]
                socks.set_default_proxy(socks_version, str(socks_proxy.rsplit('@')[1].rsplit(':')[0]),
                                        int(socks_proxy.rsplit(':')[-1]), username=socks_username,
                                        password=socks_password)
                socket.socket = socks.socksocket
                socket.getaddrinfo = getaddrinfo
            else:
                socks.set_default_proxy(socks_version, str(
                    socks_proxy.rsplit(':')[0]), int(socks_proxy.rsplit(':')[1]))
                socket.socket = socks.socksocket
                socket.getaddrinfo = getaddrinfo
        # set user agent
        headers = {"User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0",
                   "Accept": "text/javascript, text/html, application/xml, text/xml, */*",
                   "Accept-Language": "en-US,en;q=0.5",
                   "Referer": "https://viewdns.info/"
                   }
        total_req = 1
        trying = 1
        info(messages(language, "trying_process").format(
            trying, total_req, num, total, target, 'viewdns ip lookup'))
        # Retry the HTTP request up to `retries` times.
        # BUG FIX: was `if n is retries` - identity test on ints.
        attempts = 0
        while True:
            try:
                res = requests.get('https://viewdns.info/reverseip/?host={0}&t=1'.format(target), timeout=timeout_sec,
                                   headers=headers, verify=True).text
                break
            except Exception:
                attempts += 1
                if attempts >= retries:
                    warn(messages(language, "http_connection_timeout").format(
                        "viewdns.info"))
                    return 1
        _values = []
        try:
            # Scrape the first bordered table out of the HTML response.
            s = '<table>' + \
                res.rsplit('''<table border="1">''')[
                    1].rsplit("<br></td></tr><tr></tr>")[0]
            table = ET.XML(s)
            rows = iter(table)
            # First row holds the column names (was reusing `headers`).
            columns = [col.text for col in next(rows)]
            for row in rows:
                cells = [col.text for col in row]
                _values.append(dict(zip(columns, cells))["Domain"])
        except Exception:
            # Best-effort scrape: a layout change just yields no results.
            pass
        if not _values:
            info(messages(language, "viewdns_domain_404"))
        else:
            info(messages(language, "len_domain_found").format(len(_values)))
            for domain in _values:
                if verbose_level > 3:
                    info(messages(language, "domain_found").format(domain))
                data = json.dumps({'HOST': target, 'USERNAME': '', 'PASSWORD': '', 'PORT': '',
                                   'TYPE': 'viewdns_reverse_ip_lookup_scan', 'DESCRIPTION': domain,
                                   'TIME': now(), 'CATEGORY': "scan", 'SCAN_ID': scan_id, 'SCAN_CMD': scan_cmd}) + "\n"
                __log_into_file(log_in_file, 'a', data, language)
        # BUG FIX: was `verbose_level is not 0` - identity test on ints.
        if verbose_level != 0:
            data = json.dumps({'HOST': target, 'USERNAME': '', 'PASSWORD': '', 'PORT': '', 'TYPE': 'viewdns_reverse_ip_lookup_scan',
                               'DESCRIPTION': messages(language, "domain_found").format(len(_values), ", ".join(_values) if len(
                                   _values) > 0 else "None"), 'TIME': now(), 'CATEGORY': "scan", 'SCAN_ID': scan_id,
                               'SCAN_CMD': scan_cmd}) + "\n"
            __log_into_file(log_in_file, 'a', data, language)
    else:
        warn(messages(language, "input_target_error").format(
            'viewdns_reverse_ip_lookup_scan', target))
Ejemplo n.º 3
0
	def showData(self):
		"""Populate the reader-statistics screen with per-ECM rows.

		Queries each reader's "readerstats" page through the web
		interface, extracts the per-channel ECM statistics, sorts them
		by request count (descending) and pushes the rows into the
		"output" widget.
		NOTE(review): assumes openWebIF() returns (ok_flag, xml_payload)
		- confirm against its definition elsewhere in this class.
		"""
		readers = self.getReaders()
		result = []
		title2 = ""
		for i in readers:
			# i[1] is used as the reader identifier throughout.
			xmldata = self.openWebIF(part="readerstats", reader=i[1])
			emm_wri = emm_ski = emm_blk = emm_err = ""
			if xmldata[0]:
				xdata = ElementTree.XML(xmldata[1])
				rdr = xdata.find("reader")
#					emms = rdr.find("emmstats")
#					if "totalwritten" in emms.attrib:
#						emm_wri = emms.attrib["totalwritten"]
#					if "totalskipped" in emms.attrib:
#						emm_ski = emms.attrib["totalskipped"]
#					if "totalblocked" in emms.attrib:
#						emm_blk = emms.attrib["totalblocked"]
#					if "totalerror" in emms.attrib:
#						emm_err = emms.attrib["totalerror"]

				ecmstat = rdr.find("ecmstats")
				totalecm = ecmstat.attrib["totalecm"]
				ecmcount = int(ecmstat.attrib["count"])
				lastacc = ecmstat.attrib["lastaccess"]
				ecm = ecmstat.findall("ecm")
				if ecmcount > 0:
					for j in ecm:
						caid = j.attrib["caid"]
						channel = j.attrib["channelname"]
						avgtime = j.attrib["avgtime"]
						lasttime = j.attrib["lasttime"]
						retcode = j.attrib["rc"]
						rcs = j.attrib["rcs"]
						num = j.text
						if rcs == "found":
							# Times arrive in milliseconds; show seconds,
							# truncated to 5 characters.
							avg_time = str(float(avgtime) / 1000)[:5]
							last_time = str(float(lasttime) / 1000)[:5]
							if "lastrequest" in j.attrib:
								lastreq = j.attrib["lastrequest"]
								try:
									# ISO timestamp: keep only HH:MM:SS.
									last_req = lastreq.split("T")[1][:-5]
								except IndexError:
									# Fallback: value is epoch seconds, not ISO.
									last_req = time.strftime("%H:%M:%S", time.localtime(float(lastreq)))
							else:
								last_req = ""
						else:
							# Not found: leave the timing columns blank.
							avg_time = last_time = last_req = ""
#						if lastreq != "":
#							last_req = lastreq.split("T")[1][:-5]
						if self.allreaders:
							result.append((i[1], caid, channel, avg_time, last_time, rcs, last_req, int(num)))
							title2 = _("( All readers)")
						else:
							if i[1] == self.reader:
								result.append((i[1], caid, channel, avg_time, last_time, rcs, last_req, int(num)))
							title2 = _("(Show only reader:") + "%s )" % self.reader

		# Sort by total request count (column 7), descending.
		outlist = self.sortData(result, 7, True)
		out = [(_("Label"), _("CAID"), _("Channel"), _("ECM avg"), _("ECM last"), _("Status"), _("Last Req."), _("Total"))]
		for i in outlist:
			out.append((i[0], i[1], i[2], i[3], i[4], i[5], i[6], str(i[7])))

		if HDSKIN:
			self["output"].setStyle("HD")
		else:
			self["output"].setStyle("default")
		self["output"].setList(out)
		title = [_("Reader Statistics"), title2]
		self.setTitle(" ".join(title))
Ejemplo n.º 4
0
 def test_parses_utf8_document_correctly(self):
     """Parse the module-level UTF-8 sample document with the default parser and validate it."""
     root = ET.XML(_utf8_document, parser=AglyphDefaultXMLParser())
     self._assert_parsed_document(root)
    URL = "https://" + ADDR + ":" + API_PORT + "/api/hosts"

    request = urllib2.Request(URL)

    base64string = base64.encodestring('%s:%s' % (USER, PASSWD)).strip()
    request.add_header("Authorization", "Basic %s" % base64string)

    try:
        xmldata = urllib2.urlopen(request).read()
    except urllib2.URLError, e:
        print "Error: cannot connect to REST API: %s" % (e)
        print "\tTry to login using the same user/pass by the Admin Portal and check the error!"
        sys.exit(2)

    tree = ElementTree.XML(xmldata)
    list = tree.findall("host")

    host_id = None
    for item in list:
        if host_name == item.find("name").text:
            host_id = item.attrib["id"]
            print "host id %s" % (host_id)
            break

    return host_id


if __name__ == "__main__":

    if len(sys.argv) != 4:
## 7.6.7 Parsing Strings
# To work with smaller bits of XML text, use XML() with the string
# containing the XML as the only argument
xml_string_to_parse = """
<root>
    <group>
        <child id="a">This is child "a".</child>
        <child id="b">This is child "b".</child>
    </group>
    <group>
        <child id="c">This is child "c".</child>
    </group>
</root>
"""

# XML() parses the literal and returns the root Element directly
# (there is no intermediate ElementTree object).
print "Using XML():"
parsed = ElementTree.XML(xml_string_to_parse)
print "parsed=", parsed


def show_node(node):
    """Recursively print an Element's tag, text, tail and attributes.

    Children are visited depth-first in document order; output is the
    same flat, indented format the original produced.

    COMPAT FIX: the Python-2-only `print x` statements are rewritten as
    single-argument `print(x)` calls, which produce identical output on
    Python 2 (parenthesized expression) and Python 3.
    """
    print(node.tag)
    # Only show text/tail when they contain something besides whitespace.
    if node.text is not None and node.text.strip():
        print("  text: '%s'" % node.text)
    if node.tail is not None and node.tail.strip():
        print("  tail: '%s'" % node.tail)
    # Attributes sorted by name for deterministic output.
    for (name, value) in sorted(node.attrib.items()):
        print("  %-4s = '%s'" % (name, value))
    for child in node:
        show_node(child)
Ejemplo n.º 7
0
def list_videos(params):
    """Build videos listing.

    Two modes, selected by params.state_video:
      * 'Toutes les videos (sans les categories)': walk the sitemap XML of
        every video and keep this channel's replay URLs.
      * otherwise: walk the channel replay catalog and keep either every
        program ('Toutes les videos') or only the program whose IDSERIE
        matches params.id_program.

    Returns the Kodi listing built from the collected entries.
    """
    videos = []

    if params.state_video == 'Toutes les videos (sans les categories)':

        file_path = utils.download_catalog(
            URL_ALL_VIDEO,
            '%s_all_video.xml' % params.channel_name,
        )
        # BUG FIX: the file handle was never closed; use a context manager.
        with open(file_path) as catalog_file:
            replay_xml = catalog_file.read()

        xml_elements = ET.XML(replay_xml)

        programs = xml_elements.findall(
            "{http://www.sitemaps.org/schemas/sitemap/0.9}url")

        for program in programs:

            url_site = program.findtext(
                "{http://www.sitemaps.org/schemas/sitemap/0.9}loc").encode(
                    'utf-8')
            check_string = '%s/replay/' % params.channel_name
            # Keep only this channel's replay pages.
            if url_site.count(check_string) > 0:

                # Title: last URL segment, dashes to spaces, uppercased.
                title = url_site.rsplit('/', 1)[1].replace("-", " ").upper()

                video_node = program.findall(
                    "{http://www.google.com/schemas/sitemap-video/1.1}video"
                )[0]

                # Duration (not present in the sitemap).
                duration = 0

                # Image
                img = ''
                img_node = video_node.find(
                    "{http://www.google.com/schemas/sitemap-video/1.1}thumbnail_loc"
                )
                img = img_node.text.encode('utf-8')

                # Url Video
                url = ''
                url_node = video_node.find(
                    "{http://www.google.com/schemas/sitemap-video/1.1}content_loc"
                )
                url = url_node.text.encode('utf-8')

                # Plot
                plot = ''
                plot_node = video_node.find(
                    "{http://www.google.com/schemas/sitemap-video/1.1}description"
                )
                if plot_node.text:
                    plot = plot_node.text.encode('utf-8')

                # Date: publication_date is ISO ('YYYY-MM-DDT...').
                value_date = ''
                value_date_node = video_node.find(
                    "{http://www.google.com/schemas/sitemap-video/1.1}publication_date"
                )
                value_date = value_date_node.text.encode('utf-8')
                date = value_date.split('T')[0].split('-')
                day = date[2]
                mounth = date[1]
                year = date[0]
                date = '.'.join((day, mounth, year))
                aired = '-'.join((year, mounth, day))

                info = {
                    'video': {
                        'title': title,
                        'plot': plot,
                        'duration': duration,
                        'aired': aired,
                        'date': date,
                        'year': year,
                        'mediatype': 'tvshow'
                    }
                }

                download_video = (
                    common.GETTEXT('Download'), 'XBMC.RunPlugin(' +
                    common.PLUGIN.get_url(action='download_video',
                                          module_path=params.module_path,
                                          module_name=params.module_name,
                                          url_video=url_site) + ')')
                context_menu = []
                context_menu.append(download_video)

                videos.append({
                    'label':
                    title,
                    'fanart':
                    img,
                    'thumb':
                    img,
                    'url':
                    common.PLUGIN.get_url(module_path=params.module_path,
                                          module_name=params.module_name,
                                          action='replay_entry',
                                          next='play_r',
                                          url_video=url),
                    'is_playable':
                    True,
                    'info':
                    info,
                    'context_menu':
                    context_menu
                })

    else:
        file_path = utils.download_catalog(
            URL_REPLAY_API % params.channel_name,
            '%s_replay.xml' % params.channel_name,
        )
        # BUG FIX: the file handle was never closed; use a context manager.
        with open(file_path) as catalog_file:
            replay_xml = catalog_file.read()

        xml_elements = ET.XML(replay_xml)

        programs = xml_elements.findall("program")

        for program in programs:
            # DEDUP FIX: the 'Toutes les videos' branch and the
            # id_program branch had byte-identical ~80-line bodies;
            # the conditions are merged (short-circuit order preserved)
            # and the body appears once.
            if (params.state_video == 'Toutes les videos'
                    or params.id_program == program.get("IDSERIE")):

                # Title
                title = program.findtext("title").encode('utf-8') + " - " + \
                    program.findtext("subtitle").encode('utf-8')

                # Duration: catalog value is in minutes.
                duration = 0
                if program.findtext("duration"):
                    try:
                        duration = int(program.findtext("duration")) * 60
                    except ValueError:
                        pass  # or whatever

                # Image
                img = program.find("photos").findtext("photo")

                # Url Video
                url = ''
                # program.find("offres").find("offre").find("videos").findtext("video)
                for i in program.find("offres").findall("offre"):

                    date_value = i.get("startdate")
                    date_value_list = date_value.split(' ')[0].split('-')
                    day = date_value_list[2]
                    mounth = date_value_list[1]
                    year = date_value_list[0]

                    date = '.'.join((day, mounth, year))
                    aired = '-'.join((year, mounth, day))

                    # Last video of the last offre wins (original behavior).
                    for j in i.find("videos").findall("video"):
                        url = j.text.encode('utf-8')

                # Plot: the 680-character story variant.
                plot = ''
                for i in program.find("stories").findall("story"):
                    if int(i.get("maxlength")) == 680:
                        plot = i.text.encode('utf-8')

                info = {
                    'video': {
                        'title': title,
                        'plot': plot,
                        'duration': duration,
                        'aired': aired,
                        'date': date,
                        'year': year,
                        'mediatype': 'tvshow'
                    }
                }

                download_video = (
                    common.GETTEXT('Download'), 'XBMC.RunPlugin(' +
                    common.PLUGIN.get_url(action='download_video',
                                          module_path=params.module_path,
                                          module_name=params.module_name,
                                          url_video=url) + ')')
                context_menu = []
                context_menu.append(download_video)

                videos.append({
                    'label':
                    title,
                    'fanart':
                    img,
                    'thumb':
                    img,
                    'url':
                    common.PLUGIN.get_url(module_path=params.module_path,
                                          module_name=params.module_name,
                                          action='replay_entry',
                                          next='play_r',
                                          url_video=url),
                    'is_playable':
                    True,
                    'info':
                    info,
                    'context_menu':
                    context_menu
                })

    return common.PLUGIN.create_listing(
        videos,
        sort_methods=(common.sp.xbmcplugin.SORT_METHOD_DATE,
                      common.sp.xbmcplugin.SORT_METHOD_DURATION,
                      common.sp.xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE,
                      common.sp.xbmcplugin.SORT_METHOD_GENRE,
                      common.sp.xbmcplugin.SORT_METHOD_PLAYCOUNT,
                      common.sp.xbmcplugin.SORT_METHOD_UNSORTED),
        content='tvshows',
        category=common.get_window_title())
Ejemplo n.º 8
0
    def CaptureStatus(files, cwd, no_ignore=False):
        """Return svn-1.5-style status as a list of (status, path) tuples.

        *files* may be falsy (whole tree), a single path string, or a list
        of paths. Runs ``svn status --xml`` in *cwd* and translates the
        XML report into the seven-column letter codes of svn 1.5.
        """
        cmd = ["status", "--xml"]
        if no_ignore:
            cmd.append('--no-ignore')
        if not files:
            pass
        elif isinstance(files, basestring):
            cmd.append(files)
        else:
            cmd.extend(files)

        # XML 'item' state -> column-0 status letter.
        letter_for = {
            None: ' ',
            '': ' ',
            'added': 'A',
            'conflicted': 'C',
            'deleted': 'D',
            'external': 'X',
            'ignored': 'I',
            'incomplete': '!',
            'merged': 'G',
            'missing': '!',
            'modified': 'M',
            'none': ' ',
            'normal': ' ',
            'obstructed': '~',
            'replaced': 'R',
            'unversioned': '?',
        }
        results = []
        dom = ElementTree.XML(SVN.Capture(cmd, cwd))
        if dom is None:
            return results
        # Layout: /status/target/entry/(wc-status|commit|author|date)
        for target in dom.findall('target'):
            for entry in target.findall('entry'):
                file_path = entry.attrib['path']
                wc_status = entry.find('wc-status')
                # Emulate svn 1.5 status output...
                statuses = [' '] * 7
                # Column 0: item state.
                item_state = wc_status.attrib['item']
                if item_state not in letter_for:
                    raise gclient_utils.Error(
                        'Unknown item status "%s"; please implement me!' %
                        item_state)
                statuses[0] = letter_for[item_state]
                # Column 1: property state.
                props_state = wc_status.attrib['props']
                if props_state == 'modified':
                    statuses[1] = 'M'
                elif props_state == 'conflicted':
                    statuses[1] = 'C'
                elif props_state and props_state not in ('none', 'normal'):
                    raise gclient_utils.Error(
                        'Unknown props status "%s"; please implement me!' %
                        props_state)
                # Columns 2-4: boolean flags from the wc-status element.
                for flag, letter, col in (('wc-locked', 'L', 2),
                                          ('copied', '+', 3),
                                          ('switched', 'S', 4)):
                    if wc_status.attrib.get(flag) == 'true':
                        statuses[col] = letter
                # TODO(maruel): Columns 5 and 6
                results.append((''.join(statuses), file_path))
        return results
Ejemplo n.º 9
0
 def __init__(self, xmp):
     """Parse the raw XMP packet and locate its rdf:RDF element."""
     root = ET.XML(xmp)
     self.tree = root
     self.rdftree = root.find(RDF_NS + 'RDF')
Ejemplo n.º 10
0
	def post(self, usernick):
		"""Subscribe *usernick* to the feed given by the 'source' form field.

		Only the user themselves or an admin may post. If the feed is not
		yet stored, its Atom content is fetched through Google Reader
		(up to 1000 items), a Feed entity plus one FeedItem per entry is
		saved, and finally a UserFeed subscription is created unless one
		already exists. Every path ends in a redirect to '/'.
		"""
		user = users.get_current_user()
		if (user.nickname() == usernick) or users.is_current_user_admin():
			source = self.request.get('source')
			interval = self.request.get('interval')
			#check if feed has '?' in it. raise error if yes. (right?)

			#check if feed already exists
			feedset = models.Feed().all().filter('uri = ', source).fetch(1)
			if not feedset:
				#max 1000 items, no questions asked.
				fetchFeedURI = ("http://www.google.com/reader/public/atom/feed/%s?n=1000&client=feedpacer" % source)
				try:
					result = urllib2.urlopen(fetchFeedURI)
				except urllib2.HTTPError:
					result = None
					#eventually push an error.
					self.redirect('/')
				else:
					res = result.read()
					feedtree = et.XML(res)
					#self.response.out.write(feedtree.attrib)

					feed = models.Feed()
					link = feedtree.find('{http://www.w3.org/2005/Atom}link')
					#handle -feed not found- error.
					if link.attrib['rel']=='alternate': feed.alt = link.attrib['href']
					feed.uri = source
					feed.fetched = datetime.now()
					feed.title = feedtree.find('{http://www.w3.org/2005/Atom}title').text
					feed.updated = datetime.strptime(feedtree.find('{http://www.w3.org/2005/Atom}updated').text, 
																		"%Y-%m-%dT%H:%M:%SZ")
					atom_id = feedtree.find('{http://www.w3.org/2005/Atom}id').text
					feedlinks = feedtree.findall('{http://www.w3.org/2005/Atom}link')
					for link in feedlinks:
						if link.attrib['rel'] == 'self':
							feed.rel_self = link.attrib['href']
						elif link.attrib['rel'] == 'alternate':
							feed.rel_alt = link.attrib['href']
					subt = feedtree.find('{http://www.w3.org/2005/Atom}subtitle')
					feed.subtitle = subt.text if subt is not None else None
					# Rewrite the Atom id onto the app's own tag namespace.
					feed.atom_id = 'tag:feedpacer.appspot.com' + atom_id[14:20] + atom_id[27:]
					feed.idx = feedtree.attrib['{urn:atom-extension:indexing}index']
					feed.dir = feedtree.attrib['{http://www.google.com/schemas/reader/atom/}dir']
					entries = feedtree.findall('{http://www.w3.org/2005/Atom}entry')
					feed.latestItemId = entries[0].find('{http://www.w3.org/2005/Atom}id').text
					feed.checkedForUpdates = datetime.now()
					feed.totalItems = len(entries)
					feed.put()

					# Store entries oldest-first, numbered from 0.
					ns = u'{http://www.w3.org/2005/Atom}'
					nsl = len(ns)
					entries.reverse()
					for i,item in enumerate(entries):
						feedItem = models.FeedItem()
						feedItem.feed = feed
						feedItem.num = i

						# Strip the Atom namespace prefix from every tag
						# before serializing the entry back to text.
						for elem in item.getiterator():
							if elem.tag.startswith(ns):
								elem.tag = elem.tag[nsl:]
						feedItem.whole = "<entry" + et.tostring(item)[100:]
						feedItem.put()

						'''atom_id = item.find('id').text
						atom_id = 'tag:feedpacer.appspot.com' + atom_id[14:20] + atom_id[27:]
						feedItem.atom_id = atom_id
						
						feedItem.title = item.find('{http://www.w3.org/2005/Atom}title').text
						feedItem.uri = item.find('{http://www.w3.org/2005/Atom}link').attrib['href']
						feedItem.published = datetime.strptime(item.find('{http://www.w3.org/2005/Atom}published').text, 
																		"%Y-%m-%dT%H:%M:%SZ")
						feedItem.updated = datetime.strptime(item.find('{http://www.w3.org/2005/Atom}updated').text, 
																		"%Y-%m-%dT%H:%M:%SZ")
						it_sum = item.find('{http://www.w3.org/2005/Atom}summary')
						it_cont = item.find('{http://www.w3.org/2005/Atom}content')
						if it_sum is not None:
							feedItem.summary = it_sum.text
						elif it_cont is not None:
							feedItem.summary = it_cont.text
						else:
							feedItem.summary = ""
						#self.response.out.write((i, it_sum, it_cont.text))
						#self.response.out.write(feedItem.summary)'''
			else:
				feed = feedset[0]

				#check if user is already registered for this feed
				# NOTE(review): the filter value below appears corrupted
				# (redacted to '******'); as written this line is not
				# valid Python - reconstruct from upstream history.
				ufset = models.UserFeed().all().filter('user = '******'feed = ', feed).count(1)

			if (not feedset) or (not ufset):
				uf = models.UserFeed()
				uf.user = user
				uf.feed = feed
				uf.interval = int(interval)
				uf.currentItem = 0
				uf.lastUpdated = datetime.now()
				uf.put()
				self.redirect('/')
			else:
				self.redirect('/')
				#eventually push an error 'already subscribed'.
		else:
			self.redirect('/')
Ejemplo n.º 11
0
def execute_send_at(myServerHost, myServerPort, myServerProtocol,
                    myServerMHSID, myServerSite, sites, filterSites, mhsSites,
                    issueTime, countDict, fname, xmlIncoming, xmtScript):
    """Answer a remote site's request for this server's VTEC active table.

    Loads the locally dumped (pickled) table from *fname*, optionally
    filters it by site, squeezes out obsolete records, then decides
    whether the requesting site actually needs an update (newer records
    than *issueTime*, or per-site record counts differing from
    *countDict*).  If so, the squeezed table is re-pickled, gzipped, a
    destination XML document is built (from *xmlIncoming* when present,
    otherwise by brute-force combinations for older sites), and both
    files are handed to MHS for transmission via irt.transmitFiles.

    NOTE(review): Python 2 code (cPickle, xrange, dict.has_key).  The
    helpers IrtAccess, JUtil, JavaVTECPartners, VTECPartners,
    VTECTableSqueeze, rename_fields_for_A1/A2, logger, FORCE_SEND and
    siteConfig are module globals defined elsewhere in the file.
    """
    logger.info('reqSite= ' + repr(sites))
    logger.info('filterSites= ' + repr(filterSites))
    logger.info('mhsSite= ' + repr(mhsSites))
    logger.info('reqCountDict= ' + repr(countDict))
    if issueTime is None:
        logger.info('reqIssueTime= None')
    else:
        logger.info('reqIssueTime= ' + str(issueTime) + ' ' +
                    time.asctime(time.gmtime(issueTime)))

    irt = IrtAccess.IrtAccess("")
    myServer = {
        'mhsid': myServerMHSID,
        'host': myServerHost,
        'port': myServerPort,
        'protocol': myServerProtocol,
        'site': myServerSite
    }
    logger.info('MyServer: ' + irt.printServerInfo(myServer))

    #--------------------------------------------------------------------
    # Prepare the file for sending
    #--------------------------------------------------------------------
    # The dump file is consumed (deleted) as soon as it is read.
    with open(fname, 'rb') as fd:
        buf = fd.read()
    os.remove(fname)
    table = cPickle.loads(buf)  #unpickle it
    logger.info("Local Table Length= " + str(len(table)))

    filtTable = []
    # filter by sites listing
    if filterSites:
        # TPC/SPC records are always kept regardless of the filter.
        tpcSites = JUtil.javaObjToPyVal(
            JavaVTECPartners.getInstance(myServerSite).getTpcSites())
        spcSites = JUtil.javaObjToPyVal(
            JavaVTECPartners.getInstance(myServerSite).getSpcSites())

        sites4 = [VTECPartners.get4ID(s) for s in sites]
        for t in table:
            if t['oid'] in filterSites or t['oid'] in sites4 or \
              t['oid'] in tpcSites or t['oid'] in spcSites:
                filtTable.append(t)
    else:
        filtTable = table  #no filtering
    logger.info("Site Filtered Table Length= " + str(len(filtTable)))

    # eliminate obsolete records
    ctime = time.time()  #now time
    vts = VTECTableSqueeze.VTECTableSqueeze(ctime)
    # squeeze() expects A2 field names; convert back to A1 afterwards.
    filtTable = rename_fields_for_A2(filtTable)
    actTable, tossRecords = vts.squeeze(filtTable)
    actTable = rename_fields_for_A1(actTable)
    logger.info("Squeezed Table Length= " + str(len(actTable)))

    # check issuance time - any times newer in remote table (this table) than
    # the local table (requesting site)?
    if issueTime is not None:
        newerRec = False
        newestRec = 0.0
        for t in actTable:
            if newestRec < t['issueTime']:
                newestRec = t['issueTime']
        if issueTime < newestRec:
            newerRec = True

        logger.info("NewestFound= " + str(newestRec) + ' ' +
                    time.asctime(time.gmtime(newestRec)))
        logger.info("IssueTime check.  Newer record found=  " + str(newerRec))
    else:
        newerRec = True  #just assume there are newer records

    # check "counts" for number of records.  Any more records means that
    # we may be missing some records.
    if countDict:
        missingRec = False
        localCountDict = {}
        # Count local records per office id (oid).
        for t in actTable:
            if localCountDict.has_key(t['oid']):
                localCountDict[t['oid']] = localCountDict[t['oid']] + 1
            else:
                localCountDict[t['oid']] = 1
        for site in localCountDict:
            reqCount = countDict.get(site, 0)  #number in requesting site
            if reqCount != localCountDict[
                    site]:  #records different in request site
                missingRec = True
                break
        logger.info("MissingRec check. Missing record found= " +
                    str(missingRec))
        logger.info("lclCountBySite= " + repr(localCountDict))
        logger.info("reqCountBySite= " + repr(countDict))
    else:
        missingRec = True  #just assume there are

    #should we send?
    if missingRec or newerRec or FORCE_SEND:
        sendIt = True
    else:
        sendIt = False

    if sendIt:
        actTablePickled = cPickle.dumps(actTable)  #repickle it
        rawSize = len(actTablePickled)

        #output it as gzipped
        fname = fname + ".gz"
        with gzip.open(fname, 'wb', 6) as fd:
            fd.write(actTablePickled)

        gzipSize = os.stat(fname)[stat.ST_SIZE]
        logger.info('#dataSize: ' + str(rawSize) + ', #gzipSize: ' +
                    str(gzipSize))

        #--------------------------------------------------------------------
        # Create the destination XML file
        #--------------------------------------------------------------------
        iscOut = ElementTree.Element('isc')
        irt.addSourceXML(iscOut, myServer)

        destServers = []

        if xmlIncoming is not None:
            # The request XML is consumed (deleted) as soon as it is read.
            with open(xmlIncoming, 'rb') as fd:
                xml = fd.read()
            os.remove(xmlIncoming)
            reqTree = ElementTree.ElementTree(ElementTree.XML(xml))
            sourceE = reqTree.find('source')
            # Only the first decodable address is used (note the break).
            for addressE in sourceE.getchildren():
                destServer = irt.decodeXMLAddress(addressE)
                if destServer is None:
                    continue
                destServers.append(destServer)
                break

        # no XML received on request, this is probably from an older site.
        # create a dummy response XML file. Try all combinations.  Only
        # the mhsid is important for older sites.
        else:
            servers = []
            for mhss in mhsSites:
                for port in xrange(98000000, 98000002):
                    for site in sites:
                        for host in ['dx4f', 'px3']:
                            destServer = {
                                'mhsid': mhss,
                                'host': host,
                                'port': port,
                                'protocol': "20070723",
                                'site': site
                            }
                            destServers.append(destServer)

        irt.addDestinationXML(iscOut, destServers)  #add the dest server xml

        # print out destinations
        s = "Destinations:"
        for destServer in destServers:
            s += "\n" + irt.printServerInfo(destServer)
        logger.info(s)

        # create XML file
        tempdir = os.path.join(siteConfig.GFESUITE_HOME, "products", "ATBL")
        with tempfile.NamedTemporaryFile(suffix='.xml',
                                         dir=tempdir,
                                         delete=False) as fd:
            fnameXML = fd.name
            fd.write(ElementTree.tostring(iscOut))

        #--------------------------------------------------------------------
        # Send it
        #--------------------------------------------------------------------
        irt.transmitFiles("PUT_ACTIVE_TABLE2", mhsSites, myServerSite,
                          [fname, fnameXML], xmtScript)

    else:
        logger.info("Send has been skipped")
Ejemplo n.º 12
0
# ECMAScript block to embed at the top of the SVG output.  init() grabs
# the owning SVG document; ShowTooltip/HideTooltip toggle the visibility
# of the element whose id is 'tooltip_<c>', where <c> is the last
# character of the hovered element's id.
script = """
    <script type="text/ecmascript">
    <![CDATA[
    
    function init(evt) {
        if ( window.svgDocument == null ) {
            svgDocument = evt.target.ownerDocument;
            }
        }
        
    function ShowTooltip(obj) {
        var cur = obj.id.slice(-1);
        
        var tip = svgDocument.getElementById('tooltip_' + cur);
        tip.setAttribute('visibility',"visible")
        }
        
    function HideTooltip(obj) {
        var cur = obj.id.slice(-1);
        var tip = svgDocument.getElementById('tooltip_' + cur);
        tip.setAttribute('visibility',"hidden")
        }
        
    ]]>
    </script>
    """

# Insert the script at the top of the file and save it.
# NOTE(review): `tree` (the parsed SVG root) and `ET` are assumed to be
# defined earlier in the file — confirm.
tree.insert(0, ET.XML(script))
ET.ElementTree(tree).write('svg_tooltip.svg')
Ejemplo n.º 13
0
def NewXMLTree(rootType="Problem"):
    """Create a fresh ElementTree whose empty root element is named *rootType*."""
    root = ElementTree.XML("<%s/>" % rootType)
    return ElementTree.ElementTree(root)
Ejemplo n.º 14
0
        print >> sys.stderr, "%d total warnings, %d hidden" % (
            warning_count, warning_count - warning_max)


# define lambda: qualify a local tag name with the spreadsheetml namespace
n = lambda x: "{http://schemas.openxmlformats.org/spreadsheetml/2006/main}%s" % x

# parse multiple xlsx files given as command-line arguments
sys.stderr.write("Processing xlsx file(s)...\n")
for fname in sys.argv[1:]:
    sys.stderr.write(" %s\n" % fname)
    # prepare output directory (one per workbook: <file>_sheets/)
    outdir = fname + "_sheets"
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    # load file (an .xlsx file is a zip archive)
    z = zipfile.ZipFile(fname)
    # get sheets
    sheet_filenames = [
        f for f in z.namelist() if re.search("^xl/worksheets/sheet.*xml$", f)
    ]
    workbook_x = ET.XML(z.read("xl/workbook.xml"))
    sheet_xs = workbook_x.find(n("sheets")).findall(n("sheet"))
    # save sheets: one .tsv per sheet, named after the sheet
    for sheet_num, x in enumerate(sheet_xs, 1):
        name = x.get('name')
        outfn = os.path.join(outdir, name) + '.tsv'
        sys.stderr.write("  %s %s > %s\n" % (sheet_num, name, outfn))
        # NOTE(review): `out` is never closed here; read_sheet_and_save is
        # defined elsewhere in the file — confirm it flushes/closes.
        out = open(outfn, "w")  #sys.stdout
        read_sheet_and_save(z, sheet_num, out)
Ejemplo n.º 15
0
def main():
   """Poll a UMS server for messages, printing text and saving files.

   Logs in via the simple HTTP API, then repeatedly POSTs a SOAP
   envelope to the /xml endpoint; text messages are printed, binary
   (file) messages are base64-decoded and saved to the current working
   directory with a timestamp suffix.  Finally closes the session.

   NOTE(review): Python 2 script (print statements, httplib, long()).
   parseArgs/doSend/doPost/buildSOAPEnvelope and the globals host, dst,
   domain, sid, conn, DEFAULT_CONTEXT_ROOT and etree are defined
   elsewhere in the file.
   NOTE(review): the login credentials were redacted by the source
   scraper ("******"), leaving that line syntactically invalid and
   apparently swallowing the doPost call that assigns rtnCode/sid/conn —
   restore the original expression before running.
   """

   # Process command line args
   parseArgs()

   print "UMS Server:", host + ", Destination:", dst + ", Domain:", domain
   print

   # Login to UMS
   url = DEFAULT_CONTEXT_ROOT + "/simple?service=login" + \
      "&user="******"&password="******"")
   if rtnCode == -1:
      print "Failed to login to UMS server."
      sys.exit(1)
   conn.close()

   # Create a SOAP envelope
   (envelope, body) = buildSOAPEnvelope(sid)

   # Get a message from the server (if any)
   url = DEFAULT_CONTEXT_ROOT + "/xml"
   total = 0
   while 1:
      # Send request
      (rtnCode, xmlData) = doSend(url, envelope)
      if rtnCode == -1:
         break

      if len(xmlData) > 0:
         fileNameAttr = ""
         msg = ""

         # Walk the response: the SOAP body tag carries text messages,
         # a "File" tag carries a base64 payload plus a name attribute.
         root = etree.XML(xmlData)
         for e in root.getiterator():
            if (e.tag == body.tag and e.text != "None"):
               msg = e.text
            elif (e.tag == "File" and e.text != "None"):
               fileNameAttr = e.get("name")
               msg = e.text

         # For a binary message, i.e. a file, save the file in the current
         # working directory; otherwise print the text message received.
         if not (msg is None or len(msg) == 0):
            total += 1
            currentTime = datetime.datetime.now()
            if len(fileNameAttr) > 0:
               (file, ext) = os.path.splitext(fileNameAttr)
               fileName = file + "_" + str(long(time.time())) + ext
               data = base64.b64decode(msg)
               print currentTime.strftime("[%d/%m/%Y:%H:%M:%S ") + \
                  str(total) + "]: Received: " + fileNameAttr + \
                  " (" + str(len(data)) + " bytes" + ")"
               print "\tSaved File As: " + fileName
               tmpFile = open(fileName, "wb")
               tmpFile.write(data)
               tmpFile.close()
            else:
               print currentTime.strftime("[%d/%m/%Y:%H:%M:%S ") + \
                  str(total) + "]: Received: " + msg

   # Close the UMS session
   if len(sid) > 0:
      print
      print "Closing UMS connection, please wait..."

      conn = httplib.HTTPConnection(host)
      url = DEFAULT_CONTEXT_ROOT + "/simple?service=close&sid=" + sid
      (rtnCode, respMsg) = doPost(conn, url, "")
      conn.close()
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

import xml.etree.ElementTree as ET
from os.path import dirname, realpath, join

# Derive the POM path from the current script location
TOP_LEVEL_PATH = dirname(dirname(dirname(realpath(__file__))))
POM_PATH = join(TOP_LEVEL_PATH, 'pom.xml')

# Read and parse the POM; the context manager closes the file handle
# promptly (the original left it to the garbage collector).
with open(POM_PATH) as pom_file:
    root = ET.XML(pom_file.read())
# Print the project version (the POM uses the Maven 4.0.0 namespace).
print(root.find('{http://maven.apache.org/POM/4.0.0}version').text)
Ejemplo n.º 17
0
# !/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:lichengbing

# xml是实现不同语言和程序之间进行数据交换的协议

from xml.etree import ElementTree as ET

# XML can be read in two common ways:

# 1) Parse XML directly from a string.
#    No ElementTree object is built this way, so the in-memory XML
#    cannot be written straight back to a file.
#    (Fixed: use a context manager so the file handle is closed.)
with open('xo.xml', 'r') as xml_file:
    str_xml = xml_file.read()
root = ET.XML(str_xml)

# 2) Parse an XML file into a tree.
# tree = ET.parse('xo.xml')  # builds an ElementTree object first
# root = tree.getroot()      # get the root node
# print(root)
# print(root.tag)            # root node tag name
# print(root.attrib)         # root node attributes

# 3) Walk nested XML levels.
for child in root:
    print(child.tag, child.attrib)
    for child_second in child:
        print(child_second.tag, child_second.text)  # .text is the node content

# 4) Iterate over one specific tag.
for node in root.iter('year'):
    print(node.tag, node.text)

# 5) Modify node content.
Ejemplo n.º 18
0
def convert_file(path):
    """Convert an asciidoc-generated couchbase-cli DITA file at *path*
    into the layout used by the docs repository.

    Normalises the title, turns the "_name" topic into a <shortdesc>,
    rewrites the synopsis blockquote as a bash <codeblock> titled
    SYNTAX, flattens remaining sub-topics into <section>s, and turns
    SEE ALSO cross-references into <related-links>.

    Returns the converted document as an XML string with an XML
    declaration and DOCTYPE prepended.
    """
    # Raw string: '\(' / '\)' are regex escapes, not string escapes
    # (the original non-raw literal triggers invalid-escape warnings).
    substitute_regex = re.compile(r'\(\d*\)')
    tree = etree.parse(path)
    root = tree.getroot()
    root_topic = root.find('.//topic')

    # Normalise the title: drop the man-page section suffix "(1)" and
    # the "couchbase-cli-" prefix.
    title = root.find('.//topic/title')
    title.text = substitute_regex.sub('', title.text).replace(
        'couchbase-cli-', '')

    # Boilerplate sub-topics that must not survive the conversion; they
    # are removed only after the SEE ALSO links have been harvested.
    to_remove = [
        root.find('.//topic/topic[@id="_see_also"]'),
        root.find('.//topic/topic[@id="_couchbase_cli"]')
    ]

    # The "_name" topic carries "<command> - <short description>"; keep
    # only the description part for the <shortdesc> element.
    name_topic = root.find('.//topic/topic[@id="_name"]')
    name_topic_content = root.find('.//topic/topic[@id="_name"]/body/p')
    short_desc = name_topic_content.text.split(' - ', 1)[1]
    root_topic.remove(name_topic)

    # Replace the synopsis blockquote with a bash codeblock and rename
    # the section title to SYNTAX.
    synopsis_topic = root.find('.//topic/topic[@id="_synopsis"]/body')
    synopsis_lq = root.find('.//topic/topic[@id="_synopsis"]/body/lq')
    synopsis_lines = root.find(
        './/topic/topic[@id="_synopsis"]/body/lq/lines/*')

    synopsis_content = etree.tostring(synopsis_lines,
                                      encoding="unicode").replace('\n', '')
    synopsis_content = ' '.join(synopsis_content.split())

    syntax_codeblock = etree.XML(
        '<codeblock outputclass="language-bash">{}</codeblock>'.format(
            synopsis_content))
    synopsis_topic.remove(synopsis_lq)
    synopsis_topic.append(syntax_codeblock)
    synopsis_title = root.find('.//topic/topic[@id="_synopsis"]/title')
    synopsis_title.text = 'SYNTAX'

    # Collect cross references from SEE ALSO; numeric <ph> entries are
    # page numbers rather than command names, so skip them.
    links = root.findall('.//topic/topic[@id="_see_also"]/body/p/cite//ph')
    links_to_create = []
    for child in links:
        try:
            int(child.text)
        except ValueError:
            href = substitute_regex.sub('', child.text)

            if href == 'couchbase-cli':
                continue

            if not href.startswith('couchbase-cli-'):
                href = href.replace('couchbase', 'couchbase-cli')
            links_to_create.append(href)

    for element in to_remove:
        root_topic.remove(element)

    # Flatten each remaining sub-topic into a <section> of the root body.
    for topic in root_topic.findall('./topic'):
        topic_body = topic.find('./body')
        section = etree.Element('section')
        section.append(topic.find('./title'))
        section.extend(topic_body)
        root_topic.remove(topic)
        root_topic.find('./body').append(section)

    root_topic.remove(root_topic.find('./prolog'))
    related_links = etree.Element('related-links')

    for link in links_to_create:
        link_element = etree.XML('<link href="{}.dita"/>'.format(link))
        related_links.append(link_element)

    # <shortdesc> must directly follow the <title> (index 1).
    short_desc = etree.XML('<shortdesc>{}</shortdesc>'.format(short_desc))
    root_topic.insert(1, short_desc)
    root_topic.append(related_links)

    result = '<?xml version="1.0" standalone="no"?>'
    result += '<!DOCTYPE dita PUBLIC "-//OASIS//DTD DITA Composite//EN" "../../../../dtd/ditabase.dtd">'
    result += etree.tostring(root, encoding="unicode")
    return result
Ejemplo n.º 19
0
def list_shows(params):
    """Build shows listing.

    Depending on params.next this lists either the "all videos" entry,
    the category list parsed from the channel's collection XML, or the
    programs belonging to one category.  Returns a Kodi directory
    listing via common.PLUGIN.create_listing.

    NOTE(review): relies on plugin-framework globals (common, utils,
    URL_COLLECTION_API) defined elsewhere; the .encode('utf-8') calls
    indicate Python 2 string handling.
    """
    shows = []

    if 'list_shows_without_categories' in params.next:

        # To get all the videos
        state_video = 'Toutes les videos (sans les categories)'

        shows.append({
            'label':
            state_video,
            'url':
            common.PLUGIN.get_url(
                module_path=params.module_path,
                module_name=params.module_name,
                action='replay_entry',
                state_video=state_video,
                next='list_videos_1',
                # title_category=category_name,
                window_title=state_video)
        })

    else:
        unique_item = dict()

        # Download and parse the channel's collection catalogue.
        file_path = utils.download_catalog(
            URL_COLLECTION_API % params.channel_name,
            '%s_collection.xml' % params.channel_name,
        )
        collection_xml = open(file_path).read()

        xml_elements = ET.XML(collection_xml)

        if 'list_shows_1' in params.next:
            # Build categories list (Tous les programmes, Séries, ...)
            collections = xml_elements.findall("collection")

            # To get all the videos: some videos have categories that are
            # not present in this URL 'url_collection_api'
            state_video = 'Toutes les videos'

            shows.append({
                'label':
                state_video,
                'url':
                common.PLUGIN.get_url(
                    module_path=params.module_path,
                    module_name=params.module_name,
                    action='replay_entry',
                    state_video=state_video,
                    next='list_videos_1',
                    # title_category=category_name,
                    window_title=state_video)
            })

            # One entry per distinct category; empty categories are
            # grouped under the placeholder 'NO_CATEGORY'.
            for collection in collections:

                category_name = collection.findtext("category").encode('utf-8')
                if category_name not in unique_item:
                    if category_name == '':
                        category_name = 'NO_CATEGORY'
                    unique_item[category_name] = category_name
                    shows.append({
                        'label':
                        category_name,
                        'url':
                        common.PLUGIN.get_url(
                            module_path=params.module_path,
                            module_name=params.module_name,
                            action='replay_entry',
                            category_name=category_name,
                            next='list_shows_programs',
                            # title_category=category_name,
                            window_title=category_name)
                    })

        elif 'list_shows_programs' in params.next:
            # Build program list (Tous les programmes, Séries, ...)
            collections = xml_elements.findall("collection")

            state_video = 'VIDEOS_BY_CATEGORY'

            # Keep only collections whose category matches the requested
            # one ('NO_CATEGORY' matches collections with empty category).
            for collection in collections:
                if params.category_name == collection.findtext(
                        "category").encode('utf-8') \
                        or (params.category_name == 'NO_CATEGORY' and
                            collection.findtext("category").encode('utf-8') == ''):
                    name_program = collection.findtext("name").encode('utf-8')
                    img_program = collection.findtext("picture")
                    id_program = collection.get("id")

                    shows.append({
                        'label':
                        name_program,
                        'thumb':
                        img_program,
                        'url':
                        common.PLUGIN.get_url(
                            module_path=params.module_path,
                            module_name=params.module_name,
                            action='replay_entry',
                            next='list_videos_1',
                            state_video=state_video,
                            id_program=id_program,
                            # title_program=name_program,
                            window_title=name_program)
                    })

    return common.PLUGIN.create_listing(
        shows,
        sort_methods=(common.sp.xbmcplugin.SORT_METHOD_UNSORTED,
                      common.sp.xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE),
        category=common.get_window_title())
Ejemplo n.º 20
0
def compat_etree_fromstring(text):
    """Parse *text* into an element tree using the local _TreeBuilder target."""
    xml_parser = etree.XMLParser(target=_TreeBuilder())
    return etree.XML(text, parser=xml_parser)
Ejemplo n.º 21
0
def make_text_def(class_name, parent, text):
    """Render a BBCode-style description *text* to HTML and append it to
    *parent* as a <div class="description"> element.

    Recognised [tags]: known class names (linkified), and the commands
    html, method, image=, url=, /url, center, /center, br, plus b/i/u
    passthrough.  Unknown tags are kept verbatim in square brackets.
    Returns the appended Element.

    Relies on module globals defined elsewhere in the file:
    class_names, single_page, html_escape, ET, sys.
    """
    text = html_escape(text)
    pos = 0
    # Scan left-to-right for [tag] markers, replacing each in place.
    while True:
        pos = text.find("[", pos)
        if pos == -1:
            break

        endq_pos = text.find("]", pos + 1)
        if endq_pos == -1:
            break

        pre_text = text[:pos]
        post_text = text[endq_pos + 1:]
        tag_text = text[pos + 1:endq_pos]

        if tag_text in class_names:
            # Link to the class: an anchor on a single page, else a file.
            if single_page:
                tag_text = '<a href="#' + tag_text + '">' + tag_text + '</a>'
            else:
                tag_text = '<a href="' + tag_text + '.html">' + tag_text + '</a>'
        else:  #command
            cmd = tag_text
            space_pos = tag_text.find(" ")
            if cmd.find("html") == 0:
                # [html tag] -> emit <tag> literally
                cmd = tag_text[:space_pos]
                param = tag_text[space_pos + 1:]
                tag_text = "<" + param + ">"
            elif cmd.find("method") == 0:
                # [method Name] or [method Class.Name] -> link to method
                cmd = tag_text[:space_pos]
                param = tag_text[space_pos + 1:]

                if not single_page and param.find(".") != -1:
                    class_param, method_param = param.split(".")
                    # (fixed: original had a doubled "tag_text = tag_text =")
                    tag_text = ('<a href="' + class_param + '.html#' +
                                class_param + "_" + method_param + '">' +
                                class_param + '.' + method_param + '()</a>')
                else:
                    tag_text = ('<a href="#' + class_name + "_" + param +
                                '">' + class_name + '.' + param + '()</a>')
            elif cmd.find("image=") == 0:
                print("found image: " + cmd)
                tag_text = "<img src=" + cmd[6:] + "/>"
            elif cmd.find("url=") == 0:
                tag_text = "<a href=" + cmd[4:] + ">"
            elif cmd == "/url":
                tag_text = "</a>"
            elif cmd == "center":
                tag_text = "<div align=\"center\">"
            elif cmd == "/center":
                tag_text = "</div>"
            elif cmd == "br":
                tag_text = "<br/>"
            elif (cmd == "i" or cmd == "/i" or cmd == "b" or cmd == "/b"
                  or cmd == "u" or cmd == "/u"):
                tag_text = "<" + tag_text + ">"  #html direct mapping
            else:
                tag_text = "[" + tag_text + "]"

        text = pre_text + tag_text + post_text
        # Resume scanning just after the substituted tag.
        pos = len(pre_text) + len(tag_text)

    #tnode = ET.SubElement(parent,"div")
    #tnode.text=text
    text = "<div class=\"description\">" + text + "</div>"
    try:
        tnode = ET.XML(text)
        parent.append(tnode)
    except Exception:
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are not swallowed; a malformed description is still fatal.
        print("Error parsing description text: '" + text + "'")
        sys.exit(255)

    return tnode
Ejemplo n.º 22
0
 def __init__(self, xml_data):
     """Parse *xml_data* (an XML string/bytes) and store its root Element."""
     self.root = ET.XML(xml_data)
Ejemplo n.º 23
0
    def payload(self, place=None, parameter=None, value=None, newValue=None, where=None):
        """
        This method replaces the affected parameter with the SQL
        injection statement to request.

        place/parameter default to the detected injection point
        (kb.injection); *value* is the base value the payload is
        appended to and *where* selects how the original value is
        treated (PAYLOAD.WHERE.ORIGINAL/NEGATIVE/REPLACE).  Returns the
        full request parameter string with the delimited payload
        substituted in.

        NOTE(review): depends on sqlmap framework state (conf, kb,
        PLACE, PAYLOAD, CUSTOM_INJECTION_MARK_CHAR and helper
        functions) defined elsewhere.
        """

        if conf.direct:
            return self.payloadDirect(newValue)

        retVal = ""

        # Fall back to the detected technique/injection point when the
        # caller did not pin them explicitly.
        if where is None and isTechniqueAvailable(kb.technique):
            where = kb.injection.data[kb.technique].where

        if kb.injection.place is not None:
            place = kb.injection.place

        if kb.injection.parameter is not None:
            parameter = kb.injection.parameter

        paramString = conf.parameters[place]
        paramDict = conf.paramDict[place]
        origValue = paramDict[parameter]

        # Trim the original value down to the injectable trailing token.
        if place == PLACE.URI:
            origValue = origValue.split(CUSTOM_INJECTION_MARK_CHAR)[0]
            origValue = origValue[origValue.rfind('/') + 1:]
            for char in ('?', '=', ':'):
                if char in origValue:
                    origValue = origValue[origValue.rfind(char) + 1:]
        elif place == PLACE.CUSTOM_POST:
            origValue = origValue.split(CUSTOM_INJECTION_MARK_CHAR)[0]
            origValue = extractRegexResult(r"(?s)(?P<result>(\W+\Z|\w+\Z))", origValue)

        # Build the value prefix according to the WHERE strategy, then
        # prepend it to the payload.
        if value is None:
            if where == PAYLOAD.WHERE.ORIGINAL:
                value = origValue
            elif where == PAYLOAD.WHERE.NEGATIVE:
                if conf.invalidLogical:
                    match = re.search(r'\A[^ ]+', newValue)
                    newValue = newValue[len(match.group() if match else ""):]
                    value = "%s%s AND %s=%s" % (origValue, match.group() if match else "", randomInt(2), randomInt(2))
                elif conf.invalidBignum:
                    value = "%d.%d" % (randomInt(6), randomInt(1))
                else:
                    if newValue.startswith("-"):
                        value = ""
                    else:
                        value = "-%s" % randomInt()
            elif where == PAYLOAD.WHERE.REPLACE:
                value = ""
            else:
                value = origValue

            newValue = "%s%s" % (value, newValue)

        newValue = self.cleanupPayload(newValue, origValue)

        # Substitute the delimited payload into the request, per place.
        if place == PLACE.SOAP:
            root = ET.XML(paramString)
            iterator = root.getiterator(parameter)

            for child in iterator:
                child.text = self.addPayloadDelimiters(newValue)

            retVal = ET.tostring(root)
        elif place in (PLACE.URI, PLACE.CUSTOM_POST):
            retVal = paramString.replace("%s%s" % (origValue, CUSTOM_INJECTION_MARK_CHAR), self.addPayloadDelimiters(newValue))
        elif place in (PLACE.UA, PLACE.REFERER, PLACE.HOST):
            retVal = paramString.replace(origValue, self.addPayloadDelimiters(newValue))
        else:
            retVal = paramString.replace("%s=%s" % (parameter, origValue),
                                           "%s=%s" % (parameter, self.addPayloadDelimiters(newValue)))

        return retVal
Ejemplo n.º 24
0
#!/usr/bin/python
import xml.etree.ElementTree as ET

# XML to add.
# NOTE(review): despite the historical "mysql" variable name, these are
# MariaDB JDBC client coordinates.
mysql_xml_string = \
    "<dependency>" \
        "<groupId>org.mariadb.jdbc</groupId>" \
        "<artifactId>mariadb-java-client</artifactId>" \
        "<version>2.3.0</version>" \
        "<type>jar</type>" \
        "<scope>compile</scope>" \
    "</dependency>"

# Register xml namespaces so the rewritten pom.xml keeps the default
# namespace instead of gaining an ns0: prefix, then parse it.
ET.register_namespace('', 'http://maven.apache.org/POM/4.0.0')
ET.register_namespace('xsi', 'http://www.w3.org/2001/XMLSchema-instance')

tree = ET.parse('pom.xml')
root = tree.getroot()

# Search for the "dependencies" tag and add the dependency to it.
# rsplit(...)[-1] tolerates tags without an XML namespace; the original
# split("}")[1] raised IndexError on such tags.
for child in root:
    simplified_tag = child.tag.rsplit("}", 1)[-1]
    if simplified_tag == "dependencies":
        child.append(ET.XML(mysql_xml_string))
        break

tree.write('pom.xml')
Ejemplo n.º 25
0
def complete(complete_output_var, output_var, base_var , alter_var, collapse_var):
    """Translate Haxe compiler completion XML into Vim completion items.

    Reads the compiler output and option flags from the named Vim
    variables, builds a list of completion dicts (word/abbr/menu/info/
    kind/dup), optionally filters by the typed *base* prefix and
    collapses overloads, then assigns the JSON result back to the Vim
    variable named by *output_var*.

    NOTE(review): Python 2 module (HTMLParser, list-returning map);
    vim, alter_signature and remove_html_markup come from the
    surrounding plugin code.
    """
    complete_output = vim.eval(complete_output_var)
    base = vim.eval(base_var)
    alter_sig = vim.eval(alter_var) != '0'
    collapse_overload = vim.eval(collapse_var) != '0'
    if complete_output is None: complete_output = ''
    completes = []

    # wrap in a tag to prevent parsing errors
    root = ET.XML("<output>" + complete_output + "</output>")

    fields = root.findall("list/i")
    types = root.findall("type")
    completes = []

    if len(fields) > 0: # field completion
        def fieldxml2completion(x):
            # Map one <i> element to a Vim completion dict.
            word = x.attrib["n"]
            menu = x.find("t").text
            info = x.find("d").text
            menu = '' if menu is None else menu
            if info is None:
                info = ['']
            else:
                # get rid of leading/trailing ws/nl
                info = info.strip()
                # strip html
                info = remove_html_markup(info)
                # split and collapse extra whitespace
                info = [re.sub(r'\s+',' ',s.strip()) for s in info.split('\n')]

            abbr = word
            kind = 'v'
            if  menu == '': kind = 'm'
            elif re.search("\->", menu):
                kind = 'f' # if it has a ->
                if alter_sig:
                    menu = alter_signature(menu)
                word += "("

            return {  'word': word, 'info': info, 'kind': kind
                    ,'menu': menu, 'abbr': abbr, 'dup':1 }

        completes = map(fieldxml2completion, fields)
    elif len(types) > 0: # function type completion
        otype = types[0].text.strip()
        h = HTMLParser.HTMLParser()
        word = ' '
        info = [h.unescape(otype)]
        abbr = info[0]
        if otype == "Dynamic":
            completes = [{'word': word,'info':info
                        , 'abbr': "Dynamic (Will likely cause compiler error.)"
                        , 'dup':1}
                        ]
        elif alter_sig:
            abbr = alter_signature(abbr)
            completes= [{'word': word,'info':info, 'abbr':abbr, 'dup':1}]

    # Keep only candidates matching the already-typed prefix.
    if base != '' and base is not None:
        completes = [c for c in completes if re.search("^" + base, c['word'])]

    # Collapse duplicate abbreviations and tag the survivors as overloads.
    if collapse_overload:
        dict_complete = dict()
        def complete_exists(c):
            if c in dict_complete:
                dict_complete[c] += 1
                return True
            else:
                dict_complete[c] = 1
                return False
        completes = [c for c in completes if not complete_exists(c['abbr'])]
        for c in completes:
            if dict_complete[c['abbr']] > 1:
                c['menu'] = "@:overload " + c['menu']

    vim.command("let " + output_var + " = " + json.dumps(completes))
Ejemplo n.º 26
0
 def query_data(self):
     """Fetch today's games for self.league from the MSNBC ticker feed.

     Downloads the JSONP game feed for the current date (rolled back a
     day before 04:00 local time), parses each game's XML blob, and
     buckets a per-game summary dict into 'in' / 'pre' / 'end' lists by
     game state.

     NOTE(review): identifiers are machine-obfuscated; branches of the
     form `if 38 - 38:` are always-false dead code inserted by the
     obfuscator.  Python 2 only (urllib2, `except Exception, err`).
     NOTE(review): the bucket dict (i1iIIi1) is built but never
     returned in this copy — the method may be truncated; confirm
     against the original source.
     """
     # Current local time in the configured zone; date key for the feed.
     OOoOO00OOO0OO = d_t.datetime.now(pytz.timezone(self.time_zone))
     iI1I111Ii111i = int(OOoOO00OOO0OO.strftime("%Y%m%d"))
     I11IiI1I11i1i = int(OOoOO00OOO0OO.strftime("%H%M"))
     if I11IiI1I11i1i < 400:
         if 38 - 38: Ii1I
         # Before 04:00 local: still use yesterday's schedule.
         iI1I111Ii111i = int(
             (OOoOO00OOO0OO - d_t.timedelta(days=1)).strftime("%Y%m%d"))
         if 57 - 57: ooOoO / O00oOoOoO0o0O * ooO / I11i.o0
     i1iIIi1 = {'in': [], 'pre': [], 'end': []}
     i11iIIIIIi1 = self.league
     try:
         # Fetch the JSONP payload and strip the callback wrapper.
         iiII1i1 = urllib2.urlopen(self.url % (i11iIIIIIi1, iI1I111Ii111i))
         o00oOO0o = iiII1i1.read()
         iiII1i1.close()
         OOO00O = o00oOO0o.replace('shsMSNBCTicker.loadGamesData(',
                                   '').replace(');', '')
         OOoOO0oo0ooO = json.loads(OOO00O)
         for O0o0O00Oo0o0 in OOoOO0oo0ooO.get('games', []):
             # Each game entry is itself an XML fragment.
             O00O0oOO00O00 = Et.XML(O0o0O00Oo0o0)
             if 11 - 11: i11Ii11I1Ii1i.iii1I1I
             # EPL feeds swap home/visiting relative to the US leagues.
             if i11iIIIIIi1 == 'EPL':
                 o0oo0oOo = O00O0oOO00O00.find('home-team')
                 o000O0o = O00O0oOO00O00.find('visiting-team')
             else:
                 o0oo0oOo = O00O0oOO00O00.find('visiting-team')
                 o000O0o = O00O0oOO00O00.find('home-team')
             iI1iII1 = O00O0oOO00O00.find('gamestate')
             oO0OOoo0OO = o000O0o.get('nickname')
             O0 = o000O0o.get('alias')
             ii1ii1ii = o000O0o.get('score')
             oooooOoo0ooo = o0oo0oOo.get('nickname')
             I1I1IiI1 = o0oo0oOo.get('alias')
             III1iII1I1ii = o0oo0oOo.get('score')
             oOOo0 = iI1iII1.get('status')
             # Kickoff time as a unix timestamp (feed-local clock).
             oo00O00oO = int(
                 time.mktime(
                     time.strptime(
                         '%s %d' % (iI1iII1.get('gametime'), iI1I111Ii111i),
                         '%I:%M %p %Y%m%d')))
             if 23 - 23: OOooOOo + OOooOOo.O0oo0OO0
             if oOOo0 == 'In-Progress':
                 ii1ii11IIIiiI = 'in'
             elif oOOo0 == 'Pre-Game':
                 ii1ii11IIIiiI = 'pre'
             else:
                 ii1ii11IIIiiI = 'end'
             i1iIIi1[ii1ii11IIIiiI].append({
                 'league':
                 i11iIIIIIi1.upper(),
                 'orig-start':
                 oo00O00oO,
                 'start':
                 self._to_local_time(oo00O00oO).strftime("%I:%M %p").lstrip(
                     '0'),
                 'home':
                 oO0OOoo0OO,
                 'home-alias':
                 O0.upper(),
                 'home-guide':
                 self._get_mapped_team(i11iIIIIIi1, O0.upper()),
                 'away':
                 oooooOoo0ooo,
                 'away-alias':
                 I1I1IiI1.upper(),
                 'away-guide':
                 self._get_mapped_team(i11iIIIIIi1, I1I1IiI1.upper()),
                 'home-score':
                 ii1ii1ii,
                 'away-score':
                 III1iII1I1ii,
                 'status':
                 oOOo0,
                 'clock':
                 iI1iII1.get('display_status1'),
                 'clock-section':
                 iI1iII1.get('display_status2')
             })
     except Exception, O00OOOoOoo0O:
         xbmc.log(
             "[%s] ERROR: %s" %
             (ADDON.getAddonInfo('id'), O00OOOoOoo0O.message),
             xbmc.LOGERROR)
         if 77 - 77: II % II * O00oOoOoO0o0O - i11iIiiIii
Ejemplo n.º 27
0
        url = 'http://{}:{}/ClientServer/loginserver/rest/client/requestDepAndPer'.format( ServerIp, ServerPort)
        req = urllib2.Request(url, data=post_data, headers={'Content-Type': 'text/Plain'})
        try:
            sc_get_req = urllib2.urlopen(req)
            content = sc_get_req.read()
            logging.debug("服务器返回值: {}".format(content))
        except urllib2.HTTPError, e:
            logging.error(e)
        except urllib2.URLError, e:
            logging.error(e)
        except Exception, e:
            logging.error(e)
    except Exception as e:
        logging.error(e, exc_info=True)
    try:
        root = ET.XML(content)
        lists = root.findall("Body/Respond/DepList/")  #在当前指定目录下遍历
        department =[]
        for list in lists:
            department.append(list.find("DepName").text)
            logging.debug("获取部门: {}".format(department))
        return  department
    except Exception as e:
        logging.error(e, exc_info=True)



if __name__ == '__main__':
    lists = query_unitinfo_request("192.168.28.88","8089")
    for list in lists:
        print list
Ejemplo n.º 28
0
def processFeeds(f, entries, offset, totalEntries):
    entryNumber = 0
    for entry in entries:
        #Buzz post starts
        f.write("<div class=\"buzz\">")

        #Process post date
        entryDate = entry.findall(namespace +
                                  'updated')[0].text.split('T')[0].split('-')
        prettyDate = "%s %s %s " % (entryDate[2], month_name[int(
            entryDate[1])], entryDate[0])
        f.write("<div class=\"postdate\"><b>" + prettyDate + "</b></div>")

        #Process post content
        post = entry.findall(namespace + 'content')[0].text
        f.write("<div class=\"post\">" + smart_str(post) + "</div>")

        #Process attachments
        f.write("<div class=\"attachments\">")
        attachments = entry.findall(namespace_activity +
                                    'object')[0].findall(namespace_buzz +
                                                         'attachment')

        for attachment in attachments:
            if attachment.findall(namespace + 'title'):
                title = attachment.findall(namespace +
                                           'title')[0].text + "<br>"
            else:
                title = ""
            if attachment.findall(namespace + 'content'):
                content = attachment.findall(namespace + 'content')[0].text
            else:
                content = ""
            href = ""
            links = attachment.findall(namespace + 'link')
            for link in links:
                if link.get('rel') == 'alternate':
                    href = link.get('href')
            f.write(
                "<div class=\"attachment\"><b><a href=\"%s\">%s</a></b>%s</div>"
                % (href, smart_str(title), smart_str(content)))
        f.write("</div")
        #Process Links
        postId = entry.findall(namespace + 'id')[0].text
        links = entry.findall(namespace + 'link')
        f.write("<div class=\"links\">")

        #Process likers feed
        for link in links:
            if link.get('rel') == (namespace_buzz[1:-1] + '#liked'):
                likeFeedUrl = 'http' + link.get(
                    'href')[5:] + '&max-results=100&bhu'
                rawLikedFeed = readFeed(likeFeedUrl)
                likersTree = ET.XML(rawLikedFeed)
                likeCounter = 0
                totalLikes = link.get(namespace_buzz + 'count')
                if int(totalLikes) != 0:
                    f.write(
                        "<div class=\"likers\"><strong>Liked by</strong> [%s]: "
                        % totalLikes)
                    likers = likersTree.findall(namespace_likers + 'entry')
                    for liker in likers:
                        likerName = liker.findall(namespace_likers +
                                                  'displayName')[0].text
                        likerUri = liker.findall(namespace_likers +
                                                 'profileUrl')[0].text
                        f.write("<a href=\"%s\">%s</a>" %
                                (likerUri, smart_str(likerName)))
                        likeCounter += 1
                        if likeCounter != len(likers):
                            f.write(", ")
                    f.write("</div>")

        #Process comment feed
        for link in links:
            if link.get('rel') == 'replies':
                replyCount = link.get(namespace_thr + 'count')
                if int(replyCount) != 0:
                    f.write(
                        "<div style=\"color:#111;\" class=\"comments\"><b>Comments(%s)</b><br>"
                        % replyCount)
                    replyFeedUrl = 'http' + link.get('href')[5:] + '&bhu'
                    rawReplyFeed = readFeed(replyFeedUrl)
                    comments = ET.XML(rawReplyFeed).findall(namespace +
                                                            'entry')
                    for comment in comments:
                        f.write(
                            "<div style=\"margin-left:20px;\" class=\"comment\">"
                        )
                        reply = comment.findall(namespace + 'content')[0].text
                        author = comment.findall(namespace + 'author')[0]
                        authorName = author.findall(namespace + 'name')[0].text
                        authorUri = author.findall(namespace + 'uri')[0].text
                        commentDate = entry.findall(namespace +
                                                    'updated')[0].text.split(
                                                        'T')[0].split('-')
                        prettyDate = "%s %s %s " % (commentDate[2], month_name[
                            int(commentDate[1])], commentDate[0])
                        f.write("<a href=\"%s\">%s</a> (%s) - %s" %
                                (authorUri, smart_str(authorName), prettyDate,
                                 smart_str(reply)))
                        f.write("</div>")
                    f.write("</div>")
        f.write("</div></div><br>")
        entryNumber += 1
        print "Progress: (%d/%d) %d%%" % (entryNumber + offset, totalEntries,
                                          (entryNumber + offset) * 100 /
                                          totalEntries)
Ejemplo n.º 29
0
	def readXML(self, typ):
		"""Fetch and parse the softcam web-interface XML for one view.

		typ: "s" (servers: reader + proxy clients), "c" (clients) or
		"l" (log).  For "l" the request appends the log to the status
		page and the raw log text is split into lines; otherwise the
		<client> elements are grouped by their "type" attribute.

		Returns a list of display tuples (or log lines) on success;
		on failure returns the error payload from openWebIF.
		"""
		if typ == "l":
			# Log view: ask the webif to append the log to the status page.
			self.showLog = True
			part = "status&appendlog=1"
		else:
			self.showLog = False
			part = None
		result = self.openWebIF(part)
		retval = []
		tmp = {}
		# result[0] is the success flag, result[1] the response body.
		if result[0]:
			if not self.showLog:
				data = ElementTree.XML(result[1])
#				if typ=="version":
#					if "version" in data.attrib:
#						self.version = data.attrib["version"]
#					else:
#						self.version = "n/a"
#					return self.version
				status = data.find("status")
				clients = status.findall("client")
				for cl in clients:
					name = cl.attrib["name"]
					proto = cl.attrib["protocol"]
					# "au" (auto-update) attribute is optional.
					if "au" in cl.attrib:
						au = cl.attrib["au"]
					else:
						au = ""
					caid = cl.find("request").attrib["caid"]
					srvid = cl.find("request").attrib["srvid"]
					if "ecmtime" in cl.find("request").attrib:
						ecmtime = cl.find("request").attrib["ecmtime"]
						if ecmtime == "0" or ecmtime == "":
							ecmtime = _("n/a")
						else:
							# Milliseconds -> seconds, trimmed to 5 chars.
							ecmtime = str(float(ecmtime) / 1000)[:5]
					else:
						ecmtime = "not available"
					srvname = cl.find("request").text
					if srvname is not None:
						# Presumably "provider:service" — keep the part
						# after the first colon; TODO confirm format.
						if ":" in srvname:
							srvname_short = srvname.split(":")[1].strip()
						else:
							srvname_short = srvname
					else:
						srvname_short = _("n/a")
					login = cl.find("times").attrib["login"]
					online = cl.find("times").attrib["online"]
					# Local dvbapi clients have no meaningful IP.
					if proto.lower() == "dvbapi":
						ip = ""
					else:
						ip = cl.find("connection").attrib["ip"]
						if ip == "0.0.0.0":
							ip = ""
					port = cl.find("connection").attrib["port"]
					connstatus = cl.find("connection").text
					if name != "" and name != "anonymous" and proto != "":
						# Group display rows by client type ("r"/"p"/"c"),
						# creating the list on first use.
						try:
							tmp[cl.attrib["type"]].append((name, proto, "%s:%s" % (caid, srvid), srvname_short, ecmtime, ip, connstatus))
						except KeyError:
							tmp[cl.attrib["type"]] = []
							tmp[cl.attrib["type"]].append((name, proto, "%s:%s" % (caid, srvid), srvname_short, ecmtime, ip, connstatus))
			else:
				# Log view: wrap the log body in CDATA unless the server
				# already did, so ElementTree accepts raw log characters.
				# NOTE(review): the membership test uses a bytes literal
				# while replace() uses str — result[1]'s type (bytes vs
				# str) decides which branch works; verify against openWebIF.
				if b"<![CDATA" not in result[1]:
					tmp = result[1].replace("<log>", "<log><![CDATA[").replace("</log>", "]]></log>")
				else:
					tmp = result[1]
				data = ElementTree.XML(tmp)
				log = data.find("log")
				logtext = log.text
			if typ == "s":
				# Servers view: readers ("r") then proxies ("p").
				if "r" in tmp:
					for i in tmp["r"]:
						retval.append(i)
				if "p" in tmp:
					for i in tmp["p"]:
						retval.append(i)
			elif typ == "c":
				if "c" in tmp:
					for i in tmp["c"]:
						retval.append(i)
			elif typ == "l":
				tmp = logtext.split("\n")
				retval = []
				for i in tmp:
					tmp2 = i.split(" ")
					# Drop the third whitespace-separated field of each
					# log line (presumably a redundant timestamp part —
					# TODO confirm against the oscam log format) and
					# re-join the rest.
					if len(tmp2) > 2:
						del tmp2[2]
						txt = ""
						for j in tmp2:
							txt += "%s " % j.strip()
						retval.append(txt)

			return retval

		else:
			# Request failed — propagate the error payload.
			return result[1]
Ejemplo n.º 30
0
 def test_can_be_created_from_xml(self):
     xml = ElementTree.XML(self.data['palette.xml'])
     base = cl.Base.from_xml(xml)