Code example #1
File: test_builders.py  Project: mrjmad/creme_crm
    def test_myschema_xsd02(self):
        "With Document."
        body_map = {
            'user_id': 1, 'title': '',
            'description': '', 'linked_folder': '', 'filedata': '',
        }
        backend = self._get_backend(DocumentFakeBackend, subject='create_doc',
                                    body_map=body_map, model=Document
                                   )
        builder = self._get_builder(backend)
        xsd = '{http://www.w3.org/2001/XMLSchema}'

        content = builder._render_myschema_xsd(self.request)
        xml = XML(content)

        self.assertEqual(builder.namespace, xml.get('targetNamespace'))
        self.assertEqual(
            builder.namespace,
            re.search(r'xmlns:my="(?P<ns>[\w\d\-:/\.]*)"', content)['ns']
        )  # Cannot be retrieved with ElementTree, because it is a namespace declaration

        ref_attrs = {
            node.get('ref')
            for node in xml.findall(f'{xsd}element/{xsd}complexType/{xsd}sequence/{xsd}element')
        }
        expected_ref_attrs = {f'my:{key}' for key in body_map}
        self.assertEqual(expected_ref_attrs, ref_attrs)

        xsd_elements = {
            'CremeCRMCrudity': {'name': 'CremeCRMCrudity'},
            # <xsd:element name="user_id" type="xsd:integer"/>
            'user_id': {'name': 'user_id', 'type': 'xsd:integer'},
            # <xsd:element name="first_name" type="xsd:requiredString"/>
            'title':   {'name': 'title', 'type': 'my:requiredString'},
            # <xsd:element name="description">
            #   <xsd:complexType mixed="true">
            #       <xsd:sequence>
            #           <xsd:any minOccurs="0" maxOccurs="unbounded"
            #                    namespace="http://www.w3.org/1999/xhtml" processContents="lax"/>
            #       </xsd:sequence>
            #   </xsd:complexType>
            # </xsd:element>
            'description': {'name': 'description'},
            'linked_folder': {'name': 'linked_folder', 'type': 'xsd:integer'},
            'filedata':    {'name': 'filedata', 'type': 'my:requiredBase64Binary'},
        }

        for element_node in xml.findall(f'{xsd}element'):
            xsd_element_attrs = xsd_elements.pop(element_node.get('name'), None)

            if xsd_element_attrs is None:
                self.fail(
                    f'There is at least an extra node named: {element_node.get("name")}'
                )

            self.assertSetEqual({*xsd_element_attrs.keys()}, {*element_node.keys()})

            for attr in element_node.keys():
                self.assertEqual(xsd_element_attrs[attr], element_node.get(attr))

        self.assertFalse(
            xsd_elements,
            f'The elements with the following names have not been found: {xsd_elements}'
        )
Code example #2
File: config.py  Project: pezra/openmicroscopy
class ConfigXml(object):
    """
    dict-like wrapper around the config.xml file usually stored
    in etc/grid. For a copy of the dict, use "as_map"
    """
    KEY = "omero.config.version"
    VERSION = "4.2.1"
    INTERNAL = "__ACTIVE__"
    DEFAULT = "omero.config.profile"
    IGNORE = (KEY, DEFAULT)

    def __init__(self, filename, env_config = None, exclusive = True):
        self.logger = logging.getLogger(self.__class__.__name__)    #: Logs to the class name
        self.XML = None                                             #: Parsed XML Element
        self.filename = filename                                    #: Path to the file to be read and written
        self.env_config = Environment(env_config)                   #: Environment override
        self.exclusive = exclusive                                  #: Whether or not an exclusive lock should be acquired
        self.save_on_close = True

        try:
            # Try to open the file for modification
            # If this fails, then the file is readonly
            self.source = open(filename, "a+")                      #: Open file handle
            self.lock = self._open_lock()                           #: Open file handle for lock
        except IOError:
            self.logger.debug("open('%s', 'a+') failed" % filename)
            self.lock = None
            self.exclusive = False
            self.save_on_close = False

            # Before we open the file read-only we need to check
            # that no other configuration has been requested because
            # it will not be possible to modify the __ACTIVE__ setting
            # once it's read-only
            val = self.env_config.is_non_default()
            if val is not None:
                raise Exception("Non-default OMERO_CONFIG on read-only: %s" % val)

            self.source = open(filename, "r")                       #: Open file handle read-only

        if self.exclusive:  # must be "a+"
            try:
                portalocker.lock(self.lock, portalocker.LOCK_NB|portalocker.LOCK_EX)
            except portalocker.LockException, le:
                self.lock = None # Prevent deleting of the file
                self.close()
                raise

        self.source.seek(0)
        text = self.source.read()

        if text:
            self.XML = XML(text)
            try:
                self.version_check()
            except:
                self.close()
                raise

        # Nothing defined, so create a new tree
        if self.XML is None:
            default = self.default()
            self.XML = Element("icegrid")
            properties = SubElement(self.XML, "properties", id=self.INTERNAL)
            _ = SubElement(properties, "property", name=self.DEFAULT, value=default)
            _ = SubElement(properties, "property", name=self.KEY, value=self.VERSION)
            properties = SubElement(self.XML, "properties", id=default)
            _ = SubElement(properties, "property", name=self.KEY, value=self.VERSION)
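A short usage sketch for the dict-like wrapper above. Only as_map() (named in the docstring) and close() (called internally) appear in this excerpt; the import path, the property key, and the item-assignment syntax are assumptions based on the "dict-like" description, not code from the project.

from omero.config import ConfigXml  # import path assumed from the file/project name

cfg = ConfigXml("etc/grid/config.xml")
try:
    cfg["omero.data.dir"] = "/OMERO"   # hypothetical property key
    snapshot = cfg.as_map()            # plain dict copy of the current profile
finally:
    cfg.close()                        # releases the lock and, if permitted, saves changes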
Code example #3
    def get_nightly_binary_path(self, nightly_date):
        if nightly_date is None:
            return
        if not nightly_date:
            print(
                "No nightly date has been provided although the --nightly or -n flag has been passed."
            )
            sys.exit(1)
        # Will allow us to fetch the relevant builds from the nightly repository
        os_prefix = "linux"
        if is_windows():
            os_prefix = "windows-msvc"
        if is_macosx():
            os_prefix = "mac"
        nightly_date = nightly_date.strip()
        # Fetch the filename to download from the build list
        repository_index = NIGHTLY_REPOSITORY_URL + "?list-type=2&prefix=nightly"
        req = urllib.request.Request("{}/{}/{}".format(repository_index,
                                                       os_prefix,
                                                       nightly_date))
        try:
            response = urllib.request.urlopen(req).read()
            tree = XML(response)
            namespaces = {'ns': tree.tag[1:tree.tag.index('}')]}
            file_to_download = tree.find('ns:Contents', namespaces).find(
                'ns:Key', namespaces).text
        except urllib.error.URLError as e:
            print(
                "Could not fetch the available nightly versions from the repository : {}"
                .format(e.reason))
            sys.exit(1)
        except AttributeError:
            print(
                "Could not fetch a nightly version for date {} and platform {}"
                .format(nightly_date, os_prefix))
            sys.exit(1)

        nightly_target_directory = path.join(self.context.topdir, "target")
        # ':' is not an authorized character for a file name on Windows
        # make sure the OS specific separator is used
        target_file_path = file_to_download.replace(':', '-').split('/')
        destination_file = os.path.join(nightly_target_directory,
                                        os.path.join(*target_file_path))
        # Once extracted, the nightly folder name is the tar name without the extension
        # (eg /foo/bar/baz.tar.gz extracts to /foo/bar/baz)
        destination_folder = os.path.splitext(destination_file)[0]
        nightlies_folder = path.join(nightly_target_directory, 'nightly',
                                     os_prefix)

        # Make sure the target directory exists
        if not os.path.isdir(nightlies_folder):
            print(
                "The nightly folder for the target does not exist yet. Creating {}"
                .format(nightlies_folder))
            os.makedirs(nightlies_folder)

        # Download the nightly version
        if os.path.isfile(path.join(nightlies_folder, destination_file)):
            print("The nightly file {} has already been downloaded.".format(
                destination_file))
        else:
            print("The nightly {} does not exist yet, downloading it.".format(
                destination_file))
            download_file(destination_file,
                          NIGHTLY_REPOSITORY_URL + file_to_download,
                          destination_file)

        # Extract the downloaded nightly version
        if os.path.isdir(destination_folder):
            print("The nightly folder {} has already been extracted.".format(
                destination_folder))
        else:
            self.extract_nightly(nightlies_folder, destination_folder,
                                 destination_file)

        return self.get_executable(destination_folder)
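The namespace handling above (slicing the root element's qualified tag to recover the namespace URI, then passing it as a prefix map to find()) can be exercised on its own. The listing below is a made-up S3-style document; only the slicing technique is taken from the code above.

from xml.etree.ElementTree import XML

listing = XML(
    '<ListBucketResult xmlns="http://example.com/s3-listing">'
    '<Contents><Key>nightly/linux/2020-01-01/servo.tar.gz</Key></Contents>'
    '</ListBucketResult>'
)
# listing.tag is "{http://example.com/s3-listing}ListBucketResult";
# strip the surrounding braces to recover the namespace URI.
namespaces = {'ns': listing.tag[1:listing.tag.index('}')]}
key = listing.find('ns:Contents', namespaces).find('ns:Key', namespaces).text
print(key)  # nightly/linux/2020-01-01/servo.tar.gz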
Code example #4
File: DBFix.py  Project: deslona/evemu_personal
        typeID = typeID_List[index]
        typeID_Progress = 100.0 * ((1.0 * index) / typeID_List_length)

        if (verbose == 1):
            print "Getting eve-central data for typeID ", typeID_List[
                index], " - ", typeName_List[index]
        contents = eve_central_query_market_data(regionID, option)
        if (verbose == 1):
            print "Response from eve-central:"
            print contents
            print "END RESPONSE"

        if (contents[0:5] == "<?xml"):
            # Parse the XML response from EVE-Central
            # See usage here for ElementTree:  http://effbot.org/zone/element-index.htm
            tree = XML(contents)  # From a string
            if (verbose == 1):
                print "XML received from eve-central:"
                print tree
            #print "XML Element count = ", len(tree)

            # the tree root is the top-level element of the market data document
            tree_map = dict((c, p) for p in tree.getiterator() for c in p)
            root = tree

            for c in root.getchildren():
                if (verbose == 1):
                    print c.tag
                for d in c.getchildren():
                    if (verbose == 1):
                        print "    ", d.tag
Code example #5
 def add_filter_string(self, filter_str):
     filter = XML(filter_str)
     self.defs.append(filter)
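A self-contained way to exercise this method; the FakeDrawing stand-in and the SVG blur markup are illustrative only, and all the snippet itself guarantees is that the parsed element is appended to self.defs.

from xml.etree.ElementTree import XML

class FakeDrawing:
    """Minimal stand-in exposing the same add_filter_string contract."""
    def __init__(self):
        self.defs = []

    def add_filter_string(self, filter_str):
        self.defs.append(XML(filter_str))

d = FakeDrawing()
d.add_filter_string(
    '<filter id="blur"><feGaussianBlur in="SourceGraphic" stdDeviation="2"/></filter>'
)
print(d.defs[0].tag, d.defs[0].get('id'))  # filter blur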
Code example #6
File: soap.py  Project: venkatesh22/sap_sf
    def updatequote(self, quote_id):
        logging.info("SAP is sending quote")
        logging.info("CONNECTING TO SALESFORCE PARTNER WSDL FOR SESSION ID")
        url = "https://login.salesforce.com/services/Soap/u/28.0"

        data = """<?xml version="1.0" encoding="UTF-8"?>
                <soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:urn="urn:partner.soap.sforce.com">
                   <soapenv:Header>
                        <urn:CallOptions>
                            <urn:client></urn:client>
                            <urn:defaultNamespace></urn:defaultNamespace>
                        </urn:CallOptions>
                        <urn:LoginScopeHeader>
                            <urn:organizationId></urn:organizationId>
                            <urn:portalId></urn:portalId>
                        </urn:LoginScopeHeader>
                   </soapenv:Header>
                   <soapenv:Body>
                      <urn:login>
                          <urn:username>{{username}}</urn:username>
                          <urn:password>{{password}}</urn:password>
                      </urn:login>
                   </soapenv:Body>
                </soapenv:Envelope>"""
        t = Template(data)
        c = Context({
            #            "username": "******",
            #            "password": "******"
            "username": "******",
            "password": "******"
        })
        data = t.render(c)

        logging.info("SENDING:")
        logging.info(data)

        headers = {
            'Content-Type': 'text/xml; charset=utf-8',
            'SOAPAction': 'https://login.salesforce.com/services/Soap/u/28.0'
        }
        #        httplib2.debuglevel = 1

        head = httplib2.Http()
        #    head.follow_all_redirects = True
        response, content = head.request(url, "POST", smart_str(data), headers)
        logging.info("########### SESSION ID response ###############%s" %
                     response)
        logging.info("########## SESSION ID content ############## \n %s" %
                     pretty(content))
        if response.get('status') == '200':
            logging.info("GOT THE SESSION ID FROM SALESFORCE")
            xml = XML(content)
            session_response = xml.find(
                "{http://schemas.xmlsoap.org/soap/envelope/}Body").getchildren(
                )[0]
            session_id = session_response[0][4].text
            quote_id_to_sf(session_id, quote_id)
        else:
            return content

        return "OK"
Code example #7
File: test_evaluation.py  Project: mr-c/galaxy
 def test_thresh_param(self):
     elem = XML('<param name="thresh" type="integer" value="5" />')
     return IntegerToolParameter(self, elem)
Code example #8
    def gather(self, all_ips):

        try:
            print "Grabbing list of TOR exit nodes.."
            req = urllib2.Request(
                'http://torstatus.blutmagie.de/ip_list_exit.php/Tor_ip_list_EXIT.csv'
            )
            req.add_header(
                'User-agent',
                'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
            )
            response = urllib2.urlopen(req)
            tor_response = response.read()
        except NameError:
            tor_response = "Not able to grab information"
        except urllib2.HTTPError:
            tor_response = "Not able to grab information"

        try:
            print "Grabbing attacker IP list from the Animus project..."
            req = urllib2.Request(
                'https://raw.githubusercontent.com/animus-project/threat_data/master/master_lists/all_ips_frequency.txt'
            )
            req.add_header(
                'User-agent',
                'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
            )
            response = urllib2.urlopen(req)
            animus_lines = response.read()
        except NameError:
            animus_lines = "Not able to grab information"
        except urllib2.HTTPError:
            animus_lines = "Not able to grab information"

        try:
            print "Grabbing EmergingThreats list..."
            req = urllib2.Request(
                'http://rules.emergingthreats.net/blockrules/compromised-ips.txt'
            )
            req.add_header(
                'User-agent',
                'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
            )
            response = urllib2.urlopen(req)
            ethreats_response = response.read()
        except NameError:
            ethreats_response = "Not able to grab information"
        except urllib2.HTTPError:
            ethreats_response = "Not able to grab information"

        try:
            print "Grabbing AlienVault reputation list..."
            req = urllib2.Request(
                'http://reputation.alienvault.com/reputation.data')
            req.add_header(
                'User-agent',
                'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36'
            )
            response = urllib2.urlopen(req)
            alientvault_resp = response.read()
        except NameError:
            alientvault_resp = "Not able to grab information"
        except urllib2.HTTPError:
            alientvault_resp = "Not able to grab information"

        try:
            print "Grabbing Blocklist.de info..."
            req = urllib2.Request(
                'http://www.blocklist.de/lists/bruteforcelogin.txt')
            req.add_header(
                'User-agent',
                'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
            )
            response = urllib2.urlopen(req)
            blocklist_resp = response.read()
        except NameError:
            blocklist_resp = "Not able to grab information"
        except urllib2.HTTPError:
            blocklist_resp = "Not able to grab information"

        try:
            print "Grabbing DragonResearch's SSH list..."
            req = urllib2.Request(
                'http://dragonresearchgroup.org/insight/sshpwauth.txt')
            req.add_header(
                'User-agent',
                'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
            )
            response = urllib2.urlopen(req)
            drag_ssh_resp = response.read()
        except NameError:
            drag_ssh_resp = "Not able to grab information"
        except urllib2.HTTPError:
            drag_ssh_resp = "Not able to grab information"

        try:
            print "Grabbing DragonResearch's VNC list..."
            req = urllib2.Request(
                'http://dragonresearchgroup.org/insight/vncprobe.txt')
            req.add_header(
                'User-agent',
                'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
            )
            response = urllib2.urlopen(req)
            drag_vnc_resp = response.read()
        except NameError:
            drag_vnc_resp = "Not able to grab information"
        except urllib2.HTTPError:
            drag_vnc_resp = "Not able to grab information"

        #try:
        #    print "Grabbing OpenBlock IP list..."
        #    req = urllib2.Request('http://www.openbl.org/lists/date_all.txt')
        #    req.add_header(
        #        'User-agent', 'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0')
        #    response = urllib2.urlopen(req)
        #    openblock_resp = response.read()
        #except NameError:
        #    openblock_resp = "Not able to grab information"
        #except urllib2.HTTPError:
        #    openblock_resp = "Not able to grab information"

        try:
            print "Grabbing NoThinkMalware list..."
            req = urllib2.Request(
                'http://www.nothink.org/blacklist/blacklist_malware_http.txt')
            req.add_header(
                'User-agent',
                'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
            )
            response = urllib2.urlopen(req)
            ntmalware_resp = response.read()
        except NameError:
            ntmalware_resp = "Not able to grab information"
        except urllib2.HTTPError:
            ntmalware_resp = "Not able to grab information"

        try:
            print "Grabbing NoThinkSSH list..."
            req = urllib2.Request(
                'http://www.nothink.org/blacklist/blacklist_ssh_all.txt')
            req.add_header(
                'User-agent',
                'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
            )
            response = urllib2.urlopen(req)
            ntssh_resp = response.read()
        except NameError:
            ntssh_resp = "Not able to grab information"
        except urllib2.HTTPError:
            ntssh_resp = "Not able to grab information"

        try:
            print "Grabbing Feodo list..."
            req = urllib2.Request(
                'https://feodotracker.abuse.ch/blocklist/?download=ipblocklist'
            )
            req.add_header(
                'User-agent',
                'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
            )
            response = urllib2.urlopen(req)
            feodo_resp = response.read()
        except NameError:
            feodo_resp = "Not able to grab information"
        except urllib2.HTTPError:
            feodo_resp = "Not able to grab information"

        try:
            print "Grabbing antispam spam list..."
            req = urllib2.Request('http://antispam.imp.ch/spamlist')
            req.add_header(
                'User-agent',
                'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
            )
            response = urllib2.urlopen(req)
            antispam_resp = response.read()
        except NameError:
            antispam_resp = "Not able to grab information"
        except urllib2.HTTPError:
            antispam_resp = "Not able to grab information"

        try:
            print "Grabbing malc0de list..."
            req = urllib2.Request('http://malc0de.com/bl/IP_Blacklist.txt')
            req.add_header(
                'User-agent',
                'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
            )
            response = urllib2.urlopen(req)
            malc0de_resp = response.read()
        except NameError:
            malc0de_resp = "Not able to grab information"
        except urllib2.HTTPError:
            malc0de_resp = "Not able to grab information"

        try:
            print "Grabbing MalwareBytes list..."
            req = urllib2.Request('http://hosts-file.net/rss.asp')
            req.add_header(
                'User-agent',
                'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
            )
            response = urllib2.urlopen(req)
            malbytes_resp = response.read()
        except NameError:
            malbytes_resp = "Not able to grab information"
        except urllib2.HTTPError:
            malbytes_resp = "Not able to grab information"

        MISP_authkey = ""
        MISP_address = "https://misppriv.circl.lu"

        if MISP_authkey == "" or MISP_address == "":
            print "You did not define your MISP server address or Authorization Key"
        else:
            try:
                print "Grabbing MISP ip-dst @ " + MISP_address
                req = urllib2.Request(MISP_address +
                                      '/attributes/text/download/ip-dst')
                req.add_header(
                    'User-agent',
                    'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
                )
                req.add_header('Authorization', MISP_authkey)
                response = urllib2.urlopen(req)
                MISP_ip_dst_resp = response.read()
            except NameError:
                MISP_ip_dst_resp = "Not able to grab information"
            except urllib2.HTTPError:
                MISP_ip_dst_resp = "Not able to grab information"

            try:
                print "Grabbing MISP ip-src @ " + MISP_address
                req = urllib2.Request(MISP_address +
                                      '/attributes/text/download/ip-src')
                req.add_header(
                    'User-agent',
                    'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
                )
                req.add_header('Authorization', MISP_authkey)
                response = urllib2.urlopen(req)
                MISP_ip_src_resp = response.read()
            except NameError:
                MISP_ip_src_resp = "Not able to grab information"
            except urllib2.HTTPError:
                MISP_ip_src_resp = "Not able to grab information"

        for path, incoming_ip_obj in all_ips.iteritems():

            if incoming_ip_obj[0].ip_address != "":

                if incoming_ip_obj[0].tor_exit is "":
                    if incoming_ip_obj[0].ip_address in tor_response:
                        incoming_ip_obj[0].tor_exit = True
                    else:
                        incoming_ip_obj[0].tor_exit = False

                if incoming_ip_obj[0].animus_data is "":
                    if incoming_ip_obj[0].ip_address in animus_lines:
                        incoming_ip_obj[0].animus_data = True
                    else:
                        incoming_ip_obj[0].animus_data = False

                if incoming_ip_obj[0].emerging_threat is "":
                    if incoming_ip_obj[0].ip_address in ethreats_response:
                        incoming_ip_obj[0].emerging_threat = True
                    else:
                        incoming_ip_obj[0].emerging_threat = False

                if incoming_ip_obj[0].in_alienv is "":
                    if incoming_ip_obj[0].ip_address in alientvault_resp:
                        incoming_ip_obj[0].in_alienv = True
                    else:
                        incoming_ip_obj[0].in_alienv = False

                if incoming_ip_obj[0].blocklist_de is "":
                    if incoming_ip_obj[0].ip_address in blocklist_resp:
                        incoming_ip_obj[0].blocklist_de = True
                    else:
                        incoming_ip_obj[0].blocklist_de = False

                if incoming_ip_obj[0].dragon_ssh is "":
                    if incoming_ip_obj[0].ip_address in drag_ssh_resp:
                        incoming_ip_obj[0].dragon_ssh = True
                    else:
                        incoming_ip_obj[0].dragon_ssh = False

                if incoming_ip_obj[0].dragon_vnc is "":
                    if incoming_ip_obj[0].ip_address in drag_vnc_resp:
                        incoming_ip_obj[0].dragon_vnc = True
                    else:
                        incoming_ip_obj[0].dragon_vnc = False

                #if incoming_ip_obj[0].openblock is "":
                #    if incoming_ip_obj[0].ip_address in openblock_resp:
                #        incoming_ip_obj[0].openblock = True
                #    else:
                #        incoming_ip_obj[0].openblock = False

                if incoming_ip_obj[0].nothink_malware is "":
                    if incoming_ip_obj[0].ip_address in ntmalware_resp:
                        incoming_ip_obj[0].nothink_malware = True
                    else:
                        incoming_ip_obj[0].nothink_malware = False

                if incoming_ip_obj[0].nothink_ssh is "":
                    if incoming_ip_obj[0].ip_address in ntssh_resp:
                        incoming_ip_obj[0].nothink_ssh = True
                    else:
                        incoming_ip_obj[0].nothink_ssh = False

                if incoming_ip_obj[0].feodo is "":
                    if incoming_ip_obj[0].ip_address in feodo_resp:
                        incoming_ip_obj[0].feodo = True
                    else:
                        incoming_ip_obj[0].feodo = False

                if incoming_ip_obj[0].antispam is "":
                    if incoming_ip_obj[0].ip_address in antispam_resp:
                        incoming_ip_obj[0].antispam = True
                    else:
                        incoming_ip_obj[0].antispam = False

                if incoming_ip_obj[0].malc0de is "":
                    if incoming_ip_obj[0].ip_address in malc0de_resp:
                        incoming_ip_obj[0].malc0de = True
                    else:
                        incoming_ip_obj[0].malc0de = False

                if incoming_ip_obj[0].malwarebytes is "":
                    if incoming_ip_obj[0].ip_address in malbytes_resp:
                        incoming_ip_obj[0].malwarebytes = True
                    else:
                        incoming_ip_obj[0].malwarebytes = False

                if incoming_ip_obj[0].MISP_ip_dst is "":
                    if MISP_authkey and MISP_address:
                        if incoming_ip_obj[0].ip_address in MISP_ip_dst_resp:
                            incoming_ip_obj[0].MISP_ip_dst = True
                        else:
                            incoming_ip_obj[0].MISP_ip_dst = False

                if incoming_ip_obj[0].MISP_ip_src is "":
                    if MISP_authkey and MISP_address:
                        if incoming_ip_obj[0].ip_address in MISP_ip_src_resp:
                            incoming_ip_obj[0].MISP_ip_src = True
                        else:
                            incoming_ip_obj[0].MISP_ip_src = False

                try:
                    print "Checking stopforumspam for " + incoming_ip_obj[
                        0].ip_address
                    req = urllib2.Request(
                        'http://api.stopforumspam.org/api?ip=' +
                        incoming_ip_obj[0].ip_address)
                    req.add_header(
                        'User-agent',
                        'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
                    )
                    response = urllib2.urlopen(req)
                    xmlresponse = response.read()
                    stopforumspam_resp = XML(xmlresponse).find('appears').text
                except NameError:
                    stopforumspam_resp = "Not able to grab information"
                except urllib2.HTTPError:
                    stopforumspam_resp = "Not able to grab information"
                if incoming_ip_obj[0].stopforumspam is "":
                    if stopforumspam_resp == "yes":
                        incoming_ip_obj[0].stopforumspam = True
                    else:
                        incoming_ip_obj[0].stopforumspam = False

        return
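Every feed above is fetched with an almost identical request/try/except block. A hedged refactoring sketch in the same Python 2 idiom as the snippet is shown below; the helper name, the exception tuple, and the two example calls are mine, not the project's.

import urllib2

USER_AGENT = 'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'

def fetch_list(url, extra_headers=None):
    # Fetch one reputation feed; fall back to the same placeholder string on failure.
    try:
        req = urllib2.Request(url)
        req.add_header('User-agent', USER_AGENT)
        for name, value in (extra_headers or {}).items():
            req.add_header(name, value)
        return urllib2.urlopen(req).read()
    except (urllib2.HTTPError, urllib2.URLError):
        return "Not able to grab information"

tor_response = fetch_list('http://torstatus.blutmagie.de/ip_list_exit.php/Tor_ip_list_EXIT.csv')
ethreats_response = fetch_list('http://rules.emergingthreats.net/blockrules/compromised-ips.txt')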
Code example #9

def show_node(node):
    print(node.tag)
    if node.text is not None and node.text.strip():
        print('  text: "%s"' % node.text)
    if node.tail is not None and node.tail.strip():
        print('  tail: "%s"' % node.tail)
    for name, value in sorted(node.attrib.items()):
        print('  %-4s = "%s"' % (name, value))
    for child in node:
        show_node(child)


parsed = XML('''
<root>
  <group>
    <child id="a">This is child "a".</child>
    <child id="b">This is child "b".</child>
  </group>
  <group>
    <child id="c">This is child "c".</child>
  </group>
</root>
''')

print('parsed =', parsed)

for elem in parsed:
    show_node(elem)
Code example #10
File: test_builders.py  Project: mrjmad/creme_crm-2.1
    def test_myschema_xsd01(self):
        body_map = {'user_id':     1,
                    'first_name':  '',
                    'last_name':   '',
                    'email':       '*****@*****.**',
                    'description': '',
                    'birthday':    '',
                    'created':     '',  # TODO: ignore this (editable=False)
                    'url_site':    '',
                    'image':       '',
                    'language':    '',
                   }
        backend = self._get_backend(ContactFakeBackend, subject='create_contact',
                                    body_map=body_map, model=Contact,
                                   )
        builder = self._get_builder(backend)
        xsd = '{http://www.w3.org/2001/XMLSchema}'

        content = builder._render_myschema_xsd(self.request)
        xml = XML(content)

        self.assertEqual(builder.namespace, xml.get('targetNamespace'))
        self.assertEqual(builder.namespace,
                         re.search(r'xmlns:my="(?P<ns>[\w\d\-:/\.]*)"', content).groupdict()['ns']
                        )  # Cannot be retrieved with ElementTree, because it is a namespace declaration

        ref_attrs = {node.get('ref')
                      for node in xml.findall('{xsd}element/{xsd}complexType/{xsd}sequence/{xsd}element'.format(xsd=xsd))
                    }
        # chain() because language_value is not declared in body_map, only language has to (m2m)
        expected_ref_attrs = {'my:{}'.format(key) for key in chain(body_map, ['language_value'])}
        self.assertEqual(expected_ref_attrs, ref_attrs)

        xsd_elements = {
            'CremeCRMCrudity': {'name': 'CremeCRMCrudity'},

            # <xsd:element name="user_id" type="xsd:integer"/>
            'user_id': {'name': 'user_id', 'type': 'xsd:integer'},

            # # <xsd:element name="is_actived" type="xsd:boolean"/>
            # 'is_actived': {'name': 'is_actived', 'type': 'xsd:boolean'},

            # TODO: check if my:requiredString accepts empty strings
            # # <xsd:element name="first_name" type="xsd:string"/>
            # 'first_name': {'name': 'first_name', 'type': 'xsd:string'},
            # <xsd:element name="first_name" type="my:requiredString"/>
            'first_name': {'name': 'first_name', 'type': 'my:requiredString'},

            # <xsd:element name="last_name" type="xsd:requiredString"/>
            'last_name': {'name': 'last_name', 'type': 'my:requiredString'},

            # TODO: check if my:requiredString accepts empty strings
            # # <xsd:element name="email" type="xsd:string"/>
            # 'email': {'name': 'email', 'type': 'xsd:string'},
            # <xsd:element name="email" type="my:requiredString"/>
            'email': {'name': 'email', 'type': 'my:requiredString'},

            # <xsd:element name="description">
            #   <xsd:complexType mixed="true">
            #     <xsd:sequence>
            #       <xsd:any minOccurs="0" maxOccurs="unbounded"
            #                namespace="http://www.w3.org/1999/xhtml" processContents="lax"/>
            #     </xsd:sequence>
            #   </xsd:complexType>
            # </xsd:element>
            'description': {'name': 'description'},

            # <xsd:element name="birthday" nillable="true" type="xsd:date"/>
            'birthday': {'name': 'birthday', 'type': 'xsd:date', 'nillable': 'true'},

            # <xsd:element name="created" type="xsd:dateTime"/>
            'created': {'name': 'created', 'type': 'xsd:dateTime'},

            # TODO: check if my:requiredAnyURI accepts empty strings
            # 'url_site':       {'name': 'url_site', 'type': 'xsd:anyURI'},
            'url_site':       {'name': 'url_site', 'type': 'my:requiredAnyURI'},

            'image':          {'name': 'image', 'type': 'xsd:base64Binary', 'nillable': 'true'},
            'language':       {'name': 'language'},
            'language_value': {'name': 'language_value', 'type': 'xsd:integer', 'nillable': 'true'},
        }

        for element_node in xml.findall('{}element'.format(xsd)):
            name = element_node.get('name')
            xsd_element_attrs = xsd_elements.get(name)

            if xsd_element_attrs is None:
                self.fail('There is at least an extra node named: {}'.format(name))

            self.assertSetEqual({*xsd_element_attrs.keys()}, {*element_node.keys()})

            for attr in element_node.keys():
                # self.assertEqual(xsd_element_attrs[attr], element_node.get(attr))
                # TODO: factorise
                expected = xsd_element_attrs[attr]
                value = element_node.get(attr)

                if expected != value:
                    self.fail('Value of attribute "{}" in node "{}" is wrong: expected "{}", got "{}".'.format(
                                    attr, name, expected, value,
                                )
                             )
Code example #11
File: test_wrappers.py  Project: katbeaulieu/galaxy-1
def test_input_value_wrapper(tool):
    parameter = IntegerToolParameter( tool, XML( '<param name="blah" type="integer" size="4" value="10" min="0" />' ) )
    wrapper = InputValueWrapper( parameter, "5" )
    assert str( wrapper ) == "5"
Code example #12
 def xmlparse(self, text):
     """ import the XML text into self.et_in  """
     return XML(text)
Code example #13
    def get_nightly_binary_path(self, nightly_date):
        if nightly_date is None:
            return
        if not nightly_date:
            print(
                "No nightly date has been provided although the --nightly or -n flag has been passed."
            )
            sys.exit(1)
        # Will allow us to fetch the relevant builds from the nightly repository
        os_prefix = "linux"
        if is_windows():
            os_prefix = "windows-msvc"
        if is_macosx():
            print("The nightly flag is not supported on mac yet.")
            sys.exit(1)
        nightly_date = nightly_date.strip()
        # Fetch the filename to download from the build list
        repository_index = NIGHTLY_REPOSITORY_URL + "?list-type=2&prefix=nightly"
        req = urllib2.Request("{}/{}/{}".format(repository_index, os_prefix,
                                                nightly_date))
        try:
            response = urllib2.urlopen(req).read()
            tree = XML(response)
            namespaces = {'ns': tree.tag[1:tree.tag.index('}')]}
            file_to_download = tree.find('ns:Contents', namespaces).find(
                'ns:Key', namespaces).text
        except urllib2.URLError as e:
            print(
                "Could not fetch the available nightly versions from the repository : {}"
                .format(e.reason))
            sys.exit(1)
        except AttributeError as e:
            print(
                "Could not fetch a nightly version for date {} and platform {}"
                .format(nightly_date, os_prefix))
            sys.exit(1)

        nightly_target_directory = path.join(self.context.topdir, "target")
        # ':' is not an authorized character for a file name on Windows
        # make sure the OS specific separator is used
        target_file_path = file_to_download.replace(':', '-').split('/')
        destination_file = os.path.join(nightly_target_directory,
                                        os.path.join(*target_file_path))
        # Once extracted, the nightly folder name is the tar name without the extension
        # (eg /foo/bar/baz.tar.gz extracts to /foo/bar/baz)
        destination_folder = os.path.splitext(destination_file)[0]
        nightlies_folder = path.join(nightly_target_directory, 'nightly',
                                     os_prefix)

        # Make sure the target directory exists
        if not os.path.isdir(nightlies_folder):
            print(
                "The nightly folder for the target does not exist yet. Creating {}"
                .format(nightlies_folder))
            os.makedirs(nightlies_folder)

        # Download the nightly version
        if os.path.isfile(path.join(nightlies_folder, destination_file)):
            print("The nightly file {} has already been downloaded.".format(
                destination_file))
        else:
            print("The nightly {} does not exist yet, downloading it.".format(
                destination_file))
            download_file(destination_file,
                          NIGHTLY_REPOSITORY_URL + file_to_download,
                          destination_file)

        # Extract the downloaded nightly version
        if os.path.isdir(destination_folder):
            print("The nightly file {} has already been extracted.".format(
                destination_folder))
        else:
            print("Extracting to {} ...".format(destination_folder))
            if is_windows():
                command = 'msiexec /a {} /qn TARGETDIR={}'.format(
                    os.path.join(nightlies_folder, destination_file),
                    destination_folder)
                if subprocess.call(command, stdout=PIPE, stderr=PIPE) != 0:
                    print(
                        "Could not extract the nightly executable from the msi package."
                    )
                    sys.exit(1)
            else:
                with tarfile.open(
                        os.path.join(nightlies_folder, destination_file),
                        "r") as tar:
                    tar.extractall(destination_folder)
        bin_folder = path.join(destination_folder, "servo")
        if is_windows():
            bin_folder = path.join(destination_folder, "PFiles",
                                   "Mozilla research", "Servo Tech Demo")
        return path.join(bin_folder, "servo{}".format(BIN_SUFFIX))
Code example #14
# ElementTree_extend_node_copy.py

from xml.etree.ElementTree import (Element, SubElement, tostring, XML)
from ElementTree_pretty import prettify

top = Element('top')

parent_a = SubElement(top, 'genitore', id='A')
parent_b = SubElement(top, 'genitore', id='B')

# Create children
children = XML('<root><figlio num="0" /><figlio num="1" />'
               '<figlio num="2" /></root>')

# Set the id to the Python object id of the node
# to make duplicates easier to spot.
for c in children:
    c.set('id', str(id(c)))

# Add to the first parent
parent_a.extend(children)

print('A:')
print(prettify(top))
print()

# Copy nodes to the second parent
parent_b.extend(children)

print('B:')
print(prettify(top))
Code example #15
File: xml-string-csv.py  Project: roytuts/python-csv
		<fr_site_limit>315000</fr_site_limit>
		<tiv_2011>315000</tiv_2011>
		<tiv_2012>315000</tiv_2012>
		<eq_site_deductible>265821.57</eq_site_deductible>
		<hu_site_deductible>0</hu_site_deductible>
		<fl_site_deductible>15750</fl_site_deductible>
		<fr_site_deductible>0</fr_site_deductible>
		<point_latitude>0</point_latitude>
		<point_longitude>30.118774</point_longitude>
		<line>-81.704613</line>
		<construction>Residential</construction>
		<point_granularity>Wood</point_granularity>
	</policy>
</policies>"""

parsed = XML(input_xml)

#print (parsed)

data = []

for policy in parsed:
    policyId = policy.find('policyId').text
    statecode = policy.find('statecode').text
    eq_site_limit = policy.find('eq_site_limit').text
    hu_site_limit = policy.find('hu_site_limit').text
    fl_site_limit = policy.find('fl_site_limit').text
    fr_site_limit = policy.find('fr_site_limit').text
    tiv_2011 = policy.find('tiv_2011').text
    tiv_2012 = policy.find('tiv_2012').text
    eq_site_deductible = policy.find('eq_site_deductible').text
Code example #16
File: utils.py  Project: raprasad/cga-worldmap
    # Step 6. Make sure our data always has a valid projection

    logger.info('>>> Step 6. Making sure [%s] has a valid projection' % name)
    check_projection(name, gs_resource)

    # Step 7. Create the style and assign it to the created resource
    # FIXME: Put this in gsconfig.py
    logger.info('>>> Step 7. Creating style for [%s]' % name)
    publishing = cat.get_layer(name)

    if 'sld' in files:
        f = open(files['sld'], 'r')
        sld = f.read()
        f.close()
        try:
            XML(sld)
        except Exception, e:
            msg = _('Your SLD file contains invalid XML')
            logger.warn("%s - %s" % (msg, str(e)))
            e.args = (msg, )
    else:
        sld = get_sld_for(publishing)

    if sld is not None:
        try:
            cat.create_style(name, sld)
        except geoserver.catalog.ConflictingDataError, e:
            msg = (_('There is already a style in GeoServer named ') + '"%s"' %
                   (name))
            logger.warn(msg)
            e.args = (msg, )
Code example #17
# Copyright (c) 2010 Doug Hellmann.  All rights reserved.
#
"""Creating XML documents with lists of nodes
"""
#end_pymotw_header

from xml.etree.ElementTree import Element, SubElement, tostring, XML
from ElementTree_pretty import prettify

top = Element('top')

parent_a = SubElement(top, 'parent', id='A')
parent_b = SubElement(top, 'parent', id='B')

# Create children
children = XML(
    '<root><child num="0" /><child num="1" /><child num="2" /></root>')

# Set the id to the Python object id of the node
# to make duplicates easier to spot.
for c in children:
    c.set('id', str(id(c)))

# Add to first parent
parent_a.extend(children)

print 'A:'
print prettify(top)
print

# Copy nodes to second parent
parent_b.extend(children)
Code example #18
File: recipe-498286.py  Project: kaestnja/pystrict3
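This excerpt never shows the _textlist helper that is patched onto _Element just below. Purely for context, here is a plausible stand-in (an assumption on my part, not the recipe's actual code): collect the element's text, each child's text, and each child's tail, in document order.

def _textlist(self):
    # Gather text and tail strings from this element and its descendants, in document order.
    parts = []
    if self.text:
        parts.append(self.text)
    for child in self:
        parts.extend(_textlist(child))
        if child.tail:
            parts.append(child.tail)
    return parts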
from xml.etree.ElementTree import _Element
_Element.textlist = _textlist


## ---------- Sample calls -----------

from xml.etree.ElementTree import XML
from textwrap import fill

xhtml_fragment = XML('''
<ul>
<li>XHTML documents start with an <span class="code">&lt;html&gt;</span> tag - there is no such thing as an <span class="code">&lt;xhtml&gt;</span> tag.</li>
<li>It is required that you declare the XHTML namespace inside the opening <span class="code">&lt;html&gt;</span> tag.</li>
<li>This XHTML example covered the use of XHTML transitional - for XHTML strict or frameset, use the appropriate
<a title="Declaring a DocType" href="/xhtml/doctype/" >DOCTYPE Declaration</a>.</li>
<li>Remember that declaring a DOCTYPE with a valid identifier at the top of an XHTML page puts most browers
in <i>standards</i> mode- so remember not to use old browser hacks, and non-standard tags. (Otherwise, use just use regular HTML)</li>
<li>For some browsers, including Microsoft Internet Explorer 6, if you start an XHTML page with the XML declaration,
the browser goes into <i>quirks</i> mode, an unfortunate bug. The workaround is to delete the optional 
declaration and declare the encoding using a meta tag.</li>
<li>The DOCTYPE declaration has to be in all uppercase characters, just like in the XHTML example code.</li>
</ul>
''')

print(fill(''.join(xhtml_fragment.textlist())))


docbook_fragment = XML('''
<book id="ashortbook">
  <title>History of Computer Programming</title>
  <chapter id="afirstchapter">
    <title>Chapter 1 -- Evolution</title>
Code example #19
        def parse_result (status, data, result):
            (callback, user_data) = data
            if status != 0:
                callback (status, user_data, result)

            try:
                # filter out invalid UTF-8 to avoid breaking the XML parser
                result = result.decode('UTF-8', errors='replace').encode('UTF-8')
                root = XML (result)
                drivers = {}
                # We store the drivers as a dict of:
                # foomatic_id:
                #   { 'name': name,
                #     'url': url,
                #     'supplier': supplier,
                #     'license': short license string e.g. GPLv2,
                #     'licensetext': license text (Plain text),
                #     'nonfreesoftware': Boolean,
                #     'thirdpartysupplied': Boolean,
                #     'manufacturersupplied': Boolean,
                #     'patents': Boolean,
                #     'supportcontacts' (optional):
                #       list of { 'name',
                #                 'url',
                #                 'level',
                #               }
                #     'shortdescription': short description,
                #     'recommended': Boolean,
                #     'functionality':
                #       { 'text': integer percentage,
                #         'lineart': integer percentage,
                #         'graphics': integer percentage,
                #         'photo': integer percentage,
                #         'speed': integer percentage,
                #       }
                #     'packages' (optional):
                #       { arch:
                #         { file:
                #           { 'url': url,
                #             'fingerprint': signature key fingerprint URL
                #             'realversion': upstream version string,
                #             'version': packaged version string,
                #             'release': package release string
                #           }
                #         }
                #       }
                #     'ppds' (optional):
                #       URL string list
                #   }
                # There is more information in the raw XML, but this
                # can be added to the Python structure as needed.

                for driver in root.findall ('driver'):
                    id = driver.attrib.get ('id')
                    if id is None:
                        continue

                    dict = {}
                    for attribute in ['name', 'url', 'supplier', 'license',
                                      'shortdescription' ]:
                        element = driver.find (attribute)
                        if element is not None and element.text is not None:
                            dict[attribute] = _normalize_space (element.text)

                    element = driver.find ('licensetext')
                    if element is not None and element.text is not None:
                        dict['licensetext'] = element.text
                    if not 'licensetext' in dict or \
                       dict['licensetext'] is None:
                        element = driver.find ('licenselink')
                        if element is not None:
                            license_url = element.text
                            if license_url is not None:
                                try:
                                    with urllib.request.urlopen(license_url, timeout=HTTPS_TIMEOUT) as resp:
                                        dict['licensetext'] = resp.read().decode('utf-8')
                                except:
                                    _debugprint('Cannot retrieve %s' %
                                                license_url)

                    for boolean in ['nonfreesoftware', 'recommended',
                                    'patents', 'thirdpartysupplied',
                                    'manufacturersupplied']:
                        dict[boolean] = driver.find (boolean) is not None

                    # Make a 'freesoftware' tag for compatibility with
                    # how the OpenPrinting API used to work (see trac
                    # #74).
                    dict['freesoftware'] = not dict['nonfreesoftware']

                    supportcontacts = []
                    container = driver.find ('supportcontacts')
                    if container is not None:
                        for sc in container.findall ('supportcontact'):
                            supportcontact = {}
                            if sc.text is not None:
                                supportcontact['name'] = \
                                    _normalize_space (sc.text)
                            else:
                                supportcontact['name'] = ""
                            supportcontact['url'] = sc.attrib.get ('url')
                            supportcontact['level'] = sc.attrib.get ('level')
                            supportcontacts.append (supportcontact)

                    if supportcontacts:
                        dict['supportcontacts'] = supportcontacts

                    if 'name' not in dict or 'url' not in dict:
                        continue

                    container = driver.find ('functionality')
                    if container is not None:
                        functionality = {}
                        for attribute in ['text', 'lineart', 'graphics',
                                          'photo', 'speed']:
                            element = container.find (attribute)
                            if element is not None:
                                functionality[attribute] = element.text
                        if functionality:
                            dict[container.tag] = functionality

                    packages = {}
                    container = driver.find ('packages')
                    if container is not None:
                        for arch in list(container):
                            rpms = {}
                            for package in arch.findall ('package'):
                                rpm = {}
                                for attribute in ['realversion','version',
                                                  'release', 'url', 'pkgsys',
                                                  'fingerprint']:
                                    element = package.find (attribute)
                                    if element is not None:
                                        rpm[attribute] = element.text

                                repositories = package.find ('repositories')
                                if repositories is not None:
                                    for pkgsys in list(repositories):
                                        rpm.setdefault('repositories', {})[pkgsys.tag] = pkgsys.text

                                rpms[package.attrib['file']] = rpm
                            packages[arch.tag] = rpms

                    if packages:
                        dict['packages'] = packages

                    ppds = []
                    container = driver.find ('ppds')
                    if container is not None:
                        for each in list(container):
                            ppds.append (each.text)

                    if ppds:
                        dict['ppds'] = ppds

                    drivers[id] = dict
                    _debugprint ("listDrivers/parse_result: OpenPrinting entries: %s" % repr(drivers))
                callback (0, user_data, drivers)
            except:
                callback (1, user_data, sys.exc_info ())
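The comment block above documents the shape of the drivers dict that is handed to callback. A minimal consumer could look like the sketch below; the function body is illustrative, and only the (status, user_data, drivers) callback signature comes from the code above.

def on_drivers(status, user_data, drivers):
    if status != 0:
        # On failure the third argument carries the exception details instead.
        print("driver lookup failed:", drivers)
        return
    for foomatic_id, info in drivers.items():
        label = info['name']
        if info.get('recommended'):
            label += ' (recommended)'
        print(foomatic_id, label, info.get('shortdescription', ''))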
Code example #20
 def __init__(self, str):
     self._rootElement = XML(str)
     self._namespace = self._rootElement.tag[1:].split("}")[0]
Code example #21
File: api.py  Project: tsinkala/commcare-hq
def initiate_outbound_call(call_log_entry, *args, **kwargs):
    phone_number = call_log_entry.phone_number
    if phone_number.startswith("+"):
        phone_number = phone_number[1:]

    if phone_number.startswith("91"):
        phone_number = "0" + phone_number[2:]
    else:
        raise InvalidPhoneNumberException(
            "Kookoo can only send to Indian phone numbers.")

    form = Form.get_form(call_log_entry.form_unique_id)
    app = form.get_app()
    module = form.get_module()

    # Only precache the first response if it's not an only-label form, otherwise we could end up
    # submitting the form regardless of whether the person actually answers the call.
    if form_requires_input(form):
        recipient = call_log_entry.recipient
        case_id = get_case_id(call_log_entry)
        session, responses = start_session(recipient.domain,
                                           recipient,
                                           app,
                                           module,
                                           form,
                                           case_id,
                                           yield_responses=True,
                                           session_type=XFORMS_SESSION_IVR)

        ivr_responses = []
        for response in responses:
            ivr_responses.append(
                format_ivr_response(response.event.caption, app))

        input_length = get_input_length(responses[-1])

        call_log_entry.use_precached_first_response = True
        call_log_entry.xforms_session_id = session.session_id

    url_base = get_url_base()

    params = urlencode({
        "phone_no": phone_number,
        "api_key": kwargs["api_key"],
        "outbound_version": "2",
        "url": url_base + reverse("corehq.apps.kookoo.views.ivr"),
        "callback_url": url_base + reverse("corehq.apps.kookoo.views.ivr_finished"),
    })
    url = "http://www.kookoo.in/outbound/outbound.php?%s" % params
    response = urlopen(url).read()

    root = XML(response)
    for child in root:
        if child.tag.endswith("status"):
            status = child.text
        elif child.tag.endswith("message"):
            message = child.text

    if status == "queued":
        call_log_entry.error = False
        call_log_entry.gateway_session_id = "KOOKOO-" + message
    elif status == "error":
        call_log_entry.error = True
        call_log_entry.error_message = message
    else:
        call_log_entry.error = True
        call_log_entry.error_message = "Unknown status received from Kookoo."

    if call_log_entry.error:
        call_log_entry.use_precached_first_response = False

    if call_log_entry.use_precached_first_response:
        call_log_entry.first_response = get_http_response_string(
            call_log_entry.gateway_session_id,
            ivr_responses,
            collect_input=True,
            hang_up=False,
            input_length=input_length)

    call_log_entry.save()
    return not call_log_entry.error
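For reference, the status/message loop above would handle a gateway reply shaped roughly like the sample below. The element names and values here are assumptions consistent with the endswith() checks, not taken from Kookoo's documentation.

from xml.etree.ElementTree import XML

sample = XML('<response><status>queued</status><message>SESSION-12345</message></response>')
status = message = None
for child in sample:
    if child.tag.endswith("status"):
        status = child.text
    elif child.tag.endswith("message"):
        message = child.text
print(status, message)  # queued SESSION-12345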
Code example #22
 def testBasic(self):
     p = create_path()
     config = ConfigXml(filename=str(p))
     config.close()
     self.assertXml(self.initial(), XML(p.text()))
Code example #23
File: standardMzml.py  Project: fkromer/pymzML
    def _interpol_search(self,
                         target_index,
                         chunk_size=8,
                         fallback_cutoff=100):
        """
        Use linear interpolation search to find spectra faster.

        Arguments:
            target_index (str or int) : native id of the item to access

        Keyword Arguments:
            chunk_size (int)        : size of the chunk to read in one go in kb

        """
        # print('target ', target_index)
        seeker = self.get_binary_file_handler()
        seeker.seek(0, 2)
        chunk_size = chunk_size * 512
        lower_bound = 0
        upper_bound = seeker.tell()
        mid = int(upper_bound / 2)
        seeker.seek(mid, 0)
        current_position = seeker.tell()
        used_indices = set()
        spectrum_found = False
        spectrum = None
        while spectrum_found is False:
            jumper_scaling = 1
            file_pointer = seeker.tell()
            data = seeker.read(chunk_size)
            spec_start = self.spec_open.search(data)
            if spec_start is not None:
                spec_start_offset = file_pointer + spec_start.start()
                seeker.seek(spec_start_offset)
                current_index = int(
                    re.search(b"[0-9]*$", spec_start.group("id")).group())

                self.offset_dict[current_index] = (spec_start_offset, )
                if current_index in used_indices:
                    # seeker.close()
                    if current_index > target_index:
                        jumper_scaling -= 0.1
                    else:
                        jumper_scaling += 0.1

                used_indices.add(current_index)

                dist = current_index - target_index
                if dist < -1 and dist > -(fallback_cutoff):
                    spectrum = self._search_linear(seeker, target_index)
                    seeker.close()
                    spectrum_found = True
                    break
                elif dist > 0 and dist < fallback_cutoff:
                    while current_index > target_index:
                        offset = int(current_position - chunk_size)
                        seeker.seek(offset if offset > 0 else 0)
                        lower_bound = current_position
                        current_position = seeker.tell()
                        data = seeker.read(chunk_size)
                        if self.spec_open.search(data):
                            spec_start = self.spec_open.search(data)
                            current_index = int(
                                re.search(b"[0-9]*$",
                                          spec_start.group("id")).group())
                    seeker.seek(current_position)
                    spectrum = self._search_linear(seeker, target_index)
                    seeker.close()
                    spectrum_found = True
                    break

                if int(current_index) == target_index:

                    seeker.seek(spec_start_offset)
                    start, end = self._read_to_spec_end(seeker)
                    seeker.seek(start)
                    self.offset_dict[current_index] = (start, end)
                    xml_string = seeker.read(end - start)
                    seeker.close()
                    spectrum = spec.Spectrum(XML(xml_string),
                                             measured_precision=5e-6)
                    spectrum_found = True
                    break

                elif int(current_index) > target_index:
                    scaling = target_index / current_index
                    seeker.seek(
                        int(current_position * scaling * jumper_scaling))
                    upper_bound = current_position
                    current_position = seeker.tell()
                elif int(current_index) < target_index:
                    scaling = target_index / current_index
                    seeker.seek(
                        int(current_position * scaling * jumper_scaling))
                    lower_bound = current_position
                    current_position = seeker.tell()

            elif len(data) == 0:
                sorted_keys = sorted(self.offset_dict.keys())
                pos = (bisect.bisect_left(sorted_keys, target_index) - 2
                       )  # dat magic number :)
                key = sorted_keys[pos]
                spec_start_offset = self.offset_dict[key][0]
                seeker = self.get_binary_file_handler()
                seeker.seek(spec_start_offset)
                spectrum = self._search_linear(seeker, target_index)
                seeker.close()
                spectrum_found = True
                break

        return spectrum
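
The method above applies the interpolation-search idea to byte offsets inside an mzML file. As a rough sketch of the underlying algorithm, here is plain interpolation search over a sorted list of integers, independent of pymzML:

def interpolation_search(values, target):
    """Return the index of target in the sorted list values, or None."""
    low, high = 0, len(values) - 1
    while low <= high and values[low] <= target <= values[high]:
        if values[high] == values[low]:
            mid = low
        else:
            # Guess a position proportional to where target sits in the value
            # range, instead of always probing the middle as binary search does.
            mid = low + (target - values[low]) * (high - low) // (values[high] - values[low])
        if values[mid] == target:
            return mid
        if values[mid] < target:
            low = mid + 1
        else:
            high = mid - 1
    return None

assert interpolation_search([2, 5, 8, 13, 21, 34], 13) == 3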
Code Example #24
0
 def testWithEnv(self):
     p = create_path()
     config = ConfigXml(filename=str(p), env_config="FOO")
     config.close()
     self.assertXml(self.initial("FOO"), XML(p.text()))
Code Example #25
0
 def _parameter_for(self, **kwds):
     content = kwds["xml"]
     param_xml = XML(content)
     return basic.ToolParameter.build(self.mock_tool, param_xml)
Code Example #26
0
    # Step 6. Make sure our data always has a valid projection

    logger.info('>>> Step 6. Making sure [%s] has a valid projection' % name)
    check_projection(name, gs_resource)

    # Step 7. Create the style and assign it to the created resource
    # FIXME: Put this in gsconfig.py
    logger.info('>>> Step 7. Creating style for [%s]' % name)
    publishing = cat.get_layer(name)

    if 'sld' in files:
        f = open(files['sld'], 'r')
        sld = f.read()
        f.close()
        try:
            sldxml = XML(sld)
            valid_url = re.compile(settings.VALID_SLD_LINKS)
            for elem in sldxml.iter(tag='{http://www.opengis.net/sld}OnlineResource'):
                if '{http://www.w3.org/1999/xlink}href' in elem.attrib:
                    link = elem.attrib['{http://www.w3.org/1999/xlink}href']
                    if valid_url.match(link) is None:
                        raise Exception(_("External images in your SLD file are not permitted.  Please contact us if you would like your SLD images hosted on %s") % (settings.SITENAME))
        except ParseError as e:
            msg = _('Your SLD file contains invalid XML')
            logger.warn("%s - %s" % (msg, str(e)))
            e.args = (msg,)

        try:
            stylename = name + "_".join([choice('qwertyuiopasdfghjklzxcvbnm0123456789') for i in range(4)])
            cat.create_style(stylename, sld)
            #FIXME: Should we use the fully qualified typename?
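
A self-contained sketch of the same OnlineResource check; the SLD body and the allowed-URL pattern below are made-up stand-ins (the real code uses settings.VALID_SLD_LINKS):

import re
from xml.etree.ElementTree import XML

valid_url = re.compile(r'^https?://example\.org/')  # stand-in for settings.VALID_SLD_LINKS
sld = XML(
    '<StyledLayerDescriptor xmlns="http://www.opengis.net/sld" '
    'xmlns:xlink="http://www.w3.org/1999/xlink">'
    '<OnlineResource xlink:href="http://example.org/icon.png"/>'
    '</StyledLayerDescriptor>'
)
for elem in sld.iter(tag='{http://www.opengis.net/sld}OnlineResource'):
    link = elem.attrib.get('{http://www.w3.org/1999/xlink}href')
    if link is not None and valid_url.match(link) is None:
        raise ValueError('External images in the SLD are not permitted: %s' % link)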
Code Example #27
0
def configure_fast_poe(netconf_handler, interface):
  '''
  This procedure takes in the netconf handler for the switch and configures 2-event classification on the given interface.
  Procedure returns True if configuration successful, else returns False
  '''

  fastpoe_payload = '''
  <config>
    <native xmlns="http://cisco.com/ns/yang/Cisco-IOS-XE-native">
      <interface>
        <{interface_type}>
          <name>{interface_number}</name>
          <power xmlns="http://cisco.com/ns/yang/Cisco-IOS-XE-power">
            <inline>
              <port>
                <poe-ha/>
              </port>
            </inline>
          </power>
        </{interface_type}>
      </interface>
    </native>
  </config>
  '''

  # Parse interface type and name from <interface>
  interfaceType = re.findall(r'([A-Za-z]+)(\d+/\d+/\d+)', interface)
  perpetualPoEConfigured = False
  fastPoEConfigured = False
  # Verify whether perpetual PoE and Fast PoE are already configured
  netconf_reply = xml.dom.minidom.parseString(str(netconf_handler.get_config( source='running', filter=('xpath', "/native/interface/%s[name='%s']/power" %(interfaceType[0][0], interfaceType[0][1])))))
  print(netconf_reply.toprettyxml( indent = "  " ))

  config = XML(netconf_reply.toxml("utf-8"))

  for data in config.findall('{urn:ietf:params:xml:ns:netconf:base:1.0}data'):
    for native in data.findall('{http://cisco.com/ns/yang/Cisco-IOS-XE-native}native'):
      for itf in native.findall('{http://cisco.com/ns/yang/Cisco-IOS-XE-native}interface'):
        for itftype in itf.findall('{http://cisco.com/ns/yang/Cisco-IOS-XE-native}%s'%(interfaceType[0][0])):
          for power in itftype.findall('{http://cisco.com/ns/yang/Cisco-IOS-XE-power}power'):
            for inline in power.findall('{http://cisco.com/ns/yang/Cisco-IOS-XE-power}inline'):
              for port in inline.findall('{http://cisco.com/ns/yang/Cisco-IOS-XE-power}port'):
                for child in port:
                  if "perpetual-poe-ha" in child.tag:
                    print("Perpetual PoE is already configured on the port")
                    perpetualPoEConfigured = True
                  elif "poe-ha" in child.tag:
                    print("Fast PoE is already configured on the port")
                    fastPoEConfigured = True

  # If perpetual PoE is not configured, configure the feature
  if not perpetualPoEConfigured:
    print("Perpetual PoE is a prerequisite for Fast PoE to work. Configuring Perpetual PoE")
    conf_status = perpetualPoE.configure_perpetual_poe(netconf_handler, interface, skip_check=True)
    if not conf_status:
      return False

  # If already configured, exit. Else continue with configuration
  if fastPoEConfigured:
    return_val = True
  else:
    xmlDom = xml.dom.minidom.parseString(str(netconf_handler.edit_config(fastpoe_payload.format(interface_type=interfaceType[0][0], interface_number=interfaceType[0][1]), target='running')))
    if "<ok/>" in (xmlDom.toprettyxml(indent = "  ")):
      return_val = True
    else:
      print(xmlDom.toprettyxml(indent = "  "))
      return_val = False

  return return_val
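
A hedged usage sketch for the procedure above; the host, credentials, and interface name are placeholders, and the NETCONF session is opened with ncclient's manager.connect:

from ncclient import manager

# Placeholder connection details; adjust to the actual switch.
with manager.connect(host="10.0.0.1", port=830, username="admin",
                     password="admin", hostkey_verify=False) as netconf_handler:
    if configure_fast_poe(netconf_handler, "GigabitEthernet1/0/1"):
        print("Fast PoE configured")
    else:
        print("Fast PoE configuration failed")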
Code Example #28
0
File: extension.py Project: Holt59/mkdocstrings
    def run(self, parent: Element, blocks: Element) -> None:
        block = blocks.pop(0)
        m = self.RE.search(str(block))

        if m:
            # removes the first line
            block = block[m.end():]  # type: ignore

        block, the_rest = self.detab(block)

        if m:
            identifier = m.group(1)
            log.debug(f"mkdocstrings.extension: Matched '::: {identifier}'")
            config = yaml.safe_load(str(block)) or {}

            handler_name = self.get_handler_name(config)
            log.debug(
                f"mkdocstrings.extension: Using handler '{handler_name}'")
            handler = get_handler(
                handler_name, self._config["theme_name"],
                self._config["mkdocstrings"]["custom_templates"])

            selection, rendering = self.get_item_configs(handler_name, config)

            log.debug("mkdocstrings.extension: Collecting data")
            try:
                data = handler.collector.collect(identifier, selection)
            except CollectionError:
                log.error(
                    f"mkdocstrings.extension: Could not collect '{identifier}'"
                )
                return

            log.debug("mkdocstrings.extension: Updating renderer's env")
            handler.renderer.update_env(self.md, self._config)

            log.debug("mkdocstrings.extension: Rendering templates")
            try:
                rendered = handler.renderer.render(data, rendering)
            except TemplateNotFound as error:
                theme_name = self._config["theme_name"]
                log.error(
                    f"mkdocstrings.extension: Template '{error.name}' not found "
                    f"for '{handler_name}' handler and theme '{theme_name}'.")
                return

            log.debug(
                "mkdocstrings.extension: Loading HTML back into XML tree")
            try:
                as_xml = XML(rendered)
            except ParseError as error:
                message = f"mkdocstrings.extension: {error}"
                if "mismatched tag" in str(error):
                    line, column = str(error).split(":")[-1].split(", ")

                    lineno = int(line.split(" ")[-1])
                    columnno = int(column.split(" ")[-1])

                    line = rendered.split("\n")[lineno - 1]
                    character = line[columnno]
                    message += (
                        f" (character {character}):\n{line}\n"
                        f"If your Markdown contains angle brackets < >, try to wrap them between backticks `< >`, "
                        f"or replace them with &lt; and &gt;")
                log.error(message)
                return

            as_xml = atomic_brute_cast(as_xml)  # type: ignore
            parent.append(as_xml)

        if the_rest:
            # This block contained unindented line(s) after the first indented
            # line. Insert these lines as the first block of the master blocks
            # list for future processing.
            blocks.insert(0, the_rest)
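
For reference on the error handling above: ElementTree's ParseError carries the failing location both in its message and in a position attribute, so the line/column string parsing could also use that tuple directly. A small sketch with a deliberately broken snippet:

from xml.etree.ElementTree import XML, ParseError

try:
    XML("<p><em>oops</p>")
except ParseError as error:
    print(error)           # mismatched tag: line ..., column ...
    print(error.position)  # (line, column) tuple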
Code Example #29
0
#!/usr/bin/env python
import sys 
if len(sys.argv) != 3: 
    raise SystemExit('Usage: mod1_1.py route stopid')

route = sys.argv[1]
stopid = sys.argv[2]

import urllib.request 
u = urllib.request.urlopen('http://ctabustracker.com/bustime/map/getStopPredictions.jsp?route={}&stop={}'.format(route,stopid))
data = u.read()
#print(data)

from xml.etree.ElementTree import XML 
doc = XML(data)

for pt in doc.findall('.//pt'):
    print(pt.text)
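
The './/pt' argument is ElementTree's XPath subset for "any <pt> descendant"; a quick illustration on an inline document (the data is made up, the element name matches the bus-tracker feed):

from xml.etree.ElementTree import XML

doc = XML("<stop><prd><pt>5 MIN</pt></prd><prd><pt>12 MIN</pt></prd></stop>")
print([pt.text for pt in doc.findall('.//pt')])  # ['5 MIN', '12 MIN']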

Code Example #30
0
File: test_builders.py Project: mrjmad/creme_crm
    def test_manifest_xsf_01(self):
        "Test some base values."
        backend = self._get_backend(ContactFakeBackend, subject='create_ce')
        builder = self._get_builder(backend)
        ns  = '{http://schemas.microsoft.com/office/infopath/2003/solutionDefinition}'
        ns2 = '{http://schemas.microsoft.com/office/infopath/2006/solutionDefinition/extensions}'

        content = builder._render_manifest_xsf(self.request)
        xml = XML(content)
        xml_find = xml.find

        namespace = builder.get_namespace()
        self.assertEqual(
            re.search(r'xmlns:my="(?P<ns>[\w\d\-:/\.]*)"', content)['ns'],
            namespace
        )  # Can't be got with ElementTree, because it's a namespace

        self.assertEqual(builder.get_urn(), xml.get('name'))

        self.assertEqual(
            namespace,
            xml_find(
                f'{ns}package/{ns}files/{ns}file/{ns}fileProperties/{ns}property'
            ).get('value')
        )
        self.assertEqual(
            namespace,
            xml_find(
                f'{ns}applicationParameters/{ns}solutionProperties'
            ).get('fullyEditableNamespace')
        )
        self.assertEqual(
            namespace,
            xml_find(
                f'{ns}documentSchemas/{ns}documentSchema'
            ).get('location').split()[0]
        )

        # ElementTree 1.2.6 (shipped with python <= 2.6) doesn't support
        # advanced xpath expressions  TODO: improve
        file_nodes = xml.findall(f'{ns}package/{ns}files/{ns}file')

        for node in file_nodes:
            if node.get('name') == 'view1.xsl':
                found_node = node
                break
        else:
            self.fail('<xsf:file name="view1.xsl"> not found')

        for node in found_node.findall(f'{ns}fileProperties/{ns}property'):
            if node.get('name') == 'lang':
                # property_node = node  # TODO: use ?
                self.assertEqual(
                    builder._get_lang_code(self.request.LANGUAGE_CODE),
                    node.get('value')
                )
                break
        else:
            self.fail(
                '<xsf:property name="lang" type="string" value=""></xsf:property> not found'
            )

        mail_form_name = backend.subject
        self.assertEqual(
            mail_form_name,
            xml_find(
                f'{ns}extensions/{ns}extension/{ns2}solutionDefinition/'
                f'{ns2}solutionPropertiesExtension/{ns2}mail'
            ).get('formName')
        )