Example No. 1
def allCitiesAndEventsXML():
    # Query to get data of interest
    city = session.query(City).all()

    # Declare root node of XML
    top = Element('allEvents')
    comment = Comment('XML Response with all cities and events')
    top.append(comment)

    # Loop through query responses and format as XML 
    for c in city:
        event = SubElement(top, 'event')
        child = SubElement(event, 'id')
        child.text = str(c.id)
        child = SubElement(event, 'city')
        child.text = c.name
        child = SubElement(event, 'state')
        child.text = c.state
        eventInfo = SubElement(event, 'eventInfo')  # Add new node for Events
        for e in c.events:
            en = SubElement(eventInfo, 'event_name')
            en.text = e.name
            child = SubElement(en, 'description')
            child.text = e.description
            child = SubElement(en, 'event_date')
            child.text = str(e.event_date)
            child = SubElement(en, 'event_url')
            child.text = e.event_url
            child = SubElement(en, 'user_id')
            child.text = str(e.user_id)
     
        print tostring(top)
    return app.response_class(tostring(top), mimetype='application/xml')
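
The snippet above is Python 2 (bare print statements); on Python 3, tostring() returns bytes unless encoding='unicode' is passed. A minimal standalone sketch, with hypothetical element names, of the same Element/SubElement/tostring flow:

from xml.etree.ElementTree import Element, SubElement, tostring

top = Element('allEvents')
event = SubElement(top, 'event')
SubElement(event, 'city').text = 'Portland'

print(tostring(top))                      # bytes: b'<allEvents><event><city>Portland</city></event></allEvents>'
print(tostring(top, encoding='unicode'))  # str:   '<allEvents><event><city>Portland</city></event></allEvents>'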
Example No. 2
def _enable_wms_caching(layer, cache_secs, disable=False):
    '''This should go into gsconfig'''
    layer.resource.fetch()
    # ensure we fetch stuff fresh or we'll overwrite
    layer.catalog._cache.pop(layer.resource.href, None)
    dom = layer.resource.dom
    metadata = dom.find('metadata')
    if metadata is None:
        metadata = _el('metadata')
        dom.append(metadata)
    def set_entry(k, v):
        entries = metadata.findall("entry")
        entries = [ e for e in entries if e.attrib.get('key', None) == k]
        entry = entries[0] if entries else None
        if v:
            if entry is None:
                entry = _el('entry', v, key=k)
                metadata.append(entry)
            else:
                entry.text = str(v)
        elif entry is not None:
            metadata.remove(entry)
    set_entry('cacheAgeMax', cache_secs)
    set_entry('cachingEnabled', 'false' if disable or cache_secs == 0 else 'true')
    print tostring(dom)

    headers, resp = layer.resource.catalog.http.request(
        layer.resource.href, 'PUT', tostring(dom), {'content-type' : 'text/xml'})
    if headers.status != 200:
        raise Exception('error enabling wms caching: %s' % resp)
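
A minimal sketch of the set_entry idea in isolation, using only the standard library (the <metadata>/<entry key="..."> layout mirrors the GeoServer document handled above; this is not the gsconfig API):

from xml.etree.ElementTree import SubElement, fromstring, tostring

dom = fromstring('<coverage><metadata/></coverage>')
metadata = dom.find('metadata')

def set_entry(key, value):
    # Find an existing <entry key="...">, then create, update or remove it.
    entry = next((e for e in metadata.findall('entry')
                  if e.attrib.get('key') == key), None)
    if value:
        if entry is None:
            entry = SubElement(metadata, 'entry', key=key)
        entry.text = str(value)
    elif entry is not None:
        metadata.remove(entry)

set_entry('cachingEnabled', 'true')
set_entry('cacheAgeMax', 3600)
print(tostring(dom, encoding='unicode'))
# <coverage><metadata><entry key="cachingEnabled">true</entry><entry key="cacheAgeMax">3600</entry></metadata></coverage>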
Example No. 3
def qInequalities_template():
    '''Solve inequalities. e.g. 2-3x <= 8.'''
    leftside_section1 = randint(-100,100)
    leftside_section2 = randint(-100,100)
    left_side = leftside_section1 + leftside_section2
    right_side = randint(-100,100)
    equality_type = randint(0,3) #<, <=, >, >=
    question = None 
    x = Symbol('x', real=True) #For 4U Maths use complex=True for ImaginaryNum
    question_str = "Solve "
    if equality_type == 0:
        question = Lt(leftside_section1 + leftside_section2*x, right_side)
    elif equality_type == 1:
        question = Le(leftside_section1 + leftside_section2*x, right_side)
    elif equality_type == 2:
        question = Gt(leftside_section1 + leftside_section2*x, right_side)
    elif equality_type == 3:
        question = Ge(leftside_section1 + leftside_section2*x, right_side)
    question_str += tostring(am.parse(str(question)))

    steps = []
    if leftside_section1 < 0:
        steps.append('Move by +' + str(leftside_section1*-1) + ' to both ' \
                     +'sides')
    else:
        steps.append('Move by -' + str(leftside_section1) + ' to both ' \
                     +'sides')
    steps.append('Divide left and right side by ' + str(leftside_section2))

    answer = []
    answer.append(steps)
    answer.append(tostring(am.parse(str(solve(question, x)))))

    return question_str, answer
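
The asciimathml rendering aside (am.parse), the SymPy part of this generator can be exercised on its own. A hedged sketch, assuming SymPy's solve() accepts a relational such as Lt:

from sympy import Symbol, Lt, solve

x = Symbol('x', real=True)
question = Lt(2 - 3*x, 8)   # "Solve 2 - 3x < 8"
print(solve(question, x))   # expected to be something like (-2 < x) & (x < oo)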
Example No. 4
def test_xml():
    the_tree = etree.parse('test.xml')
    for test in the_tree.getiterator('test'):
        name = test.get('name')
        if not name:
            raise ValueError('test element does not have a name attribute')
        the_string = ''
        for string in test.getiterator('string'):
            the_string += string.text
        test_xml = ascii_to_math_tree(the_string)
        indent(test_xml)
        test_xml_string = tostring(test_xml).encode('utf8')
        standard_test_xml_string = xml_copy_tree(test_xml_string)
        match = None
        for result in test.getiterator('result'):
            math_tree = result[0]
            indent(math_tree)
            xml_string = tostring(math_tree)
            standard_result_xml_string = xml_copy_tree(xml_string)
            if standard_test_xml_string == standard_result_xml_string:
                match = True
                break
        if not match:
            sys.stderr.write('Error for test "%s"\n' % (name))
            sys.stderr.write(standard_test_xml_string.encode("utf8"))
            sys.stderr.write('\ndoes not match\n')
            sys.stderr.write(standard_result_xml_string.encode("utf8"))
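
Note that getiterator() was deprecated for a long time and removed in Python 3.9; Element.iter() is the replacement. A small sketch of the same traversal on a self-contained document:

from xml.etree.ElementTree import fromstring

tree = fromstring('<tests><test name="square"><string>x^2</string></test></tests>')
for test in tree.iter('test'):           # iter() replaces getiterator()
    print(test.get('name'))
    for string in test.iter('string'):
        print(string.text)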
Example No. 5
  def update(self):
    ResourceInfo.update(self)
    doc = self.metadata
    title = doc.find("title")
    abstract = doc.find("description")
    projection = doc.find("srs")
    enabled = doc.find("enabled")
    native_format = doc.find("nativeFormat")
    default_interpolation_method = doc.find("defaultInterpolationMethod")
    request_srs = doc.find("requestSRS/string")
    response_srs = doc.find("responseSRS/string")

    if title is None:
        print self.href
        print tostring(doc)

    self.title = title.text if title is not None else None
    self.abstract = abstract.text if abstract is not None else None
    self.keywords = [(kw.text or None) for kw in doc.findall("keywords/string")]
    self.native_bbox = bbox(doc.find("nativeBoundingBox"))
    self.latlon_bbox = bbox(doc.find("latLonBoundingBox"))
    self.projection = projection.text if projection is not None else None
    self.enabled = enabled.text == "true" if enabled is not None else False
    self.extra_config = dict((entry.attrib['key'], entry.text) for entry in doc.findall("metadata/entry"))
    self.dimensions = [coverage_dimension(d) for d in doc.findall("dimensions/coverageDimension")]
    self.native_format = native_format.text if native_format is not None else None
    self.grid = None # TODO: i guess this merits a little class of its own
    self.supported_formats = [format.text for format in doc.findall("supportedFormats/string")]
    self.default_interpolation_method = default_interpolation_method.text if default_interpolation_method is not None else None
    self.interpolation_methods = [method.text for method in doc.findall("interpolationMethods/string")]
    self.request_srs = request_srs.text if request_srs is not None else None
    self.response_srs = response_srs.text if response_srs is not None else None
    self.metadata_links = [md_link(n) for n in self.metadata.findall("metadataLinks/metadataLink")]
    def deploy_product(self, ip, product_name, product_version, attributes_string):
        url = "%s/%s/%s/%s" % (self.sdc_url, "vdc", self.vdc, "productInstance")
        headers = {'X-Auth-Token': self.token, 'Tenant-Id': self.vdc,
                   'Accept': "application/json", 'Content-Type':  "application/xml"}

        productrequest = ProductRequest(self.keystone_url, self.sdc_url, self.tenant, self.user, self.password)

        productrequest.get_product_info(product_name)
        attributes = self.__process_attributes(attributes_string)

        product_release = ProductReleaseDto(product_name, product_version)

        productInstanceDto = ProductInstanceDto(ip, product_release, attributes)
        payload = productInstanceDto.to_xml()
        print url
        print headers
        print tostring(payload)
        response = http.post(url, headers, tostring(payload))

        ## If the response is as expected, build the data dictionary from the JSON body.
        if response.status != 200:
            print 'error to add the product sdc ' + str(response.status)
            sys.exit(1)
        else:
            http.processTask(headers, json.loads(response.read()))
Example No. 7
    def verify(self, manager, uri, response, respdata, args): #@UnusedVariable
        # Get arguments
        files = args.get("filepath", [])
        
        # status code must be 200, 207
        if response.status not in (200,207):
            return False, "        HTTP Status Code Wrong: %d" % (response.status,)
        
        # look for response data
        if not respdata:
            return False, "        No response body"
        
        # look for one file
        if len(files) != 1:
            return False, "        No file to compare response to"
        
        # read in all data from specified file
        fd = open( files[0], "r" )
        try:
            try:
                data = fd.read()
            finally:
                fd.close()
        except:
            data = None

        if data is None:
            return False, "        Could not read data file"

        data = manager.server_info.subs(data)

        result = True
        if data != respdata:
            data = data.replace("\n", "\r\n")
            if data != respdata:
                # If we have an iCalendar file, then unwrap data and do compare
                if files[0].endswith(".ics"):
                    data = data.replace("\r\n ", "")
                    respdata = respdata.replace("\r\n ", "")
                    if data != respdata:
                        result = False
                elif files[0].endswith(".xml"):
                    try:
                        respdata = tostring(ElementTree(file=StringIO(respdata)).getroot())
                    except Exception:
                        return False, "        Could not parse XML response: %s" %(respdata,)
                    try:
                        data = tostring(ElementTree(file=StringIO(data)).getroot())
                    except Exception:
                        return False, "        Could not parse XML data: %s" %(data,)
                    if data != respdata:
                        result = False
                else:
                    result = False

        if result:
            return True, ""
        else:
            error_diff = "\n".join([line for line in unified_diff(data.split("\n"), respdata.split("\n"))])
            return False, "        Response data does not exactly match file data %s" % (error_diff,)
Example No. 8
def brandsCatalogXML(brand_id):
    if 'username' not in login_session:
        response = make_response(json.dumps("You are not authorized, Login is required"), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    else:
        brand = session.query(Brand).filter_by(id=brand_id).one()
        items = session.query(Item).filter_by(brand_id=brand_id).all()
    
    root = Element('html')
    for i in items:
        body = SubElement(root, 'body')
        ID = SubElement(body, 'ID')
        ID.text = str(i.id)
        name = SubElement(body, 'name')
        name.text = i.name
        price = SubElement(body, 'price')
        price.text = i.price
        description = SubElement(body, 'description')
        description.text = i.description
        image = SubElement(body, 'image')
        image.text = i.image
        print tostring(root)

    return app.response_class(tostring(root), mimetype='application/xml')
Example No. 9
def sign_file(xml_file, root_id, assert_id, key_file, cert_file):
    """sign *xml_file* with *key_file* and include content of *cert_file*.
    *xml_file* can be a file, a filename string or an HTTP/FTP url.

    *key_file* contains the PEM encoded private key. It must be a filename string.

    *cert_file* contains a PEM encoded certificate (corresponding to *key_file*),
    included as `X509Data` in the dynamically created `Signature` template.
    """
    # template aware infrastructure
    from dm.xmlsec.binding.tmpl import parse, Element, SubElement, \
         fromstring, XML
    from dm.xmlsec.binding.tmpl import Signature

    doc = parse(xml_file)

    xmlsec.addIDs(doc.getroot(),['ID'])

    assertion = doc.findall('saml:Assertion', {"saml": "urn:oasis:names:tc:SAML:2.0:assertion"})[0]



    assertion_xml = sign_xml(tostring(assertion), assert_id, key_file, cert_file)

    assertion_doc = fromstring(assertion_xml)
    doc.getroot().remove(doc.findall("saml:Assertion", {"saml": "urn:oasis:names:tc:SAML:2.0:assertion"})[0])
    doc.getroot().insert(0, assertion_doc)

    

    return sign_xml(tostring(doc), root_id, key_file, cert_file)
Example No. 10
 def _serverParseXML_QueryRQ(self, e_root, e_header, e_body):
     #
     # start to parse a query class
     #
     e_query = e_root.find(".//fiap:query", namespaces=_NSMAP)
     if e_query == None:
         self.emsg = "query is not specified. [%s]" % tostring(e_root)
         return None
     #
     # check uuid.
     # XXX it's not used.
     #
     uuid = e_query.get("id")
     #
     # check the type attribute.
     # either storage or stream is valid.
     #
     type = e_query.get("type")
     if type == None:
         self.emsg = "type is not specified. [%s]" % tostring(e_query)
         return None
     elif type == "storage":
         return self._serverParseXML_FETCH(e_root, e_query)
     elif type == "stream":
         return self._serverParseXML_TRAP(e_root, e_query)
     else:
         self.emsg = "invalid type is specified. [%s]" % type
         return None
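
A sketch of the namespaced lookup that _NSMAP presumably provides; the URI below is a placeholder, not the real FIAP namespace:

from xml.etree.ElementTree import fromstring, tostring

_NSMAP = {'fiap': 'http://example.com/fiap'}   # placeholder URI

e_root = fromstring('<root xmlns:fiap="http://example.com/fiap">'
                    '<fiap:query id="abc" type="storage"/></root>')
e_query = e_root.find('.//fiap:query', namespaces=_NSMAP)
print(e_query.get('type'))       # storage
print(tostring(e_query))         # serialized with the full namespace URI, not the prefix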
Example No. 11
def getItemsXML(expedition_id, category_id):
    """
    Endpoint to return an XML List of all items associated
    with a certain expedition and category
    :param expedition_id:
    :param category_id:
    """
    items = session.query(Item).filter_by(
        expedition_id=expedition_id,
        category_id=category_id).all()
    root = Element('allItems')
    comment = Comment('XML Endpoint Listing '
                      'all Items for a specific Category and Expedition')
    root.append(comment)
    for i in items:
        ex = SubElement(root, 'expedition')
        ex.text = i.expedition.title
        category_name = SubElement(ex, 'category_name')
        category_description = SubElement(category_name,
                                          'category_description')
        category_picture = SubElement(category_name, 'category_picture')
        category_name.text = i.category.name
        category_description.text = i.category.description
        category_picture.text = i.category.picture
        item_name = SubElement(category_name, 'item_name')
        item_decription = SubElement(item_name, 'item_description')
        item_picture = SubElement(item_name, 'item_picture')
        item_name.text = i.name
        item_decription.text = i.description
        item_picture.text = i.picture
    print tostring(root)
    return app.response_class(tostring(root), mimetype='application/xml')
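
As in Example No. 1, the Comment node is carried through serialization: tostring() renders it as <!-- ... -->. A tiny sketch:

from xml.etree.ElementTree import Element, Comment, tostring

root = Element('allItems')
root.append(Comment('XML Endpoint Listing all Items'))
print(tostring(root, encoding='unicode'))
# <allItems><!--XML Endpoint Listing all Items--></allItems>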
Example No. 12
    def test_package_serialization(self):
        items = [
            self.request_delivery_factory.factory_item(
                ware_key=uuid.uuid1().get_hex()[:10],
                cost=Decimal(450.0 * 30),
                payment=Decimal(450.0),
                weight=500,
                weight_brutto=600,
                amount=4,
                comment=u'Комментарий на русском',
                link=u'http://shop.ru/item/44'
            ),
            self.request_delivery_factory.factory_item(
                ware_key=uuid.uuid1().get_hex()[:10],
                cost=Decimal(250.0 * 30),
                payment=Decimal(250.0),
                weight=500,
                weight_brutto=600,
                amount=4,
                comment=u'Комментарий на русском',
                link=u'http://shop.ru/item/42'
            )
        ]

        package = self.request_delivery_factory.factory_package(
            number=uuid.uuid1().hex[:10],
            weight=3000,
            items=items
        )
        self.assertIsInstance(package, PackageRequestObject)
        tostring(package.to_xml_element(u'Package'), encoding='UTF-8').replace("'", "\"")
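
The final .replace() works here because on Python 2 tostring(..., encoding='UTF-8') returns a str; on Python 3 it returns bytes, so the replacement arguments would need to be bytes as well. A hedged sketch of the Python 3 form:

from xml.etree.ElementTree import Element, tostring

pkg = Element('Package', Number='42')
serialized = tostring(pkg, encoding='UTF-8')   # bytes on Python 3
print(serialized.replace(b"'", b'"'))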
Example No. 13
 def codeToXML(self, code):
     self.code = code
     #root = 0
     f=tempfile.NamedTemporaryFile(suffix='.cpp', delete=False)
     #code='//this is the content\n'
     f.write(code)
     f.close()
     #print f.name
     #fpath='./test.c'
     #g=open(f.name)
     #print '\n'.join(g.readlines())
     #g.close()
     try:
         p1 = Popen([os.path.dirname(os.path.realpath(__file__))+"/../bin/src2srcml", f.name], stdout=PIPE)
     except:
         print 'unable to open src2srcml binary'
         exit(-1)
     #print p1.stdout.read()
     #content='<unit>\n'+'\n'.join(p1.stdout.read().split('\n')[2:])
     content='\n'.join(p1.stdout.read().split('\n')[1:])
     content=content.replace('xmlns=','namsp=')
     #content=content.replace('xmlns:cpp=','namsp=')
     #content=p1.stdout.read()
     if DEBUG: print content
     #os.remove(f.name)
     root = ET.fromstring(content)
     if DEBUG: print tostring(root)
     #del root.attrib["xmlns"]
     self.root = root
     return root
Example No. 14
def srcml_code2xml(code):
    srcml_sample = srcML()
    if DEBUG:
        print 'HERE:'
        tostring(srcml_sample.codeToXML(code))
        print '2HERE:'
    return srcml_sample.codeToXML(code)
Example No. 15
def orgHandler(node):
    """
    node is the start node of an Organization
    orgHandler parses the XML info for this organization;
    gathered info is stored in the Django model database
    """
    assert node is not None
    o = getEntity(Organization, node.attrib.get('ID'))
    o.name = node.attrib.get('Name')
    for attr in node:
        if attr.tag == 'Crises':
            for elem in attr:
                o.crises.add(getEntity(Crisis, elem.attrib.get('ID')))
        if attr.tag == 'People':
            for elem in attr:
                o.people.add(getEntity(Person, elem.attrib.get('ID')))
        if attr.tag == 'Kind':
            o.kind = attr.text
        if attr.tag == 'Location':
            o.location = attr.text
        if attr.tag == 'History':
            o.history += ''.join([v for v in [tostring(li).strip() for li in attr] if v not in o.history])
        if attr.tag == 'ContactInfo':
            o.contact += ''.join([v for v in [tostring(li).strip() for li in attr] if v not in o.contact])
        if attr.tag == 'Common':
            comHandler(attr, o)
    assert o is not None
    o.save()
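
The tostring(li).strip() calls matter because tostring() also emits the element's tail text (whatever follows the closing tag), which is often just whitespace. A small demonstration:

from xml.etree.ElementTree import fromstring, tostring

history = fromstring('<History><li>Founded in 1971.</li>\n</History>')
print(repr(tostring(history[0], encoding='unicode')))
# '<li>Founded in 1971.</li>\n'  -- the trailing tail is included, hence the .strip()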
Example No. 16
def qSignificantFigures_template():
    '''Significant figures. e.g. Evaluate cuberoot(651/3) to 5 sig fig.'''
    sig_fig = randint(2,5)
    root_val = randint(2,5)
    numerator = randint(1,1000)
    denom = randint(1,1000)
    val = 'root%s(%s/(%s*pi))' % (root_val, numerator, denom)
    question = 'Evaluate ' +  tostring(am.parse(val)) + ' to %s' % (sig_fig)
    question += ' Significant Figures.'

    steps = []
    inside_root = (numerator/(denom*pi)).evalf() 
    val = root(inside_root, root_val)
    steps.append('This has to be done with a calculator.')
    steps.append('Do the inside calculation first: ' + 
                 tostring(am.parse('%s/(%s*pi)'%(numerator,denom))) + tostring(am.parse('*'))
                 + str(am.parse('pi')))
    steps.append('This should give ' + tostring(am.parse(str(inside_root))))
    steps.append('Then you need to square root the answer.')
    steps.append('Use either [1/y] key or similar on calculator and press ' 
                 + str(root_val))
    steps.append('Please refer to your calculator manual if in doubt.')
    steps.append('Then look for %s significant figures.' % sig_fig)
    steps.append('Note: First non-zero digit is 1st significant figure,' + \
                 ' going from left to right. Each digit after that is a' + \
                 ' significant figure.')
    answer = []
    answer.append(steps)
    answer.append(round(val, sig_fig-int(floor(log10(val)))-1))

    return question, answer
Example No. 17
def createAndWriteXML(upload_id, writeToDB):
    completeVase = Element('vase')
    
    for i in range(len(finalRight)):
        pointpair = SubElement(completeVase, 'pointpair')
        left = SubElement(pointpair, 'left')
        right = SubElement(pointpair, 'right')
        
        t = SubElement(pointpair, 't')
        
        xRight = SubElement(right, 'x')
        xLeft = SubElement(left, 'x')
        yRight = SubElement(right, 'y')
        yLeft = SubElement(left, 'y')
        
        t.text = "0"
        xRight.text = str(finalRight[i][0])
        xLeft.text = str(finalLeft[i][0])
        yRight.text = str(finalRight[i][1])
        yLeft.text = str(finalLeft[i][1])
    
    if writeToDB == 'True':
        writeToDB(upload_id, tostring(completeVase))
    #print "Content-type: text/html"
    #print
    print tostring(completeVase)
Example No. 18
 def handle_noargs(self, **options):
     """
      Poll flickr for photos and create and save an object to the database for
     each that isn't currently represented.
     
     Todo: keys and ids from settings
     """
     for account in settings.DJANGR_ACCOUNTS:
     
         flickr = flickrapi.FlickrAPI(settings.DJANGR_APIKEY)
         photos_el = flickr.photos_search(user_id=account['user_id'])
     
         for photo_el in photos_el.findall('photos/photo'):
             
             id = int(photo_el.get('id'))
             secret = photo_el.get('secret')
             
             # Reference existing photo or create new
             try:
                 photo = Photo.objects.get(id=id)
                 print 'Updating photo %s' % id
             except Photo.DoesNotExist:
                 print 'Creating a new photo %s' % id
                 photo = Photo()
                 photo.id = id
                 photo.active = True
                 photo.secret = secret
             
             # Request more information on the photo
             info_el = flickr.photos_getinfo(
                 user_id=account['user_id'],
                 photo_id=id, secret=secret)
             photoinfo_el = info_el.find('photo')
             photo.infoxml = tostring(photoinfo_el)
             
             # Parse xml
             photo.username = photoinfo_el.find('owner').get('username')
             photo.dateuploaded = datetime.fromtimestamp(float(
                 photoinfo_el.get('dateuploaded')))
             datetaken = photoinfo_el.find('dates').get('taken', '')
             photo.date = datetime.strptime(datetaken, '%Y-%m-%d %H:%M:%S')
             photo.description = photoinfo_el.findtext('description')
             photo.title = photoinfo_el.findtext('title')
             photo.farm = int(photoinfo_el.get('farm'))
             photo.server = int(photoinfo_el.get('server'))
             photo.xml = tostring(photo_el)
             photo.owner = photo_el.get('owner')
             
             # Parse optional properties, always delete from object when 
             # none exist - they may be newly removed
             location = photoinfo_el.find('location')
             if location:
                 photo.latitude = location.get('latitude')
                 photo.longitude = location.get('longitude')
             else:
                 photo.longitude = None
                 photo.latitude = None
                 
             photo.save()
def video_notification(request):
    """Receive video completion notifications from encoding.com

    A URL pointing to this view should be sent to encoding.com with transcode
    requests so that they hit this page and trigger EncodedVideo model updates
    """
    logger = logging.getLogger('vod_aws.views.video_notification')

    # Only handle POST requests, otherwise just show a blank page
    if request.method == 'POST':
        try:
            result = fromstring(request.POST['xml'])
        except KeyError:
            logger.error('request POST contained no XML data')
            return HttpResponseBadRequest()
        except ExpatError, e:
            logger.error('request POST xml parse error: %s' % (e.message))
            return HttpResponseBadRequest()

        # At this point, we should have XML that looks like this:
        #=======================================================================
        # <result>
        #    <mediaid>[MediaID]</mediaid>
        #    <source>[SourceFile]</source>
        #    <status>[MediaStatus]</status>
        #    <description>[ ErrorDescription]</description> <!-- Only in case of Status = Error -->
        #    <format>
        #        <output>[OutputFormat]</output>
        #        <destination>[DestFile]</destination> <!-- Only in case of Status = Finished -->
        #        <status>[TaskStatus]</status>
        #        <description>[ErrorDescription]</description> <!-- Only in case of Status = Error -->
        #        <suggestion>[ErrorSuggestion]</suggestion> <!-- Only in case of Status = Error -->
        #    </format>
        #    <format>
        #        ...
        #    </format>
        # </result>
        #
        # Note the lowercase 'mediaid', instead of 'MediaID' as it is elsewhere
        #=======================================================================

        # TODO: Currently this only acts on videos for which we've been notified.
        #  Doing a batch "GetStatus" on the MediaID we've been told about as well as
        #  old jobs that (for whatever reason) we don't know about would be a good idea

        # status can be "Error" or "Finished"
        status = result.findtext('status')
        if status == 'Finished':
            mediaid = result.findtext('mediaid')
            logger.info('Encoding.com job with MediaID %s is finished.' % mediaid)
            vod_aws.tasks.process_video_notification.delay(mediaid)
        elif status == 'Error':
            # Error message is result.find('description').text
            # Log the XML to get more debugging from encoding.com
            logger.error('Encoding.com reported an error: "%s". Enable DEBUG logging for full XML.' % result.findtext('description'))
            logger.debug(tostring(result))
        else:
            logger.error('Unknown status %s from encoding.com. Enable DEBUG logging for full XML.' % status)
            logger.debug(tostring(result))
	def test_himpact3(self):
		root = fromstring(himpact3)
		model = createHumanImpact(root)
		elem = Element('test')
		buildHumanImpact(elem, model)
		rootstring = tostring(root)
		elemstring = tostring(elem[0])
		self.assert_(rootstring == elemstring)
	def test_eimpact3(self):
		root = fromstring(eimpact3)
		model = createEconomicImpact(root)
		elem = Element('test')
		buildEconomicImpact(elem, model)
		rootstring = tostring(root)
		elemstring = tostring(elem[0])
		self.assert_(rootstring == elemstring)
	def test_impact2(self) :
		root = fromstring(impact2)
		model = createImpact(root)
		elem = Element('test')
		buildImpact(elem, model)
		rootstring = tostring(root)
		elemstring = tostring(elem[0])
		self.assert_(rootstring == elemstring)
	def test_crisisInfo3(self):
		root = fromstring(cInfo3)
		model = createCrisisInfo(root)
		elem = Element('test')
		buildCrisisInfo(elem, model)
		rootstring = tostring(root)
		elemstring = tostring(elem[0])
		self.assert_(rootstring == elemstring)
	def test_location3(self):
		root = fromstring(location3)
		model = createLocationInfo(root)
		elem = Element('test')
		buildLocation(elem, model)
		rootstring = tostring(root)
		elemstring = tostring(elem[0])
		self.assert_(rootstring == elemstring)
	def test_link1(self):
		root = fromstring(p_imageLink)
		model = createLink('primaryImage', root)
		elem = Element('primaryImage')
		buildLink(elem, model.site, model.title, model.url, model.description)
		rootstring = tostring(root)
		elemstring = tostring(elem)
		self.assert_(rootstring == elemstring)
	def test_link5(self):
		root = fromstring(extLink1)
		model = createLink('ext', root)
		elem = Element('ext')
		buildLink(elem, model.site, model.title, model.url, model.description)
		rootstring = tostring(root)
		elemstring = tostring(elem)
		self.assert_(rootstring == elemstring)
	def test_crisis4(self):
		root = fromstring(crisis4)
		model = createCrisis(root)
		elem = Element('crisis')
		buildCrisisPage(elem, model)
		rootstring = tostring(root)
		elemstring = tostring(elem)
		self.assert_(rootstring == elemstring)
	def test_external_links4(self):
		root = fromstring(refs4)
		model = createRefLinks(root)
		elem = Element('test')
		buildExternalRefs(elem, model)
		rootstring = tostring(root)
		elemstring = tostring(elem[0])
		self.assert_(rootstring == elemstring)
	def test_date2(self):
		root = fromstring(cDate2)
		model = createDate(root)
		elem = Element('test')
		buildDate(elem, model, 'time')
		rootstring = tostring(root)
		elemstring = tostring(elem[0])
		self.assert_(rootstring == elemstring)
	def test_address3(self):
		root = fromstring(address3)
		model = createFullAddress(root)
		elem = Element('test')
		buildAddress(elem, model)
		rootstring = tostring(root)
		elemstring = tostring(elem[0])
		self.assert_(rootstring == elemstring)
Example No. 31
def main():
    # For py2exe builds
    freeze_support()

    # Handle SIGINT to terminate processes
    signal.signal(signal.SIGINT, sigint_handler)

    start_time = time()
    #--PLUGINS INITIALIZATION--
    sslyze_plugins = PluginsFinder()
    available_plugins = sslyze_plugins.get_plugins()
    available_commands = sslyze_plugins.get_commands()
    # Create the command line parser and the list of available options
    sslyze_parser = CommandLineParser(available_plugins, PROJECT_VERSION)

    try:  # Parse the command line
        (command_list, target_list,
         shared_settings) = sslyze_parser.parse_command_line()
    except CommandLineParsingError as e:
        print e.get_error_msg()
        return

    if not shared_settings['quiet'] and shared_settings['xml_file'] != '-':
        print '\n\n\n' + _format_title('Available plugins')
        print ''
        for plugin in available_plugins:
            print '  ' + plugin.__name__
        print '\n\n'

    #--PROCESSES INITIALIZATION--
    # Three processes per target from MIN_PROCESSES up to MAX_PROCESSES
    nb_processes = max(MIN_PROCESSES, min(MAX_PROCESSES, len(target_list) * 3))
    if command_list.https_tunnel:
        nb_processes = 1  # Let's not kill the proxy

    task_queue = JoinableQueue()  # Processes get tasks from task_queue and
    result_queue = JoinableQueue(
    )  # put the result of each task in result_queue

    # Spawn a pool of processes, and pass them the queues
    for _ in xrange(nb_processes):
        priority_queue = JoinableQueue()  # Each process gets a priority queue
        p = WorkerProcess(priority_queue, task_queue, result_queue, available_commands, \
                          shared_settings)
        p.start()
        process_list.append(
            (p,
             priority_queue))  # Keep track of each process and priority_queue

    #--TESTING SECTION--
    # Figure out which hosts are up and fill the task queue with work to do
    if not shared_settings['quiet'] and shared_settings['xml_file'] != '-':
        print _format_title('Checking host(s) availability')

    targets_OK = []
    targets_ERR = []

    # Each server gets assigned a priority queue for aggressive commands
    # so that they're never run in parallel against this single server
    cycle_priority_queues = cycle(process_list)
    target_results = ServersConnectivityTester.test_server_list(
        target_list, shared_settings)
    for target in target_results:
        if target is None:
            break  # None is a sentinel here

        # Send tasks to worker processes
        targets_OK.append(target)
        (_, current_priority_queue) = cycle_priority_queues.next()

        for command in available_commands:
            if getattr(command_list, command):
                args = command_list.__dict__[command]

                if command in sslyze_plugins.get_aggressive_commands():
                    # Aggressive commands should not be run in parallel against
                    # a given server so we use the priority queues to prevent this
                    current_priority_queue.put((target, command, args))
                else:
                    # Normal commands get put in the standard/shared queue
                    task_queue.put((target, command, args))

    for exception in target_results:
        targets_ERR.append(exception)

    if not shared_settings['quiet'] and shared_settings['xml_file'] != '-':
        print ServersConnectivityTester.get_printable_result(
            targets_OK, targets_ERR)
        print '\n\n'

    # Put a 'None' sentinel in the queue to let each process know when every
    # task has been completed
    for (proc, priority_queue) in process_list:
        task_queue.put(None)  # One sentinel in the task_queue per proc
        priority_queue.put(None)  # One sentinel in each priority_queue

    # Keep track of how many tasks have to be performed for each target
    task_num = 0
    for command in available_commands:
        if getattr(command_list, command):
            task_num += 1

    # --REPORTING SECTION--
    processes_running = nb_processes

    # XML output
    xml_output_list = []

    # Each host has a list of results
    result_dict = {}
    for target in targets_OK:
        result_dict[target] = []

    # If all processes have stopped, all the work is done
    while processes_running:
        result = result_queue.get()

        if result is None:  # Getting None means that one process was done
            processes_running -= 1

        else:  # Getting an actual result
            (target, command, plugin_result) = result
            result_dict[target].append((command, plugin_result))

            if len(result_dict[target]) == task_num:  # Done with this target
                # Print the results and update the xml doc
                if shared_settings['xml_file']:
                    xml_output_list.append(
                        _format_xml_target_result(target, result_dict[target]))
                    if not shared_settings[
                            'quiet'] and shared_settings['xml_file'] != '-':
                        print _format_txt_target_result(
                            target, result_dict[target])
                else:
                    print _format_txt_target_result(target,
                                                    result_dict[target])

        result_queue.task_done()

    # --TERMINATE--

    # Make sure all the processes had time to terminate
    task_queue.join()
    result_queue.join()
    #[process.join() for process in process_list] # Causes interpreter shutdown errors
    exec_time = time() - start_time

    # Output XML doc to a file if needed
    if shared_settings['xml_file']:
        result_xml_attr = {
            'httpsTunnel': str(shared_settings['https_tunnel_host']),
            'totalScanTime': str(exec_time),
            'defaultTimeout': str(shared_settings['timeout']),
            'startTLS': str(shared_settings['starttls'])
        }

        result_xml = Element('results', attrib=result_xml_attr)

        # Sort results in alphabetical order to make the XML files (somewhat) diff-able
        xml_output_list.sort(key=lambda xml_elem: xml_elem.attrib['host'])
        for xml_element in xml_output_list:
            result_xml.append(xml_element)

        xml_final_doc = Element('document',
                                title="SSLyze Scan Results",
                                SSLyzeVersion=PROJECT_VERSION,
                                SSLyzeWeb=PROJECT_URL)
        # Add the list of invalid targets
        xml_final_doc.append(
            ServersConnectivityTester.get_xml_result(targets_ERR))
        # Add the output of the plugins
        xml_final_doc.append(result_xml)

        # Remove characters that are illegal for XML
        # https://lsimons.wordpress.com/2011/03/17/stripping-illegal-characters-out-of-xml-in-python/
        xml_final_string = tostring(xml_final_doc, encoding='UTF-8')
        illegal_xml_chars_RE = re.compile(
            u'[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')
        xml_sanitized_final_string = illegal_xml_chars_RE.sub(
            '', xml_final_string)

        # Hack: Prettify the XML file so it's (somewhat) diff-able
        xml_final_pretty = minidom.parseString(
            xml_sanitized_final_string).toprettyxml(indent="  ",
                                                    encoding="utf-8")

        if shared_settings['xml_file'] == '-':
            # Print XML output to the console if needed
            print xml_final_pretty
        else:
            # Otherwise save the XML output to a file
            with open(shared_settings['xml_file'], 'w') as xml_file:
                xml_file.write(xml_final_pretty)

    if not shared_settings['quiet'] and shared_settings['xml_file'] != '-':
        print _format_title('Scan Completed in {0:.2f} s'.format(exec_time))
Example No. 32
File: figs.py Project: euske/slides
 def tostring(self):
     return tostring(self.toxml())
 def __str__(self):
     return tostring(self.xml(), encoding='utf-8')
Example No. 34
def create_annotation(xml_file, from_fasst):
    """Create annotations by importing from FASST sleep scoring file.

    Parameters
    ----------
    xml_file : path to xml file
        annotation file that will be created
    from_fasst : path to FASST file
        .mat file containing the scores

    Returns
    -------
    instance of Annotations

    TODO
    ----
    Merge create_annotation and create_empty_annotations
    """
    xml_file = Path(xml_file)
    try:
        mat = loadmat(str(from_fasst),
                      variable_names='D',
                      struct_as_record=False,
                      squeeze_me=True)
    except ValueError:
        raise UnrecognizedFormat(
            str(from_fasst) + ' does not look like a FASST .mat file')

    D = mat['D']
    info = D.other.info
    score = D.other.CRC.score

    microsecond, second = modf(info.hour[2])
    start_time = datetime(*info.date, int(info.hour[0]), int(info.hour[1]),
                          int(second), int(microsecond * 1e6))
    first_sec = score[3, 0][0]
    last_sec = score[0, 0].shape[0] * score[2, 0]

    root = Element('annotations')
    root.set('version', VERSION)

    info = SubElement(root, 'dataset')
    x = SubElement(info, 'filename')
    x.text = D.other.info.fname
    x = SubElement(info, 'path')  # not to be relied on
    x.text = D.other.info.fname
    x = SubElement(info, 'start_time')
    x.text = start_time.isoformat()

    x = SubElement(info, 'first_second')
    x.text = str(int(first_sec))
    x = SubElement(info, 'last_second')
    x.text = str(int(last_sec))

    xml = parseString(tostring(root))
    with xml_file.open('w') as f:
        f.write(xml.toxml())

    annot = Annotations(xml_file)

    n_raters = score.shape[1]
    for i_rater in range(n_raters):
        rater_name = score[1, i_rater]
        epoch_length = int(score[2, i_rater])
        annot.add_rater(rater_name, epoch_length=epoch_length)

        for epoch_start, epoch in enumerate(score[0, i_rater]):
            if isnan(epoch):
                continue
            annot.set_stage_for_epoch(epoch_start * epoch_length,
                                      FASST_STAGE_KEY[int(epoch)],
                                      save=False)

    annot.save()

    return annot
Example No. 35
    def to_svg(self, file=None, canvas_shape=None):
        """Convert the current layer state to an SVG.

        Parameters
        ----------
        file : path-like object, optional
            An object representing a file system path. A path-like object is
            either a str or bytes object representing a path, or an object
            implementing the `os.PathLike` protocol. If passed the svg will be
            written to this file
        canvas_shape : 4-tuple, optional
            View box of SVG canvas to be generated specified as `min-x`,
            `min-y`, `width` and `height`. If not specified, calculated
            from the last two dimensions of the layer.

        Returns
        ----------
        svg : string
            SVG representation of the layer.
        """

        if canvas_shape is None:
            min_shape = [r[0] for r in self.dims.range[-2:]]
            max_shape = [r[1] for r in self.dims.range[-2:]]
            shape = np.subtract(max_shape, min_shape)
        else:
            shape = canvas_shape[2:]
            min_shape = canvas_shape[:2]

        props = {
            'xmlns': 'http://www.w3.org/2000/svg',
            'xmlns:xlink': 'http://www.w3.org/1999/xlink',
        }

        xml = Element(
            'svg',
            height=f'{shape[0]}',
            width=f'{shape[1]}',
            version='1.1',
            **props,
        )

        transform = f'translate({-min_shape[1]} {-min_shape[0]})'
        xml_transform = Element('g', transform=transform)

        xml_list = self.to_xml_list()
        for x in xml_list:
            xml_transform.append(x)
        xml.append(xml_transform)

        svg = ('<?xml version=\"1.0\" standalone=\"no\"?>\n' +
               '<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.1//EN\"\n' +
               '\"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd\">\n' +
               tostring(xml, encoding='unicode', method='xml'))

        if file:
            # Save svg to file
            with open(file, 'w') as f:
                f.write(svg)

        return svg
Example No. 36
def main():
    # Load maintainer mapping (imported from dist-git).
    # TODO: check that mapping exist / error.
    print("Loading maintainer mapping...")
    with open(SCM_MAINTAINER_MAPPING) as raw:
        maintainer_mapping = json.load(raw)

    # Group databases files.
    databases = {}
    db_pattern = re.compile('^(fedora|epel)-([\w|-]+)_(primary|filelists|other).sqlite$')
    for db in os.listdir(DBS_DIR):
        if (not db_pattern.match(db)):
            sys.exit("Invalid object in {}: {}".format(DBS_DIR, db))

        (product, branch, db_type) = db_pattern.findall(db)[0]
        release_branch = "{}-{}".format(product, branch)
        if release_branch in databases:
            databases[release_branch][db_type] = db
        else:
            databases[release_branch] = { db_type: db }

    # Build internal package metadata structure / cache.
    packages = {}
    srpm_pattern = re.compile("^(.+)-.+-.+.src.rpm$")
    changelog_mail_pattern = re.compile("<(.+@.+)>")
    release_branch_pattern = re.compile("^([fedora|epel]+-[\w|\d]+)-?([a-z|-]+)?$")
    for release_branch in databases.keys():
        print("> Processing database files for {}.".format(release_branch))

        for db_type in ["primary", "filelists", "other"]:
            if db_type not in databases[release_branch]:
                sys.exit("No {} database for {}.".format(db_type, release_branch))

        (_, primary) = open_db(databases[release_branch]["primary"])
        (_, filelist) = open_db(databases[release_branch]["filelists"])
        (_, other) = open_db(databases[release_branch]["other"])

        for raw in primary.execute('SELECT * FROM packages'):
            pkg = packages.get(raw["name"])
            revision = "{}-{}".format(raw["version"], raw["release"])
            first_pkg_encounter = False

            # Register unknown packages.
            if pkg == None:
                pkg = Package(raw["name"])
                packages[pkg.name] = pkg
                first_pkg_encounter = True

            # Override package metadata with rawhide (= latest) values.
            if first_pkg_encounter or release_branch == "rawhide":
                pkg.summary = raw["summary"]
                pkg.description = raw["description"]
                pkg.upstream = raw["url"]
                pkg.license = raw["rpm_license"]
                pkg.maintainers = maintainer_mapping["rpms"].get(pkg.name, [])

            # Handle subpackage specific case.
            (srpm_name) = srpm_pattern.findall(raw["rpm_sourcerpm"])[0]
            if pkg.name != srpm_name:
                pkg.subpackage_of = srpm_name

            # XXX: we do not resolve files and changelog here because storing
            # them in the packages hash would require multiple GBs of RAM
            # (roughly 1GB per repository).

            # Always register branch-specific metadata.
            (release, branch) = release_branch_pattern.findall(release_branch)[0]
            if branch == "":
                branch = "base"

            pkg.set_release(release, raw["pkgKey"], branch, revision)

    # Set license and maintainers for subpackages. We have to wait for all
    # packages to have been processed since subpackage might have been
    # processed before its parent.
    print(">> Handling subpackages...")
    for pkg in packages.values():
        if pkg.subpackage_of != None:
            parent = packages.get(pkg.subpackage_of)
            if parent != None:
                parent.subpackages += [pkg.name]
                pkg.maintainers = packages[pkg.subpackage_of].maintainers

    print(">>> {} packages have been extracted.".format(len(packages)))

    print("Sending data to Solr index...")

    pkg_xml = Element('add')
    pkg_count = 0
    max_pkg_count = len(packages)

    # Submit packages to Solr index, 500 at a time.
    for pkg in packages.values():
        pkg_el = Element('doc')

        pkg_el_name = Element('field', { "name": "id" })
        pkg_el_name.text = pkg.name
        pkg_el.append(pkg_el_name)

        pkg_el_desc = Element('field', { "name": "description" })
        pkg_el_desc.text = pkg.description
        pkg_el.append(pkg_el_desc)

        pkg_xml.append(pkg_el)

        pkg_count += 1
        if (pkg_count % 500 == 0 or pkg_count == max_pkg_count):
            requests.post(f"{SOLR_URL}solr/{SOLR_CORE}/update?commit={str(pkg_count == max_pkg_count).lower()}", data=tostring(pkg_xml), headers={'Content-Type': 'application/xml'})
            pkg_xml.clear()
            # print("Submitted {}/{} packages.".format(pkg_count, max_pkg_count))

    print("DONE.")
    print("> {} packages submitted to solr.".format(len(packages)))
Example No. 37
#! /usr/bin/env python
#-*- coding:utf-8 -*-
# version : Python 2.7.13

from xml.etree.ElementTree import Element
from xml.etree.ElementTree import tostring


def dict_to_xml(tag, d):
    elem = Element(tag)
    for key, val in d.items():
        child = Element(key)
        child.text = str(val)
        elem.append(child)
    return elem


d1 = {'id': 1, 'name': 'test1'}
e = dict_to_xml('student', d1)

print tostring(e)
Example No. 38
 def __str__(self):
     if self.prettyprint:
         self.indent(self.xml)
     s = tostring(self.xml).decode(self.encoding)
     return s
Example No. 39
# -*- coding: utf-8 -*-
import itertools
import os
import plistlib
import unicodedata
import sys

from xml.etree.ElementTree import Element, SubElement, tostring

"""You should run your script via /bin/bash with all escape options ticked.
The command line should be

python yourscript.py "{query}" arg2 arg3 ..."""

UNESCAPE_CHARACTERS = u""" ;()"""

_MAX_RESULTS_DEFAULT = 9

preferences = plistlib.readPlist('info.plist')
bundleid = preferences['bundleid']

class Item(object):
    @classmethod
    def unicode(cls, value):
        try:
            items = iter(value.items())
        except AttributeError:
            return unicode(value)
        else:
            return dict(map(unicode, item) for item in items)

    def __init__(self, attributes, title, subtitle, icon=None):
        self.attributes = attributes
        self.title = title
        self.subtitle = subtitle
        self.icon = icon

    def __str__(self):
        return tostring(self.xml()).decode('utf-8')

    def xml(self):
        item = Element(u'item', self.unicode(self.attributes))
        for attribute in (u'title', u'subtitle', u'icon'):
            value = getattr(self, attribute)
            if value is None:
                continue
            if len(value) == 2 and isinstance(value[1], dict):
                (value, attributes) = value
            else:
                attributes = {}
            SubElement(item, attribute, self.unicode(attributes)).text = self.unicode(value)
        return item

def args(characters=None):
    return tuple(unescape(decode(arg), characters) for arg in sys.argv[1:])

def config():
    return _create('config')

def decode(s):
    return unicodedata.normalize('NFD', s.decode('utf-8'))

def uid(uid):
    return u'-'.join(map(str, (bundleid, uid)))

def unescape(query, characters=None):
    for character in (UNESCAPE_CHARACTERS if (characters is None) else characters):
        query = query.replace('\\%s' % character, character)
    return query

def work(volatile):
    path = {
        True: '~/Library/Caches/com.runningwithcrayons.Alfred-2/Workflow Data',
        False: '~/Library/Application Support/Alfred 2/Workflow Data'
    }[bool(volatile)]
    return _create(os.path.join(os.path.expanduser(path), bundleid))

def write(text):
    sys.stdout.write(text)

def xml(items, maxresults=_MAX_RESULTS_DEFAULT):
    root = Element('items')
    for item in itertools.islice(items, maxresults):
        root.append(item.xml())
    return tostring(root, encoding='utf-8')

def _create(path):
    if not os.path.isdir(path):
        os.mkdir(path)
    if not os.access(path, os.W_OK):
        raise IOError('No write access: %s' % path)
    return path
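
Based only on the module above, the pieces appear to fit together as follows: build Item objects, serialize them with xml(), and write() the result for Alfred to read. A hedged usage sketch (hypothetical titles and arguments):

item = Item({'uid': uid('wifi'), 'arg': 'HomeNetwork', 'valid': 'yes'},
            'HomeNetwork',
            'Press Enter to connect',
            icon='icon.png')
write(xml([item], maxresults=_MAX_RESULTS_DEFAULT))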
Example No. 40
def extract_proxy_info(tr_el):
    """
    Returns an (ip:port, proxy type, anon level) tuple
    from the provided table row element.

    :type tr_el: xml.etree.ElementTree.Element
    :rtype: (unicode, unicode, unicode)
    """
    port = None
    kind = None
    level = None

    for td_num, td_el in enumerate(tr_el):
        if not td_el.text:
            continue
        if re.match(r'(?m)\s*\d+\s*', td_el.text):
            if port is not None:
                raise Exception('Cannot cope with multiple ports: port=%s text=<<%s>>'
                                % (port, td_el.text))
            port = int(re.sub(r'\s*', '', td_el.text))
        elif re.match(r'HTTPS?|SOCKS[45]', td_el.text):
            kind = td_el.text
        elif 7 == td_num:  # re.match(r'(?i)low|high')
            level = td_el.text

    if port is None:
        raise ValueError('Expected port, found None')

    # we have to grab the style's parent because ElementTree
    # doesn't permit .parentNode and "dom" doesn't have .findall :-/
    #: :type: list[xml.etree.ElementTree.Element]
    style_parent_nl = tr_el.findall('.//*[style]')
    if not style_parent_nl:
        LOG.error('Expected "style" parent, found None in %s', tostring(tr_el))
        return
    if 1 != len(style_parent_nl):
        LOG.warning('Expected only one style parent, found %d of them',
                    len(style_parent_nl))
    style_parent = style_parent_nl[0]

    #: :type: xml.etree.ElementTree.Element
    sty = style_parent.find('.//style')
    if sty is None:
        LOG.warning('Expected "style" child, found None: %s',
                    tostring(style_parent))
        return
    bad_classes = find_display_none_classes(sty.text)
    LOG.debug('style[%s] -= %s', sty.text, bad_classes)

    #: :type: list[unicode]
    parts = []

    for ip_el in style_parent:
        LOG.debug('IP_EL=((%s))', tostring(ip_el))
        if 'style' == ip_el.tag:
            # don't continue or you'll eat the .tail text
            # which very well could contain a number or dot
            pass
        elif 'class' in ip_el.attrib:
            sp_class = ip_el.attrib['class']
            css = []
            if sp_class:
                css = sp_class.split(' ')
            ok = reduce(lambda a, b: a and b,
                        [x not in bad_classes for x in css],
                        True)
            if ok:
                LOG.debug('#class(%s)=%s', css, ip_el.text)
                parts.append(ip_el.text)
        elif 'style' in ip_el.attrib:
            st = ip_el.attrib['style']
            if 'x' not in find_display_none_classes('.x{%s}' % st):
                LOG.debug('#style(%s)=%s', st, ip_el.text)
                parts.append(ip_el.text)
        else:
            if ip_el.text:
                LOG.warning('??=<<%s>>' % ip_el.text)
        if ip_el.tail:
            parts.append(ip_el.tail)

    if not parts:
        LOG.warning('Your TD contained no IP parts')
        return

    ip_addr = u''.join(parts)
    if not re.match(r'\d+\.\d+\.\d+\.\d+', ip_addr):
        raise ValueError('That does not appear to be an IP: %s' % ip_addr)
    ip_port = '%s:%d' % (ip_addr, port)
    LOG.debug('ip=%s', ip_port)
    return ip_port, kind, level
Example No. 41
 def _dump_tree(self, filename='/Users/rsenseman/Desktop/dump.xml'):
     with open(filename, 'w') as f:
         f.write(tostring(self.tree.getroot(), encoding='utf8').decode('utf8'))
     return None
Example No. 42
#random number function
def rn1():
    random.seed(int(datetime.now().strftime("%f")) * 1000)
    rn1 = random.random() * 1000000000
    return int(rn1)


#ID input from user
def getID():
    ID = str(input("\nEnter ID: \n"))
    ID = ID.upper()
    return "ftp://*****:*****@10.10.70.200/UMS/VOD/" + ID + "/" + ID + ".xml"


#WSDL URL
client = Client('http://10.226.187.29:5954/bme/services/AMSService?WSDL')

#RAW SOAP message check
messagexml = client.create_message(client.service,
                                   'notify',
                                   notifyRequest={
                                       'MD5CcheckSum': "",
                                       'NO': rn1(),
                                       'URL': getID()
                                   })
xml1 = tostring(messagexml, encoding="unicode")
print(xml1)

#POST SOAP message
#print(client.service.notify(notifyRequest={'MD5CcheckSum': "", 'NO': rn1(), 'URL': getID()}))
Example No. 43
        'SSID_end':     header.find('SSID')+4,
        'BSSID_beg':    header.find('BSSID'),
        'BSSID_end':    header.find('RSSI')-1,
        'RSSI_beg':     header.find('RSSI'),
        'RSSI_end':     header.find('CHANNEL')-1,
        'SECURITY_beg': header.find('SECURITY')
    }

output_results = []

for item in cmd_results:
    output_result = {}
    output_result['SSID']        =      item[0:formatter['SSID_end']].strip()
    output_result['BSSID']       =      item[formatter['BSSID_beg']:formatter['BSSID_end']].strip()
    output_result['RSSI']        =      item[formatter['RSSI_beg']:formatter['RSSI_end']].strip()
    output_result['SECURITY']    =      item[formatter['SECURITY_beg']:].strip()
    output_results.append(output_result)

#Generate XML
items = Element('items')
for result in output_results:
    item = SubElement(items, 'item', {'autocomplete':result['SSID'], 'uid':result['BSSID'], 'arg':result['SSID']})
    title = SubElement(item, 'title')
    title.text = result['SSID']
    subtitle = SubElement(item, 'subtitle')
    subtitle.text = 'RSSI:'+result['RSSI']+'\t\t\t\t'+'Security:'+result['SECURITY']
    icon = SubElement(item, 'icon')
    icon.text = 'icon.png'

print tostring(items)
Example No. 44
 def print_xml(self):
     '''
     print string version of the elements
     '''
     print tostring(self._xml_root)
Example No. 45
from xml.etree.ElementTree import Element, tostring
from xml.sax.saxutils import escape, unescape


def dict_to_xml(tag, d):
    '''
    Turn a simple dict of key/value pairs into XML
    '''
    elem = Element(tag)
    for key, val in d.items():
        child = Element(key)
        child.text = str(val)
        elem.append(child)
    return elem


# An example
s = {'name': 'GOOG', 'shares': 100, 'price': 490.1}
e = dict_to_xml('stock', s)
print(e)

print()

print(tostring(e))

print()

e.set('_id', '1234')
print(tostring(e))

print()

escaped = escape('<spam>')
print(escaped)
print(unescape(escaped))
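
For completeness, a hedged sketch of the reverse direction, rebuilding a plain dict from the serialized element (note that all values come back as strings):

from xml.etree.ElementTree import fromstring

def xml_to_dict(xml_bytes):
    # Inverse of dict_to_xml: child tags become keys, element text becomes values.
    root = fromstring(xml_bytes)
    return {child.tag: child.text for child in root}

print(xml_to_dict(tostring(e)))   # {'name': 'GOOG', 'shares': '100', 'price': '490.1'}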
Example No. 46
def _read_option(section_name, opt):
    """
    Reads an option (inside a section) and returns the name and the value.
    """

    opt_name = opt.tag.lower()

    if section_name == 'open-scap':
        if opt.attrib:
            opt_value = {}
            for a in opt.attrib:
                opt_value[a] = opt.attrib[a]
            # profiles
            profiles_list = []
            for profiles in opt.iter():
                profiles_list.append(profiles.text)

            if profiles_list:
                opt_value['profiles'] = profiles_list
        else:
            opt_value = opt.text
    elif section_name == 'syscheck' and opt_name == 'directories':
        opt_value = []

        json_attribs = {}
        for a in opt.attrib:
            json_attribs[a] = opt.attrib[a]

        if opt.text:
            for path in opt.text.split(','):
                json_path = json_attribs.copy()
                json_path['path'] = path.strip()
                opt_value.append(json_path)
    elif section_name == 'syscheck' and opt_name in ('synchronization',
                                                     'whodata'):
        opt_value = {}
        for child in opt:
            child_section, child_config = _read_option(child.tag.lower(),
                                                       child)
            opt_value[child_section] = child_config.split(
                ',') if child_config.find(',') > 0 else child_config
    elif (section_name == 'cluster' and opt_name == 'nodes') or \
            (section_name == 'sca' and opt_name == 'policies'):
        opt_value = [child.text for child in opt]
    elif section_name == 'labels' and opt_name == 'label':
        opt_value = {'value': opt.text}
        for a in opt.attrib:
            opt_value[a] = opt.attrib[a]
    elif section_name == 'localfile' and opt_name == 'query':
        # Remove new lines, empty spaces and backslashes
        regex = rf'<{opt_name}>(.*)</{opt_name}>'
        opt_value = re.match(
            regex,
            re.sub(
                '(?:(\n) +)', '',
                tostring(opt, encoding='unicode').replace('\\<', '<').replace(
                    '\\>', '>')).strip()).group(1)
    elif section_name == 'remote' and opt_name == 'protocol':
        opt_value = [elem.strip() for elem in opt.text.split(',')]
    else:
        if opt.attrib:
            opt_value = {}
            for a in opt.attrib:
                opt_value[a] = opt.attrib[a]
            if list(opt):
                for child in opt:
                    child_section, child_config = _read_option(
                        child.tag.lower(), child)
                    opt_value[child_section] = child_config
            else:
                opt_value['item'] = opt.text
        else:
            opt_value = opt.text

    return opt_name, _replace_custom_values(opt_value)
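
A minimal, self-contained sketch (an illustration, not the Wazuh implementation above) of the 'syscheck'/'directories' branch: the element's attributes are copied onto every comma-separated path so each path keeps its own options.

from xml.etree.ElementTree import fromstring

# illustration only: the element content below is made up
opt = fromstring('<directories check_all="yes">/etc, /usr/bin</directories>')
paths = []
for p in opt.text.split(','):
    entry = dict(opt.attrib)   # copy the shared attributes
    entry['path'] = p.strip()  # attach the individual path
    paths.append(entry)

print(paths)
# [{'check_all': 'yes', 'path': '/etc'}, {'check_all': 'yes', 'path': '/usr/bin'}]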
Esempio n. 47
0
def _prettify(elem):
    """Return a pretty-printed XML string for the Element."""
    raw_string = tostring(elem, 'utf-8')
    reparsed = minidom.parseString(raw_string)
    return reparsed.toprettyxml(indent="  ")
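
For context, a self-contained sketch of what a helper like _prettify above boils down to end to end; the element names are made up.

from xml.dom import minidom
from xml.etree.ElementTree import Element, SubElement, tostring

root = Element('servers')                                  # made-up sample data
SubElement(root, 'server', name='alpha').text = '10.0.0.1'

raw_string = tostring(root, 'utf-8')                       # bytes, as in _prettify
print(minidom.parseString(raw_string).toprettyxml(indent="  "))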
Esempio n. 48
0
def main():
    # For py2exe builds
    freeze_support()

    # Handle SIGINT to terminate processes
    signal.signal(signal.SIGINT, sigint_handler)

    start_time = time()
    #--PLUGINS INITIALIZATION--
    sslyze_plugins = PluginsFinder()
    available_plugins = sslyze_plugins.get_plugins()
    available_commands = sslyze_plugins.get_commands()

    # Create the command line parser and the list of available options
    sslyze_parser = CommandLineParser(available_plugins, __version__)

    online_servers_list = []
    invalid_servers_list = []

    # Parse the command line
    try:
        good_server_list, bad_server_list, args_command_list = sslyze_parser.parse_command_line()
        invalid_servers_list.extend(bad_server_list)
    except CommandLineParsingError as e:
        print e.get_error_msg()
        return

    should_print_text_results = not args_command_list.quiet and args_command_list.xml_file != '-'  \
        and args_command_list.json_file != '-'
    if should_print_text_results:
        print '\n\n\n' + _format_title('Available plugins')
        for plugin in available_plugins:
            print '  ' + plugin.__name__
        print '\n\n'


    #--PROCESSES INITIALIZATION--
    if args_command_list.https_tunnel:
        # Maximum one process to not kill the proxy
        plugins_process_pool = PluginsProcessPool(sslyze_plugins, args_command_list.nb_retries,
                                                  args_command_list.timeout, max_processes_nb=1)
    else:
        plugins_process_pool = PluginsProcessPool(sslyze_plugins, args_command_list.nb_retries,
                                                  args_command_list.timeout)

    #--TESTING SECTION--
    # Figure out which hosts are up and fill the task queue with work to do
    if should_print_text_results:
        print _format_title('Checking host(s) availability')

    connectivity_tester = ServersConnectivityTester(good_server_list)
    connectivity_tester.start_connectivity_testing(network_timeout=args_command_list.timeout)

    SERVER_OK_FORMAT = u'   {host}:{port:<25} => {ip_address} {client_auth_msg}'
    SERVER_INVALID_FORMAT = u'   {server_string:<35} => WARNING: {error_msg}; discarding corresponding tasks.'

    # Store and print servers we were able to connect to
    for server_connectivity_info in connectivity_tester.get_reachable_servers():
        online_servers_list.append(server_connectivity_info)
        if should_print_text_results:
            client_auth_msg = ''
            client_auth_requirement = server_connectivity_info.client_auth_requirement
            if client_auth_requirement == ClientAuthenticationServerConfigurationEnum.REQUIRED:
                client_auth_msg = '  WARNING: Server REQUIRED client authentication, specific plugins will fail.'
            elif client_auth_requirement == ClientAuthenticationServerConfigurationEnum.OPTIONAL:
                client_auth_msg = '  WARNING: Server requested optional client authentication'

            print SERVER_OK_FORMAT.format(host=server_connectivity_info.hostname, port=server_connectivity_info.port,
                                          ip_address=server_connectivity_info.ip_address,
                                          client_auth_msg=client_auth_msg)

        # Send tasks to worker processes
        for plugin_command in available_commands:
            if getattr(args_command_list, plugin_command):
                # Get this plugin's options if there's any
                plugin_options_dict = {}
                for option in available_commands[plugin_command].get_interface().get_options():
                    # Was this option set ?
                    if getattr(args_command_list,option.dest):
                        plugin_options_dict[option.dest] = getattr(args_command_list, option.dest)

                plugins_process_pool.queue_plugin_task(server_connectivity_info, plugin_command, plugin_options_dict)


    for tentative_server_info, exception in connectivity_tester.get_invalid_servers():
        invalid_servers_list.append((tentative_server_info.server_string, exception))


    # Print servers we were NOT able to connect to
    if should_print_text_results:
        for server_string, exception in invalid_servers_list:
            if isinstance(exception, ServerConnectivityError):
                print SERVER_INVALID_FORMAT.format(server_string=server_string, error_msg=exception.error_msg)
            else:
                # Unexpected bug in SSLyze
                raise exception
        print '\n\n'

    # Keep track of how many tasks have to be performed for each target
    task_num = 0
    for command in available_commands:
        if getattr(args_command_list, command):
            task_num += 1


    # --REPORTING SECTION--
    # XML output
    xml_output_list = []

    # Each host has a list of results
    result_dict = {}
    # We cannot use the server_info object directly as its address will change due to multiprocessing
    RESULT_KEY_FORMAT = u'{hostname}:{ip_address}:{port}'.format
    for server_info in online_servers_list:
        result_dict[RESULT_KEY_FORMAT(hostname=server_info.hostname, ip_address=server_info.ip_address,
                                      port=server_info.port)] = []

    # Process the results as they come
    for plugin_result in plugins_process_pool.get_results():
        server_info = plugin_result.server_info
        result_dict[RESULT_KEY_FORMAT(hostname=server_info.hostname, ip_address=server_info.ip_address,
                                      port=server_info.port)].append(plugin_result)

        result_list = result_dict[RESULT_KEY_FORMAT(hostname=server_info.hostname, ip_address=server_info.ip_address,
                                                    port=server_info.port)]

        if len(result_list) == task_num:
            # Done with this server; print the results and update the xml doc
            if args_command_list.xml_file:
                xml_output_list.append(_format_xml_target_result(server_info, result_list))

            if should_print_text_results:
                print _format_txt_target_result(server_info, result_list)


    # --TERMINATE--
    exec_time = time()-start_time

    # Output JSON to a file if needed
    if args_command_list.json_file:
        json_output = {'total_scan_time': str(exec_time),
                       'network_timeout': str(args_command_list.timeout),
                       'network_max_retries': str(args_command_list.nb_retries),
                       'invalid_targets': [],
                       'accepted_targets': []}

        # Add the list of invalid targets
        for server_string, exception in invalid_servers_list:
            if isinstance(exception, ServerConnectivityError):
                json_output['invalid_targets'].append({server_string: exception.error_msg})
            else:
                # Unexpected bug in SSLyze
                raise exception

        # Add the output of the plugins for each server
        for host_str, plugin_result_list in result_dict.iteritems():
            server_info = plugin_result_list[0].server_info
            json_output['accepted_targets'].append(_format_json_result(server_info, plugin_result_list))

        final_json_output = json.dumps(json_output, default=_object_to_json_dict, sort_keys=True, indent=4)
        if args_command_list.json_file == '-':
            # Print the JSON output to the console if needed
            print final_json_output
        else:
            # Otherwise save the JSON output to a file
            with open(args_command_list.json_file, 'w') as json_file:
                json_file.write(final_json_output)


    # Output XML doc to a file if needed
    if args_command_list.xml_file:
        result_xml_attr = {'totalScanTime': str(exec_time),
                           'networkTimeout': str(args_command_list.timeout),
                           'networkMaxRetries': str(args_command_list.nb_retries)}
        result_xml = Element('results', attrib = result_xml_attr)

        # Sort results in alphabetical order to make the XML files (somewhat) diff-able
        xml_output_list.sort(key=lambda xml_elem: xml_elem.attrib['host'])
        for xml_element in xml_output_list:
            result_xml.append(xml_element)

        xml_final_doc = Element('document', title="SSLyze Scan Results", SSLyzeVersion=__version__,
                                SSLyzeWeb=PROJECT_URL)

        # Add the list of invalid targets
        invalid_targets_xml = Element('invalidTargets')
        for server_string, exception in invalid_servers_list:
            if isinstance(exception, ServerConnectivityError):
                error_xml = Element('invalidTarget', error=exception.error_msg)
                error_xml.text = server_string
                invalid_targets_xml.append(error_xml)
            else:
                # Unexpected bug in SSLyze
                raise exception
        xml_final_doc.append(invalid_targets_xml)

        # Add the output of the plugins
        xml_final_doc.append(result_xml)

        # Remove characters that are illegal for XML
        # https://lsimons.wordpress.com/2011/03/17/stripping-illegal-characters-out-of-xml-in-python/
        xml_final_string = tostring(xml_final_doc, encoding='UTF-8')
        illegal_xml_chars_RE = re.compile(u'[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')
        xml_sanitized_final_string = illegal_xml_chars_RE.sub('', xml_final_string)

        # Hack: Prettify the XML file so it's (somewhat) diff-able
        xml_final_pretty = minidom.parseString(xml_sanitized_final_string).toprettyxml(indent="  ", encoding="utf-8" )

        if args_command_list.xml_file == '-':
            # Print XML output to the console if needed
            print xml_final_pretty
        else:
            # Otherwise save the XML output to a file
            with open(args_command_list.xml_file, 'w') as xml_file:
                xml_file.write(xml_final_pretty)


    if should_print_text_results:
        print _format_title('Scan Completed in {0:.2f} s'.format(exec_time))
Esempio n. 49
0
 def tostring(self, *args, **kwargs):
     return tostring(self)
Esempio n. 50
0
import os
from xml.etree import ElementTree as et
from xml.etree.ElementTree import fromstring, tostring

path = '/home/pfb16181/NetBeansProjects/extractTitleAbstractPubMedData_generateWordDoc2vec/data/pubmed_filter_2017_2018_2019/'
out_path = '/home/pfb16181/NetBeansProjects/extractTitleAbstractPubMedData_generateWordDoc2vec/parsed_data/parsed_pubmed_filter_2017_2018_2019/'
temp_list = []
for filename in os.listdir(path):
    print('parsing ' + filename)
    temp_str = ''
    if not filename.endswith('.xml'):
        continue
    fullname = os.path.join(path, filename)

    # Parse the file once, then round-trip it through tostring()/fromstring()
    tree = et.parse(fullname)
    string_tree = tostring(tree.getroot())
    n_string_tree = str(string_tree, 'utf-8')
    root = fromstring(n_string_tree)

    try:
        temp_str += tree.find('.//ArticleTitle').text
        for j in root.findall('.//AbstractText'):
            temp_str += j.text
    except (AttributeError, TypeError):
        print('error')

    out_name = "".join(filename.partition('.xml')[0].split()) + ".txt"
    with open(os.path.join(out_path, out_name), "w") as text_file:
        text_file.write(temp_str)
Esempio n. 51
0
 def __str__(self):
     return tostring(self.xml()).decode('utf-8')

 def xml(self):
     item = Element(u'item', self.unicode(self.attributes))
     for attribute in (u'title', u'subtitle', u'icon'):
         value = getattr(self, attribute)
         if value is None:
             continue
         if len(value) == 2 and isinstance(value[1], dict):
             (value, attributes) = value
         else:
             attributes = {}
         SubElement(item, attribute, self.unicode(attributes)).text = self.unicode(value)
     return item
Esempio n. 52
0
from xml.etree.ElementTree import Element

def dict_to_xml(tag, d):
    '''
    Turn a simple dict of key/value pairs into XML
    '''
    elem = Element(tag)
    for key, val in d.items():
        child = Element(key)
        child.text = str(val)
        elem.append(child)
    return elem

s = { 'name': 'GOOG', 'shares': 100, 'price':490.1 }
e = dict_to_xml('stock', s)
e

from xml.etree.ElementTree import tostring
tostring(e)

e.set('_id','1234')
tostring(e)

def dict_to_xml_str(tag, d):
    '''
    Turn a simple dict of key/value pairs into XML
    '''
    parts = ['<{}>'.format(tag)]
    for key, val in d.items():
        parts.append('<{0}>{1}</{0}>'.format(key,val))
    parts.append('</{}>'.format(tag))
    return ''.join(parts)

d = { 'name' : '<spam>' }
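
The snippet is cut off here; presumably d = {'name': '<spam>'} is meant to show that the string-building version does not escape markup in the values. A possible continuation (a sketch, not the original text) using the standard library's escape helper:

from xml.sax.saxutils import escape

# illustration only: dict_to_xml_str() interpolates the value verbatim
print(dict_to_xml_str('item', d))   # <item><name><spam></name></item>  (not well-formed XML)
print(escape(d['name']))            # &lt;spam&gt;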
Esempio n. 53
0
def populate05(inputFile):
    #cleaning XML file
    try:
        f = open(inputFile, 'r')
    except Exception as e:
        return
    #mdb.connect('localhost', 'username', 'password', 'database');
    con = mdb.connect('localhost', 'root', 'password', 'Patents')

    with con:
        cur = con.cursor()
        cur.execute(
            "CREATE TABLE IF NOT EXISTS USPatents(patentNumber INT, abstract TEXT, inventor TEXT, \
            currentUSClass TEXT, primaryExaminer TEXT, assistantExaminer TEXT, attorney TEXT, claims MEDIUMTEXT, description MEDIUMTEXT)"
        )

        cur.execute(
            "CREATE TABLE IF NOT EXISTS InternationalClass(patentNumber VARCHAR(8), interClass TEXT)"
        )

        cur.execute("CREATE TABLE IF NOT EXISTS \
             FurtherUSClass(patentNumber INT, furtherUSClass TEXT)")

        cur.execute("CREATE TABLE IF NOT EXISTS \
             FieldOfResearch(patentNumber INT, class TEXT)")

        cur.execute("CREATE TABLE IF NOT EXISTS \
            PublicationReference(patentNumber INT, country TEXT, kind TEXT, name TEXT, date BIGINT)"
                    )

        cur.execute("CREATE TABLE IF NOT EXISTS \
            ApplicationReference(patentNumber INT, docNumber TEXT, country TEXT, date BIGINT)"
                    )

        cur.execute("CREATE TABLE IF NOT EXISTS \
            ReferPatcit(patentNumber INT, docNumber TEXT, country TEXT, kind TEXT, category TEXT, name TEXT, date BIGINT)"
                    )

        cur.execute("CREATE TABLE IF NOT EXISTS \
            ReferNplcit(patentNumber INT, value TEXT, category TEXT)")

    xmld = '<TOP>\n'
    codecs.register_error('spacer', lambda ex: (u' ', ex.start + 1))
    for _t in f:
        _t = _t.replace("&", "")
        _t = _t.decode('utf8', 'spacer')
        if _t.find('<?xml') == -1 and _t.find('<!DOCTYPE') == -1:
            xmld = xmld + _t
        try:
            if _t.find('</us-patent-grant>') > -1:
                #print _t
                xmld = xmld + '</TOP>'
                g = open("tmp", "w")
                g.write(xmld)
                g.close()
                xmld = '<TOP>\n'

                #start parsing
                doc = ElementTree()
                doc.parse("tmp")
                root = doc.getroot()

                #loop for main patent
                for n in root.iter("us-patent-grant"):
                    #patent number
                    patentNumber = 0
                    #appplication Number and publication Number
                    for ar in n.iter("publication-reference"):
                        dateFiled = None
                        country = ''
                        kind = ''
                        name = ''
                        for j in ar.iter("document-id"):
                            for k in j.iter("doc-number"):
                                if k.text.isdigit():
                                    patentNumber = int(k.text)
                            if not patentNumber:
                                continue
                            for k in j.iter("date"):
                                if k.text.isdigit():
                                    dateFiled = int(k.text)
                            for k in j.iter("country"):
                                country = k.text
                            for k in j.iter("kind"):
                                kind = k.text
                            for k in j.iter("name"):
                                name = k.text
                            with con:
                                cur = con.cursor()
                                cur.execute(
                                    "INSERT INTO PublicationReference(patentNumber, country, kind, name, date) VALUES(%s, %s, %s, %s, %s)",
                                    (patentNumber, country, kind, name,
                                     dateFiled))
                    if not patentNumber:
                        continue

                    for ar in n.iter("application-reference"):
                        docNumber = 0
                        dateFiled = None
                        country = ''
                        for j in ar.iter("document-id"):
                            for k in j.iter("doc-number"):
                                docNumber = k.text
                            for k in j.iter("date"):
                                if k.text.isdigit():
                                    dateFiled = int(k.text)
                            for k in j.iter("country"):
                                country = k.text
                            with con:
                                cur = con.cursor()
                                cur.execute(
                                    "INSERT INTO ApplicationReference(patentNumber, docNumber, country, date) VALUES(%s, %s, %s, %s)",
                                    (patentNumber, docNumber, country,
                                     dateFiled))

                    #inventor
                    inventor = ''

                    for ar in n.iter("parties"):
                        for pr in ar.iter("applicants"):
                            for j in pr.iter("applicant"):
                                for k in j.iter("addressbook"):
                                    firstName = ''
                                    lastName = ''
                                    address = ''

                                    for ln in k.iter("last-name"):
                                        if ln.text: lastName = ln.text
                                    for ln in k.iter("first-name"):
                                        if ln.text: firstName = ln.text

                                    for add in k.iter("address"):
                                        for street in add.iter("street"):
                                            if street.text:
                                                address = street.text
                                        for city in add.iter("city"):
                                            if city.text:
                                                address += ' - ' + city.text
                                        for country in add.iter("country"):
                                            if country.text:
                                                address += ' - ' + country.text
                                    inventor += (firstName + ' ' + lastName +
                                                 ' (' + address + ');')

                    #international
                    for ic in n.iter("classification-locarno"):
                        for t in ic.iterfind("main-classification"):
                            inter = t.text
                            with con:
                                cur = con.cursor()
                                cur.execute(
                                    "INSERT INTO InternationalClass(patentNumber, interClass) VALUES(%s, %s)",
                                    (patentNumber, inter))

                    #field of research
                    for rs in n.iter("field-of-search"):
                        for k in rs.iter("classification-national"):
                            fieldOfResearch = ''
                            for j in k.iter("main-classification"):
                                fieldOfResearch = j.text
                            with con:
                                cur = con.cursor()
                                cur.execute(
                                    "INSERT INTO FieldOfResearch(patentNumber, class) VALUES(%s, %s)",
                                    (patentNumber, fieldOfResearch.strip()))

                    #examiner
                    primaryExaminer = ''
                    assitantExaminer = ''

                    for E in n.iter("examiners"):
                        for pr in E.iter("primary-examiner"):
                            firstName = ''
                            lastName = ''
                            for ln in pr.iter("last-name"):
                                lastName = ln.text
                            for ln in pr.iter("first-name"):
                                firstName = ln.text
                            if firstName == None: firstName = ''
                            if lastName == None: lastName = ''
                            primaryExaminer += lastName + ' ' + firstName + '; '

                        for pr in E.iter("assistant-examiner"):
                            firstName = ''
                            lastName = ''
                            for ln in pr.iter("last-name"):
                                lastName = ln.text
                            for ln in pr.iter("first-name"):
                                firstName = ln.text

                            if firstName == None: firstName = ''
                            if lastName == None: lastName = ''
                            assitantExaminer += lastName + ' ' + firstName + '; '

                    #Attorney, Agent or Firm,
                    attorney = ''
                    p = n.find(
                        "us-bibliographic-data-grant/parties/agents/agent/addressbook"
                    )
                    if p is not None:
                        for ar in p.iter("orgname"):
                            attorney += ar.text + ';'

                    #Abstract text
                    abstr = ''
                    for abst in n.iter("abstract"):
                        p = abst.find("p")
                        _p = tostring(p)
                        abstr = convertToHTMLView(_p.encode('UTF-8'))

                    #US Class
                    #furtherUSClass
                    usClass = ''
                    p = n.find(
                        "us-bibliographic-data-grant/classification-national")
                    for ma in p.iter("main-classification"):
                        if ma.text:
                            usClass = ma.text
                    for fu in p.iter("further-classification"):
                        if fu.text:
                            furtherClass = fu.text.strip()
                            with con:
                                cur = con.cursor()
                                cur.execute(
                                    "INSERT INTO FurtherUSClass(patentNumber, furtherUSClass) VALUES(%s, %s)",
                                    (patentNumber, furtherClass))

                    #patent references
                    for _n in n.iter("references-cited"):
                        for j in _n.iter("citation"):
                            for tmp in j.iter("patcit"):
                                country = ''
                                kind = ''
                                name = ''
                                dateFiled = ''
                                for k in tmp.iter("document-id"):
                                    _t = k.find("country")
                                    if _t is not None and _t.text == "US":
                                        country = "US"
                                        t = k.find("kind")
                                        if t is not None: kind = t.text
                                        t = k.find("date")
                                        if t is not None and t.text.isdigit():
                                            dateFiled = int(t.text)
                                        t = k.find("name")
                                        if t is not None: name = t.text
                                        number = k.find("doc-number")
                                        if number is not None and number.text:
                                            num = number.text
                                            with con:
                                                cur = con.cursor()
                                                cur.execute(
                                                    "INSERT INTO ReferPatcit(patentNumber, docNumber, country, kind, name, date) VALUES(%s, %s, %s, %s, %s, %s)",
                                                    (patentNumber, num,
                                                     country, kind, name,
                                                     dateFiled))
                            value = ''
                            for tmp in j.iter("nplcit"):
                                cat = j.find("category")
                                t = tmp.find("othercit")
                                value = tostring(t)
                                if value:
                                    with con:
                                        cur = con.cursor()
                                        cur.execute(
                                            "INSERT INTO ReferNplcit(patentNumber, value, category) VALUES(%s, %s, %s)",
                                            (patentNumber,
                                             convertToHTMLView(
                                                 value.encode("UTF-8")),
                                             cat.text))

                    #claims
                    claim = ''
                    cl = n.find("claims")
                    if cl is not None:
                        _cl = tostring(cl)
                        claim = convertToHTMLView(_cl.encode('UTF-8')).strip()

                    #descriptions
                    description = ''
                    des = n.find("description")
                    if des is not None:
                        _des = tostring(des)
                        description = convertToHTMLView(
                            _des.encode('UTF-8')).strip()

                    with con:
                        cur = con.cursor()
                        cur.execute(
                            "Select * from USPatents where patentNumber = %s",
                            (patentNumber))
                        if cur.rowcount == 0:
                            cur.execute(
                                "INSERT INTO USPatents(patentNumber, abstract, inventor, currentUSClass,\
                        primaryExaminer, assistantExaminer, attorney, claims, description) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)",
                                (patentNumber, abstr, inventor.encode('UTF-8'),
                                 usClass.encode('UTF-8'),
                                 primaryExaminer.encode('UTF-8'),
                                 assitantExaminer.encode('UTF-8'),
                                 attorney.encode('UTF-8'), claim, description))

        except Exception as e:
            print e
Esempio n. 54
0
    }).text = fileinfo['guid']
    SubElement(
        item,
        'pubDate').text = fileinfo['pubdate'].strftime("%a, %d %b %Y %T %z")
    SubElement(item, 'itunes:duration').text = time.strftime(
        "%H:%M:%S", time.gmtime(int(fileinfo['duration'])))

    chapters = SubElement(item,
                          'psc:chapters',
                          attrib={
                              'version': "1.2",
                              'xmlns:psc': 'http://podlove.org/simple-chapters'
                          })
    for chapter in sorted(fileinfo['chapters'], key=lambda x: x['start']):
        SubElement(chapters,
                   'psc:chapter',
                   attrib={
                       'start':
                       time.strftime("%H:%M:%S",
                                     time.gmtime(chapter['start'])),
                       'title':
                       chapter['title']
                   })

### save feed
with open(path.join(DIR, 'podcast.xml'), "w") as feed_file:
    feed_file.write(tostring(root, encoding='UTF-8',
                             method='xml').decode())  # xml in single line
    #feed_file.write(xml.dom.minidom.parseString(tostring(root, encoding='UTF-8', method='xml').decode()).toprettyxml())  # pretty-print, requires xml.dom.minidom
Esempio n. 55
0
def animal(request,pk):   
   t = Template("""
  <head>
<link rel="stylesheet" type="text/css" href="/static/style.css" />
</head>

   <div id= 'notepad'>
 

      

<div id= 'content'>

    <div id= 'text'>
      <h3>Animal's common name:</h3>
<p class="sansseriff"><div>{{ animal.common_names }}</div></p>
    
    <h3>Species ID:</h3>
<p class="sansseriff"><div>{{ animal.species_ID }}</div></p>
  
    <h3>Animal's scientific name:</h3>
<p class="sansseriff"><div>{{ animal.scientific_name }}</div></p>
  
    <h3>Animal's other names:</h3>
{% if not animal.other_names %}-{% endif %}
<p class="sansseriff"><div>{{ animal.other_names }}</div></p>
  
    <h3>Summary of the animal:</h3>
{% if not animal.summary %}-{% endif %}
<p class="sansseriff"><div>{{ animal.summary }}</div></p>
  
    <h3>Animal's GE score:</h3>
<p class="sansseriff"><div>{{ animal.ge_score }}</div></p>


    <h3>Possibly extinct?:</h3>
<p class="sansseriff"><div>{{ animal.extinct }}</div></p>
  
    <h3>Animal's origin country:</h3>
{% if not animal.country_found_in %}-{% endif %}
<p class="sansseriff"><div>{{ animal.country_found_in }}</div></p>
</div><!--text-->
<div id= 'funstuff'>
<img src="{{image}}" />
      
{% if not animal.country_found_in %} 
   {% else %}
   <iframe id= 'map' width="400" height="300" frameborder="0" scrolling="no" marginheight="0" marginwidth="0" src="http://maps.google.com/maps?f=q&amp;source=s_q&amp;hl=en&amp;geocode=&amp;q=uk&amp;aq=&amp;ie=UTF8&amp;hq=&amp;hnear={{location}}&amp;t=m&amp;z=5&amp;output=embed"></iframe>
{% endif %}

<img src="/static/status.png" class= 'status'/>
</div><!--endfun -->
</div><!--content-->
</div><!--notepad-->

   """)
   
   animal= Animal.objects.get(pk=pk)
   
   # look up an image for the animal the user selected via the Wikimedia Commons search API
   url= urllib2.urlopen('https://commons.wikimedia.org/w/api.php?action=query&generator=search&gsrnamespace=6&gsrsearch=%22'+ urllib.quote_plus(animal.common_names ) +'%22&gsrlimit=1&prop=imageinfo&iiprop=url&format=xml' )
   
   tree = ElementTree()
   
   tree.parse(url)
   
   print tostring(tree.getroot())
   # pull the image URL out of the response; fall back to an empty string if none is found
   try:
    images= tree.find('query').find('pages').find('page').find('imageinfo').find('ii').attrib['url']
   except AttributeError:  
    images= ""
   # the country the animal is originally found in, used to centre the embedded Google Maps frame below
   location= urllib.quote_plus(animal.country_found_in ) 

   c = Context({"animal": animal, "image": images, "location": location})
   
   # the image URL found above is passed to the template so it can be displayed on the page
 
   print images 

   return HttpResponse(t.render(c))
Esempio n. 56
0
    def getNfo(self, movie_info={}, data={}):

        # return imdb url only
        if self.conf('meta_url_only'):
            return 'http://www.imdb.com/title/%s/' % toUnicode(
                data['library']['identifier'])

        nfoxml = Element('movie')

        # Title
        try:
            el = SubElement(nfoxml, 'title')
            el.text = toUnicode(getTitle(data['library']))
        except:
            pass

        # IMDB id
        try:
            el = SubElement(nfoxml, 'id')
            el.text = toUnicode(data['library']['identifier'])
        except:
            pass

        # Runtime
        try:
            runtime = SubElement(nfoxml, 'runtime')
            runtime.text = '%s min' % movie_info.get('runtime')
        except:
            pass

        # Other values
        types = [
            'year', 'mpaa', 'originaltitle:original_title', 'outline', 'plot',
            'tagline', 'premiered:released'
        ]
        for type in types:

            if ':' in type:
                name, type = type.split(':')
            else:
                name = type

            try:
                if data['library'].get(type):
                    el = SubElement(nfoxml, name)
                    el.text = toUnicode(movie_info.get(type, ''))
            except:
                pass

        # Rating
        for rating_type in ['imdb', 'rotten', 'tmdb']:
            try:
                r, v = movie_info['rating'][rating_type]
                rating = SubElement(nfoxml, 'rating')
                rating.text = str(r)
                votes = SubElement(nfoxml, 'votes')
                votes.text = str(v)
                break
            except:
                log.debug('Failed adding rating info from %s: %s',
                          (rating_type, traceback.format_exc()))

        # Genre
        for genre in movie_info.get('genres', []):
            genres = SubElement(nfoxml, 'genre')
            genres.text = toUnicode(genre)

        # Actors
        for actor in movie_info.get('actors', []):
            actors = SubElement(nfoxml, 'actor')
            name = SubElement(actors, 'name')
            name.text = toUnicode(actor)

        # Directors
        for director_name in movie_info.get('directors', []):
            director = SubElement(nfoxml, 'director')
            director.text = toUnicode(director_name)

        # Writers
        for writer in movie_info.get('writers', []):
            writers = SubElement(nfoxml, 'credits')
            writers.text = toUnicode(writer)

        # Clean up the xml and return it
        nfoxml = xml.dom.minidom.parseString(tostring(nfoxml))
        xml_string = nfoxml.toprettyxml(indent='  ')
        text_re = re.compile(r'>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL)
        xml_string = text_re.sub(r'>\g<1></', xml_string)

        return xml_string.encode('utf-8')
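
The regular-expression pass above collapses the whitespace that toprettyxml() inserts around text nodes; a tiny standalone illustration (tag and text made up):

import re

pretty = '<title>\n    Some Movie\n  </title>'        # roughly what toprettyxml() produces
text_re = re.compile(r'>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL)
print(text_re.sub(r'>\g<1></', pretty))               # <title>Some Movie</title>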
Esempio n. 57
0
    },
    '0137143419':{
        'title': 'Python Fundamentals',
        'year': 2009,
    },
}

books = Element('books')
for isbn,info in BOOKS.iteritems():
    book = SubElement(books, 'book')
    info.setdefault('authors', 'Wesley Chun')
    info.setdefault('edition', 1)
    for key, val in info.iteritems():
        SubElement(book, key).text = ', '.join(str(val).split(':'))

xml = tostring(books)
print  '*** RAW XML ***'
print xml

print '\n*** PRETTY-PRINTED XML ***'
dom = parseString(xml)
print dom.toprettyxml('  ')

print '*** FLAT STRUCTURE ***'
for elmt in books.getiterator():
    print elmt.tag, '-', elmt.text

print '\n*** TITLES ONLY ***'
for book in books.findall('.//title'):
    print book.text
Esempio n. 58
0
                         "parameter",
                         name="BrowserType1",
                         value="firefox")
    node2 = SubElement(child, "classes")
    if test_labels[test] != []:
        node3 = SubElement(node2,
                           'class',
                           name="msol." + test_labels[test_names[i]][0] + "." +
                           class_names[i])
    else:
        print(
            "Found a test case without label in JIRA. Please add class name as label."
        )
        sys.exit(1)

rough_string = tostring(root, 'utf-8')
reparsed = minidom.parseString(rough_string)
pretty_out = reparsed.toprettyxml(indent="\t")

with open("first.xml", "w") as f1:
    f1.write(pretty_out)
f1.close()

with open("first.xml") as f2:
    with open("testng.xml", "w") as f3:
        for line in f2:
            if "classname" in line:
                output_line = line.replace("classname", "class-name")
            elif "threadcount" in line:
                output_line = line.replace("threadcount", "thread-count")
            else:
Esempio n. 59
0
    def parse(self, pdf):
        try:
            fp = open(pdf.path, 'rb')
        except IOError as e:
            logging.error("PDFMinerParser.parse unable to open PDF: %s" % e)
            return

        parser = PDFParser(fp)
        doc = PDFDocument(parser)

        if doc.found_eof and doc.eof_distance > 3:
            pdf.blob = parser.read_from_end(doc.eof_distance)

        visited = set()

        self.treebuild.start("pdf", {"path": pdf.path})

        for xref in doc.xrefs:
            for objid in xref.get_objids():

                if objid in visited:
                    continue

                visited.add(objid)

                obj_attrs = {"id": str(objid), "type": "normal"}
                obj_data = ''
                obj_xml = self.treebuild.start("object", obj_attrs)

                try:
                    self.dump(doc.getobj(objid))
                except pdftypes.PDFObjectNotFound as e:
                    obj_xml.set("type", "malformed")
                    obj_data = parser.read_n_from(xref.get_pos(objid)[1], 4096)
                    obj_data = obj_data.replace('<', '0x3C')
                except TypeError:
                    obj_xml.set("type", "unknown")
                    obj_data = parser.read_n_from(xref.get_pos(objid)[1], 512)
                except Exception as e:
                    obj_xml.set("type", "exception")
                    obj_data = parser.read_n_from(xref.get_pos(objid)[1], 512)
                    self.add_xml_node("exception", {}, e.message)

                self.treebuild.data(obj_data)
                try:
                    self.treebuild.end("object")
                except AssertionError as e:
                    logging.error("Parse end object error: %s" % e)
                    sys.stderr.write("%s\n" % tostring(obj_xml))

            self.treebuild.start("trailer", {})
            self.dump(xref.trailer)
            self.treebuild.end("trailer")

        self.treebuild.end("pdf")

        pdf.xml = self.treebuild.close()

        pdf.errors = doc.errors
        pdf.bytes_read = parser.BYTES
        pdf.parsed = True
        fp.close()
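
The parse() method above drives self.treebuild by hand, which looks like an xml.etree.ElementTree.TreeBuilder. A tiny standalone sketch of that start()/data()/end()/close() cycle (tag names and data are made up):

from xml.etree.ElementTree import TreeBuilder, tostring

tb = TreeBuilder()
tb.start("pdf", {"path": "sample.pdf"})
tb.start("object", {"id": "1", "type": "normal"})
tb.data("object body goes here")
tb.end("object")
tb.end("pdf")

root = tb.close()
print(tostring(root))
# b'<pdf path="sample.pdf"><object id="1" type="normal">object body goes here</object></pdf>'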
Esempio n. 60
0
    def search(self, params = {}, header = 0, count = False):
        if not isinstance(params, dict):
            raise biomart.BiomartException("'params' argument must be a dict")

        if self.verbose:
            print("[BiomartDataset:'%s'] Searching using following params:" % self.name)
            pprint.pprint(params)

        # read filters and attributes from params
        filters = params.get('filters', {})
        attributes = params.get('attributes', [])

        # check filters
        for filter_name, filter_value in filters.items():
            dataset_filter = self.filters.get(filter_name, None)

            if not dataset_filter:
                self.show_filters()
                raise biomart.BiomartException("The filter '%s' does not exist." % filter_name)

            if len(dataset_filter.accepted_values) > 0 and filter_value not in dataset_filter.accepted_values:
                error_msg = "The value '%s' for filter '%s' cannot be used." % (filter_value, filter_name)
                error_msg += " Use one of: [%s]" % ", ".join(map(str, dataset_filter.accepted_values))
                raise biomart.BiomartException(error_msg)

        # check attributes unless we're only counting
        if not count:
            # no attributes given, use default attributes
            if not attributes:
                attributes = [a.name for a in self.attributes.values() if a.is_default]

            # if no default attributes have been defined, raise an exception
            if not attributes:
                raise biomart.BiomartException("at least one attribute is required, none given")

            for attribute_name in attributes:
                if attribute_name not in self.attributes.keys():
                    self.show_attributes()
                    raise biomart.BiomartException("The attribute '%s' does not exist." % attribute_name)

            # selected attributes must belong to the same attribute page.
            if len(set([self.attributes[a].attribute_page for a in attributes])) > 1:
                self.show_attributes()
                raise biomart.BiomartException("You must use attributes that belong to the same attribute page.")

        # filters and attributes looks ok, start building the XML query
        root = Element('Query')
        root.attrib.update({
            'virtualSchemaName': 'default',  # TODO: use database virtualSchemaName instead (if any error)
            'formatter': 'TSV',
            'header': str(header),
            'uniqueRows': '1',
            'datasetConfigVersion': '0.6',
            'count': count is True and '1' or ''
        })

        dataset = SubElement(root, "Dataset")
        dataset.attrib.update({
            'name': self.name,
            'interface': self.interface
        })

        # Add filters to the XML query
        for filter_name, filter_value in filters.items():
            dataset_filter = self.filters[filter_name]

            filter_elem = SubElement(dataset, "Filter")
            filter_elem.set('name', filter_name)

            if 'boolean_list' == dataset_filter.filter_type:
                if filter_value is True or filter_value.lower() in ('included', 'only'):
                    filter_elem.set('excluded', '0')
                elif filter_value is False or filter_value.lower() == 'excluded':
                    filter_elem.set('excluded', '1')
            else:
                if isinstance(filter_value, list) or isinstance(filter_value, tuple):
                    filter_value = ",".join(map(str, filter_value))
                filter_elem.set('value', str(filter_value))

        # Add attributes to the XML query, unless we're only counting
        if not count:
            for attribute_name in attributes:
                attribute_elem = SubElement(dataset, "Attribute")
                attribute_elem.set('name', str(attribute_name))

        if self.verbose:
            print("[BiomartDataset] search query:\n%s" % tostring(root))

        return self.server.get_request(query = tostring(root))
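
For a hypothetical call such as dataset.search({'filters': {'chromosome_name': '1'}, 'attributes': ['ensembl_gene_id']}) against an 'hsapiens_gene_ensembl'-style dataset (names here are examples, not taken from the code above), the tostring(root) payload handed to get_request() serializes along these lines (indented for readability; tostring() emits a single line):

<Query virtualSchemaName="default" formatter="TSV" header="0" uniqueRows="1" datasetConfigVersion="0.6" count="">
  <Dataset name="hsapiens_gene_ensembl" interface="default">
    <Filter name="chromosome_name" value="1" />
    <Attribute name="ensembl_gene_id" />
  </Dataset>
</Query>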