Example #1
def doSomethingWithResult(response):
    if response is None:
        sys.stderr.write("KO\n")
        return "KO"
    else:
        tree = ET.fromstring(response.text.encode('utf-8'))
        sys.stderr.write("XML response: "+str(response.text.encode('utf-8'))+"\n")

        status = "Free"
        # arrgh, namespaces!!
        elems = tree.findall(".//{http://schemas.microsoft.com/exchange/services/2006/types}BusyType")
        for elem in elems:
            if status == "Free":
                status = elem.text
                sys.stderr.write("Change status to: " + str(status) + "\n")

        tree2 = ET.fromstring(response.request.body.encode('utf-8'))
        elems = tree2.findall(".//{http://schemas.microsoft.com/exchange/services/2006/types}Address")
        for e in elems:
            room = e.text

        elems = tree.findall(".//faultcode")
        if elems:
            print("Error occurred")
            status = "N/A"


        sys.stderr.write(str(datetime.datetime.now().isoformat())+": Status for room: "+str(rooms[room])+" => "+status+"\n")
        result.append((status, rooms[room], room))
        return "OK"
Example #2
    def _kantara_log_assertion_id(self, saml_response, ticket):
        """
        Log the assertion id, which _might_ be required by Kantara.

        :param saml_response: authn response as a compact XML string
        :param ticket: Login process state

        :type saml_response: str | unicode
        :type ticket: SSOLoginData

        :return: the response re-serialized as an XML string, or None if it could not be parsed
        """
        printed = False
        try:
            parser = DefusedElementTree.DefusedXMLParser()
            xml = DefusedElementTree.XML(str(saml_response), parser)

            # For debugging, it is very useful to get the full SAML response pretty-printed in the logfile directly
            self.logger.debug("Created AuthNResponse :\n\n{!s}\n\n".format(DefusedElementTree.tostring(xml)))
            printed = True

            attrs = xml.attrib
            assertion = xml.find('{urn:oasis:names:tc:SAML:2.0:assertion}Assertion')
            self.logger.info('{!s}: id={!s}, in_response_to={!s}, assertion_id={!s}'.format(
                ticket.key, attrs['ID'], attrs['InResponseTo'], assertion.get('ID')))

            return DefusedElementTree.tostring(xml)
        except Exception as exc:
            self.logger.debug("Could not parse message as XML: {!r}".format(exc))
            if not printed:
                # Fall back to logging the whole response
                self.logger.info("{!s}: authn response: {!s}".format(ticket.key, saml_response))
Example #3
def findPlayer(saveFileLocation,read_data=False):
	if not read_data:
		root = ET.parse(saveFileLocation).getroot()
	else:
		root = ET.fromstring(saveFileLocation)
	player = root.find("player")
	return player
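
A quick usage sketch (both arguments here are made up): the read_data flag switches between parsing a file on disk and parsing an in-memory string.

player = findPlayer("savegame.xml")                            # from a file path
player = findPlayer("<root><player/></root>", read_data=True)  # from a raw string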
Example #4
def handle_SOAP():
    upload_key = current_app.config.get('UPLOAD_KEY')

    soapaction = request.headers.get('SOAPAction')
    current_app.logger.debug("Received SOAPAction: {}".format(soapaction))

    if soapaction == '"urn:StartSession"':
        root = etree.fromstring(request.data)

        transfermode = root.find(".//transfermode").text
        transfermodetimestamp = root.find(".//transfermodetimestamp").text

        cnonce = root.find(".//cnonce").text
        macaddress = root.find(".//macaddress").text
        credential = create_credential(macaddress, cnonce, upload_key)

        # EyeFi card doesn't accept cookies, so set a global var instead
        global snonce
        snonce = make_snonce()

        return render_template('start_session.xml', transfermode=transfermode,
                               transfermodetimestamp=transfermodetimestamp,
                               credential=credential, snonce=snonce)

    elif soapaction == '"urn:GetPhotoStatus"':
        root = etree.fromstring(request.data)

        macaddress = root.find(".//macaddress").text
        credential = root.find(".//credential").text

        # Unused, here for future reference
        # filename = root.find(".//filename").text
        # filesize = root.find(".//filesize").text
        # filesignature = root.find(".//filesignature").text
        # flags = root.find(".//flags").text

        expected_cred = create_credential(macaddress, snonce, upload_key,
                                          from_eyefi=True)
        current_app.logger.debug("Credential: {}\n"
                                 "Expected:   {}".format(credential,
                                                         expected_cred))

        if credential == expected_cred:
            return render_template('get_photo_status.xml', fileid=1, offset=0)
        else:
            return abort(403)

    elif soapaction == '"urn:MarkLastPhotoInRoll"':
        root = etree.fromstring(request.data)

        # Unused, here for future reference
        # macaddress = root.find(".//macaddress").text
        # mergedelta = root.find(".//mergedelta").text

        return render_template("mark_last.xml")
Example #5
def parse_junit(xml, filename):
    """Generate failed tests as a series of (name, duration, text, filename) tuples."""
    try:
        tree = ET.fromstring(xml)
    except ET.ParseError as e:
        logging.exception('parse_junit failed for %s', filename)
        try:
            tree = ET.fromstring(re.sub(r'[\x00\x80-\xFF]+', '?', xml))
        except ET.ParseError as e:
            yield 'Gubernator Internal Fatal XML Parse Error', 0.0, str(e), filename
            return
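
A minimal usage sketch for the generator above (the file name is a placeholder):

with open("junit.xml") as f:
    for name, duration, text, filename in parse_junit(f.read(), "junit.xml"):
        print("%s failed after %.1fs" % (name, duration))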
Example #6
    def __init__(self, filename, test):
        bug_patterns = dict()
        dupes = dict()

        SEVERITY = {
            '1': 'High',
            '2': 'Medium',
            '3': 'Low'
        }

        tree = ET.parse(filename)
        root = tree.getroot()

        for pattern in root.findall('BugPattern'):
            plain_pattern = re.sub(r'<[b-z/]*?>|<a|</a>|href=', '', ET.tostring(pattern.find('Details'), method='text'))
            bug_patterns[pattern.get('type')] = plain_pattern

        for bug in root.findall('BugInstance'):
            desc = ''
            for message in bug.itertext():
                desc += message

            dupe_key = bug.get('instanceHash')

            title = bug.find('ShortMessage').text
            cwe = bug.get('cweid', default=0)
            severity = SEVERITY[bug.get('priority')]
            description = desc
            mitigation = bug_patterns[bug.get('type')]
            impact = 'N/A'
            references = 'N/A'

            if dupe_key in dupes:
                finding = dupes[dupe_key]
            else:
                finding = Finding(
                    title=title,
                    cwe=cwe,
                    severity=severity,
                    description=description,
                    mitigation=mitigation,
                    impact=impact,
                    references=references,
                    test=test,
                    active=False,
                    verified=False,
                    numerical_severity=Finding.get_numerical_severity(severity),
                    static_finding=True
                )
                dupes[dupe_key] = finding

        self.items = dupes.values()
Example #7
 def parse_xml(self, xml, filename):
     if not xml:
         return  # can't extract results from nothing!
     try:
         tree = ET.fromstring(xml)
     except ET.ParseError as e:
         logging.exception('parse_junit failed for %s', filename)
         try:
             tree = ET.fromstring(re.sub(r'[\x00\x80-\xFF]+', '?', xml))
         except ET.ParseError as e:
             self.failed.append(
                 ('Gubernator Internal Fatal XML Parse Error', 0.0, str(e), filename, ''))
             return
Example #8
   def extract(self, data, dependency_results):
      results_dir = tempfile.mkdtemp() + '/'
      temp_pdf_file = extraction.utils.temp_file(data)

      try:
         command_args = ['java', '-jar', config.ALGORITHMS_JAR_PATH, config.ALGORITHMS_PERL_PATH, 'f', temp_pdf_file, results_dir]
         status, stdout, stderr = extraction.utils.external_process(command_args, timeout=20)
      except subprocess.TimeoutExpired:
         shutil.rmtree(results_dir)
         raise RunnableError('Algorithms Jar timed out while processing document')
      finally:
         os.remove(temp_pdf_file)

      if status != 0:
         raise RunnableError('Algorithms Jar Failure. Possible error:\n' + stderr)

      paths = glob.glob(results_dir + '*.xml')
      if len(paths) != 1:
         raise RunnableError('Wrong number of results files from Algorithms Jar.')

      tree = safeET.parse(paths[0])
      xml_root = tree.getroot()

      shutil.rmtree(results_dir)

      return ExtractorResult(xml_result=xml_root)
Example #9
def findRooms(prefix=None,anywhere=False):
    global rooms

    rooms={}
    xml_template = open("resolvenames_template.xml", "r").read()
    xml = Template(xml_template)

    data = unicode(xml.substitute(name=prefix))

    headers = {}
    headers["Content-type"] = "text/xml; charset=utf-8"

    response = requests.post(url, headers=headers, data=data, auth=HttpNtlmAuth(user, password))
    tree = ET.fromstring(response.text.encode('utf-8'))

    elems=tree.findall(".//{http://schemas.microsoft.com/exchange/services/2006/types}Resolution")
    for elem in elems:
        email = elem.findall(".//{http://schemas.microsoft.com/exchange/services/2006/types}EmailAddress")
        name = elem.findall(".//{http://schemas.microsoft.com/exchange/services/2006/types}DisplayName")
        sys.stderr.write("Perhaps found "+str(name[0].text)+" <"+str(email[0].text)+">\n")
        if prefix is not None:
            if len(email) > 0 and len(name) > 0 :
                if email[0].text.startswith("conf_") or email[0].text.startswith("CONF_"):
                    if name[0].text.startswith(prefix.upper()) or anywhere:
                        sys.stderr.write("Validate "+str(name[0].text)+" <"+str(email[0].text)+">\n")
                        rooms[email[0].text] = name[0].text
                    else:
                        sys.stderr.write("Not validated due to not starting with prefix: "+str(prefix.upper())+"\n")
                else:
                    sys.stderr.write("Not validated due to not starting with conf_\n")
            else:
                sys.stderr.write("Not validated due to null length\n")
        else:
            sys.stderr.write("Not validated due to prefix is none\n")
    return rooms        
Example #10
def postSimpleMessage():

    postdata = request.body.read().decode("utf-8")

    message = "<Result><StatusCode>FAILED</StatusCode><Text>Authentication failed.</Text></Result>"

    tree = xmlParser.fromstring(postdata)

    userNameFromRequest, passwordFromRequest = auth.extractAuth(tree)

    if (auth.handleCommunityAuth(userNameFromRequest, passwordFromRequest)):

        message = "<Result><StatusCode>OK</StatusCode><Text></Text></Result>"
        handleAlerts(tree, True)

    elif auth.authenticate(userNameFromRequest, passwordFromRequest, mongohost, mongoport):

        message = "<Result><StatusCode>OK</StatusCode><Text></Text></Result>"
        handleAlerts(tree, False)
    else:
        print("Authentication failed....")

    response = {}
    headers = {'Content-type': 'application/html'}
    response['status'] = "Success"
    raise HTTPResponse(message, status=200, headers=headers)
Example #11
def etree_title_provider(body):
    """Get the title of a page from its content body.

    This implementation uses the defusedxml wrapper for etree. If no title
    is found on the page then None is returned.

    Args:
        body (str): The content body of an xhtml page.

    Returns:
        str: The text of the first <title></title> tag or None if the title
            is not found or the body is invalid xhtml.
    """
    try:

        root = ElementTree.fromstring(body)

    except ElementTree.ParseError:

        return None

    for title in root.iter('title'):  # getiterator() was removed in Python 3.9

        return title.text

    return None
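
Usage sketch, with inline markup made up for illustration; note that a properly namespaced XHTML document would expose the tag as {http://www.w3.org/1999/xhtml}title instead.

assert etree_title_provider("<html><head><title>Hi</title></head></html>") == "Hi"
assert etree_title_provider("<not valid xml") is None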
Example #12
  def _parse(cls, xml):
    """ Parse the XML into a Normalizer.NormalizeResult """
    try:
      node = ElementTree.fromstring(xml).find("Address")
    except ElementTree.ParseError:
      log.error("Failed to parse xml", exc_info=True)
      return NormalFactory.NORMALIZE_FAILED

    try:
      result = Normalizer.NormalizeResult(
          success    = True,
          line1      = cls._get_or_none(node, "Address2"),
          line2      = cls._get_or_none(node, "Address1"),
          city       = cls._get_or_none(node, "City"),
          state      = cls._get_or_none(node, "State"),
          postalCode = cls._get_or_none(node, "Zip5"),
          raw        = xml)
    except Exception:
      log.error("Failed to parse", exc_info=True)
      result = Normalizer.NormalizeResult(
          success    = True,
          line1      = None,
          line2      = None,
          city       = None,
          state      = None,
          postalCode = None,
          raw        = xml)

    return result
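
The _get_or_none helper is referenced but not shown; a plausible reconstruction, assuming it only guards against missing child elements, would be:

@staticmethod
def _get_or_none(node, tag):
    # Hypothetical sketch: return the child's text, or None if the child is absent.
    child = node.find(tag)
    return child.text if child is not None else None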
Example #13
 def test_has_children(self):
     root_node = ElementTree.fromstring('''
         <data dataset="countries">
             <country
                 name="Liechtenstein"
                 gdppc="141100"
             />
             <country
                 name="Singapore"
                 gdppc="59900"
             />
         </data>
     ''')
     result = utils.convert_xml_to_dict(root_node)
     expected_result = {
         'dataset': 'countries',
         'data': [
             {
                 'name': 'Liechtenstein',
                 'gdppc': 141100
             },
             {
                 'name': 'Singapore',
                 'gdppc': 59900,
             },
         ],
     }
     self.assertEqual(expected_result, result)
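
utils.convert_xml_to_dict is not shown; a hedged reconstruction that satisfies this test copies attributes (coercing digit strings to int) and gathers children into a list keyed by the parent's tag:

def convert_xml_to_dict(node):
    # Hypothetical sketch, inferred only from the test's expected output.
    def coerce(value):
        return int(value) if value.isdigit() else value
    result = {key: coerce(val) for key, val in node.attrib.items()}
    children = [convert_xml_to_dict(child) for child in node]
    if children:
        result[node.tag] = children
    return result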
Example #14
def get_print_list(username, chart, period, api_key):
    '''return LastFM XML chart as a simple list'''

    url = 'http://ws.audioscrobbler.com/2.0/?method=user.gettop%s&user=%s&period=%s&api_key=%s' % (chart, username, period, api_key)
    print(url)
    raw_xml = urllib2.urlopen(url)

    print_list = []
    charts = ElementTree.fromstring(raw_xml.read())

    if chart == 'artists':
        for artist in charts.findall('topartists/artist'):
            print_list.append(artist.find('name').text)
    elif chart == 'albums':
        for album in charts.findall('topalbums/album'):
            for artist in album.findall('artist'):
                print_list.append("%s|%s" % (artist.find('name').text, album.find('name').text))
    elif chart == 'tracks':
        for track in charts.findall('toptracks/track'):
            for artist in track.findall('artist'):
                print_list.append("%s|%s" % (artist.find('name').text, track.find('name').text))
    else:
        raise CLIError(Exception("unknown type %s" % chart))

    return print_list
Example #15
def stations():
    db_stations_cache = g.mongo.db.caches.find_one({"_id": "stations"})
    bulk_op = g.mongo.db.stations.initialize_unordered_bulk_op()
    bulk_run = False
    if not db_stations_cache or db_stations_cache["cached_until"] < time.time():
        xml_stations_response = requests.get(
            "https://api.eveonline.com/eve/ConquerableStationList.xml.aspx", headers=xml_headers
        )
        # XML Parse
        try:
            xml_stations_tree = ElementTree.fromstring(xml_stations_response.text)
        except ElementTree.ParseError:
            print(xml_stations_response.text)
            return None

        # Store in database
        xml_time_pattern = "%Y-%m-%d %H:%M:%S"
        g.mongo.db.caches.update(
            {"_id": "stations"},
            {"cached_until": int(calendar.timegm(time.strptime(xml_stations_tree[2].text, xml_time_pattern)))},
            upsert=True,
        )
        for station in xml_stations_tree[1][0]:
            bulk_run = True
            bulk_op.find({"_id": int(station.attrib["stationID"])}).upsert().update(
                {"$set": {"name": station.attrib["stationName"]}}
            )
    if bulk_run:
        bulk_op.execute()
Example #16
def is_available(room_email,start_time,end_time):
    xml_template = open("getavailibility_template.xml", "r").read()
    xml = Template(xml_template)
    headers = {}
    headers["Content-type"] = "text/xml; charset=utf-8"
    data = unicode(xml.substitute(email=room_email, starttime=start_time, endtime=end_time)).strip()
    status = "KO"
    response = requests.post(url, headers=headers, data=data, auth=HttpNtlmAuth(user, password))
    if response is not None:
        status = "Free"
    tree = ET.fromstring(response.text.encode('utf-8'))
    # arrgh, namespaces!!
    elems = tree.findall(".//{http://schemas.microsoft.com/exchange/services/2006/types}BusyType")
    for elem in elems:
        status = elem.text

    elems = tree.findall(".//faultcode")
    if elems:
        sys.stderr.write("Error occurred\n")
        sys.stderr.write("tree: " + str(tree) + "\n")
        sys.stderr.write("response: " + response.text.encode('utf-8') + "\n")
        status = "N/A"

    sys.stderr.write("Room status: "+str(status)+"\n")
    return (status == "Free")
Example #17
def get_bugzilla_bug(bugzilla_url, bug_id):
    '''
    Read bug XML, return all fields and values in a dictionary.
    '''
    bug_xml = _fetch_bug_content(bugzilla_url, bug_id)
    tree = ElementTree.fromstring(bug_xml)

    bug_fields = {
        "long_desc": [],
        "attachment": [],
        "cc": [],
    }
    for bug in tree:
        for field in bug:
            if field.tag in ("long_desc", "attachment"):
                new = {}
                for data in field:
                    new[data.tag] = data.text
                bug_fields[field.tag].append(new)
            elif field.tag == "cc":
                bug_fields[field.tag].append(field.text)
            else:
                bug_fields[field.tag] = field.text

    return bug_fields
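
Usage sketch; the URL and bug id are placeholders, and the available field names depend on the Bugzilla instance:

fields = get_bugzilla_bug("https://bugzilla.example.org", 12345)
print(len(fields["cc"]), "people CC'd,", len(fields["attachment"]), "attachments")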
Example #18
    def update_config_value(self):
        self._encode_authentication()

        set_attr_funcs = (self._san_address,
                          self._san_user,
                          self._san_password,
                          self._san_product,
                          self._san_protocol,
                          self._lun_type,
                          self._lun_ready_wait_interval,
                          self._lun_copy_wait_interval,
                          self._lun_timeout,
                          self._lun_write_type,
                          self._lun_prefetch,
                          self._lun_policy,
                          self._lun_read_cache_policy,
                          self._lun_write_cache_policy,
                          self._storage_pools,
                          self._iscsi_default_target_ip,
                          self._iscsi_info,)

        tree = ET.parse(self.conf.cinder_huawei_conf_file)
        xml_root = tree.getroot()
        for f in set_attr_funcs:
            f(xml_root)
Example #19
def parse_notice(path):
    with open(path, 'r') as f:
        m = re.search(r'(?:<\?xml.*>\s+)?<infringement.*</infringement>', f.read(), re.IGNORECASE|re.DOTALL)
        if not m:
            raise RuntimeError('Couldn\'t find <infringement> tag in DMCA notice')

    try:
        xml = ElementTree.fromstring(m.group())
    except ElementTree.ParseError as e:
        log.error(e)
        raise RuntimeError('Could not parse DMCA notice XML') from e

    ns = {'acns': 'http://www.acns.net/ACNS'}

    try:
        ts = xml.findall('./acns:Source/acns:TimeStamp', ns)[0].text
        ip = xml.findall('./acns:Source/acns:IP_Address', ns)[0].text
        port = int(xml.findall('./acns:Source/acns:Port', ns)[0].text)
    except (IndexError, ValueError) as e:
        log.error(e)
        raise RuntimeError('Error parsing DMCA notice') from e

    try:
        ts = datetime.datetime.strptime(ts, '%Y-%m-%dT%H:%M:%SZ')
        ts = ts.replace(tzinfo=pytz.utc)  # replace() returns a new datetime
    except ValueError as e:
        raise ValueError('Could not parse timestamp: %s' % ts) from e

    return (ts, ip, port)
Example #20
def get(id_):
    text = requests.get(URL % id_, stream=True).raw.read()
    text = text.decode(utils.web.getEncoding(text) or 'utf8')
    root = ElementTree.fromstring(text)
    assert root.tag == 'root', root
    resto = root[0]
    assert resto.tag == 'resto', resto
    res = []
    for menu in resto:
        assert menu.tag == 'menu', menu
        date = menu.attrib['date']
        parsed_date = datetime.datetime.strptime(date, '%Y-%m-%d')
        day_limit = datetime.datetime.now() - datetime.timedelta(hours=14)
        if parsed_date < day_limit:
            continue
        midi = menu[0]
        assert midi.tag == 'midi', midi
        interesting = INTERESTING.get(id_, None)
        if interesting:
            meals = [x.text for x in midi
                     if x.attrib['nom'] in interesting]
        else:
            meals = [x.text for x in midi
                     if not any(y in x.text.lower() for y in BLACKLIST)]
        meals = [x.strip().replace('\n', ' ; ').strip() for x in meals
                 if x.strip()]
        res.append((date, meals))
    return res
Example #21
def clear_metadata(instream, outstream):
    try:
        with zipfile.ZipFile(instream) as inzip:
            opf_path = _get_path_of_content_opf(inzip)
            opf_content = _read_content_opf(inzip, opf_path)

            removed_a_node = False
            try:
                root = defused_etree.fromstring(opf_content)
                for main_element in root:
                    logger.debug("main el %s " % main_element.tag)
                    if re.match(".*metadata$", main_element.tag):
                        logger.debug("Found metadata tag, cleaning")

                        while list(main_element):  # do not remove using a for loop
                            # - this will skip elements in python 2.7.5!
                            node_to_remove = list(main_element)[0]
                            logger.debug("Removing node %s" % node_to_remove.tag)
                            main_element.remove(node_to_remove)
                            removed_a_node = True
            except defused_etree.ParseError as e:
                logger.error("Caught a parse error while trying to clear epub metadata: %s" % repr(e))
                raise ValueError("Invalid EPUB syntax")

            if removed_a_node:
                logger.debug("Writing a new epub file")
                with zipfile.ZipFile(outstream, 'w') as outzip:
                    try:
                        _copy_zip_contents(inzip, outzip, [opf_path])
                    except zipfile.BadZipfile as e:
                        raise ValueError("Caught a BadZipFile exception: %s" % repr(e))

                    new_content = ElementTree.tostring(root)
                    _write_content_opf(outzip, opf_path, new_content)
Example #22
def autosetup_ihc_products(hass: HomeAssistantType, config, ihc_controller,
                           controller_id):
    """Auto setup of IHC products from the IHC project file."""
    from defusedxml import ElementTree

    project_xml = ihc_controller.get_project()
    if not project_xml:
        _LOGGER.error("Unable to read project from IHC controller")
        return False
    project = ElementTree.fromstring(project_xml)

    # If an auto setup file exists in the configuration directory, it overrides the bundled one
    yaml_path = hass.config.path(AUTO_SETUP_YAML)
    if not os.path.isfile(yaml_path):
        yaml_path = os.path.join(os.path.dirname(__file__), AUTO_SETUP_YAML)
    yaml = load_yaml_config_file(yaml_path)
    try:
        auto_setup_conf = AUTO_SETUP_SCHEMA(yaml)
    except vol.Invalid as exception:
        _LOGGER.error("Invalid IHC auto setup data: %s", exception)
        return False
    groups = project.findall('.//group')
    for component in IHC_PLATFORMS:
        component_setup = auto_setup_conf[component]
        discovery_info = get_discovery_info(
            component_setup, groups, controller_id)
        if discovery_info:
            discovery.load_platform(
                hass, component, DOMAIN, discovery_info, config)
    return True
Example #23
    def parse(self, raw_data, filename=None):
        """ Parse the specified replay XML

        Args:
            raw_data:  Raw XML to be parsed

        Returns:
            None

        Raises:
            AssertionError: If the XML file has more than the expected top-level children
                (Expected: pov and doctype)
            AssertionError: If the first child is not a Doctype instance
            AssertionError: If the doctype does not specify the replay.dtd
            AssertionError: If the second child is not named 'pov'
            AssertionError: If the 'pov' element has more than two elements
            AssertionError: If the 'pov' element does not contain a 'cbid'
                element
            AssertionError: If the 'cbid' element value is blank
        """

        self.filename = filename

        tree = ET.fromstring(raw_data)
        assert tree.tag == 'pov'
        assert len(tree) in [2, 3]

        assert tree[0].tag == 'cbid'
        assert len(tree[0].tag) > 0
        self.name = tree[0].text

        assert tree[1].tag in ['seed', 'replay']

        seed_tree = None
        replay_tree = None
        if tree[1].tag == 'seed':
            seed_tree = tree[1]
            replay_tree = tree[2]
        else:
            seed_tree = None
            replay_tree = tree[1]

        if seed_tree is not None:
            assert len(seed_tree.tag) > 0
            seed = seed_tree.text
            assert len(seed) == 96
            if self.seed is not None:
                print "# Seed is set by XML and command line, using XML seed"
            self.seed = seed.decode('hex')

        parse_fields = {
            'decl': self.parse_decl,
            'read': self.parse_read,
            'write': self.parse_write,
            'delay': self.parse_delay,
        }

        for replay_element in replay_tree:
            assert replay_element.tag in parse_fields
            parse_fields[replay_element.tag](replay_element)
Example #24
    def check_feed(cls):
        """ Return a generator over the latest uploads to CPAN

        by querying an RSS feed.
        """

        url = "https://metacpan.org/feed/recent"

        try:
            response = cls.call_url(url)
        except Exception:  # pragma: no cover
            raise AnityaPluginException("Could not contact %s" % url)

        try:
            root = ET.fromstring(response.text)
        except ET.ParseError:
            raise AnityaPluginException("No XML returned by %s" % url)

        for item in root.iter(tag="{http://purl.org/rss/1.0/}item"):
            title = item.find("{http://purl.org/rss/1.0/}title")
            try:
                name, version = title.text.rsplit("-", 1)
            except ValueError:
                _log.info("Unable to parse CPAN package %s into a name and version", title.text)
                continue  # without this, `name` and `version` would be stale or unbound below
            homepage = "https://metacpan.org/release/%s/" % name
            yield name, homepage, cls.name, version
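
A hedged usage sketch for the feed generator (the CpanBackend class name is assumed):

import itertools

for name, homepage, backend, version in itertools.islice(CpanBackend.check_feed(), 5):
    print("%s %s  %s" % (name, version, homepage))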
Example #25
File: xml.py Project: jbau/edx-ora2
def update_from_xml_str(oa_block, xml, **kwargs):
    """
    Update the OpenAssessment XBlock's content from an XML string definition.
    Parses the string using a library that avoids some known security vulnerabilities in etree.

    Args:
        oa_block (OpenAssessmentBlock): The open assessment block to update.
        xml (unicode): The XML definition of the XBlock's content.

    Kwargs:
        same as `update_from_xml`

    Returns:
        OpenAssessmentBlock

    Raises:
        UpdateFromXmlError: The XML definition is invalid or the XBlock could not be updated.
        InvalidRubricError: The rubric was not semantically valid.
        InvalidAssessmentsError: The assessments are not semantically valid.
    """
    # Parse the XML content definition
    # Use the defusedxml library implementation to avoid known security vulnerabilities in ElementTree:
    # http://docs.python.org/2/library/xml.html#xml-vulnerabilities
    try:
        root = safe_etree.fromstring(xml.encode('utf-8'))
    except (ValueError, safe_etree.ParseError):
        raise UpdateFromXmlError(_("An error occurred while parsing the XML content."))

    return update_from_xml(oa_block, root, **kwargs)
Example #26
 def get_dom():
     # Load the XML on first use and keep it in memory in a global
     # variable. This is perhaps not the best design.
     global XML_DOM
     if XML_DOM is None:
         XML_DOM = ET.parse(XML_FILE)
     return XML_DOM
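
The module-level global works, but the same memoization is available without it; a sketch using functools.lru_cache, assuming XML_FILE is the same module constant:

import functools
import xml.etree.ElementTree as ET

@functools.lru_cache(maxsize=1)
def get_dom_cached():
    # Parsed once on the first call, then served from the cache.
    return ET.parse(XML_FILE)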
Example #27
    def setUp(self):
        TestFunctional.setUp(self)

        # Set pbshook frequency to 10 seconds
        self.server.manager(MGR_CMD_SET, PBS_HOOK,
                            {'enabled': 'true', 'freq': 10},
                            id='PBS_alps_inventory_check', expect=True)

        momA = self.moms.values()[0]
        if not momA.is_cray():
            self.skipTest("%s: not a cray mom." % (momA.shortname))
        mom_config = momA.parse_config()
        if '$alps_client' not in mom_config:
            self.skipTest("alps_client not set in mom config.")

        if '$vnode_per_numa_node' in mom_config:
            momA.unset_mom_config('$vnode_per_numa_node', False)

        momA.add_config({'$logevent': '0xffffffff'})

        # check if required BASIL version available on the machine.
        for ver in self.basil_version:
            xml_out = self.query_alps(ver, 'QUERY', 'ENGINE')
            xml_tree = ET.parse(xml_out)
            os.remove(xml_out)
            response = xml_tree.find(".//ResponseData")
            status = response.attrib['status']
            if status == "SUCCESS":
                self.available_version = ver
                break
        if self.available_version == "":
            self.skipTest("No supported basil version found on the platform.")

        # Reset nodes
        self.reset_nodes(momA.shortname)
Example #28
 def download_translations(self, source, language, text, unit, user):
     """Download list of possible translations from the service."""
     translations = []
     xp_translated = self.MS_TM_XPATH + 'TranslatedText'
     xp_confidence = self.MS_TM_XPATH + 'ConfidenceLevel'
     xp_original = self.MS_TM_XPATH + 'OriginalText'
     resp = self.soap_req(
         'GetTranslations',
         uuid=uuid4(),
         text=text,
         from_lang=source,
         to_lang=language,
         max_result=20,
     )
     root = ElementTree.fromstring(resp.read())
     results = root.find(self.MS_TM_XPATH + 'GetTranslationsResult')
     if results is not None:
         for translation in results:
             translations.append((
                 translation.find(xp_translated).text,
                 int(translation.find(xp_confidence).text),
                 self.name,
                 translation.find(xp_original).text,
             ))
     return translations
Example #29
    def test_parseissuexml_with_issue_has_finding(self):
        single_finding = """<?xml version="1.0" encoding="utf-8"?>
        <!--XML Export of VCG Results for directory: C:\Projects\WebGoat.Net. Scanned for C# security issues.-->
        <CodeIssueCollection>
        <CodeIssue>
        <Priority>6</Priority>
        <Severity>Suspicious Comment</Severity>
        <Title>Comment Indicates Potentially Unfinished Code</Title>
        <Description>The comment includes some wording which indicates that the developer regards
        it as unfinished or does not trust it to work correctly.</Description>
        <FileName>Findings.xml</FileName>
        <Line>21</Line>
        <CodeLine>TODO: Check the Code</CodeLine>
        <Checked>False</Checked>
        <CheckColour>LawnGreen</CheckColour>
        </CodeIssue>
        </CodeIssueCollection>"""

        vcgscan = ElementTree.fromstring(single_finding)
        finding = self.parser.parse_issue(vcgscan.findall('CodeIssue')[0],
                                          Test())
        self.assertEqual('Info', finding.severity)
        self.assertEqual('S4', finding.numerical_severity)
        self.assertEqual('Comment Indicates Potentially Unfinished Code',
                         finding.title)
Example #30
def maybe_xml_to_string(message, logger=None):
    """
    Try to parse message as an XML string, and then return it pretty-printed.

    If message couldn't be parsed, return string representation of it instead.

    This is used to (debug-)log SAML requests/responses in a readable way.

    :param message: XML string typically
    :param logger: logging logger
    :return: something ready for logging
    :rtype: string
    """
    if isinstance(message, six.binary_type):
        # message is returned as binary from pysaml2 in python3
        message = message.decode('utf-8')
    message = str(message)
    try:
        from defusedxml import ElementTree as DefusedElementTree
        parser = DefusedElementTree.DefusedXMLParser()
        xml = DefusedElementTree.XML(message, parser)
        return DefusedElementTree.tostring(xml)
    except Exception as exc:
        if logger is not None:
            logger.debug("Could not parse message of type {!r} as XML: {!r}".format(type(message), exc))
        return message
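
Usage sketch: valid XML round-trips through the parser, anything else comes back as a string.

print(maybe_xml_to_string("<a><b/></a>"))      # re-serialized by tostring()
print(maybe_xml_to_string(b"not xml at all"))  # decoded and returned unchanged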
Example #31
def process_ssm_run_command(event):
    """
    Processes the results from running an SSM command on a managed instance.
    """
    event_dict = event.to_dict()
    instance_id = event_dict['detail']['instance-id']
    command_name = event_dict['detail']['document-name']
    command_status = event_dict['detail']['status']
    cw_client = boto3.client('cloudwatch', config=MSAM_BOTO3_CONFIG)
    log_client = boto3.client('logs', config=MSAM_BOTO3_CONFIG)
    dimension_name = "Instance ID"
    metric_name = command_name
    status = 0

    try:
        # test to make sure stream names are always of this format, esp if you create your own SSM document
        log_stream_name = event_dict['detail']['command-id'] + "/" + instance_id + "/aws-runShellScript/stdout"

        response = log_client.get_log_events(
                logGroupName=SSM_LOG_GROUP_NAME,
                logStreamName=log_stream_name,
            )
        #print(response)
        if command_status == "Success":
            # process document name (command)
            if "MSAMElementalLiveStatus" in command_name:
                metric_name = "MSAMElementalLiveStatus"
                for log_event in response['events']:
                    if "running" in log_event['message']:
                        status = 1
                        break
            elif "MSAMSsmSystemStatus" in command_name:
                metric_name = "MSAMSsmSystemStatus"
                status = 1
            elif "MSAMElementalLiveActiveAlerts" in command_name:
                metric_name = "MSAMElementalLiveActiveAlerts"
                root = ET.fromstring(response['events'][0]['message'])
                status = len(list(root))
                if status == 1 and root[0].tag == "empty":
                    status = 0
            else:
                if "MSAMElementalLiveCompletedEvents" in command_name:
                    metric_name = "MSAMElementalLiveCompletedEvents"
                elif "MSAMElementalLiveErroredEvents" in command_name:
                    metric_name = "MSAMElementalLiveErroredEvents"
                elif "MSAMElementalLiveRunningEvents" in command_name:
                    metric_name = "MSAMElementalLiveRunningEvents"
                root = ET.fromstring(response['events'][0]['message'])
                status = len(root.findall("./live_event"))
        else:
            # for the elemental live status, the command itself returns a failure if process is not running at all
            # which is different than when a command fails to execute altogether
            if command_status == "Failed" and "MSAMElementalLiveStatus" in command_name:
                for log_event in response['events']:
                    if "Not Running" in log_event['message'] or "Active: failed" in log_event['message']:
                        metric_name = "MSAMElementalLiveStatus"
                        break
            else:
                # log if command has timed out or failed
                print("SSM Command Status: Command %s sent to instance %s has %s" % (command_name, instance_id, command_status))
                # create a metric for it
                status = 1
                metric_name = "MSAMSsmCommand"+command_status

        cw_client.put_metric_data(
            Namespace = SSM_LOG_GROUP_NAME,
            MetricData = [
                {
                    'MetricName': metric_name,
                    'Dimensions': [
                        {
                            'Name' : dimension_name,
                            'Value' : instance_id
                        },
                    ],
                    "Value": status,
                    "Unit": "Count"
                }
            ]
        )
    except ClientError as error:
        print(error)
        print("SSM Command Status: Command %s sent to instance %s has status %s" % (command_name, instance_id, command_status))
        print("Log stream name is %s" % (log_stream_name))
Example #32
def load(file_object, annotations):
    from defusedxml import ElementTree
    context = ElementTree.iterparse(file_object, events=("start", "end"))
    context = iter(context)
    ev, _ = next(context)

    supported_shapes = ('box', 'polygon', 'polyline', 'points', 'cuboid')

    track = None
    shape = None
    tag = None
    image_is_opened = False
    attributes = None
    for ev, el in context:
        if ev == 'start':
            if el.tag == 'track':
                track = annotations.Track(
                    label=el.attrib['label'],
                    group=int(el.attrib.get('group_id', 0)),
                    source=el.attrib.get('source', 'manual'),
                    shapes=[],
                )
            elif el.tag == 'image':
                image_is_opened = True
                frame_id = annotations.abs_frame_id(match_dm_item(
                    DatasetItem(id=el.attrib['id'], image=el.attrib['name']),
                    annotations))
            elif el.tag in supported_shapes and (track is not None or image_is_opened):
                attributes = []
                shape = {
                    'attributes': attributes,
                    'points': [],
                }
            elif el.tag == 'tag' and image_is_opened:
                attributes = []
                tag = {
                    'frame': frame_id,
                    'label': el.attrib['label'],
                    'group': int(el.attrib.get('group_id', 0)),
                    'attributes': attributes,
                    'source': str(el.attrib.get('source', 'manual'))
                }
        elif ev == 'end':
            if el.tag == 'attribute' and attributes is not None:
                attributes.append(annotations.Attribute(
                    name=el.attrib['name'],
                    value=el.text or "",
                ))
            if el.tag in supported_shapes:
                if track is not None:
                    shape['frame'] = el.attrib['frame']
                    shape['outside'] = el.attrib['outside'] == "1"
                    shape['keyframe'] = el.attrib['keyframe'] == "1"
                else:
                    shape['frame'] = frame_id
                    shape['label'] = el.attrib['label']
                    shape['group'] = int(el.attrib.get('group_id', 0))
                    shape['source'] = str(el.attrib.get('source', 'manual'))

                shape['type'] = 'rectangle' if el.tag == 'box' else el.tag
                shape['occluded'] = el.attrib['occluded'] == '1'
                shape['z_order'] = int(el.attrib.get('z_order', 0))

                if el.tag == 'box':
                    shape['points'].append(el.attrib['xtl'])
                    shape['points'].append(el.attrib['ytl'])
                    shape['points'].append(el.attrib['xbr'])
                    shape['points'].append(el.attrib['ybr'])
                elif el.tag == 'cuboid':
                    shape['points'].append(el.attrib['xtl1'])
                    shape['points'].append(el.attrib['ytl1'])
                    shape['points'].append(el.attrib['xbl1'])
                    shape['points'].append(el.attrib['ybl1'])
                    shape['points'].append(el.attrib['xtr1'])
                    shape['points'].append(el.attrib['ytr1'])
                    shape['points'].append(el.attrib['xbr1'])
                    shape['points'].append(el.attrib['ybr1'])

                    shape['points'].append(el.attrib['xtl2'])
                    shape['points'].append(el.attrib['ytl2'])
                    shape['points'].append(el.attrib['xbl2'])
                    shape['points'].append(el.attrib['ybl2'])
                    shape['points'].append(el.attrib['xtr2'])
                    shape['points'].append(el.attrib['ytr2'])
                    shape['points'].append(el.attrib['xbr2'])
                    shape['points'].append(el.attrib['ybr2'])
                else:
                    for pair in el.attrib['points'].split(';'):
                        shape['points'].extend(map(float, pair.split(',')))

                if track is not None:
                    if shape["keyframe"]:
                        track.shapes.append(annotations.TrackedShape(**shape))
                else:
                    annotations.add_shape(annotations.LabeledShape(**shape))
                shape = None

            elif el.tag == 'track':
                annotations.add_track(track)
                track = None
            elif el.tag == 'image':
                image_is_opened = False
            elif el.tag == 'tag':
                annotations.add_tag(annotations.Tag(**tag))
                tag = None
            el.clear()
Example #33
    def __init__(self, task_data=None):
        try:
            task_xml = ElementTree.fromstring(task_data, forbid_dtd=True)
        except ElementTree.ParseError:
            raise FormatError("Job file is not XML format")

        task_xml = self._strip_namespace(task_xml)

        # Load RegistrationInfo data
        self.uri = self._get_element_data(task_xml, "RegistrationInfo/URI")
        self.security_descriptor = self._get_element_data(
            task_xml, "RegistrationInfo/SecurityDescriptor")
        self.source = self._get_element_data(task_xml,
                                             "RegistrationInfo/Source")
        self.date = self._get_element_data(task_xml, "RegistrationInfo/Date")
        self.author = self._get_element_data(task_xml,
                                             "RegistrationInfo/Author")
        self.version = self._get_element_data(task_xml,
                                              "RegistrationInfo/Version")
        self.description = self._get_element_data(
            task_xml, "RegistrationInfo/Description")
        self.documentation = self._get_element_data(
            task_xml, "RegistrationInfo/Documentation")

        # Load Principal data
        self.principal_id = task_xml.find("Principals/Principal").get("id")
        self.user_id = self._get_element_data(task_xml,
                                              "Principals/Principal/UserId")
        self.logon_type = self._get_element_data(
            task_xml, "Principals/Principal/LogonType")
        self.group_id = self._get_element_data(task_xml,
                                               "Principals/Principal/GroupId")
        self.display_name = self._get_element_data(
            task_xml, "Principals/Principal/DisplayName")
        self.run_level = self._get_element_data(
            task_xml, "Principals/Principal/RunLevel")
        self.process_token_sid_type = self._get_element_data(
            task_xml, "Principals/Principal/ProcessTokenSidType")
        self.required_privileges = self._get_list_element_data(
            task_xml, "Principals/Principal/RequiredPrivileges/Privilege")

        # Load Settings data
        self.allow_start_on_demand = self._get_element_data(
            task_xml, "AllowStartOnDemand")
        self.disallow_start_on_batteries = self._get_element_data(
            task_xml, "DisallowStartIfOnBatteries")
        self.stop_on_batteries = self._get_element_data(
            task_xml, "StopIfGoingOnBatteries")
        self.allow_hard_terminate = self._get_element_data(
            task_xml, "AllowHardTerminate")
        self.start_when_available = self._get_element_data(
            task_xml, "StartWhenAvailable")
        self.network_profile_name = self._get_element_data(
            task_xml, "NetworkProfileName")
        self.run_only_on_network = self._get_element_data(
            task_xml, "RunOnlyIfNetworkAvailable")
        self.wake_to_run = self._get_element_data(task_xml, "WakeToRun")
        self.enabled = self._get_element_data(task_xml, "Enabled")
        self.hidden = self._get_element_data(task_xml, "Hidden")
        self.delete_expired = self._get_element_data(task_xml,
                                                     "DeleteExpiredTaskAfter")
        self.execution_time_limit = self._get_element_data(
            task_xml, "ExecutionTimeLimit")
        self.run_only_idle = self._get_element_data(task_xml, "RunOnlyIfIdle")
        self.unified_scheduling_engine = self._get_element_data(
            task_xml, "UseUnifiedSchedulingEngine")
        self.disallow_start_on_remote_app_session = self._get_element_data(
            task_xml, "DisallowStartOnRemoteAppSession")
        self.multiple_instances_policy = self._get_element_data(
            task_xml, "MultipleInstancesPolicy")
        self.priority = self._get_element_data(task_xml, "Priority")
        self.idle_duration = self._get_element_data(task_xml,
                                                    "IdleSettings/Duration")
        self.idle_wait_timeout = self._get_element_data(
            task_xml, "IdleSettings/WaitTimeout")
        self.idle_stop_on_idle_end = self._get_element_data(
            task_xml, "IdleSettings/StopOnIdleEnd")
        self.idle_restart_on_idle = self._get_element_data(
            task_xml, "IdleSettings/RestartOnIdle")
        self.network_name = self._get_element_data(task_xml,
                                                   "NetworkSettings/Name")
        self.network_id = self._get_element_data(task_xml,
                                                 "NetworkSettings/Id")
        self.restart_on_fail_interval = self._get_element_data(
            task_xml, "RestartOnFailure/Interval")
        self.restart_on_fail_count = self._get_element_data(
            task_xml, "RestartOnFailure/Count")

        # Load Data data
        self.data = self._get_raw_xml(task_xml, "Data")

        # Load Triggers data
        self.triggers = self._get_triggers(task_xml)

        # Load Actions data
        self.actions = self._get_actions(task_xml)
Example #34
 def helperParse(self, document):
     tree = _ET.parse(document)
     root = tree.getroot()
     return root
Example #35
 def get_findings(self, xml_output, test):
     tree = ElementTree.parse(xml_output)
     vuln_definitions = self.get_vuln_definitions(tree)
     return self.get_items(tree, vuln_definitions, test)
Example #36
 def description(self):
     return ET.fromstring(self.dom.XMLDesc(0))
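
Usage sketch, assuming self.dom is a libvirt virDomain handle; a domain's XML description carries <name> and <uuid> children:

desc = vm.description()  # vm: an instance of the class above
print(desc.findtext("name"), desc.findtext("uuid"))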
Example #37
    def __load_xml(self):
        xml_tree = self.xml_tree or ET.parse(self.path_to_xml)
        xml_root = xml_tree.getroot()
        xml_layers = {}
        xml_edges = []
        statistics = {}

        Edge = namedtuple('edge',
                          ['from_layer', 'from_port', 'to_layer', 'to_port'])

        # Create graph with operations only
        self.graph = Graph()
        self.graph.graph['hashes'] = {}

        self.graph.graph['ir_version'] = int(
            xml_root.attrib['version']) if xml_root.attrib.get(
                'version') is not None else None
        self.graph.graph['layout'] = 'NCHW'
        self.graph.name = xml_root.attrib['name'] if xml_root.attrib.get(
            'name') is not None else None

        # Parse XML
        for child in xml_root:
            if child.tag == 'layers':
                for layer in child:
                    layer_id, layer_attrs = self.__load_layer(layer)
                    xml_layers.update({layer_id: layer_attrs})
            elif child.tag == 'edges':
                for edge in child:
                    xml_edges.append(
                        Edge(edge.attrib['from-layer'],
                             int(edge.attrib['from-port']),
                             edge.attrib['to-layer'],
                             int(edge.attrib['to-port'])))
            elif child.tag == 'statistics':
                layers = child.findall('layer')
                for layer in layers:
                    statistics[layer.find('name').text] = {
                        'min': layer.find('min').text,
                        'max': layer.find('max').text
                    }
            elif child.tag == 'meta_data':
                for elem in child:
                    if elem.tag == 'cli_parameters':
                        for det in elem:
                            if det.tag != 'unset':
                                value = det.attrib['value']
                                if value in ['True', 'False']:
                                    value = False if value == 'False' else True
                                self.meta_data[det.tag] = value
                            else:
                                self.meta_data[det.tag] = det.attrib[
                                    'unset_cli_parameters'].split(',_')
            elif child.tag == 'quantization_parameters':
                # Section with Post Optimization Toolkit parameters
                self.meta_data['quantization_parameters'] = dict()
                for elem in child:
                    if elem.tag == 'config':
                        self.meta_data['quantization_parameters'][
                            'config'] = elem.text
                    elif elem.tag in ['version', 'cli_params']:
                        self.meta_data['quantization_parameters'][
                            elem.tag] = elem.attrib['value']

        self.graph.graph['cmd_params'] = Namespace(
            **self.meta_data)  # TODO: check whether we need all these attrs

        if len(statistics):
            self.graph.graph['statistics'] = statistics

        for layer in xml_layers.keys():
            self.graph.add_node(layer, **xml_layers[layer])

        xml_edges.sort(key=lambda x: x.to_layer)

        for edge in xml_edges:
            self.graph.add_edges_from([(edge.from_layer, edge.to_layer, {
                'from_port': edge.from_port,
                'to_port': edge.to_port
            })])

        # Insert data nodes between op nodes and insert data nodes with weights
        nodes = list(self.graph.nodes())
        for node in nodes:
            out_edges = Node(self.graph, node).get_outputs()
            data_nodes = {}
            for port in self.graph.node[node]['ports']:
                data = self.graph.unique_id(prefix='data_')
                self.graph.add_node(
                    data, **{
                        'kind': 'data',
                        'shape': self.graph.node[node]['ports'][port][0],
                        'value': None
                    })
                self.graph.add_edges_from([(node, data, {'out': port})])
                data_nodes.update({port: data})

            for out_node, edge_attrs in out_edges:
                self.graph.remove_edge(node, out_node)
                if edge_attrs['from_port'] in data_nodes:
                    data = data_nodes[edge_attrs['from_port']]
                else:
                    raise RuntimeError(
                        "Something is wrong with the IR: there is an edge from a non-existent port"
                    )
                self.graph.add_edges_from([(data, out_node, {
                    'in': edge_attrs['to_port']
                })])
Example #38
UPDATE = 1
COOLDOWN_VOTE = 5
RAPPEL = 0
MIN_BEFORE_COOLDOWN = 1
CURRENT_ID = 0
CHANNEL = None
GUILD = None
ISSUE_RESULTS = None

BANNED_HOURS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 22, 23, 24]
ROLE_PING = "671696364056477707"
EMOJI_VOTE = ["☑️", "✅"]
EMOJI = [":apple:", ":pineapple:", ":kiwi:", ":cherries:", ":banana:", ":eggplant:", ":tomato:", ":corn:", ":carrot:"]
NATION = 'controlistania'
PATH = 'vote.yml'
RESULTS_XML = DT.parse("test_result.xml")
INPUT_XML = DT.parse("test_input.xml")

load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
PASSWORD = os.getenv('PASSWORD')

bot = commands.Bot(command_prefix='!')

# ----------------------------- READING THE FILES

# Load the list of ranks and banners when the application launches
with open("list_data.yml") as f:
    data = yaml.load(f, Loader=yaml.FullLoader)
    LIST_RANK_ID = data["ranks"]
    BANNER_TITLES = data["banners"]
Example #39
 def __init__(self, file, test):
     tree = ElementTree.parse(file)
     items = list()
     for detail in tree.iterfind('//detaillist/detail'):
         # finding details
         title = detail.findtext('name')
         # date = detail.findtext('date') # can be used for Finding.date?
         cve = detail.findtext('./cve/id')
         url = detail.findtext(
             './referencelist/reference/[type=\'solution\']/../url')
         description = detail.findtext('description')
         mitigation = detail.findtext('solution')
         impact = detail.findtext('information')
         cvss_score = detail.findtext('cvss_v3_score') or detail.findtext(
             'cvss_score')
         if not cvss_score:
             cvss_score = 0
         if cvss_score:
             score = float(cvss_score)
             if score < 4:
                 severity = 'Low'
             elif score < 7:
                 severity = 'Medium'
             elif score < 9:
                 severity = 'High'
             else:
                 severity = 'Critical'
         else:
             risk = int(detail.findtext('risk'))
             if risk == 0:
                 severity = 'Low'
             elif risk == 1:
                 severity = 'Medium'
             elif risk == 2:
                 severity = 'High'
             else:
                 severity = 'Critical'
         cvss_description = detail.findtext('cvss_vector_description')
         cvss_vector = detail.findtext('cvss_v3_vector') or detail.findtext(
             'cvss_vector')
         severity_justification = "{}\n{}".format(cvss_score,
                                                  cvss_description)
         finding = Finding(title=title,
                           test=test,
                           cve=cve,
                           url=url,
                           description=description,
                           mitigation=mitigation,
                           impact=impact,
                           severity=severity,
                           numerical_severity=cvss_score,
                           severity_justification=severity_justification)
         # endpoint details
         host = detail.findtext('ip')
         if host:
             protocol = detail.findtext('./portinfo/service')
             try:
                 port = int(detail.findtext('./portinfo/portnumber'))
             except ValueError as ve:
                 print("General port given. Assigning 0 as default.")
                 port = 0
             finding.unsaved_endpoints.append(
                 Endpoint(protocol=protocol, host=host, port=port))
         items.append(finding)
     self._items = items
Example #40
def OpenVAS_xml_upload(request):
    """
    OpenVAS XML file upload.
    :param request:
    :return:
    """
    all_project = project_db.objects.all()
    if request.method == "POST":
        project_id = request.POST.get("project_id")
        scanner = request.POST.get("scanner")
        xml_file = request.FILES['xmlfile']
        scan_ip = request.POST.get("scan_url")
        scan_id = uuid.uuid4()
        scan_status = "100"
        if scanner == "openvas":
            date_time = datetime.now()
            scan_dump = scan_save_db(scan_ip=scan_ip,
                                     scan_id=scan_id,
                                     date_time=date_time,
                                     project_id=project_id,
                                     scan_status=scan_status)
            scan_dump.save()
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            OpenVas_Parser.xml_parser(project_id=project_id,
                                      scan_id=scan_id,
                                      root=root_xml)
            return HttpResponseRedirect("/networkscanners/")
        elif scanner == "nessus":
            date_time = datetime.now()
            scan_dump = nessus_scan_db(scan_ip=scan_ip,
                                       scan_id=scan_id,
                                       date_time=date_time,
                                       project_id=project_id,
                                       scan_status=scan_status)
            scan_dump.save()
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            Nessus_Parser.nessus_parser(
                root=root_xml,
                scan_id=scan_id,
                project_id=project_id,
            )
            return HttpResponseRedirect("/networkscanners/nessus_scan")
        elif scanner == "nmap":
            # date_time = datetime.now()
            # scan_dump = nessus_scan_db(
            #     scan_ip=scan_ip,
            #     scan_id=scan_id,
            #     date_time=date_time,
            #     project_id=project_id,
            #     scan_status=scan_status
            # )
            # scan_dump.save()
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            nmap_parser.xml_parser(
                root=root_xml,
                scan_id=scan_id,
                project_id=project_id,
            )
            return HttpResponseRedirect("/tools/nmap_scan/")

    return render(request, 'net_upload_xml.html', {'all_project': all_project})
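For reference, a client-side sketch of how a report might be posted to this view. The URL is a placeholder (the real route depends on the project's urls.py), while the form field names match what the view reads from request.POST and request.FILES:

import requests

UPLOAD_URL = "http://localhost:8000/networkscanners/upload"  # placeholder route

with open("openvas_report.xml", "rb") as report:
    resp = requests.post(
        UPLOAD_URL,
        data={"project_id": "1", "scanner": "openvas", "scan_url": "10.0.0.5"},
        files={"xmlfile": report},  # matches request.FILES['xmlfile'] above
    )
print(resp.status_code)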
Example #41
def add_image(plot,
              plot_object,
              generate_code=False,
              sphinx=False,
              url_prefix="",
              dataset=None,
              level=None,
              itime=None,
              vtime=None,
              simple_naming=False):
    """
    Adds the images to the plots folder and generates the html codes to display them
    """
    global end
    # Import here due to some circular import issue if imported too soon
    from mslib.index import SCRIPT_NAME

    if not os.path.exists(STATIC_LOCATION) and not sphinx:
        os.mkdir(STATIC_LOCATION)

    l_type = "Linear" if isinstance(plot_object, AbstractLinearSectionStyle) else \
        "Side" if isinstance(plot_object, AbstractVerticalSectionStyle) else "Top"

    filename = f"{l_type}_{dataset}{plot_object.name}-" + (
        f"{level}it{itime}vt{vtime}".replace(" ", "_").replace(
            ":", "_").replace("-", "_") if not simple_naming else "")

    if plot:
        location = DOCS_LOCATION if sphinx else STATIC_LOCATION
        if not os.path.exists(os.path.join(location, "plots")):
            os.mkdir(os.path.join(location, "plots"))
        if l_type == "Linear":
            create_linear_plot(
                etree.fromstring(plot),
                os.path.join(location, "plots", filename + ".png"))
        else:
            with Image.open(io.BytesIO(plot)) as image:
                image.save(os.path.join(location, "plots", filename + ".png"),
                           format="PNG")

    end = end.replace("files = [", f"files = [\"{filename}.png\",")\
        .replace(",];", "];")
    img_path = f"../_static/{filename}.png" if sphinx \
        else f"{url_prefix}/static/plots/{filename}.png"
    code_path = f"code/{l_type}_{dataset}{plot_object.name}.html" if sphinx \
        else f"{url_prefix if url_prefix else ''}{SCRIPT_NAME}mss/code/{l_type}_{dataset}{plot_object.name}.md"

    if generate_code:
        if f"{l_type}_{dataset}{plot_object.name}" not in plot_htmls:
            plot_htmls[f"{l_type}_{dataset}{plot_object.name}"] = \
                plot_html_begin + get_plot_details(plot_object, l_type, sphinx, img_path, code_path, dataset)
        markdown = plot_htmls[f"{l_type}_{dataset}{plot_object.name}"]
        if level:
            markdown = add_levels([level], None, markdown)
        if vtime:
            markdown = add_times(itime, [vtime], markdown)
        plot_htmls[f"{l_type}_{dataset}{plot_object.name}"] = markdown

    img_id = img_path.split(f"-{level}")[0]
    if not any(img_id in html for html in plots[l_type]):
        plots[l_type].append(
            image_md(
                img_path, plot_object.name,
                code_path if generate_code else None,
                f"{plot_object.title}" + (f"<br>{plot_object.abstract}"
                                          if plot_object.abstract else "")))
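The chained .replace() calls that sanitize the level/time suffix can be expressed with a single translation table; a behavior-equivalent sketch, assuming only those three characters need normalizing:

_SANITIZE = str.maketrans({" ": "_", ":": "_", "-": "_"})

def filename_suffix(level, itime, vtime):
    # same result as .replace(" ", "_").replace(":", "_").replace("-", "_")
    return f"{level}it{itime}vt{vtime}".translate(_SANITIZE)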
Example #42
    def post(self, request, format=None):

        project_id = request.data.get("project_id")
        scanner = request.data.get("scanner")
        xml_file = request.data.get("filename")
        scan_url = request.data.get("scan_url")
        scan_id = uuid.uuid4()
        scan_status = "100"
        print(xml_file)
        print(scanner)
        if scanner == "zap_scan":
            date_time = datetime.datetime.now()
            scan_dump = zap_scans_db(scan_url=scan_url,
                                     scan_scanid=scan_id,
                                     date_time=date_time,
                                     project_id=project_id,
                                     vul_status=scan_status,
                                     rescan='No')
            scan_dump.save()
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            zap_xml_parser.xml_parser(project_id=project_id,
                                      scan_id=scan_id,
                                      root=root_xml)
            return Response({
                "message": "ZAP Scan Data Uploaded",
                "scanner": scanner,
                "project_id": project_id,
                "scan_id": scan_id
            })
        elif scanner == "burp_scan":
            date_time = datetime.datetime.now()
            scan_dump = burp_scan_db(url=scan_url,
                                     scan_id=scan_id,
                                     date_time=date_time,
                                     project_id=project_id,
                                     scan_status=scan_status)
            scan_dump.save()
            # Burp scan XML parser
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            do_xml_data = burp_plugin.burp_scans(project_id, scan_url, scan_id)
            do_xml_data.burp_scan_data(root_xml)
            return Response({
                "message": "Burp Scan Data Uploaded",
                "project_id": project_id,
                "scan_id": scan_id,
                "scanner": scanner
            })

        elif scanner == "arachni":
            date_time = datetime.datetime.now()
            scan_dump = arachni_scan_db(url=scan_url,
                                        scan_id=scan_id,
                                        date_time=date_time,
                                        project_id=project_id,
                                        scan_status=scan_status)
            scan_dump.save()
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            arachni_xml_parser.xml_parser(project_id=project_id,
                                          scan_id=scan_id,
                                          root=root_xml)
            return Response({
                "message": "Scan Data Uploaded",
                "project_id": project_id,
                "scan_id": scan_id,
                "scanner": scanner
            })

        elif scanner == 'netsparker':
            date_time = datetime.datetime.now()
            scan_dump = netsparker_scan_db(url=scan_url,
                                           scan_id=scan_id,
                                           date_time=date_time,
                                           project_id=project_id,
                                           scan_status=scan_status)
            scan_dump.save()
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            netsparker_xml_parser.xml_parser(project_id=project_id,
                                             scan_id=scan_id,
                                             root=root_xml)
            return Response({
                "message": "Scan Data Uploaded",
                "project_id": project_id,
                "scan_id": scan_id,
                "scanner": scanner
            })
        elif scanner == 'webinspect':
            date_time = datetime.datetime.now()
            scan_dump = webinspect_scan_db(url=scan_url,
                                           scan_id=scan_id,
                                           date_time=date_time,
                                           project_id=project_id,
                                           scan_status=scan_status)
            scan_dump.save()
            tree = ET.parse(xml_file)
            root_xml = tree.getroot()
            webinspect_xml_parser.xml_parser(project_id=project_id,
                                             scan_id=scan_id,
                                             root=root_xml)
            return Response({
                "message": "Scan Data Uploaded",
                "project_id": project_id,
                "scan_id": scan_id,
                "scanner": scanner
            })

        elif scanner == 'banditscan':
            date_time = datetime.datetime.now()
            scan_dump = bandit_scan_db(project_name=scan_url,
                                       scan_id=scan_id,
                                       date_time=date_time,
                                       project_id=project_id,
                                       scan_status=scan_status)
            scan_dump.save()
            data = json.loads(xml_file)
            bandit_report_json(data=data,
                               project_id=project_id,
                               scan_id=scan_id)
            return Response({
                "message": "Scan Data Uploaded",
                "project_id": project_id,
                "scan_id": scan_id,
                "scanner": scanner
            })

        elif scanner == 'dependencycheck':
            date_time = datetime.datetime.now()
            scan_dump = dependencycheck_scan_db(project_name=scan_url,
                                                scan_id=scan_id,
                                                date_time=date_time,
                                                project_id=project_id,
                                                scan_status=scan_status)
            scan_dump.save()
            data = etree.parse(xml_file)
            dependencycheck_report_parser.xml_parser(project_id=project_id,
                                                     scan_id=scan_id,
                                                     data=data)
            return Response({
                "message": "Scan Data Uploaded",
                "project_id": project_id,
                "scan_id": scan_id,
                "scanner": scanner
            })

        return Response({"message": "Scan Data Uploaded"})
Example #43
 def get_domain_description(self):
     return ET.fromstring(self.dom.XMLDesc())
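The ElementTree returned above can be queried directly; a sketch that lists a domain's disk sources, assuming a local libvirt connection and a domain named "my-vm" (both placeholders):

import libvirt
import xml.etree.ElementTree as ET

conn = libvirt.open("qemu:///system")  # placeholder connection URI
dom = conn.lookupByName("my-vm")       # placeholder domain name

root = ET.fromstring(dom.XMLDesc())
# libvirt domain XML keeps disk definitions under <devices>/<disk>/<source>
for source in root.findall("./devices/disk/source"):
    print(source.get("file") or source.get("dev"))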
Example #44
    def get_findings(self, filename, test):
        mitigation_patterns = dict()
        reference_patterns = dict()
        dupes = dict()

        SEVERITY = {'1': 'High', '2': 'Medium', '3': 'Low'}

        tree = ET.parse(filename)
        root = tree.getroot()

        html_parser = html2text.HTML2Text()
        html_parser.ignore_links = False

        # Parse <BugPattern> tags
        for pattern in root.findall('BugPattern'):
            # Parse <BugPattern>...<Details> html content
            html_text = html_parser.handle(
                ET.tostring(pattern.find('Details'),
                            method='text').decode('utf-8'))

            # Parse mitigation from html
            mitigation = ''
            i = 0
            for line in html_text.splitlines():
                i += 1
                # Break loop when references are reached
                if 'Reference' in line:
                    break
                # Add a string before the code indicating that it's just an example, NOT the actual scanned code
                if ('Vulnerable Code:'
                        in line) or ('Insecure configuration:'
                                     in line) or ('Code at risk:' in line):
                    mitigation += '\n\n#### Example\n'
                # Add line to mitigation
                mitigation += line + '\n'
            # Add mitigations to dictionary
            mitigation_patterns[pattern.get('type')] = mitigation

            # Parse references from html
            reference = ''
            #   Sometimes there's a breakline in the middle of the reference,
            #   so the splitlines method ends up breaking it in two.
            #   We solve this problem by joining all references and adding breaklines with regex.
            # Start loop where the previous loop ended
            for line in html_text.splitlines()[i:]:
                # Concatenate all references in one big string
                reference += line + ' '
            # Add breakline between each reference
            #   regex: turns ')  [' into ')\n['
            #      ')': reference ends
            #      '[': reference starts
            reference = re.sub(r'(?<=\))(.*?)(?=\[)', '\n', reference)
            # Add references to dictionary
            reference_patterns[pattern.get('type')] = reference

        # Parse <BugInstance> tags
        for bug in root.findall('BugInstance'):
            desc = ''
            for message in bug.itertext():
                desc += message + '\n'

            shortmessage_extract = bug.find('ShortMessage')
            if shortmessage_extract is not None:
                title = shortmessage_extract.text
            else:
                title = bug.get('type')
            severity = SEVERITY[bug.get('priority')]
            description = desc

            finding = Finding(title=title,
                              cwe=int(bug.get('cweid', default=0)),
                              severity=severity,
                              description=description,
                              test=test,
                              static_finding=True,
                              dynamic_finding=False,
                              nb_occurences=1)

            # find the source line and file on the buginstance
            source_extract = bug.find('SourceLine')
            if source_extract is not None:
                finding.file_path = source_extract.get("sourcepath")
                finding.sast_source_object = source_extract.get("classname")
                finding.sast_source_file_path = source_extract.get(
                    "sourcepath")
                if 'start' in source_extract.attrib and source_extract.get(
                        "start").isdigit():
                    finding.line = int(source_extract.get("start"))
                    finding.sast_source_line = int(source_extract.get("start"))

            if bug.get('type') in mitigation_patterns:
                finding.mitigation = mitigation_patterns[bug.get('type')]
                finding.references = reference_patterns[bug.get('type')]

            if 'instanceHash' in bug.attrib:
                dupe_key = bug.get('instanceHash')
            else:
                dupe_key = "|".join([
                    'no_instance_hash',
                    title,
                    description,
                ])

            if dupe_key in dupes:
                find = dupes[dupe_key]
                find.nb_occurences += 1
            else:
                dupes[dupe_key] = finding

        return list(dupes.values())
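The reference-splitting regex above is easy to misread, so here is a standalone check of what it does (the sample string is made up):

import re

refs = "[CWE-79](https://cwe.mitre.org/data/definitions/79.html)  [XSS cheat sheet](https://owasp.org/xss) "
# turns ')  [' into ')\n[' so each reference lands on its own line
print(re.sub(r'(?<=\))(.*?)(?=\[)', '\n', refs))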
Example #45
    def __init__(self, file, test):
        self.dupes = dict()
        self.items = ()
        if file is None:
            return

        tree = ET.parse(file)
        # get root of tree.
        root = tree.getroot()
        if 'document' not in root.tag:
            raise NamespaceErr(
                "This doesn't seem to be a valid sslyze xml file.")

        results = root.find('results')
        for target in results:
            url = target.attrib['host']
            port = target.attrib['port']
            parsedUrl = urlparse(url)
            protocol = parsedUrl.scheme
            query = parsedUrl.query
            fragment = parsedUrl.fragment
            path = parsedUrl.path
            try:
                (host, port) = parsedUrl.netloc.split(':')
            except ValueError:
                # no explicit port in the netloc
                host = parsedUrl.netloc
            for element in target:
                title = ""
                severity = ""
                description = ""
                severity = "Info"
                weak_cipher = {}
                if element.tag == 'heartbleed':
                    heartbleed_element = element.find('openSslHeartbleed')
                    if 'isVulnerable' in heartbleed_element.attrib:
                        if heartbleed_element.attrib['isVulnerable'] == 'True':
                            title = element.attrib['title'] + " | " + url
                            description = "**heartbleed** : Vulnerable" + "\n\n" + \
                                        "**title** : " + element.attrib['title']
                if element.tag == 'openssl_ccs':
                    openssl_ccs_element = element.find('openSslCcsInjection')
                    if 'isVulnerable' in openssl_ccs_element.attrib:
                        if openssl_ccs_element.attrib[
                                'isVulnerable'] == 'True':
                            title = element.attrib['title'] + " | " + url
                            description = "**openssl_ccs** : Vulnerable" + "\n\n" + \
                                        "**title** : " + element.attrib['title']
                if element.tag == 'reneg':
                    reneg_element = element.find('sessionRenegotiation')
                    if 'isSecure' in reneg_element.attrib:
                        if reneg_element.attrib['isSecure'] == 'False':
                            title = element.attrib['title'] + " | " + url
                            description = "**Session Renegotiation** : Vulnerable" + "\n\n" + \
                                        "**title** : " + element.attrib['title']
                if element.tag in PROTOCOLS and element.attrib[
                        'isProtocolSupported'] == "True":
                    weak_cipher[element.tag] = []
                    for ciphers in element:
                        if ciphers.tag == 'preferredCipherSuite' or ciphers.tag == 'acceptedCipherSuites':
                            for cipher in ciphers:
                                if cipher.attrib['name'] in WEAK_CIPHER_LIST:
                                    if not cipher.attrib['name'] in weak_cipher[
                                            element.tag]:
                                        weak_cipher[element.tag].append(
                                            cipher.attrib['name'])
                    if len(weak_cipher[element.tag]) > 0:
                        title = element.tag + " | " + "Weak Ciphers" + " | " + url
                        description = "**Protocol** : " + element.tag + "\n\n" + \
                                    "**Weak Ciphers** : " + ",\n\n".join(weak_cipher[element.tag])
                if title and description:
                    dupe_key = hashlib.md5(
                        str(description + title).encode('utf-8')).hexdigest()
                    # create each finding only once; later occurrences of the
                    # same title/description pair are duplicates
                    if dupe_key not in self.dupes:

                        finding = Finding(
                            title=title,
                            test=test,
                            active=False,
                            verified=False,
                            description=description,
                            severity=severity,
                            numerical_severity=Finding.get_numerical_severity(
                                severity),
                            dynamic_finding=True,
                        )
                        finding.unsaved_endpoints = list()
                        self.dupes[dupe_key] = finding

                        if url is not None:
                            finding.unsaved_endpoints.append(
                                Endpoint(
                                    host=host,
                                    port=port,
                                    path=path,
                                    protocol=protocol,
                                    query=query,
                                    fragment=fragment,
                                ))
        self.items = self.dupes.values()
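The MD5-over-text deduplication key used above appears in several of these parsers; isolated as a helper for clarity (the function name is illustrative):

import hashlib

def dupe_key(title, description):
    # stable key: two findings with identical text hash to the same value
    return hashlib.md5((description + title).encode('utf-8')).hexdigest()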
Example #46
    def monitorForData(self, queue):
        '''
        updated receive all
        '''
        try:
            for client in self.clientInformationArray:
                sock = client.socket
                try:
                    BUFF_SIZE = 8087  # max bytes per recv() call
                    try:
                        sock.settimeout(0.001)
                        part = sock.recv(BUFF_SIZE)
                    except socket.timeout as e:
                        continue
                    except BrokenPipeError as e:
                        #self.clientInformationArray.remove(client)
                        self.returnReceivedData(client, b'', queue)
                        continue
                    except Exception as e:
                        logger.error(
                            "Exception other than broken pipe in monitor for data function "
                            + str(e))
                        self.returnReceivedData(client, b'', queue)
                        #self.clientInformationArray.remove(client)
                        continue
                    try:
                        if not part:
                            self.returnReceivedData(client, b'', queue)
                            #self.clientInformationArray.remove(client)
                            continue
                        else:
                            try:
                                timeout = time.time() + 1
                                while time.time() < timeout:
                                    try:
                                        event = etree.fromstring(part)
                                        if event.tag == "event":
                                            self.returnReceivedData(
                                                client, part, queue)
                                            break
                                        else:
                                            break
                                    except Exception:  # not yet a complete XML document
                                        try:
                                            sock.settimeout(0.1)
                                            part += sock.recv(BUFF_SIZE)
                                        except socket.timeout as e:
                                            logger.error(
                                                'there has been an exception in client reception handler '
                                                + str(e))
                                            break
                                        except BrokenPipeError as e:
                                            self.clientInformationArray.remove(
                                                client)
                                            break
                                        except Exception as e:
                                            logger.error(
                                                "Exception other than broken pipe in monitor for data function"
                                            )
                                            #self.returnReceivedData(client, b'', queue)
                                            break
                            except Exception as e:
                                logger.error('error in buffer ' + str(e))
                                return -1

                    except Exception as e:
                        logger.error(
                            loggingConstants.
                            CLIENTRECEPTIONHANDLERMONITORFORDATAERRORC +
                            str(e))
                        #self.returnReceivedData(client, b'', queue)
                        #self.clientInformationArray.remove(client)
                        return -1

                except Exception as e:
                    logger.error(loggingConstants.
                                 CLIENTRECEPTIONHANDLERMONITORFORDATAERRORD +
                                 str(e))
                    #self.returnReceivedData(client, b'', queue)
                    return -1
            return 1
        except Exception as e:
            logger.error('exception in monitor for data ' + str(e))
            return -1
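The heart of the loop above is "keep appending bytes until they parse as a complete XML element". A distilled sketch of that framing strategy, with the error handling stripped down (the helper itself is illustrative, not part of the original class):

import time
import xml.etree.ElementTree as ET

def read_event(sock, bufsize=8087, deadline=1.0):
    """Accumulate socket data until it parses as one XML element."""
    sock.settimeout(0.001)
    data = sock.recv(bufsize)
    timeout = time.time() + deadline
    while time.time() < timeout:
        try:
            return ET.fromstring(data)
        except ET.ParseError:
            sock.settimeout(0.1)
            data += sock.recv(bufsize)  # may raise socket.timeout; caller handles it
    return None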
Example #47
def wallet_journal(keys=None):
    """

    :param keys: [("personal", key_id, vcode), (), ...] or None for jf_wallet
    :return:
    """

    with open("configs/base.json", "r") as base_config_file:
        base_config = json.load(base_config_file)
    if not keys:
        # Default Refreshes
        keys = [("jf_wallet", secrets["jf_key_id"], secrets["jf_vcode"])]
    bulk_op = g.mongo.db.wallet_journal.initialize_unordered_bulk_op()
    bulk_run = False
    for service in keys:
        if service[0] == "jf_wallet":
            db_wallet_journal_cache = g.mongo.db.caches.find_one({"_id": service[0]})
        else:
            db_wallet_journal_cache = g.mongo.db.key_caches.find_one({"_id": "wallet_journal"})
        if not db_wallet_journal_cache or db_wallet_journal_cache.get("cached_until", 0) < time.time():
            if service[0] == "jf_wallet":
                xml_wallet_journal_payload = {
                    "keyID": service[1],
                    "vCode": service[2],
                    "accountKey": base_config["jf_account_key"]
                }
            else:
                xml_wallet_journal_payload = {
                    "keyID": service[1],
                    "vCode": service[2]
                }
            xml_wallet_journal_response = requests.get("https://api.eveonline.com/corp/WalletJournal.xml.aspx",
                                                       data=xml_wallet_journal_payload, headers=xml_headers)
            # XML Parse
            try:
                xml_wallet_journal_tree = ElementTree.fromstring(xml_wallet_journal_response.text)
            except ElementTree.ParseError:
                print(xml_wallet_journal_response.text)
                return None

            # Store in database
            xml_time_pattern = "%Y-%m-%d %H:%M:%S"
            g.mongo.db.caches.update({"_id": service[0]}, {"cached_until": int(calendar.timegm(
                time.strptime(xml_wallet_journal_tree[2].text, xml_time_pattern))),
                "cached_str": xml_wallet_journal_tree[2].text}, upsert=True)
            for transaction in xml_wallet_journal_tree[1][0]:
                bulk_run = True
                bulk_op.find({"_id": int(transaction.attrib["refID"]), "service": service[0]}).upsert().update(
                    {
                        "$set": {
                            "ref_type_id": int(transaction.attrib["refTypeID"]),
                            "owner_name_1": transaction.attrib["ownerName1"],
                            "owner_id_1": int(transaction.attrib["ownerID1"]),
                            "owner_name_2": transaction.attrib["ownerName2"],
                            "owner_id_2": int(transaction.attrib["ownerID2"]),
                            "amount": float(transaction.attrib["amount"]),
                            "reason": transaction.attrib["reason"]
                        }
                    })

    if bulk_run:
        bulk_op.execute()
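The cachedUntil handling above converts EVE's UTC timestamps to epoch seconds with calendar.timegm; the same pattern recurs in the next examples. A minimal check of the conversion:

import calendar
import time

XML_TIME_PATTERN = "%Y-%m-%d %H:%M:%S"

def eve_time_to_epoch(timestamp):
    # timegm (not time.mktime) because the API timestamps are UTC
    return calendar.timegm(time.strptime(timestamp, XML_TIME_PATTERN))

print(eve_time_to_epoch("2015-03-01 12:00:00"))  # -> 1425211200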
Example #48
 def _parse_xml(self, xml_str: str) -> Any:
     try:
         xml = ET.fromstring(xml_str)
         return xml
     except ET.ParseError:
         raise PcsNoStatusException('Broken XML was given')
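When the XML string can come from an untrusted peer, the same guard can be built on defusedxml instead; a sketch assuming defusedxml is installed (PcsNoStatusException is the exception class from the surrounding module):

import xml.etree.ElementTree as ET
from defusedxml import ElementTree as DET

def parse_untrusted(xml_str):
    try:
        # defusedxml rejects entity-expansion and similar attacks,
        # but still raises the stdlib ParseError on malformed input
        return DET.fromstring(xml_str)
    except ET.ParseError:
        raise PcsNoStatusException('Broken XML was given')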
Example #49
def contracts(keys=None, celery_time=0):
    """

    :param keys: [("jf_service" or "personal", key_id, vcode, character_id), (), ...]
    :param celery_time: Set to the next run time instance
    :return:
    """
    if celery_time:
        g.mongo.db.caches.update({"_id": "jf_service"},
                                 {"$set": {
                                     "next_check": time.strftime("%Y-%m-%d %H:%M:%S",
                                                                 time.gmtime(int(time.time()) + celery_time))}})

    invalid_apis = set()

    if not keys:
        # Default Refreshes
        keys = [("jf_service", secrets["jf_key_id"], secrets["jf_vcode"])]
    bulk_op = g.mongo.db.contracts.initialize_unordered_bulk_op()
    bulk_run = False
    for service in keys:
        if service[0] == "personal":
            # If service is personal, uses key_caches database for cache values instead
            db_cache = g.mongo.db.key_caches.find_one({"_id": service[3]})
            cache_time = db_cache.get("contracts", 0) if db_cache else 0
        else:
            db_cache = g.mongo.db.caches.find_one({"_id": service[0]})
            cache_time = db_cache.get("cached_until", 0) if db_cache else 0
        if not db_cache or cache_time < time.time():

            # Clean contract history
            month_ago = int(time.time()) - 2629743  # Services are 1 month
            two_weeks_ago = int(time.time()) - 1512000  # Personals are 2 1/2 weeks
            filter_time = month_ago
            if service[0] == "personal":
                filter_time = two_weeks_ago

            if service[0] == "personal":
                xml_contracts_payload = {
                    "keyID": service[1],
                    "vCode": service[2],
                    "characterID": service[3]
                }
                xml_contracts_response = requests.get("https://api.eveonline.com/char/Contracts.xml.aspx",
                                                      data=xml_contracts_payload, headers=xml_headers)
            else:
                xml_contracts_payload = {
                    "keyID": service[1],
                    "vCode": service[2]
                }
                xml_contracts_response = requests.get("https://api.eveonline.com/Corp/Contracts.xml.aspx",
                                                      data=xml_contracts_payload, headers=xml_headers)

            # XML Parse
            try:
                xml_contracts_tree = ElementTree.fromstring(xml_contracts_response.text)
            except ElementTree.ParseError:
                print(xml_contracts_response.text)
                return list(invalid_apis)

            # Store in database
            xml_time_pattern = "%Y-%m-%d %H:%M:%S"

            if service[0] == "personal":
                g.mongo.db.key_caches.update({"_id": int(service[3])}, {"$set": {
                    "contracts": int(
                        calendar.timegm(time.strptime(xml_contracts_tree[2].text, xml_time_pattern))),
                    "contracts_str": xml_contracts_tree[2].text,
                    "key": int(service[1])}
                }, upsert=True)
            else:
                g.mongo.db.caches.update({"_id": service[0]}, {"$set": {"cached_until": int(
                    calendar.timegm(time.strptime(xml_contracts_tree[2].text, xml_time_pattern))),
                    "cached_str": xml_contracts_tree[2].text}}, upsert=True)

            if xml_contracts_tree[1].tag == "error":
                print(xml_contracts_tree[1].attrib["code"], xml_contracts_tree[1].text, service[1])
                conversions.invalidate_key([service[1]], session["CharacterOwnerHash"])
                invalid_apis.add(service[1])
            else:
                for contract in xml_contracts_tree[1][0]:
                    issue_time = int(calendar.timegm(time.strptime(contract.attrib["dateIssued"], xml_time_pattern)))
                    if issue_time > filter_time:
                        bulk_run = True
                        bulk_op.find({
                            "_id.id": int(contract.attrib["contractID"]), "_id.service": service[0]
                        }).upsert().update(
                            {
                                "$set": {
                                    "issuer_id": int(contract.attrib["issuerID"]),
                                    "assignee_id": int(contract.attrib["assigneeID"]),
                                    "acceptor_id": int(contract.attrib["acceptorID"]),
                                    "start_station_id": int(contract.attrib["startStationID"]),
                                    "end_station_id": int(contract.attrib["endStationID"]),
                                    "type": contract.attrib["type"],
                                    "status": contract.attrib["status"],
                                    "title": contract.attrib["title"],
                                    "for_corp": int(contract.attrib["forCorp"]),
                                    "date_issued": contract.attrib["dateIssued"],
                                    "date_expired": contract.attrib["dateExpired"],
                                    "date_accepted": contract.attrib["dateAccepted"],
                                    "num_days": int(contract.attrib["numDays"]),
                                    "date_completed": contract.attrib["dateCompleted"],
                                    "price": float(contract.attrib["price"]),
                                    "reward": float(contract.attrib["reward"]),
                                    "collateral": float(contract.attrib["collateral"]),
                                    "volume": float(contract.attrib["volume"]),
                                    "issued_int": issue_time
                                }
                            })
    if bulk_run:
        try:
            bulk_op.execute()
        except BulkWriteError as bulk_op_error:
            print("error", bulk_op_error.details)

    return list(invalid_apis)
Example #50
def api_keys(api_key_list, unassociated=False, dashboard_id=None, verify_mask=True):
    """

    :param verify_mask: Choose whether to reject or expire non-conforming access masks
    :param api_key_list: [(key_id, vcode), (), ...]
    :param unassociated: True to add to unassociated API keys
    :param dashboard_id: Set the associated dashboard id. Defaults to the session variable.
    :return:
    """
    if unassociated:
        api_owner = "unassociated"
    elif dashboard_id:
        api_owner = dashboard_id
    else:
        api_owner = session["CharacterOwnerHash"]

    with open("configs/base.json", "r") as base_config_file:
        base_config = json.load(base_config_file)

    errors_list = []
    bulk_op = g.mongo.db.api_keys.initialize_ordered_bulk_op()
    bulk_run = False

    for key_id, vcode in api_key_list:
        db_api_cache = g.mongo.db.api_keys.find_one({"_id": api_owner,
                                                     "keys.key_id": {"$eq": int(key_id)}})
        cache_timer = 0
        if db_api_cache and api_owner != "unassociated":
            cache_timer_list = [key["cached_until"] for key in db_api_cache["keys"] if key["key_id"] == int(key_id)]
            cache_timer = max(cache_timer_list)
        elif api_owner == "unassociated":
            cache_timer = 0
        if not db_api_cache or cache_timer < time.time():

            xml_contracts_payload = {
                "keyID": key_id,
                "vCode": vcode
            }
            xml_api_key_response = requests.get("https://api.eveonline.com/account/APIKeyInfo.xml.aspx",
                                                data=xml_contracts_payload, headers=xml_headers)
            # XML Parse
            try:
                xml_api_key_tree = ElementTree.fromstring(xml_api_key_response.text)
            except ElementTree.ParseError:
                print(xml_api_key_response.text)
                return errors_list

            # Store in database
            xml_time_pattern = "%Y-%m-%d %H:%M:%S"
            failed = False
            expired = False
            if xml_api_key_tree[1].tag == "error":
                errors_list.append("CCP gave an error for key with id " +
                                   "{}. Ensure the key is not expired and is valid.".format(key_id))
                failed = True
            elif xml_api_key_tree[1][0].attrib["accessMask"] != str(base_config["access_mask"]):
                errors_list.append("Key with id {} is not (or no longer) a full API key.".format(key_id))
                if verify_mask:
                    failed = True
                else:
                    expired = True
            elif xml_api_key_tree[1][0].attrib["type"] != "Account":
                errors_list.append("Key with id {} is not an Account API key.".format(key_id))
                failed = True
            elif xml_api_key_tree[1][0].attrib["expires"].strip():
                errors_list.append("Key with id {} expires. Must be a non-expiring API key.".format(key_id))
                failed = True

            # Check for fail
            if failed:
                conversions.invalidate_key([key_id], api_owner)
                continue
            else:
                conversions.validate_key([key_id], api_owner, expired)

            # If same character is input, remove old keys first
            bulk_op.find({"_id": api_owner}).upsert().update(
                {
                    "$pull": {
                        "keys": {"key_id": int(key_id)}
                    }
                })
            if api_owner != "unassociated":
                # Remove keys from unassociated if found
                bulk_op.find({"_id": "unassociated"}).upsert().update(
                    {
                        "$pull": {
                            "keys": {"key_id": int(key_id)}
                        }
                    }
                )

            for api_character in xml_api_key_tree[1][0][0]:
                bulk_run = True
                update_request = {"$push": {"keys": {
                    "key_id": int(key_id),
                    "vcode": vcode,
                    "character_id": int(api_character.attrib["characterID"]),
                    "character_name": api_character.attrib["characterName"],
                    "cached_until": int(calendar.timegm(time.strptime(xml_api_key_tree[2].text,
                                                                      xml_time_pattern))),
                    "cached_str": xml_api_key_tree[2].text,
                    "corporation_id": int(api_character.attrib["corporationID"]),
                    "alliance_id": int(api_character.attrib["allianceID"]),
                    "corporation_name": api_character.attrib["corporationName"].strip(),
                    "alliance_name": api_character.attrib["allianceName"].strip()
                }}}
                if api_owner != "unassociated" or (api_owner == "unassociated" and not g.mongo.db.api_keys.find_one(
                        {"keys.key_id": {"$eq": int(key_id)}, "_id": {"$ne": "unassociated"}})):
                    bulk_op.find({"_id": api_owner}).upsert().update(update_request)

    if bulk_run:
        bulk_op.execute()

    return errors_list
Example #51
def get_zonelog_name(modelfile):
    root = ET.parse(modelfile).getroot()
    return root.findtext(".//zone-log-name")
Example #52
    def get_findings(self, file, test):

        ImmuniScanTree = ElementTree.parse(file)
        root = ImmuniScanTree.getroot()
        # validate XML file
        if 'Vulnerabilities' not in root.tag:
            raise NamespaceErr("This does not look like a valid expected Immuniweb XML file.")

        dupes = dict()

        for vulnerability in root.iter("Vulnerability"):
            """
                The Tags available in XML File are:
                ID, Name, Date, Status,
                Type, CWE_ID, CVE_ID, CVSSv3,
                Risk, URL, Description, PoC
            """
            mitigation = "N/A"
            impact = "N/A"
            title = vulnerability.find('Name').text
            reference = vulnerability.find('ID').text
            cwe = ''.join(i for i in vulnerability.find('CWE-ID').text if i.isdigit())
            cwe = cwe or None  # empty string means no CWE was reported
            cve = vulnerability.find('CVE-ID').text
            steps_to_reproduce = vulnerability.find('PoC').text
            # just to make sure severity is in the recognised sentence casing form
            severity = vulnerability.find('Risk').text.capitalize()
            # Treat a 'Warning' severity as 'Informational'
            if severity == 'Warning':
                severity = "Informational"

            description = vulnerability.find('Description').text
            url = vulnerability.find("URL").text
            parsedUrl = urlparse(url)
            protocol = parsedUrl.scheme
            query = parsedUrl.query
            fragment = parsedUrl.fragment
            path = parsedUrl.path
            port = ""  # Set port to empty string by default
            # Split the returned network address into host and port
            try:  # if there is a port number attached to the host address
                host, port = parsedUrl.netloc.split(':')
            except ValueError:  # there's no port attached to the address
                host = parsedUrl.netloc

            dupe_key = hashlib.md5(str(description + title + severity).encode('utf-8')).hexdigest()

            # check if finding is a duplicate
            if dupe_key in dupes:
                finding = dupes[dupe_key]  # fetch finding
                if description is not None:
                    finding.description += description
            else:  # finding is not a duplicate
                # create finding
                finding = Finding(title=title,
                    test=test,
                    cve=cve,
                    description=description,
                    severity=severity,
                    steps_to_reproduce=steps_to_reproduce,
                    cwe=cwe,
                    mitigation=mitigation,
                    impact=impact,
                    references=reference,
                    dynamic_finding=True)

                finding.unsaved_endpoints = list()
                dupes[dupe_key] = finding

                finding.unsaved_endpoints.append(Endpoint(
                        host=host, port=port,
                        path=path,
                        protocol=protocol,
                        query=query, fragment=fragment))

        return list(dupes.values())
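urlparse can do the host/port split performed manually above; a sketch of the difference (note that .port raises ValueError for a non-numeric port, where the manual split would keep it as a string):

from urllib.parse import urlparse

parsed = urlparse("https://example.com:8443/login?x=1#top")
host = parsed.hostname    # 'example.com' (lowercased, IPv6 brackets stripped)
port = parsed.port or ""  # 8443, or "" when the URL carries no port
print(host, port, parsed.path, parsed.query, parsed.fragment)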
Example #53
def create_users(config, verify):

    # retrieve ams info
    ams_host = config.get("AMS", "ams_host")
    ams_project = config.get("AMS", "ams_project")
    ams_token = config.get("AMS", "ams_token")
    ams_email = config.get("AMS", "ams_email")
    users_role = config.get("AMS", "users_role")
    ams_consumer = config.get("AMS", "ams_consumer")
    goc_db_url_arch = config.get("AMS", "goc_db_host")
    goc_db_site_url = "https://goc.egi.eu/gocdbpi/public/?method=get_site&sitename={{sitename}}"

    # retrieve authn info
    authn_host = config.get("AUTHN", "authn_host")
    authn_service_uuid = config.get("AUTHN", "service_uuid")
    authn_token = config.get("AUTHN", "authn_token")
    authn_service_host = config.get("AUTHN", "service_host")

    # dict that acts as a cache for site contact emails
    site_contact_emails = {}

    # cert key tuple
    cert_creds = (config.get("AMS", "cert"), config.get("AMS", "cert_key"))

    conf_services = config.get("AMS", "service-types").split(",")
    for srv_type in conf_services:

        # strip any whitespaces
        srv_type = srv_type.replace(" ", "")

        # user count
        user_count = 0

        # updated bindings count
        update_binding_count = 0

        # updated bindings names
        update_bindings_names = []

        # form the goc db url
        goc_db_url = goc_db_url_arch.replace("{{service-type}}", srv_type)
        LOGGER.info("\nAccessing url: " + goc_db_url)
        LOGGER.info("\nStarted the process for service-type: " + srv_type)

        # grab the xml data from goc db
        goc_request = requests.get(url=goc_db_url,
                                   cert=cert_creds,
                                   verify=False)
        LOGGER.info(goc_request.text)

        # users from goc db that don't have a dn registered
        missing_dns = []

        # build the xml object
        root = ET.fromstring(goc_request.text)
        # iterate through the xml object's service_endpoints
        for service_endpoint in root.findall("SERVICE_ENDPOINT"):
            service_type = service_endpoint.find("SERVICE_TYPE"). \
                text.replace(".", "-")

            # grab the dn
            service_dn = service_endpoint.find("HOSTDN")
            if service_dn is None:
                missing_dns.append(service_endpoint.find("HOSTNAME").text)
                continue

            hostname = service_endpoint.find("HOSTNAME").text.replace(".", "-")
            sitename = service_endpoint.find("SITENAME").text.replace(".", "-")

            # try to get the site's contact email
            contact_email = ams_email
            # check the if we have retrieved this site's contact email before
            site_name = service_endpoint.find("SITENAME").text
            if site_name in site_contact_emails:
                contact_email = site_contact_emails[site_name]
            else:
                try:
                    # try to retrieve the site info from gocdb
                    site_url = goc_db_site_url.replace("{{sitename}}",
                                                       site_name)
                    goc_site_request = requests.get(site_url,
                                                    cert=cert_creds,
                                                    verify=False)
                    site_xml_obj = ET.fromstring(goc_site_request.text)

                    # check if the site is in production
                    in_prod = site_xml_obj.find("SITE").find(
                        "PRODUCTION_INFRASTRUCTURE")
                    if in_prod.text != 'Production':
                        raise Exception("Not in production")

                    # check for certified or uncertified
                    cert_uncert = site_xml_obj.find("SITE").find(
                        "CERTIFICATION_STATUS")
                    if cert_uncert.text != "Certified" and cert_uncert.text != "Uncertified":
                        raise Exception("Neither certified nor uncertified")

                    contact_email = site_xml_obj.find("SITE").find(
                        "CONTACT_EMAIL").text
                    site_contact_emails[site_name] = contact_email

                except Exception as e:
                    LOGGER.warning(
                        "Skipping endpoint {} under site {}, {}".format(
                            hostname, site_name, e))

            # Create AMS user
            user_binding_name = \
                service_type + "---" + hostname + "---" + sitename

            # convert the dn
            try:
                service_dn = RdnSequence(service_dn.text).__str__()
            except ValueError as ve:
                LOGGER.error("Invalid DN: {}. Exception: {}".format(
                    service_dn.text, ve))
                continue

            project = {'project': ams_project, 'roles': [users_role]}
            usr_create = {'projects': [project], 'email': contact_email}

            # create the user
            api_url = 'https://{0}/v1/projects/{1}/members/{2}?key={3}'.format(
                ams_host, ams_project, user_binding_name, ams_token)
            ams_usr_crt_req = requests.post(url=api_url,
                                            data=json.dumps(usr_create),
                                            verify=verify)
            LOGGER.info(ams_usr_crt_req.text)

            ams_user_uuid = ""

            # if the response is neither a 200(OK) nor a 409(already exists)
            # then move on to the next user
            if ams_usr_crt_req.status_code != 200 and ams_usr_crt_req.status_code != 409:
                LOGGER.critical("\nUser: "******"\nSomething went wrong while creating ams user." +
                    "\nBody data: " + str(usr_create) + "\nResponse Body: " +
                    ams_usr_crt_req.text)
                continue

            if ams_usr_crt_req.status_code == 200:
                ams_user_uuid = ams_usr_crt_req.json()["uuid"]
                # count how many users have been created
                user_count += 1

            # If the user already exists, Get user by username
            if ams_usr_crt_req.status_code == 409:
                proj_member_list_url = "https://{0}/v1/projects/{1}/members/{2}?key={3}".format(
                    ams_host, ams_project, user_binding_name, ams_token)
                ams_usr_get_req = requests.get(url=proj_member_list_url,
                                               verify=verify)

                # if the user retrieval was ok
                if ams_usr_get_req.status_code == 200:
                    LOGGER.info(
                        "\nSuccessfully retrieved user {} from ams".format(
                            user_binding_name))
                    ams_user_uuid = ams_usr_get_req.json()["uuid"]
                else:
                    LOGGER.critical("\nCould not retrieve user {} from ams."
                                    "\n Response {}".format(
                                        user_binding_name,
                                        ams_usr_get_req.text))
                    continue

            # Create the respective AUTH binding
            bd_data = {
                'service_uuid': authn_service_uuid,
                'host': authn_service_host,
                'auth_identifier': service_dn,
                'unique_key': ams_user_uuid,
                "auth_type": "x509"
            }

            create_binding_url = "https://{0}/v1/bindings/{1}?key={2}".format(
                authn_host, user_binding_name, authn_token)

            authn_binding_crt_req = requests.post(url=create_binding_url,
                                                  data=json.dumps(bd_data),
                                                  verify=verify)
            LOGGER.info(authn_binding_crt_req.text)

            # if the response is neither a 201(Created) nor a 409(already exists)
            if authn_binding_crt_req.status_code != 201 and authn_binding_crt_req.status_code != 409:
                LOGGER.critical(
                    "Something went wrong while creating a binding." +
                    "\nBody data: " + str(bd_data) + "\nResponse: " +
                    authn_binding_crt_req.text)
                continue

            # if the binding already exists, check for an updated DN from gocdb
            if authn_binding_crt_req.status_code == 409:
                retrieve_binding_url = "https://{0}/v1/bindings/{1}?key={2}".format(
                    authn_host, user_binding_name, authn_token)
                authn_ret_bind_req = requests.get(url=retrieve_binding_url,
                                                  verify=verify)
                # if the binding retrieval was ok
                if authn_ret_bind_req.status_code == 200:
                    LOGGER.info(
                        "\nSuccessfully retrieved binding {} from authn. Checking for DN update."
                        .format(user_binding_name))
                    binding = authn_ret_bind_req.json()
                    # check if the dn has changed
                    if binding["auth_identifier"] != service_dn:
                        # update the respective binding with the new dn
                        bind_upd_req_url = "https://{0}/v1/bindings/{1}?key={2}".format(
                            authn_host, user_binding_name, authn_token)
                        upd_bd_data = {"auth_identifier": service_dn}
                        authn_bind_upd_req = requests.put(
                            url=bind_upd_req_url,
                            data=json.dumps(upd_bd_data),
                            verify=verify)
                        LOGGER.info(authn_bind_upd_req.text)
                        if authn_bind_upd_req.status_code == 200:
                            update_binding_count += 1
                            update_bindings_names.append(user_binding_name)
                else:
                    LOGGER.critical(
                        "\nCould not retrieve binding {} from authn."
                        "\n Response {}".format(user_binding_name,
                                                authn_ret_bind_req.text))
                    continue

            # since both the ams user was created or already existed AND the authn binding was created or already existed
            # move to topic and subscription creation

            # create new topic
            primary_key = service_endpoint. \
                find("PRIMARY_KEY").text.replace(' ', '')
            topic_name = 'SITE_' + sitename + '_ENDPOINT_' + primary_key
            topic_crt_req = requests.put(
                "https://" + ams_host + "/v1/projects/" + ams_project +
                "/topics/" + topic_name + "?key=" + ams_token,
                verify=verify)

            topic_authorized_users = [user_binding_name]
            if topic_crt_req.status_code != 200:
                if topic_crt_req.status_code != 409:
                    LOGGER.critical(
                        "Something went wrong while creating topic " +
                        topic_name + "\nResponse: " + topic_crt_req.text)
                    continue
                else:
                    get_topic_acl_req = requests.get(
                        "https://" + ams_host + "/v1/projects/" + ams_project +
                        "/topics/" + topic_name + ":acl?key=" + ams_token,
                        verify=verify)
                    if get_topic_acl_req.status_code == 200:
                        acl_users = json.loads(get_topic_acl_req.text)
                        topic_authorized_users = topic_authorized_users + acl_users[
                            'authorized_users']
                        # remove duplicates
                        topic_authorized_users = list(
                            set(topic_authorized_users))

            # modify the authorized users
            modify_topic_req = requests.post(
                "https://" + ams_host + "/v1/projects/" + ams_project +
                "/topics/" + topic_name + ":modifyAcl?key=" + ams_token,
                data=json.dumps({'authorized_users': topic_authorized_users}),
                verify=verify)
            LOGGER.critical("Modified ACL for topic: {0} with users {1}."
                            "Response from AMS {2}".format(
                                topic_name, str(user_binding_name),
                                modify_topic_req.text))

            # create new sub
            primary_key = service_endpoint.find("PRIMARY_KEY").text.replace(
                ' ', '')
            sub_name = 'SITE_' + sitename + '_ENDPOINT_' + primary_key
            sub_authorized_users = [ams_consumer]
            sub_data = dict()
            # the subscription attaches to the same-named topic created above
            sub_data["topic"] = "projects/" + ams_project + "/topics/" + topic_name
            sub_data["ackDeadlineSeconds"] = 100

            sub_crt_req = requests.put(
                "https://" + ams_host + "/v1/projects/" + ams_project +
                "/subscriptions/" + sub_name + "?key=" + ams_token,
                data=json.dumps(sub_data),
                verify=verify)

            if sub_crt_req.status_code != 200 and sub_crt_req.status_code != 409:
                LOGGER.critical(
                    "Something went wrong while creating subscription " +
                    sub_name + "\nResponse: " + sub_crt_req.text)

            if sub_crt_req.status_code == 409:
                get_sub_acl_req = requests.get(
                    "https://" + ams_host + "/v1/projects/" + ams_project +
                    "/subscriptions/" + sub_name + ":acl?key=" + ams_token,
                    verify=verify)
                if get_sub_acl_req.status_code == 200:
                    acl_users = json.loads(get_sub_acl_req.text)
                    sub_authorized_users = sub_authorized_users + acl_users[
                        'authorized_users']
                    # remove duplicates
                    sub_authorized_users = list(set(sub_authorized_users))

            # modify the authorized users
            modify_sub_req = requests.post(
                "https://" + ams_host + "/v1/projects/" + ams_project +
                "/subscriptions/" + sub_name + ":modifyAcl?key=" + ams_token,
                data=json.dumps({'authorized_users': sub_authorized_users}),
                verify=verify)
            LOGGER.critical(
                "Modified ACL for subscription: {0} with users {1}."
                "Response from AMS {2}".format(sub_name, sub_authorized_users,
                                               modify_sub_req.text))

        LOGGER.critical("Service Type: " + srv_type)
        LOGGER.critical("Missing DNS: " + str(missing_dns))
        LOGGER.critical("Total Users Created: " + str(user_count))
        LOGGER.critical("Total Bindings Updated: " + str(update_binding_count))
        LOGGER.critical("Updated bingings: " + str(update_bindings_names))

        LOGGER.critical("-----------------------------------------")
Ejemplo n.º 54
0
    def play(self, content_id, retry=0):
        self.log("Initializing playback... " + str(content_id))

        self.login()

        media_item = self.get_from_hbogo(
            self.API_URL_BROWSE + content_id + self.LANGUAGE_CODE, 'xml')
        if media_item is False:
            return
        media_info = self.construct_media_info(media_item.find('.//item'))

        if self.lograwdata:
            self.log("Play Media: " + ET.tostring(media_item, encoding='utf8'))

        mpd_pre_url = media_item.find(
            './/media:content[@profile="HBO-DASH-WIDEVINE"]',
            namespaces=self.NAMESPACES).get('url') + '&responseType=xml'

        mpd = self.get_from_hbogo(mpd_pre_url, 'xml')
        if mpd is False:
            return
        if self.lograwdata:
            self.log("Manifest: " + ET.tostring(mpd, encoding='utf8'))

        mpd_url = mpd.find('.//url').text
        self.log("Manifest url: " + str(mpd_url))

        media_guid = media_item.find('.//guid').text

        license_headers = 'X-Clearleap-AssetID=' + media_guid + '&X-Clearleap-DeviceId=' + self.API_DEVICE_ID + \
                          '&X-Clearleap-DeviceToken=' + self.API_DEVICE_TOKEN + '&Content-Type='

        license_url = 'https://' + self.API_HOST + '/cloffice/drm/wv/' + media_guid + '|' + license_headers + '|R{SSM}|'

        li = xbmcgui.ListItem(path=mpd_url)
        li.setArt(media_info["art"])
        li.setInfo(type="Video", infoLabels=media_info["info"])

        protocol = 'mpd'
        drm = 'com.widevine.alpha'
        from inputstreamhelper import Helper  # type: ignore
        is_helper = Helper(protocol, drm=drm)
        if is_helper.check_inputstream():
            li.setProperty('inputstreamaddon', 'inputstream.adaptive')
            li.setProperty('inputstream.adaptive.license_type', drm)
            li.setProperty('inputstream.adaptive.manifest_type', protocol)
            li.setProperty('inputstream.adaptive.license_key', license_url)

            li.setMimeType('application/dash+xml')
            li.setContentLookup(False)
            # GET SUBTITLES
            folder = xbmc.translatePath(self.addon.getAddonInfo('profile'))
            folder = folder + 'subs' + os.sep + media_guid + os.sep
            if self.addon.getSetting('forcesubs') == 'true':
                self.log(
                    "Force subtitles enabled, downloading and converting subtitles in: "
                    + str(folder))
                if not os.path.exists(os.path.dirname(folder)):
                    try:
                        os.makedirs(os.path.dirname(folder))
                    except OSError as exc:  # Guard against race condition
                        if exc.errno != errno.EEXIST:
                            raise
                try:
                    subs = media_item.findall('.//media:subTitle',
                                              namespaces=self.NAMESPACES)
                    subs_paths = []
                    for sub in subs:
                        self.log("Processing subtitle language code: " +
                                 str(sub.get('lang')) + " URL: " +
                                 str(sub.get('href')))
                        r = requests.get(sub.get('href'))
                        with open(
                                str(folder) + str(sub.get('lang')) + ".xml",
                                'wb') as f:
                            f.write(r.content)
                        ttml = Ttml2srt(
                            str(folder) + str(sub.get('lang')) + ".xml", 25)
                        srt_file = ttml.write_srt_file(
                            str(folder) + str(sub.get('lang')))
                        self.log("Subtitle converted to srt format")
                        subs_paths.append(srt_file)
                        self.log("Subtitle added: " + srt_file)
                    self.log("Setting subtitles: " + str(subs_paths))
                    li.setSubtitles(subs_paths)
                    self.log("Subtitles set")
                except Exception:
                    self.log("Unexpected error in subtitles processing: " +
                             traceback.format_exc())

            self.log("Play url: " + str(li))
            xbmcplugin.setResolvedUrl(self.handle, True, listitem=li)
        else:
            self.log("DRM problem playback not possible")
            xbmcplugin.setResolvedUrl(self.handle, False, listitem=li)
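
The license_key string handed to inputstream.adaptive packs four pipe-separated fields: license URL, request headers, post data and response handling. A small sketch of the same construction as a helper (the function name and the dict-based header input are assumptions; the field layout is taken from the string built in play() above):

def build_widevine_license_key(license_server_url, headers):
    # inputstream.adaptive format: URL | request headers | post data | response handling
    # 'R{SSM}' posts the raw Widevine challenge; the trailing field is left empty
    header_str = '&'.join('{0}={1}'.format(k, v) for k, v in headers.items())
    return license_server_url + '|' + header_str + '|R{SSM}|'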
Ejemplo n.º 55
0
    def tostring(self, xml):
        return etree.tostring(xml)
Ejemplo n.º 56
0
    def __init__(self, file_name):
        assert isinstance(file_name, str)

        self.file_name = file_name
        self.file_tree = et.parse(self.file_name).getroot()
        self.read_line = 0
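
The read_line counter suggests the object is consumed element by element; a hypothetical companion method, not part of the original snippet, might look like:

    def read_next(self):
        # return the next direct child of the root, or None when exhausted
        children = list(self.file_tree)
        if self.read_line >= len(children):
            return None
        element = children[self.read_line]
        self.read_line += 1
        return element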
Ejemplo n.º 57
0
    def parse(self, content):
        try:
            return etree.fromstring(content)
        except Exception as e:
            raise XMLParseError("Error while parsing XML: %s" % e)
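
XMLParseError is raised but never defined in this fragment; a minimal sketch of the missing class (the docstring is an assumption):

class XMLParseError(Exception):
    """Raised when content handed to parse() is not well-formed XML."""
    pass

Catching the broad Exception and re-raising a domain-specific error keeps the parser library's exception types out of the caller's code.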
Ejemplo n.º 58
0
    def __init__(self, filename, test):
        fortify_scan = ElementTree.parse(filename)
        root = fortify_scan.getroot()

        # Get Date
        # Element.getchildren() was removed in Python 3.9; index the elements directly
        date_string = root[5][1][2].text
        date_list = date_string.split()[1:4]
        date_act = "".join(date_list)
        find_date = parser.parse(date_act)

        # Get Language
        lang_string = root[8][4][2].text
        lang_need_string = re.findall(
            "^.*com.fortify.sca.Phase0HigherOrder.Languages.*$", lang_string,
            re.MULTILINE)
        lang_my_string = lang_need_string[0]
        language = lang_my_string.split('=')[1]
        if language not in self.language_list:
            self.language_list.append(language)

        # Get Category Information:
        # Abstract, Explanation, Recommendation, Tips
        cat_meta = {}
        # Get all issues
        issues = []
        meta_pair = ({}, {})
        issue_pair = ([], [])
        for ReportSection in root.findall('ReportSection'):
            if ReportSection.findtext('Title') in [
                    "Results Outline", "Issue Count by Category"
            ]:
                place = 0 if ReportSection.findtext(
                    'Title') == "Results Outline" else 1
                # Get information on the vulnerability like the Abstract, Explanation,
                # Recommendation, and Tips
                for group in ReportSection.iter("GroupingSection"):
                    title = group.findtext("groupTitle")
                    maj_attr_summary = group.find("MajorAttributeSummary")
                    # an Element with no children is falsy, so test for None explicitly
                    if maj_attr_summary is not None:
                        meta_info = maj_attr_summary.findall("MetaInfo")
                        meta_pair[place][title] = {
                            x.findtext("Name"): x.findtext("Value")
                            for x in meta_info
                        }
                # Collect all issues
                for issue in ReportSection.iter("Issue"):
                    issue_pair[place].append(issue)

        if len(issue_pair[0]) > len(issue_pair[1]):
            issues = issue_pair[0]
            cat_meta = meta_pair[0]
        else:
            issues = issue_pair[1]
            cat_meta = meta_pair[1]

        # All issues obtained, create a map for reference
        issue_map = {}
        for issue in issues:
            details = {
                "Category": issue.find("Category").text,
                "Folder": issue.find("Folder").text,
                "Kingdom": issue.find("Kingdom").text,
                "Abstract": issue.find("Abstract").text,
                "Friority": issue.find("Friority").text,
                "FileName": issue.find("Primary").find("FileName").text,
                "FilePath": issue.find("Primary").find("FilePath").text,
                "LineStart": issue.find("Primary").find("LineStart").text,
                "Snippet": issue.find("Primary").find("Snippet").text
            }

            if issue.find("Source"):
                source = {
                    "FileName": issue.find("Source").find("FileName").text,
                    "FilePath": issue.find("Source").find("FilePath").text,
                    "LineStart": issue.find("Source").find("LineStart").text,
                    "Snippet": issue.find("Source").find("Snippet").text
                }
                details["Source"] = source

            issue_map.update({issue.attrib['iid']: details})
        # map created

        self.items = []
        dupes = set()
        for issue_key, issue in issue_map.items():
            title = self.format_title(issue["Category"], issue["FileName"],
                                      issue["LineStart"])
            if title not in dupes:
                self.items.append(
                    Finding(title=title,
                            severity=issue["Friority"],
                            numerical_severity=Finding.get_numerical_severity(
                                issue["Friority"]),
                            file_path=issue['FilePath'],
                            line_number=int(issue['LineStart']),
                            line=int(issue['LineStart']),
                            static_finding=True,
                            active=False,
                            verified=False,
                            test=test,
                            date=find_date,
                            description=self.format_description(
                                issue, cat_meta),
                            mitigation=self.format_mitigation(issue, cat_meta),
                            unique_id_from_tool=issue_key))
                dupes.add(title)
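
format_title, format_description and format_mitigation belong to the surrounding parser class and are not shown here; for the dupes set to work, format_title only has to be deterministic for a given (category, file, line) triple. A hypothetical sketch:

    def format_title(self, category, filename, line_start):
        # any stable combination works; the dedup set keys on this exact string
        return "{0} - {1}:{2}".format(category, filename, line_start)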
Ejemplo n.º 59
0
                    # without_extension = name.split(".")[0]
                    # im.save("./maurdor/png/%s.png" % without_extension, "PNG", quality=100)
                except Exception as e:
                    print(e)

# now we need to update the XML GEDI files for them to point at the right image files
metadata = [f for f in os.listdir(yourpath) if f.endswith(".xml")]

for xmlfile in metadata:
    print("Processing file '{}'".format(xmlfile))

    xmlns = r"{http://lamp.cfar.umd.edu/media/projects/GEDI/}"

    try:
        # parsing the XML file
        xml = ET.parse("{}/{}".format(yourpath, xmlfile))
        root = xml.getroot()

        doc = root.find("{}DL_DOCUMENT".format(xmlns))
        if doc is None:
            print("No DL_DOCUMENT")
            raise ET.ParseError()

        pages = doc.findall("{}DL_PAGE".format(xmlns))

        # change src from .tif to .png for each page
        for page in pages:
            imgfile = page.get("src")

            filename = imgfile[:-4]
            extension = imgfile[-4:]
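
The loop body is cut off here; given the stated goal (point each DL_PAGE src at the .png instead of the .tif), a plausible continuation, sketched with the same variables but assumed rather than recovered from the original, would be:

for page in pages:
    imgfile = page.get("src")
    filename, extension = imgfile[:-4], imgfile[-4:]
    if extension == ".tif":
        page.set("src", filename + ".png")

# write the tree back, keeping the GEDI namespace as the default namespace
ET.register_namespace('', "http://lamp.cfar.umd.edu/media/projects/GEDI/")
xml.write("{}/{}".format(yourpath, xmlfile), encoding="utf-8",
          xml_declaration=True)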
Ejemplo n.º 60
0
def _get_raw_xml(data, path):
    xml = data.find(path)
    if xml is not None:
        return ElementTree.tostring(xml, encoding="utf-8")
    else:
        return ""