Example No. 1
        def make_result(template_filename, desc, context, status=RESULT_STATUS.success, importance=3):
            # cache templates
            if template_filename not in self.__class__.templates:
                self.__class__.templates[template_filename] = \
                    Template(open(os.path.join(os.path.dirname(__file__), template_filename)).read())
            template = self.__class__.templates[template_filename]

            res = Results(test=command.test, group=RESULT_GROUP.seo, importance=importance)
            res.output_desc = unicode(desc)
            res.output_full = template.render(Context(context))
            res.status = status
            return res
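A hypothetical call to this helper, for context (the template path, description, and context key are illustrative, not taken from the original module):

    # hypothetical usage of make_result; names are illustrative only
    res = make_result('templates/gplus.html',
                      _("Google+ stats"),
                      {'gplus1': 42},
                      status=RESULT_STATUS.warning,
                      importance=2)
    res.save()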
Example No. 2
    def check_gplus(self, command):
        template = Template(
            open(
                os.path.join(os.path.dirname(__file__),
                             'templates/gplus.html')).read())

        url = "https://plusone.google.com/u/0/_/+1/fastbutton?count=true&url=%s" % command.test.url

        rawdata = urllib2.urlopen(url).read()
        #<div id="aggregateCount" class="V1">1\xc2\xa0936</div>

        #remove non-breaking space
        rawdata = rawdata.replace("\xc2\xa0", "")

        blob = re.search(r'id="aggregateCount"[^>]*>([\d\s]+)', rawdata)

        if not blob:
            return

        # strip any remaining whitespace before converting, e.g. "1 936"
        gplus1 = int(re.sub(r"\s", "", blob.group(1)))

        from scanner.models import Results
        res = Results(test=command.test, group=RESULT_GROUP.seo, importance=2)
        res.output_desc = unicode(_("Google+ stats"))
        res.output_full = template.render(Context({'gplus1': gplus1}))

        if gplus1 < 10:
            res.status = RESULT_STATUS.warning
        else:
            res.status = RESULT_STATUS.success
        res.save()
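The urlopen call above blocks indefinitely on a slow endpoint and raises on network errors; a minimal hardening sketch (the 10-second timeout is an assumed value):

    # guard the network call; the timeout argument exists since Python 2.6
    try:
        rawdata = urllib2.urlopen(url, timeout=10).read()
    except urllib2.URLError:
        return  # treat an unreachable endpoint as "no data"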
Example No. 3
    def check_twitter(self, command):
        api_url = "http://urls.api.twitter.com/1/urls/count.json"
        args = {
            'url': command.test.url,
        }

        template = Template(
            open(
                os.path.join(os.path.dirname(__file__),
                             'templates/twitter.html')).read())

        args_enc = urllib.urlencode(args)
        rawdata = urllib.urlopen(api_url, args_enc).read()
        tw_data = json.loads(rawdata)

        from scanner.models import Results
        res = Results(test=command.test, group=RESULT_GROUP.seo, importance=2)
        res.output_desc = unicode(_("Twitter stats"))
        res.output_full = template.render(Context({'tw_data': tw_data}))

        if tw_data["count"] < 10:
            res.status = RESULT_STATUS.warning
        else:
            res.status = RESULT_STATUS.success
        res.save()
Example No. 4
 def render_template_for(mime, title, bytes_warning=100 * 1024):
     try:
         total_percent_saved = (float(total_bytes_saved[mime]) /
                                total_bytes[mime]) * 100.0
     except ZeroDivisionError:
         total_percent_saved = 0.0
     ctx = {
         'files': optimized_files[mime],
         'total_bytes': total_bytes[mime],
         'total_bytes_saved': total_bytes_saved[mime],
         'total_percent_saved': total_percent_saved,
     }
     template = Template(
         open(
             os.path.join(os.path.dirname(__file__),
                          'templates/%s.html' % mime)).read())
     res = Results(test=command.test,
                   group=RESULT_GROUP.performance,
                   importance=2)
     res.output_desc = unicode(title)
     res.output_full = template.render(Context(ctx))
     if total_bytes_saved[mime] < bytes_warning:
         res.status = RESULT_STATUS.success
     else:
         res.status = RESULT_STATUS.warning
     res.save()
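A hypothetical invocation of the helper above, assuming the per-mime dictionaries (optimized_files, total_bytes, total_bytes_saved) were filled in beforehand; the mime key and threshold are illustrative:

    # hypothetical usage; 'text/css' must be a key in the accumulated dicts
    render_template_for('text/css', _("CSS optimization"),
                        bytes_warning=50 * 1024)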
Example No. 5
    def run(self, command):
        if not command.test.check_seo:
            return STATUS.success

        from scanner.models import Results
        url = command.test.url

        res = Results(test=command.test, group=RESULT_GROUP.seo, importance=2)
        res.output_desc = unicode(_("URL length"))
        res.output_full = unicode(
            _("<p>Usability of your website requires that the URL is short and easy to remember. A descriptive URL is better recognized by search engines. A user should be able to look at the address bar (URL) and make an accurate guess about the content of the page before entering it (e.g., http://www.company.com/en/products). An SEO strategy should contain a comprehensive policy of URL handling.</p> <p>SEO folks suggest that a URL should be no longer than 90 characters (and on average about 75 characters).</p>"
              ))

        tmpurl = urlparse(command.test.url)
        urllength = len(tmpurl.netloc) + len(tmpurl.path) + len(tmpurl.query)

        if urllength > 80:
            res.status = RESULT_STATUS.warning
            res.output_full += unicode(
                _("Your webpage URL %s is <b>%s</b> characters long. We suggest not crossing the 90-character border."
                  % (command.test.url, urllength)))
        else:
            res.status = RESULT_STATUS.success
            res.output_full += unicode(
                _("Your webpage URL %s is <b>%s</b> characters long. Good!" %
                  (command.test.url, urllength)))
        res.save()

        return STATUS.success
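A quick worked example of the length computation above; note that the scheme and the "://" separator are deliberately not counted:

    from urlparse import urlparse  # Python 2 module name

    parts = urlparse("http://www.company.com/en/products?lang=en")
    # len("www.company.com") + len("/en/products") + len("lang=en")
    # = 15 + 12 + 7 = 34 characters
    urllength = len(parts.netloc) + len(parts.path) + len(parts.query)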
Example No. 6
    def run(self, command):
        path = command.test.download_path + "/"
        efound = ""

        try:
            self.log.debug("Recursive search for plaintext emails in %s " %
                           (path))

            # We want to recursively grep all HTML files and look for anything that looks like an email address
            filelist = []
            for root, dirs, files in os.walk(str(path)):
                for file in files:
                    if re.search(r'\.(html?|php|xml|txt|js)$',
                                 file) is not None:
                        filelist.append(os.path.join(root, file))

            for file in filelist:
                #log.debug("Analizing file %s "%(file))
                memFile = open(file)
                for line in memFile.readlines():
                    match = re.search(
                        r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,6}",
                        line)
                    if match is not None:
                        efound += "%s found in %s <br />" % (match.group(),
                                                             file[len(path):])
                memFile.close()

            from scanner.models import Results
            res = Results(test=command.test,
                          group=RESULT_GROUP.mail,
                          importance=5)
            res.output_desc = unicode(_("Look for plain-text email addresses"))
            res.output_full = unicode(
                _("<p>Spammers use automatic <a href='http://en.wikipedia.org/wiki/Email_address_harvesting'>email harvesters</a> to send SPAM to email addresses found on websites. To avoid being spammed you should not put your email address in plain text on your webpage. Use one of the cloaking techniques instead.</p>"
                  ))

            #TODO: list possible techniques

            if efound:
                res.status = RESULT_STATUS.warning
                res.output_full += unicode(
                    _("<p>We have found at least one email address on your website! Please review your website and remove email addresses or obfuscate them. </p>"
                      ))
            else:
                res.status = RESULT_STATUS.success
                res.output_full += unicode(
                    _("<p>OK, we did not find any plain-text e-mail addresses on your website.</p>"
                      ))
            res.save()

            #as the plugin finished - it's a success
            return STATUS.success
        except Exception, e:
            self.log.exception("No check can be done: %s " % (e))
            return STATUS.exception
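One cloaking technique the TODO above could list: HTML-entity encoding of the address. A minimal sketch that trivial harvesters miss (a determined scraper can still decode it):

    def obfuscate_email(addr):
        # encode each character as a decimal HTML entity; browsers render
        # it normally, but naive regex-based harvesters do not match it
        return ''.join('&#%d;' % ord(c) for c in addr)

    # obfuscate_email('user@example.com')
    # -> '&#117;&#115;&#101;&#114;&#64;&#101;...'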
Example No. 7
    def run(self, command):
        if not command.test.check_seo:
            return STATUS.success
        domain = command.test.url

        query = urllib.urlencode({'q': 'link:%s' % (domain)})
        url = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&hl=en&%s' % (
            query)
        search_results = urllib.urlopen(url)
        jdata = json.loads(search_results.read())

        if 'estimatedResultCount' not in jdata['responseData']['cursor']:
            self.log.debug("no estimatedResultCount")
            return STATUS.exception

        from scanner.models import Results
        res = Results(test=command.test, group=RESULT_GROUP.seo, importance=1)
        res.output_desc = unicode(_("google backlinks "))
        res.output_full = unicode(
            _(
                '<p>There are about %(number_of_sites)s sites linking to your site. <a href="%(url)s">See them!</a></p> <p><small>This data is provided by Google and may be inaccurate.</small></p>'
                % {
                    "number_of_sites":
                    jdata['responseData']['cursor']['estimatedResultCount'],
                    "url":
                    jdata['responseData']['cursor']['moreResultsUrl']
                }))
        res.status = RESULT_STATUS.info
        res.save()

        #there was no exception - test finished with success
        return STATUS.success
Example No. 8
    def run(self, command):

        if not command.test.check_seo:
            return

        from scanner.models import Results

        chars_total = 0.0
        chars_text = 0.0
        path = str(command.test.download_path)

        for file_info in command.test.downloaded_files:
            file_path = os.path.join(path, file_info['path'])

            if 'html' not in file_info['mime']:
                continue

            self.log.debug('analyzing file %s' % file_path)
            try:
                with open(file_path, 'r') as f:
                    orig = f.read()

                    chars_total += len(orig)
                    chars_text += len(clean_html(orig))
            except (IOError, OSError) as error:
                self.log.warning('error in processing %s: %s' % (file_path, error))
                continue

        try:
            ratio = (chars_text / chars_total) * 100
        except ZeroDivisionError:
            ratio = 0.0

        if ratio == 0.0:
            self.log.warning('probably something went wrong')
            return STATUS.unsuccess

        res = Results(test=command.test, group=RESULT_GROUP.seo, importance=2)
        res.output_desc = unicode(_("Code to text ratio "))
        res.output_full = unicode(_("<p>The code to text(content) ratio represents the percentage of actual text in your web page. Our software extracts content from website and computes the ratio. Some SEO folks say that perfect ratio should be between 20% and 70%, the others say that this parameter has little to none influence on SEO.</p>"))
        if ratio > 20 and ratio < 70:
            res.status = RESULT_STATUS.success
            res.output_full += unicode(_("<p>Good, your code to text ratio is  <b>%.1f%%</b></p>" % ratio))
        else:
            res.status = RESULT_STATUS.warning
            res.output_full += unicode(_("<p>Your code to text ratio is <b>%.1f%%</b>, maybe you could review content on your website and work on the copy?</p>" % ratio))

        res.save()

        return STATUS.success
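A small worked example of the ratio, assuming clean_html strips markup and keeps only the visible text:

    orig = "<html><body><p>Hello world</p></body></html>"
    # clean_html(orig) would leave roughly "Hello world": 11 of 44 chars,
    # so the ratio is (11 / 44.0) * 100 = 25.0%, inside the 20-70% band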
Example No. 9
    def run(self, command):
        if not command.test.check_security:
            return STATUS.success
        try:
            #scan website
            cmd = PATH_CLAMSCAN + " " + command.test.download_path
            args = shlex.split(cmd)
            p = subprocess.Popen(args, stdout=subprocess.PIPE)
            (output, stderrdata2) = p.communicate()
            if p.returncode != 0:
                self.log.exception("%s returned non-0 status, stderr: %s " %
                                   (PATH_CLAMSCAN, stderrdata2))
                return STATUS.exception

            numberofthreats = int(
                re.search('Infected files: (?P<kaczka>[0-9]*)',
                          output).group('kaczka'))

            from scanner.models import Results
            res = Results(test=command.test,
                          group=RESULT_GROUP.security,
                          importance=5)
            res.output_desc = unicode(_("Antivirus check"))

            if numberofthreats > 0:
                res.status = RESULT_STATUS.error
                res.output_full = unicode(
                    _("Our antivirus found <b>%s</b> infected files on your website"
                      % (numberofthreats)))
            else:
                res.status = RESULT_STATUS.success
                res.output_full = unicode(
                    _("Our antivirus claims that there are no infected files on your website."
                      ))
            res.save()

            #as the plugin finished - it's a success
            return STATUS.success
        except OSError, e:
            self.log.error(
                "OSError, check if clamscan file is present. Details %s " %
                (e))
            return STATUS.exception
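clamscan prints a summary block that the regex above targets; a self-contained check of the extraction (the sample output is typical, not captured from a real run):

    sample = ("----------- SCAN SUMMARY -----------\n"
              "Known viruses: 8000000\n"
              "Infected files: 2\n")
    m = re.search('Infected files: (?P<kaczka>[0-9]*)', sample)
    assert int(m.group('kaczka')) == 2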
Example No. 10
    def run(self, command):
        if not command.test.check_security:
            return STATUS.success
        domain = command.test.url

        conn = httplib.HTTPSConnection("sb-ssl.google.com")
        conn.request("GET", sburl + domain)
        response = conn.getresponse()
        httpstatus = str(response.status)
        httpbody = str(response.read())

        from scanner.models import Results
        res = Results(test=command.test,
                      group=RESULT_GROUP.security,
                      importance=3)
        res.output_desc = unicode(_("Google Safe Browsing "))

        message = '<p class="muted"><small>For more information please visit following sites: www.antiphishing.org, StopBadware.org. <a href="http://code.google.com/apis/safebrowsing/safebrowsing_faq.html#whyAdvisory">Advisory provided by Google</a></small></p>'

        if (int(httpstatus) == 204):
            res.output_full = unicode(
                _('<p>Your domain is not listed at the Google Safe Browsing Blacklist. <a href="http://www.google.com/safebrowsing/diagnostic?site=%s">Check it at Google</a>. This means that there is probably no malware or phishing.</p> '
                  % domain) + message)
            res.status = RESULT_STATUS.success

        elif (int(httpstatus) == 200):
            res.output_full = unicode(
                _('<p>Your domain is listed at the Google Safe Browsing Blacklist because of %(httpbody)s. <a href="http://www.google.com/safebrowsing/diagnostic?site=%(domain)s">Check it at Google</a>. Please check your website because it is possible that there is %(httpbody)s.</p> '
                  % {
                      'domain': domain,
                      'httpbody': httpbody
                  } + message))
            res.status = RESULT_STATUS.error
        else:
            self.log.exception(
                "Google sent an unexpected HTTP code:%s body:%s " %
                (httpstatus, httpbody))
            return STATUS.exception
        res.save()

        #there was no exception - test finished with success
        return STATUS.success
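sburl is defined elsewhere in the module; given the 204/200 handling above, it presumably pointed at the (since retired) Safe Browsing Lookup API, along these lines (client name and key are placeholders, not from the source):

    # assumed shape of the module-level constant used above
    sburl = ("/safebrowsing/api/lookup?client=webcheck&apikey=YOUR_API_KEY"
             "&appver=1.0&pver=3.0&url=")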
Example No. 11
    def run(self, command):
        conn = httplib.HTTPConnection(command.test.domain(), command.test.port())
        conn.request("HEAD", "/", body="",
                     headers={'Accept-Encoding': 'gzip,deflate,bzip2,exi',
                              'Referer': 'http://webcheck.me/'})
        response = conn.getresponse()
        httpstatus = str(response.status)


        if not httpstatus:
            self.log.exception(_("Error: Empty httpstatus provided "))
            return STATUS.exception

        if not (httpstatus.isdigit()):
            self.log.exception(_("Error: Non numerical httpstatus code "))
            return STATUS.unsuccess

        if command.test.check_performance:
            #check http encoding acceptance
            from scanner.models import Results

            encoding = response.getheader("Content-Encoding")
            res = Results(test=command.test, group=RESULT_GROUP.performance, importance=1)
            res.output_desc = unicode(_("HTTP compression"))
            if encoding:
                res.status = RESULT_STATUS.success
                res.output_full = unicode(_("<p>Server agreed to compress HTTP data using the %s method.</p>" % unicode(encoding)))
            else:
                res.status = RESULT_STATUS.warning
                res.output_full = unicode(_("<p>Server did not agree to compress HTTP data using any method. HTTP compression can lower your site's traffic volume and speed up page loading.</p>"))

            headers = ""
            for header in response.getheaders():
                (a,b) = header
                headers += "%s: %s <br>"%(a,b)

            res.output_full += unicode(_("<p>There are different types of compression available, <a href='http://en.wikipedia.org/wiki/HTTP_compression'>wikipedia article</a> covers this subject sufficiently. Headers sent by your webserver: <code>%s</code> </p> "%(headers )))
            res.save()

        #there was no exception - test finished with success
        return STATUS.success
Example No. 12
    def run(self, command):

        robotsurl = urlparse.urlparse(
            command.test.url).scheme + "://" + urlparse.urlparse(
                command.test.url).netloc + "/robots.txt"
        self.log.debug("Looking for: %s " % (robotsurl))

        from scanner.models import Results
        res = Results(test=command.test, group=RESULT_GROUP.seo, importance=2)

        res.output_desc = unicode(_("robots.txt"))
        res.output_full = '<p>The <a href="http://www.robotstxt.org/">robots.txt</a> file is used to control automatic software (like Web Wanderers, Crawlers, or Spiders). The address of robots.txt for your domain should be: <code>%s</code> </p> ' % (
            robotsurl)
        res.status = RESULT_STATUS.success
        output = ""
        try:
            req = urllib2.Request(robotsurl)
            req.add_header('Referer', 'http://webcheck.me/')
            result = urllib2.urlopen(req)

            linecounter = 0
            for line in result.readlines():
                linecounter += 1

                #ignore comments
                if re.match(r'^(\s)?#.*', line):
                    continue
                #ignore empty lines
                if re.match(r'^\s*$', line):
                    continue

                if re.match(
                        r'^\s*(?:user-agent|disallow|allow|sitemap|crawl-delay|noarchive|noindex|nofollow|nopreview|nosnippet|index):\s*.*',
                        line.lower()):
                    output += "%s<br />" % (line)
                else:
                    res.output_full += '<p>There was an error while parsing your robots.txt: <b>bad syntax</b> on line %s: <code>%s</code> </p>' % (
                        linecounter, cgi.escape(line))
                    res.status = RESULT_STATUS.error
                    break
            else:
                res.output_full += '<p>robots.txt is present:<code>%s</code></p>' % (
                    output)

        except urllib2.HTTPError, e:
            res.status = RESULT_STATUS.warning
            res.output_full += '<p>There was no robots.txt. The HTTP code was: %s.</p>' % (
                e.code)
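For reference, a minimal robots.txt that the line-by-line parser above accepts without the bad-syntax branch firing:

    User-agent: *
    Disallow: /private/
    Crawl-delay: 10
    Sitemap: http://example.com/sitemap.xml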
Example No. 13
    def run(self, command):
        domain = command.test.url

        # urllib requires bytestring
        checklink = '{}check?uri={}'.format(w3c_validator,
                                            domain.encode('utf-8'))
        result = urllib.urlopen(checklink)

        output = "status: %s, warnings: %s, errors: %s." % (
            str(result.info().getheader('x-w3c-validator-status')),
            str(result.info().getheader('x-w3c-validator-warnings')),
            str(result.info().getheader('x-w3c-validator-errors')))

        from scanner.models import Results
        res = Results(test=command.test,
                      group=RESULT_GROUP.general,
                      importance=2)
        res.output_desc = unicode(_("W3C Validation"))

        if result.info().getheader('x-w3c-validator-status') == 'Valid':
            res.status = RESULT_STATUS.success
            res.output_full = '<p>W3C Validator marks your website as <b>Valid</b>. %s <a href="%s">Check details at W3C</a></p>' % (
                output, checklink)
        else:
            res.status = RESULT_STATUS.warning
            res.output_full = '<p>W3C Validator marks your website as <b>Invalid</b>. %s <a href="%s">Check details at W3C</a></p>' % (
                output, checklink)

        #TODO
        res.output_full += unicode(
            _("<p>Complying with web standards enhances interoperability and may result in a better Google ranking.</p> "
              ))

        res.save()

        #there was no exception - test finished with success
        return STATUS.success
Example No. 14
    def check_facebook(self, command):
        #normalized_url  string  The normalized URL for the page being shared.
        #share_count int The number of times users have shared the page on Facebook.
        #like_count  int The number of times Facebook users have "Liked" the page, or liked any comments or re-shares of this page.
        #comment_count   int The number of comments users have made on the shared story.
        #total_count int The total number of times the URL has been shared, liked, or commented on.
        #click_count int The number of times Facebook users have clicked a link to the page from a share or like.
        #comments_fbid   int The object_id associated with comments plugin comments for this url. This can be used to query for comments using the comment FQL table.
        #commentsbox_count   int The number of comments from a comments box on this URL. This only includes top level comments, not replies.

        api_url = "https://api.facebook.com/method/fql.query"
        args = {
            'query':
            "select total_count,like_count,comment_count,share_count,click_count from link_stat where url='%s'"
            % command.test.url,
            'format':
            'json',
        }

        template = Template(
            open(
                os.path.join(os.path.dirname(__file__),
                             'templates/facebook.html')).read())

        args_enc = urllib.urlencode(args)
        rawdata = urllib.urlopen(api_url, args_enc).read()
        fb_data = json.loads(rawdata)[0]

        from scanner.models import Results
        res = Results(test=command.test, group=RESULT_GROUP.seo, importance=3)
        res.output_desc = unicode(_("Facebook stats"))
        res.output_full = template.render(Context({'fb_data': fb_data}))

        if fb_data["total_count"] < 10:
            res.status = RESULT_STATUS.warning
        else:
            res.status = RESULT_STATUS.success
        res.save()
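Given the fql.query call above, the response is a JSON array with one row per URL, which is why the code indexes [0]. A round-trip on an invented payload:

    rawdata = ('[{"total_count": 42, "like_count": 10, "comment_count": 5,'
               ' "share_count": 20, "click_count": 7}]')
    fb_data = json.loads(rawdata)[0]
    assert fb_data["total_count"] == 42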
Example No. 15
    def run(self, command):
        from scanner.models import Results

        res = Results(test=command.test,
                      group=RESULT_GROUP.general,
                      importance=3)
        res.output_desc = unicode(
            _("<a href='http://www.surbl.org'>SURBL</a> database check"))
        res.output_full = unicode(
            _("<p>SURBLs are lists of web sites that have appeared in unsolicited messages.</p>"
              ))
        if self.isMarkedAsSpam(command.test.root_domain()):
            res.output_full += unicode(
                _("<p>Your webpage is <b>listed</b> at SURBL. Check it on <a href='http://www.surbl.org/surbl-analysis'>their site</a> </p>"
                  ))
            res.status = RESULT_STATUS.warning
        else:
            res.output_full += unicode(
                _("<p>Ok, your webpage is not listed at SURBL.</p>"))
            res.status = RESULT_STATUS.success
        res.save()

        return STATUS.success
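isMarkedAsSpam is defined elsewhere; SURBL is queried over DNS, so a plausible sketch looks like this (the multi.surbl.org zone is real, but the method body is an assumption, not the original implementation):

    import dns.resolver  # dnspython

    def isMarkedAsSpam(self, domain):
        # a resolvable name under multi.surbl.org means "listed"
        try:
            dns.resolver.query(domain + '.multi.surbl.org', 'A')
            return True
        except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
            return False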
Example No. 16
    def run(self, command):
        self.log.info(" * check spelling: BEGIN")
        from scanner.models import Results
        path = str(command.test.download_path)

        # search html files

        files_with_errors = []
        was_errors = False
        for file_info in command.test.downloaded_files:
            if 'text' not in file_info['mime']:
                continue
            file_path = os.path.join(path, file_info['path'])
            try:
                lang, errors = self.check_file(file_path)
            except CheckSpellingError as e:
                self.log.exception(" * Spellchecking error: %s", e)
                errors = set()
                was_errors = True
                continue
            if errors:
                errors = list(errors)

                files_with_errors.append({
                    'url': file_info['url'],
                    'detected_language': lang,
                    'spelling_errors': errors
                })

        template = Template(
            open(os.path.join(os.path.dirname(__file__),
                              'templates/msg.html')).read())
        r = Results(test=command.test,
                    group=RESULT_GROUP.general,
                    importance=1,
                    status=RESULT_STATUS.warning
                    if files_with_errors else RESULT_STATUS.success)
        r.output_desc = unicode(self.name)
        r.output_full = template.render(Context({'urls': files_with_errors}))
        r.save()

        self.log.info(' * check spelling: END')

        if was_errors:
            return STATUS.unsuccess
        return STATUS.success
Example No. 17
    def run(self, command):
        from scanner.models import Results
        domain = command.test.domain()
        test = command.test

        if not command.test.check_mail:
            return STATUS.success
        try:
            mxes = ""
            answers = dns.resolver.query(domain, 'MX')
            for rdata in answers:
                mxes += "MX %s <br>" % (rdata.to_text())
                #print 'Host', rdata.exchange, 'has preference', rdata.preference

            #check if all IP are public (non-private)
            records = ""
            reversemxes = ""
            noreversemxes = ""
            for mxdata in answers:
                mxips = dns.resolver.query(mxdata.exchange)
                #now we have IP
                for ip in mxips:
                    #check if address is not private
                    if IP(ip.address).iptype() == "PRIVATE":
                        records += "%s %s <br>" % (mxdata.exchange, ip)

                    #check if the IP resolves into a FQDN - needed for email
                    try:
                        mx_dnsname = dns.resolver.query(
                            dns.reversename.from_address(ip.address), "PTR")
                        reversemxes += "%s(%s): %s <br />" % (
                            mxdata.exchange, ip.address, mx_dnsname[0])
                    except dns.resolver.NXDOMAIN:
                        noreversemxes += "%s(%s)<br />" % (mxdata.exchange,
                                                           ip.address)

            #check geolocation
            locations = {}
            points = []

            for server in answers:
                _temp = locations[str(server.exchange)] = geoip.city(
                    str(server.exchange))
                name = u'%s (%s)' % (
                    _temp['city'], _temp['country_name']
                ) if _temp['city'] else _temp['country_name']
                points.append((float(_temp['longitude']),
                               float(_temp['latitude']), name))
            map_image_filename = 'mailservers_geolocations.png'
            map_image_path = os.path.join(test.public_data_path,
                                          map_image_filename)
            map_image_url = os.path.join(test.public_data_url,
                                         map_image_filename)
            make_map(points,
                     size=(6, 3),
                     dpi=350 / 3.0,
                     file_path=map_image_path)
            rendered = render_to_string('scanner/serversmap.html', {
                'locations': locations,
                'map_image_url': map_image_url
            })

            res = Results(test=command.test,
                          group=RESULT_GROUP.performance,
                          importance=1)
            res.output_desc = unicode(_("Mail server(s) geo-location"))
            res.output_full = rendered + unicode(
                _("<p>It's important to have servers in different geographic locations, to increase the reliability of your services.</p>"
                  ))
            res.status = RESULT_STATUS.info
            res.save()
            del res

            #check private IPs
            res = Results(test=command.test,
                          group=RESULT_GROUP.mail,
                          importance=5)
            res.output_desc = unicode(_("No private IP in MX records "))
            if not records:
                res.output_full = unicode(
                    _("<p>All your MX records are public.</p>"))
                res.status = RESULT_STATUS.success
            else:
                res.output_full = unicode(
                    _("<p>The following MX records for this domain are private: <code>%s</code>. A private IP can't be reached from the Internet.</p>"
                      % (records)))
                res.status = RESULT_STATUS.error
            res.save()
            del res

            res = Results(test=command.test,
                          group=RESULT_GROUP.mail,
                          importance=3)
            res.output_desc = unicode(_("Reverse Entries for MX records"))
            if not noreversemxes:
                res.output_full = unicode(
                    _("<p>All your MX records have reverse records: <code>%s</code></p>"
                      % (reversemxes)))
                res.status = RESULT_STATUS.success
            else:
                res.output_full = unicode(
                    _("<p>The following MX records don't have reverse entries: <code>%(noreversemxes)s</code>. The following MX records have reverse entries: <code>%(reversemxes)s</code>.</p>"
                      % {
                          "noreversemxes": noreversemxes,
                          "reversemxes": reversemxes
                      }))
                res.status = RESULT_STATUS.error

            res.output_full += unicode(
                _("<p>All mail servers should have a reverse DNS (PTR) entry for each IP address (RFC 1912). Missing reverse DNS entries will make many mailservers reject your e-mails or mark them as SPAM. </p>"
                  ))
            res.save()
            del res

            spfrecord = ""
            try:
                answers = dns.resolver.query(domain, 'TXT')
                for rdata in answers:
                    if rdata.strings[0].startswith('v=spf1'):
                        spfrecord += rdata.strings[0]
            except dns.resolver.NoAnswer:
                pass

            res = Results(test=command.test,
                          group=RESULT_GROUP.mail,
                          importance=2)
            res.output_desc = unicode(_("SPF records"))

            res.output_full = "<p>Sender Policy Framework (SPF) is an email validation system designed to prevent email spam by detecting email spoofing, a common vulnerability, by verifying sender IP addresses. SPF allows administrators to specify which hosts are allowed to send mail from a given domain by creating a specific SPF record (or TXT record) in the Domain Name System (DNS). <a href='http://en.wikipedia.org/wiki/Sender_Policy_Framework'>More at wikipedia</a></p>"

            if spfrecord:
                res.output_full += unicode(
                    _("<p>OK, you have SPF record defined: <code>%s</code></p>"
                      % (spfrecord)))
                res.status = RESULT_STATUS.success
            else:
                res.output_full += unicode(
                    _("<p>There is no SPF defined for your domain. Consider creating one - it helps a lot dealing with SPAM.</p>"
                      ))
                res.status = RESULT_STATUS.warning
            res.save()
            del res

            return STATUS.success
        except dns.resolver.Timeout, e:
            res = Results(test=command.test,
                          group=RESULT_GROUP.general,
                          importance=3)
            res.output_desc = unicode(_("MX records"))
            res.output_full = unicode(
                _("<p>There was timeout while asking your nameservers for MX records.</p>"
                  ))
            res.status = RESULT_STATUS.error
            res.save()
            self.log.debug("Timeout while asking for MX records: %s" % str(e))
            del res
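The PTR lookup above leans on dnspython's reversename helper; a one-liner shows the query name it builds:

    import dns.reversename

    name = dns.reversename.from_address('192.0.2.1')
    # name is the DNS name 1.2.0.192.in-addr.arpa., ready for a 'PTR' query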
Example No. 18
            res.output_full = unicode(
                _("<p>There are no MX records defined for your domain. Having them is essential to be able to receive emails for this domain.</p>"
                  ))

            if re.search("www\.", command.test.url):
                res.output_full += unicode(
                    _(" <div class='alert'>Please try to run this test again <b>without www prefix</b>.</div>"
                      ))
            res.status = RESULT_STATUS.error
            res.save()
            self.log.debug("NoAnswer while asking for MX records: %s" % str(e))
            del res

        except dns.resolver.NXDOMAIN:
            res = Results(test=command.test,
                          group=RESULT_GROUP.general,
                          importance=4)
            res.output_desc = unicode(_("MX records"))
            res.output_full = unicode(
                _("<p>The query name does not exist. Probably you should define MX entries in your DNS configuration.</p>"
                  ))
            res.status = RESULT_STATUS.error
            res.save()
            self.log.debug("NXDOMAIN while asking for MX records. ")
            del res

        except StandardError, e:
            self.log.exception("%s" % str(e))

        return STATUS.unsuccess
Example No. 19
    def run(self, command):
        from scanner.models import Results

        domain = command.test.domain()
        self.log.debug("Checking whois data for {}".format(domain))

        if extract_tld(domain) not in whois.TLD_RE.keys():
            self.log.debug("Whois for this tld is not supported, aborting.")
            return STATUS.exception

        # works also when subdomain is added
        data = whois.query(domain)

        if data and data.expiration_date:
            dt = data.expiration_date

            res = Results(test=command.test,
                          group=RESULT_GROUP.security,
                          importance=5)

            res.output_desc = unicode(_("Domain expiration date"))
            if dt.date() - date.today() > timedelta(days=20):
                res.output_full = unicode(
                    _("<p>Your domain will be valid until %(date)s. There are still %(days)s days left to renew it.</p>"
                      % {
                          "date": dt,
                          "days": (dt.date() - date.today()).days
                      }))
                res.status = RESULT_STATUS.success
            else:
                res.output_full = unicode(
                    _("<p>Better renew your domain, it's valid until %(date)s! There are only %(days)s days left.</p>"
                      % {
                          "date": dt,
                          "days": (dt.date() - date.today()).days
                      }))
                res.status = RESULT_STATUS.error

            res.output_full += unicode(
                _("<p class='muted'> We use <a href='http://en.wikipedia.org/wiki/Whois'>WHOIS</a> data to check domain expiration date. Depending on your domain registration operator this date may be inaccurate or outdated.</p> "
                  ))
            res.save()
        else:
            self.log.debug(
                "This gTLD doesn't provide a valid domain expiration date in the whois database"
            )

        if data and data.creation_date:
            dt = data.creation_date

            res = Results(test=command.test,
                          group=RESULT_GROUP.seo,
                          importance=1)

            res.output_desc = unicode(_("Domain age"))
            res.output_full = unicode(
                _("<p>Your domain was first registered %(days)s days ago (registration date: %(date)s).</p>"
                  % {
                      "date": dt,
                      "days": (date.today() - dt.date()).days
                  }))
            if date.today() - dt.date() < timedelta(days=500):
                res.output_full += unicode(
                    _("<p><b>Your domain is a fresh one. </b></p>"))
                res.status = RESULT_STATUS.warning
            else:
                res.output_full += unicode(
                    _("<p><b>Good, your domain has a long history.</b></p>"))
                res.status = RESULT_STATUS.success

            res.output_full += unicode(
                _("<p>Domain age matters to a certain extent and newer domains generally struggle to get indexed and rank high in search results for their first few months (depending on other associated ranking factors). Consider buying a second-hand domain name or register domains way before using them.</p> "
                  ))
            res.output_full += unicode(
                _("<p class='muted' > We use <a href='http://en.wikipedia.org/wiki/Whois'>WHOIS</a> data to check domain creation date. Depending on your domain registration operator this date may be inaccurate or outdated.</p> "
                  ))
            res.save()
        else:
            self.log.debug(
                "This gTLD doesn't provide a valid domain creation date in the whois database"
            )

        return STATUS.success
Example No. 20
    def run(self, command):

        from scanner.models import Results
        path = str(command.test.download_path)

        bad_requests = []
        redirects = []
        wrong_mime = []
        for request in command.test.requests:
            http_status_code = int(request["http_status_code"])
            request["http_status_code_txt"] = httplib.responses[
                http_status_code]
            if not (200 <= http_status_code < 400):
                bad_requests.append(request)
            if 300 <= http_status_code < 400:
                redirects.append(request)
            if request["exists"]:
                if request["httrack_mime"] != request["mime"]:
                    wrong_mime.append(request)
            #'download_date': data[0],
            #'remote_size': int(sizes[0]),  # httrack remote size
            #'local_size': int(sizes[1]),  # httrack local size
            #'flags': data[2],
            #'http_status_code': data[3],
            #'status': data[4],
            #'httrack_mime': data[5],  # httrack mime type
            #'etag': data[6],
            #'url': data[7],
            #'path': os.path.relpath(data[8], root_path) if root_path else data[8],
            #'from_url': data[9],
            #'mime': type,
            #'size': size,
            #'exists':

        template = Template(
            open(
                os.path.join(os.path.dirname(__file__),
                             'templates/bad_requests.html')).read())
        res = Results(test=command.test,
                      group=RESULT_GROUP.security,
                      importance=4)
        res.output_desc = unicode(_("HTTP problems"))
        if bad_requests:
            res.status = RESULT_STATUS.error
        else:
            res.status = RESULT_STATUS.success
        res.output_full = template.render(
            Context({'bad_requests': bad_requests}))
        res.save()

        template = Template(
            open(
                os.path.join(os.path.dirname(__file__),
                             'templates/redirects.html')).read())
        res = Results(test=command.test,
                      group=RESULT_GROUP.performance,
                      importance=2)
        res.output_desc = unicode(_("Redirects"))
        if len(redirects) > 5:
            res.status = RESULT_STATUS.warning
        else:
            res.status = RESULT_STATUS.info

        res.output_full = template.render(
            Context({
                'redirects': redirects,
                'rnumber': len(redirects)
            }))
        res.save()

        template = Template(
            open(os.path.join(os.path.dirname(__file__),
                              'templates/mime.html')).read())
        res = Results(test=command.test,
                      group=RESULT_GROUP.security,
                      importance=2)
        res.output_desc = unicode(_("MIME"))
        if len(wrong_mime) > 3:
            res.status = RESULT_STATUS.warning
        else:
            res.status = RESULT_STATUS.info
        res.output_full = template.render(Context({'wrong_mime': wrong_mime}))
        res.save()
        return STATUS.success
Example No. 21
    def run(self, command):
        from scanner.models import Results
        domain = command.test.domain()
        test = command.test

        try:
            #A
            records = ""
            answers = dns.resolver.query(domain, 'A')
            for rdata in answers:
                records += "A %s <br>" % (rdata.to_text())

            res = Results(test=command.test,
                          group=RESULT_GROUP.general,
                          importance=5)
            res.output_desc = unicode(_("A records (IPv4)"))
            if len(answers) > 1:
                res.output_full = unicode(
                    _("<p>Your nameserver returned %(number)s A records: <code>%(records)s</code></p>"
                      % {
                          "number": len(answers),
                          "records": records
                      }))
                res.status = RESULT_STATUS.success
            elif len(answers) == 1:
                res.output_full = unicode(
                    _("<p>Your nameserver returned %(number)s A record: <code>%(records)s</code></p> <p>Having multiple A records with different IP can load-balance traffic.</p>"
                      % {
                          "number": len(answers),
                          "records": records
                      }))
                res.status = RESULT_STATUS.success
            else:
                res.output_full = unicode(
                    _("<p>There are no A records for this domain! This means that nobody can reach your website.</p>"
                      ))
                res.status = RESULT_STATUS.error
            res.save()
            del records
            del res

            #check geolocation
            locations = {}
            points = []

            for server in answers:
                _temp = locations[str(server.address)] = geoip.city(
                    str(server.address))
                name = u'%s (%s)' % (
                    _temp['city'], _temp['country_name']
                ) if _temp['city'] else _temp['country_name']
                points.append((float(_temp['longitude']),
                               float(_temp['latitude']), name))

            # we need only 1 map probably
            map_image_filename = 'webservers_geolocations.png'
            map_image_path = os.path.join(test.public_data_path,
                                          map_image_filename)
            map_image_url = os.path.join(test.public_data_url,
                                         map_image_filename)
            make_map(points,
                     size=(6, 3),
                     dpi=350 / 3.0,
                     file_path=map_image_path)
            rendered = render_to_string('scanner/serversmap.html', {
                'locations': locations,
                'map_image_url': map_image_url
            })

            res = Results(test=command.test,
                          group=RESULT_GROUP.performance,
                          importance=1)
            res.output_desc = unicode(_("Web server(s) geo-location"))
            res.output_full = rendered + unicode(
                _("<p>It is important to have servers in different geographic locations, to increase reliability of your services.</p>"
                  ))
            res.status = RESULT_STATUS.info
            res.save()
            del locations
            del res

            #check if all IP are public (non-private)
            records = ""
            for rdata in answers:
                if IP(rdata.address).iptype() == "PRIVATE":
                    records += "%s <br" % rdata.address

            res = Results(test=command.test,
                          group=RESULT_GROUP.general,
                          importance=4)
            res.output_desc = unicode(_("No private IP in A records "))
            if not records:
                res.output_full = unicode(
                    _("<p>All your A records are public.</p>"))
                res.status = RESULT_STATUS.success
            else:
                res.output_full = unicode(
                    _("<p>The following A records for this domain are private: <code>%s</code>. A private IP can't be reached from the Internet.</p>"
                      % (records)))
                res.status = RESULT_STATUS.error
            res.save()
            del records
            del res

        except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
            res = Results(test=command.test,
                          group=RESULT_GROUP.general,
                          importance=5)
            res.output_desc = unicode(_("A records (IPv4)"))
            res.output_full = unicode(
                _("<p><strong>Domain not found!</strong> Your webpage is currently unreachable; please check your DNS settings, it should contain at least one A record.</p>"
                  ))
            res.status = RESULT_STATUS.error
            res.save()
            del res

        #TODO: check AAAA (IPv6)

        return STATUS.success
Example No. 22
    def run(self, command):

        if not command.test.check_mail:
            return STATUS.success

        from scanner.models import Results
        domain = command.test.domain()

        # list of blacklists to check
        blacklists = [
            'dsn.rfc-ignorant.org',
            'bl.spamcop.net',
            'zen.spamhaus.org',
            'dnsbl.sorbs.net',
        ]

        dnswl_cat = {
            2: "Financial services",
            3: "Email Service Providers",
            4: "Organisations (both for-profit [ie companies] and non-profit)",
            5: "Service/network providers",
            6: "Personal/private servers",
            7: "Travel/leisure industry",
            8: "Public sector/governments",
            9: "Media and Tech companies",
            10: "some special cases",
            11: "Education, academic",
            12: "Healthcare",
            13: "Manufacturing/Industrial",
            14: "Retail/Wholesale/Services",
            15: "Email Marketing Providers",
            255: "unknown",
        }

        dnswl_score = {
            0: "none <small>(only avoid outright blocking)</small>",
            1: "low <small>(reduce chance of false positives)</small>",
            2:
            "medium <small>(make sure to avoid false positives but allow override for clear cases)</small>",
            3: "high <small>(avoid override)</small>",
            255: "unknown",
        }

        try:
            #ip list of all mail servers
            mxips = []
            answers = dns.resolver.query(domain, 'MX')
            for mxdata in answers:
                mxips.append(dns.resolver.query(mxdata.exchange)[0].address)

            results = ""
            blacklisted = False

            for ip in mxips:
                for bl in blacklists:
                    tmp = str(ip).split('.')
                    tmp.reverse()
                    rev_ip = '.'.join(tmp)
                    querydomain = rev_ip + '.' + bl
                    try:
                        answers = dns.resolver.query(querydomain)
                        results += "%s <b>listed</b> on %s <br>" % (ip, bl)
                        blacklisted = True
                    except dns.resolver.NXDOMAIN:
                        results += "%s not listed on %s <br>" % (ip, bl)
                    except dns.resolver.Timeout:
                        self.log.debug("RBL Timeout: %s while checking: %s" %
                                       (bl, ip))
                results += "<br />"

            res = Results(test=command.test,
                          group=RESULT_GROUP.mail,
                          importance=4)
            res.output_desc = unicode(
                _("Mailservers on DNSBL blacklists (RBL)"))
            if not blacklisted:
                res.output_full = unicode(
                    _("<p>None of your mailservers are listed on RBL. Details: <code>%s</code></p>"
                      % (results)))
                res.status = RESULT_STATUS.success
            else:
                res.output_full = unicode(
                    _("<p>Some of your mailservers are listed on an RBL blacklist. Details: <code>%s</code></p> <p>Being listed there may cause your recipients to find your mail in their SPAM folder.</p>"
                      % (results)))
                res.status = RESULT_STATUS.error
            res.save()

            # whitelist
            results = ""
            whitelisted = False
            for ip in mxips:
                tmp = str(ip).split('.')
                tmp.reverse()
                rev_ip = '.'.join(tmp)
                querydomain = rev_ip + '.list.dnswl.org'
                try:
                    answer = dns.resolver.query(querydomain)[0].address

                    score = dnswl_score[int(answer.split(".")[3])]
                    category = dnswl_cat[int(answer.split(".")[2])]
                    results += "%s listed (score:%s) in category %s<br>" % (
                        ip, score, category)
                    whitelisted = True
                except dns.resolver.NXDOMAIN:
                    results += "%s <b>not listed</b><br>" % (ip)
                except dns.resolver.Timeout:
                    self.log.debug("DNSWL Timeout while checking: %s" % ip)

            res = Results(test=command.test,
                          group=RESULT_GROUP.mail,
                          importance=1)
            res.output_desc = unicode(_("Mailservers on DNSWL whitelist"))
            res.output_full = unicode(
                _("<p>DNSWL is a community driven whitelist of mailservers aiming to prevent false-positives in spam filtering.</p> "
                  ))
            if not whitelisted:
                res.output_full += unicode(
                    _("<p>None of your mailservers are listed on <a href='http://www.dnswl.org/'>DNSWL</a>. Details: <code>%s</code></p> <p>Please consider <a href='http://www.dnswl.org/request.pl'>adding</a> your mailservers to DNSWL to improve your success mail delivery rate.</p>"
                      % (results)))
                res.status = RESULT_STATUS.warning
            else:
                res.output_full += unicode(
                    _("<p>Your mailservers are listed on DNSWL whitelist. Details: <code>%s</code></p>"
                      % (results)))
                res.status = RESULT_STATUS.success
            res.save()

            return STATUS.success
        except (dns.resolver.Timeout, dns.resolver.NoAnswer,
                dns.resolver.NXDOMAIN), e:
            self.log.debug("dns problem when asking for MX records: %s" %
                           str(e))
            return STATUS.unsuccess
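The octet-reversal above is the standard DNSBL query convention; the same construction as a compact standalone snippet:

    ip = '192.0.2.25'
    # DNSBLs are queried as <reversed-ip>.<list-zone>
    rev_ip = '.'.join(reversed(ip.split('.')))
    querydomain = rev_ip + '.zen.spamhaus.org'
    # querydomain == '25.2.0.192.zen.spamhaus.org'; NXDOMAIN means not listed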
Example No. 23
    def run(self, command):
        if not command.test.check_seo:
            return STATUS.success

        from scanner.models import Results
        domain = command.test.domain()

        rank = get_pagerank(domain)

        res = Results(test=command.test, group=RESULT_GROUP.seo, importance=1)
        try:
            rank = int(rank)
        except ValueError:
            rank = None
        _rank = rank if rank is not None else _('not known')

        res.output_desc = unicode(_("Google pagerank") )
        res.output_full = unicode(_("<p>A <a href='http://en.wikipedia.org/wiki/PageRank'>PageRank</a> results from a mathematical algorithm based on the graph, the webgraph, created by all World Wide Web pages as nodes and hyperlinks as edges.</p><p>Your website pagerank is %s.</p>")) % _rank

        if rank is not None and rank < 2:
            res.status = RESULT_STATUS.warning
        else:
            res.status = RESULT_STATUS.info
        res.save()


        (popularity_rank, reach_rank) = get_alexa_rank(domain)
        res = Results(test=command.test, group=RESULT_GROUP.seo, importance=1)
        res.output_desc = unicode(_("Alexa rank"))
        res.output_full = unicode(_("<p>Alexa collects statistics about visits by website users through the Alexa Toolbar. Based on the collected data, Alexa computes a site ranking.</p> <ul> <li>Popularity rank: %(pop_rank)s</li> <li>Reachability rank: %(reach_rank)s</li></ul>" % {
            "pop_rank": popularity_rank,
            "reach_rank": reach_rank
        }))
        if popularity_rank < 0 or reach_rank < 0:
            res.status = RESULT_STATUS.warning
        else:
            res.status = RESULT_STATUS.info
        res.save()

        return STATUS.success
Example No. 24
    def run(self, command):
        if not command.test.check_performance:
            return STATUS.success

        #domain = command.test.domain
        path = str(command.test.download_path)  # fix UTF-8 path error
        test = command.test
        self.log.debug("Recursive check image files size in %s " % path)

        optiimgs = []
        corrupted_imgs = []

        total_bytes = 0
        total_bytes_saved = 0

        optimizer = ImageOptimizer()
        optimized_files_path = os.path.join(command.test.public_data_path, self.OPTIMIZED_IMAGES_DIR_NAME)
        optimized_files_url = os.path.join(command.test.public_data_url, self.OPTIMIZED_IMAGES_DIR_NAME)
        if not os.path.exists(optimized_files_path):
            os.makedirs(optimized_files_path)

        for file_info in test.downloaded_files:
            file_path = os.path.join(test.download_path, file_info['path'])

            #mimetypes is much faster than identify, use it to filter out non-images
            if 'image' not in file_info['mime']:
                continue

            self.log.debug("File: %s size: %s" % (file_path, os.path.getsize(file_path)))

            try:
                optimized_file_path = optimizer.optimize_image(file_path, optimized_files_path)
            except CorruptFile as e:
                a = {"original_file_url": file_info['url'],
                     "error_message": str(e)}
                corrupted_imgs.append(a)
                continue

            if not optimized_file_path:
                # if optimization was not done correctly or final file
                # was larger than original
                continue
            optimized_file_url = os.path.join(optimized_files_url, os.path.basename(optimized_file_path))

            bytes_saved = os.path.getsize(file_path) - os.path.getsize(optimized_file_path)

            if bytes_saved < 1:
                continue

            self.log.debug("Optimized file is %s (new size: %s)" % (optimized_file_path, os.path.getsize(optimized_file_path)))

            try:
                percent_saved = (float(bytes_saved) / os.path.getsize(file_path)) * 100.0
            except ZeroDivisionError:
                percent_saved = 0.0
            a = {"original_file_url": file_info['url'],
                 "original_file_size": os.path.getsize(file_path),
                 "optimized_file_path": optimized_file_path,
                 "optimized_file_url": optimized_file_url,
                 "optimized_file_size": os.path.getsize(optimized_file_path),
                 "bytes_saved": bytes_saved,
                 "percent_saved": percent_saved,
                 }
            optiimgs.append(a)
            total_bytes += os.path.getsize(file_path)
            total_bytes_saved += bytes_saved
        try:
            total_percent_saved = (float(total_bytes_saved) / total_bytes) * 100.0
        except ZeroDivisionError:
            total_percent_saved = 0.0

        template = Template(open(os.path.join(os.path.dirname(__file__), 'templates/msg.html')).read())

        from scanner.models import Results
        res = Results(test=command.test, group=RESULT_GROUP.performance, importance=2)
        res.output_desc = unicode(_("Image optimization"))
        res.output_full = template.render(
            Context({'optimized_images': optimized_imgs,
                     'total_bytes': total_bytes,
                     'total_bytes_saved': total_bytes_saved,
                     'total_percent_saved': total_percent_saved}))

        if total_bytes_saved < 500 * 1024:
            res.status = RESULT_STATUS.success
        else:
            res.status = RESULT_STATUS.warning
        res.save()

        if corrupted_imgs:
            template = Template(open(os.path.join(os.path.dirname(__file__), 'templates/corrupted.html')).read())
            res = Results(test=command.test, group=RESULT_GROUP.general, importance=3, status=RESULT_STATUS.warning)
            res.output_desc = unicode(_("Images validation"))
            res.output_full = template.render(Context({'corrupted_images': corrupted_imgs}))
            res.save()

        # the plugin finished, so it's a success
        return STATUS.success
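
The ImageOptimizer used above also lives elsewhere in the project. A minimal sketch of the optimize_image contract the code relies on (lossless recompression into a target directory, returning None when nothing smaller could be produced), assuming the external jpegtran and optipng tools are installed; the project's real implementation and its CorruptFile exception may differ:

import os
import shutil
import subprocess

class CorruptFile(Exception):
    pass  # stand-in for the project's own exception

class ImageOptimizer(object):
    def optimize_image(self, src_path, dst_dir):
        """Write an optimized copy of src_path into dst_dir.

        Return the new path, or None if no smaller file was produced."""
        dst_path = os.path.join(dst_dir, os.path.basename(src_path))
        ext = os.path.splitext(src_path)[1].lower()
        if ext in ('.jpg', '.jpeg'):
            cmd = ['jpegtran', '-optimize', '-copy', 'none',
                   '-outfile', dst_path, src_path]
        elif ext == '.png':
            shutil.copy(src_path, dst_path)  # optipng optimizes in place
            cmd = ['optipng', '-quiet', dst_path]
        else:
            return None
        if subprocess.call(cmd) != 0:
            if os.path.exists(dst_path):
                os.unlink(dst_path)
            raise CorruptFile("optimizer failed on %s" % src_path)
        if os.path.getsize(dst_path) >= os.path.getsize(src_path):
            os.unlink(dst_path)
            return None
        return dst_path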
Exemplo n.º 25
0
	def run(self, command):
		url = command.test.url
		from scanner.models import Results

		timing = {}
		max_loadtime = 0

		screenshots_path = os.path.join(command.test.public_data_path, self.SCREENSHOTS_DIR_NAME)
		screenshots_url = os.path.join(command.test.public_data_url, self.SCREENSHOTS_DIR_NAME)
		if not os.path.exists(screenshots_path):
			os.makedirs(screenshots_path)

		for browser in self.browsers:
			screenshot_filename = hashlib.sha1(str(time.time())).hexdigest() + '.png'
			screenshot_file_path = os.path.join(screenshots_path, screenshot_filename)
			screenshot_url = os.path.join(screenshots_url, screenshot_filename)

			browsername = '_'.join([browser.get(key, '') for key in ('platform', 'browseName', 'version')])

			self.log.debug("Taking screenshot with %s" % browser)

			signal.signal(signal.SIGALRM, alarm_handler)
			signal.alarm(3 * 60)  # 3 minutes

			try:
				# DesiredCapabilities entries are shared class-level dicts,
				# so copy them before setting per-run keys
				if browser["browseName"] == "internet explorer":
					desired_capabilities = webdriver.DesiredCapabilities.INTERNETEXPLORER.copy()
				elif browser["browseName"] == "firefox":
					desired_capabilities = webdriver.DesiredCapabilities.FIREFOX.copy()
				elif browser["browseName"] == "chrome":
					desired_capabilities = webdriver.DesiredCapabilities.CHROME.copy()
				elif browser["browseName"] == "opera":
					desired_capabilities = webdriver.DesiredCapabilities.OPERA.copy()
				else:
					self.log.warning("Unknown browser %r - please check the configuration" % browser["browseName"])
					continue

				if "version" in browser:
					desired_capabilities['version'] = browser["version"]

				if "platform" in browser:
					desired_capabilities['platform'] = browser["platform"]

				dbrowser = webdriver.Remote(
					desired_capabilities=desired_capabilities,
					command_executor=settings.SELENIUM_HUB,
				)

				# http://seleniumhq.org/docs/04_webdriver_advanced.html
				dbrowser.implicitly_wait(30)
				time.sleep(1)
				dbrowser.get(url)

				# give async JS a moment to finish loading
				time.sleep(2)

				dbrowser.get_screenshot_as_file(screenshot_file_path)

				optimizer = ImageOptimizer()
				optimized_screenshot_path = optimizer.optimize_image(screenshot_file_path, screenshots_path)
				if optimized_screenshot_path:
					# if we have optimized image we do not care about old one
					os.unlink(screenshot_file_path)
					screenshot_filename = os.path.basename(optimized_screenshot_path)
					screenshot_file_path = optimized_screenshot_path
					screenshot_url = os.path.join(screenshots_url, screenshot_filename)

				timing_data = dbrowser.execute_script("return (window.performance || window.webkitPerformance || window.mozPerformance || window.msPerformance || {}).timing;")

				if timing_data and (browser["browseName"] != "internet explorer"):
					timing[browsername] = []
					for _time in ["navigationStart", "domainLookupStart", "domainLookupEnd", "connectStart", "requestStart", "domLoading", "domInteractive", "domComplete", "loadEventEnd"]:
						timing[browsername].append((_time, timing_data[_time] - timing_data["navigationStart"]))

					tmp_timing = timing_data["loadEventEnd"] - timing_data["navigationStart"]
					if tmp_timing > max_loadtime:
						max_loadtime = tmp_timing
				else:
					self.log.warning("There was no timing_data for %s" % (browsername))

				dbrowser.quit()
				signal.alarm(0)

				# do this after quitting the browser, to save Selenium time
				screenshot_thumb_path = self.crop_screenshot(screenshot_file_path)
				screenshot_thumb_url = os.path.join(screenshots_url, os.path.basename(screenshot_thumb_path))

				template = Template(open(os.path.join(os.path.dirname(__file__), 'screenshots.html')).read())
				res = Results(test=command.test, group=RESULT_GROUP.screenshot, status=RESULT_STATUS.info, output_desc=browsername)
				ctx = {'filename': screenshot_url,
					'thumb': screenshot_thumb_url,
					'browsername': browsername,
					'browserversion': dbrowser.capabilities['version']}
				res.output_full = template.render(Context(ctx))
				res.save()
				self.log.debug("Saved screenshot (result:%r))" % res)

			except WebDriverException as e:
				self.log.warning("WebDriverException: %s" % e)
			except Alarm:
				self.log.warning("Shoot timeout")
Exemplo n.º 26
0
                    openrelay += "<b>mailserver: %s</b><br />&nbsp; RCPT TO: [email protected] <br />&nbsp; %s %s <br /><br />" % (mx, code, msg)
                else:
                    noopenrelay += "<b>mailserver: %s</b><br />&nbsp; RCPT TO: [email protected] <br />&nbsp; %s %s <br /><br />" % (mx, code, msg)
                foo.quit()

            except smtplib.SMTPServerDisconnected:
                noconnect += "%s (timeout)<br />" % mx
                noconnect_count += 1

            except (smtplib.socket.error, smtplib.SMTPConnectError):
                noconnect += "%s<br />" % mx
                noconnect_count += 1

        res = Results(test=command.test, group=RESULT_GROUP.mail, importance=1)
        res.output_desc = unicode(_("accept mail to postmaster@"))
        res.output_full = unicode(_("<p>According to RFC 822, RFC 1123 and RFC 2821, all mailservers should accept mail to postmaster.</p> "))

        if not nopostmaster:
            res.status = RESULT_STATUS.success
            res.output_full += unicode(_("<p>All of your mailservers accept mail to postmaster@%(domain)s: <code>%(postmaster)s</code></p>") % {
                "domain": domain,
                "postmaster": postmaster,
            })
        else:
            res.status = RESULT_STATUS.warning
            res.output_full += unicode(_("<p>Mailservers that do not accept mail to postmaster@%(domain)s: <code>%(nopostmaster)s</code></p>") % {
                "domain": domain,
                "nopostmaster": nopostmaster,
            })
        res.save()
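
The top of this snippet is truncated; a sketch of the per-MX probe the surviving lines imply, assuming the MX hostnames were resolved earlier (e.g. with dnspython). The helper name and recipient address here are hypothetical:

import smtplib

def probe_open_relay(mx, domain, timeout=30):
    # Hypothetical reconstruction of the truncated loop body above:
    # a 250/251 reply to an off-domain RCPT suggests an open relay.
    server = smtplib.SMTP(mx, 25, timeout=timeout)
    server.helo()
    server.mail("relay-test@%s" % domain)
    code, msg = server.rcpt("postmaster@external.invalid")
    server.quit()
    return code, msg

In the real plugin this logic sits inside the try block shown above, so SMTPServerDisconnected, socket errors and SMTPConnectError are collected into the noconnect report rather than raised.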
Exemplo n.º 27
0
			finally:
				signal.alarm(0)

		if command.test.check_seo:
			res = Results(test=command.test, group=RESULT_GROUP.performance, status=RESULT_STATUS.success)
			res.output_desc = unicode(_("Webpage load time"))
			template = Template(open(os.path.join(os.path.dirname(__file__), 'pageload.html')).read())
			template_data = {
				'timing': timing,
				'max_loadtime': max_loadtime,
			}
			res.output_full = template.render(Context(template_data))
			if max_loadtime > 15000:
				res.status = RESULT_STATUS.error
			elif max_loadtime > 5000:
				res.status = RESULT_STATUS.warning
			res.save()

		# no exception was raised, so the test finished successfully
		return STATUS.success
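
The self.browsers list driving the screenshot loop is configuration defined elsewhere; a plausible shape, matching the keys the code reads (including the project's own spelling of the "browseName" key). The concrete platforms and versions here are illustrative assumptions:

browsers = [
    {'platform': 'WINDOWS', 'browseName': 'firefox', 'version': '25'},
    {'platform': 'WINDOWS', 'browseName': 'internet explorer', 'version': '10'},
    {'platform': 'LINUX', 'browseName': 'chrome', 'version': '31'},
]

The load time compared against the 5000 ms and 15000 ms thresholds is loadEventEnd - navigationStart from the Navigation Timing API, i.e. the wall-clock time from the start of navigation to the end of the load event, with max_loadtime keeping the worst value seen across browsers.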