Example #1
    def getVersion(self, no_pull=False):
        for version in self.getHistory():
            self.version = version
            self.buildurl = Url.join(self.apiurl, "projects",
                                     self.account_name, self.project_name,
                                     "build", self.version)
            self.buildjson = self.getJson(self.buildurl)
            if no_pull:
                # The presence of 'pullRequestId' marks a pull-request build; skip it.
                try:
                    self.buildjson['build']['pullRequestId']
                    continue
                except KeyError:
                    pass
            jobs = self.buildjson["build"]["jobs"]
            if len(jobs) > 1:
                for job in jobs:
                    if "elease" in job["name"]:
                        self.jobid = job["jobId"]
            elif len(jobs) == 1:
                self.jobid = jobs[0]["jobId"]
            else:
                continue

            self.artifactsurl = Url.join(self.apiurl, "buildjobs", self.jobid,
                                         "artifacts")
            self.artifactsjson = self.getJson(self.artifactsurl)
            if len(self.artifactsjson) == 0:
                continue
            return self.version
Example #2
    def run(self):

        DNSLOG_HOST = 'dseje4.ceye.io'

        # run
        for url in self.target_urls:

            # Random marker for this target
            sign = Random.id_generator(size=10)

            # Per-target DNSLOG callback host; the base DNSLOG_HOST stays unchanged across targets
            dnslog_host = '{}.{}'.format(sign, DNSLOG_HOST)

            # Build payloads pointing at the callback host
            payloads = [payload.format(dnslog_host)
                        for payload in payloads_tpl]

            # Double Quotes
            d_quotes = [
                '"{}"'.format(payload) for payload in payloads
            ]
            payloads.extend(d_quotes)

            # Build the header payloads
            headers = {}
            for k, v in headers_tpl.iteritems():
                if k == 'Referer':
                    headers[k] = v.format(url, dnslog_host)
                    continue
                headers[k] = v.format(dnslog_host)
            
            p = Pollution(payloads)

            urls = []

            for i in p.payload_generator(url):
                urls.append(i)
                print Url.urldecode(i)

            logging.info('{0} => {1}'.format(url, sign))

            print 'Payload Number:', len(urls)

            # Start
            rs = (grequests.get(u, headers=headers, allow_redirects=False)
                  for u in urls)

            grequests.map(rs, gtimeout=BILID_REQUEST_TIMEOUT)
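
The per-target sign is what ties a later DNS-log hit back to the URL that triggered it. Random.id_generator is project code not shown in these examples; the following is a minimal standard-library stand-in (the chars default is an assumption, only size=10 appears in the original call):

import random
import string

def id_generator(size=10, chars=string.ascii_lowercase + string.digits):
    # Hypothetical stand-in for Random.id_generator: a random marker used as
    # the DNS-log subdomain for a single target URL.
    return ''.join(random.choice(chars) for _ in range(size))

# '{}.{}'.format(id_generator(), 'dseje4.ceye.io') -> e.g. 'k3f8z1q0mw.dseje4.ceye.io'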
Example #3
 def getReleases(self):
     self.releasesurl = Url.join(self.apiurl, self.account_name,
                                 self.project_name, "releases")
     releases = self.getJson(self.releasesurl)
     if "message" in releases:
         raise ValueError(releases["message"])
     return releases
Example #4
 def payload_generator(self, query, all_qs=False, append=True):
     ret = list()
     # If the input is a full URL, split it apart first
     if self._is_url(query):
         if self._is_query(query):
             url = query
             self.parse = Url.url_parse(url)
             query = self.parse.query
             # Pollute every query-string parameter at once
             if all_qs:
                 for payload in self.payloads:
                     qs = self._pollution_all(query, payload, append=append)
                     url = self._url_unparse(qs)
                     ret.append(url)
             else:
                 for payload in self.payloads:
                     for qs in self._pollution(query,
                                               payload,
                                               append=append):
                         url = self._url_unparse(qs)
                         ret.append(url)
     else:
         # Bare parameters: pollute the query string directly
         if all_qs:
             for payload in self.payloads:
                 ret.append(
                     self._pollution_all(query, payload, append=append))
         else:
             for payload in self.payloads:
                 for qs in self._pollution(query, payload, append=append):
                     ret.append(qs)
     return ret
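
A hedged usage sketch: the Pollution class and the list-of-payloads constructor follow Example #2, while the payload string and URLs here are purely illustrative. Passing a full URL yields one rewritten URL per parameter/payload pair; passing a bare query string yields rewritten query strings.

# Hypothetical usage; Pollution comes from the surrounding project.
p = Pollution(['<img src=x onerror=alert(1)>'])

# Full URL in -> polluted URLs out (one per query-string parameter).
for u in p.payload_generator('http://example.com/search?q=test&page=1'):
    print(u)

# Bare query string in -> polluted query strings out.
for qs in p.payload_generator('q=test&page=1'):
    print(qs)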
Example #5
 def getHistory(self):
     self.historyurl = Url.join(self.apiurl, "projects", self.account_name,
                                self.project_name,
                                "history?recordsNumber=100" + self.branch)
     self.json = self.getJson(self.historyurl)
     for build in self.json["builds"]:
         yield build["version"]
Example #6
    def payload_generator(self, query, all_qs=False, append=True):

        ret = list()
        ret2 = list()

        self.parse = Url.url_parse(query)
        self.query = self.parse.query
        self.append = append
        # Bare query string such as "a=1&b=2": no scheme/netloc, so use the input as the query
        if not self.parse.scheme or not self.parse.netloc:
            self.query = query

        if all_qs:
            for payload in self.payloads:
                ret.append(self._pollution_all(
                    self.query, payload))
        else:
            for payload in self.payloads:
                for qs in self._pollution(self.query, payload):
                    ret.append(qs)

        # Reassemble full URLs when the input was a complete URL
        for i in ret:
            if self.parse.netloc:
                ret2.append(self._url_unparse(i))
            else:
                ret2.append(i)

        return ret2
Example #7
 def getVersion(self, regex, from_page=False, index=0):
     if from_page:
         version = re.findall(regex, self.page)[index]
     else:
         self.dl_filename = Url.basename(self.dlurl)
         version = re.findall(regex, self.dl_filename)[index]
     return version
Example #8
 def _url_unparse(self, qs):
     url = Url.url_unparse(
         (self.parse.scheme,
          self.parse.netloc,
          self.parse.path,
          self.parse.params,
          qs,
          self.parse.fragment)
     )
     return url
Example #9
 def page_regex_url(self,
                    pageurl,
                    regex,
                    index,
                    pagetext=None,
                    try_redirect=False):
     if pagetext is None:
         req = self.requests_obj.get(url=pageurl, headers=self.headers)
         pagetext = req.text
     outurl = re.findall(regex, pagetext)[index]
     outurl = unescape(outurl)
     if outurl.startswith("/"):
         site = Url.sitename(pageurl)
         outurl = Url.join(site, outurl)
     elif not outurl.startswith("http"):
         outurl = Url.join(pageurl, outurl)
     if try_redirect:
         req = self.requests_obj.head(outurl)
         if req.status_code in (302, 303):
             outurl = req.headers['Location']
     return outurl
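
For comparison, a self-contained sketch of the same scrape-and-resolve pattern using only requests and the standard library; urllib.parse.urljoin stands in for the Url.sitename / Url.join handling of absolute and relative matches, so the behaviour is approximate rather than identical.

import re
import requests
from html import unescape
from urllib.parse import urljoin

def find_link(pageurl, regex, index=0, try_redirect=False):
    # Fetch the page, take the index-th regex match, and resolve it to an absolute URL.
    pagetext = requests.get(pageurl).text
    outurl = unescape(re.findall(regex, pagetext)[index])
    outurl = urljoin(pageurl, outurl)  # handles "/path", relative, and absolute matches
    if try_redirect:
        resp = requests.head(outurl, allow_redirects=False)
        if resp.status_code in (302, 303):
            outurl = resp.headers['Location']
    return outurl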
Example #10
 def getDlUrl(self, keyword=[], no_keyword=[], filetype="7z", index=0):
     match = 0
     for file_info in self.getFileList():
         if self.filename_check(file_info["file"], keyword, no_keyword,
                                filetype):
             if match == index:
                 dlurl = Url.join(self.download_prefix,
                                  file_info["file"].lstrip("/"))
                 self.version = str(file_info["date"])
                 return dlurl
             else:
                 match += 1
Example #11
    def run(self):
        for url in self.target_urls:

            signs = [i.get('sign') for i in payloads]

            p = URLPollution([payload.get('payload') for payload in payloads])

            # Break the target URL into its parts
            parse = Url.url_parse(url)
            query = parse.query

            urls = []
            for i in p.payload_generator(url, append=False):
                urls.append(i)
                print Url.urldecode(i)

            # Start
            print 'Payload Number:', len(urls)
            rs = (grequests.get(u, headers=HEADERS, allow_redirects=False)
                  for u in urls)
            response = grequests.map(rs, gtimeout=REQUEST_TIMEOUT)
            for i in response:
                if i is not None:
                    for payload in payloads:
                        sign = payload.get('sign')
                        name = payload.get('name')
                        if sign in i.content:
                            print Url.urldecode(i.url), sign, name
                            logging.info('{0} => {1}'.format(
                                Url.urldecode(i.url), name))
Example #12
    def _pollution_all(self, query, payload):

        qs = self._qs2dict(query)
        if not isinstance(qs, dict):
            return False

        for i in qs:
            if self.append:
                qs[i] += payload
            else:
                qs[i] = payload
        qs = Url.build_qs(qs)
        return qs
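
A hedged, standard-library illustration of what _pollution_all produces, assuming Url.qs_parse and Url.build_qs behave like urllib.parse (the real helpers are not shown here, and append is a plain parameter instead of self.append): every parameter receives the payload within a single query string.

from urllib.parse import parse_qs, urlencode

def pollute_all(query, payload, append=True):
    # Inject the payload into every parameter of one query string.
    qs = {k: v[0] for k, v in parse_qs(query).items()}
    for key in qs:
        qs[key] = qs[key] + payload if append else payload
    return urlencode(qs)

# pollute_all('a=1&b=2', '<x>') -> 'a=1%3Cx%3E&b=2%3Cx%3E'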
Example #13
 def getDlUrl(self, keyword=[], no_keyword=[], filetype="7z", index=0):
     try:
         match_urls = []
         for fileinfo in self.artifactsjson:
             filename = fileinfo["fileName"]
             if self.filename_check(filename, keyword, no_keyword,
                                    filetype):
                 dlurl = Url.join(self.apiurl, "buildjobs", self.jobid,
                                  "artifacts", filename)
                 match_urls.append(dlurl)
         return match_urls[index]
     except AttributeError:
         raise  # self.artifactsjson is set by getVersion(); call it before getDlUrl()
Example #14
    def _pollution(self, query, payload):
        qs = self._qs2dict(query)
        if not isinstance(qs, dict):
            return False

        ret = list()
        for i in qs.keys():
            temp = qs.copy()
            if self.append:
                temp[i] += payload
            else:
                temp[i] = payload
            temp = Url.build_qs(temp)
            ret.append(temp)
        return ret
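
Unlike _pollution_all, this variant pollutes one parameter at a time and returns one query-string variant per parameter. A hedged standard-library sketch of the same idea, under the same urllib.parse assumption as above:

from urllib.parse import parse_qs, urlencode

def pollute_each(query, payload, append=True):
    # One variant per parameter: only that parameter carries the payload.
    qs = {k: v[0] for k, v in parse_qs(query).items()}
    variants = []
    for key in qs:
        temp = dict(qs)
        temp[key] = temp[key] + payload if append else payload
        variants.append(urlencode(temp))
    return variants

# pollute_each('a=1&b=2', '<x>') -> ['a=1%3Cx%3E&b=2', 'a=1&b=2%3Cx%3E']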
Example #15
 def _qs2dict(self, query):
     return Url.qs_parse(query)
Example #16
 def _is_url(self, url):
     return url.startswith(('http://', 'https://')) and Url.url_parse(url).query
Example #17
def url():
    return Url().topic('school-districts')
Example #18
def url():
    return Url().topic('schools')
Example #19
 def _is_query(self, url):
     return Url.url_parse(url).query
Example #20
 def __init__(self, project_name):
     self.project_name = project_name
     self.rssurl = self.rss_source % project_name
     self.download_prefix = Url.join(self.download_site, project_name)