Example #1
    def __init__(self):

        # Cookie jar bound to the target server, plus a non-caching HTTP client.
        self.cookiejar = libcookie.libcookie(self.server)
        self.h = httplib2.Http(cache=None, timeout=self.timeout)
        self.h.follow_redirects = False

        # auth_basic holds [username, password] when HTTP Basic auth is configured.
        if self.auth_basic != []:
            self.h.add_credentials(self.auth_basic[0], self.auth_basic[1])
Example #2
    def __init__(self):

        self.cookiejar = libcookie.libcookie(self.server)
        self.h = httplib2.Http(cache=None, timeout=self.timeout)
        self.h.follow_redirects = False

        if self.auth_basic != []:
            self.h.add_credentials(self.auth_basic[0], self.auth_basic[1])
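
Both __init__ excerpts above assume the enclosing class defines self.server, self.timeout and self.auth_basic elsewhere. A minimal self-contained sketch of that context, with those values passed in as constructor arguments (the class name and defaults here are illustrative, not from the original project):

import httplib2
import libcookie

class HTTPClient(object):
    def __init__(self, server, timeout=6.0, auth_basic=[]):
        self.server = server          # host part of the target URL
        self.timeout = timeout        # socket timeout in seconds
        self.auth_basic = auth_basic  # [username, password], or [] for none

        self.cookiejar = libcookie.libcookie(self.server)
        self.h = httplib2.Http(cache=None, timeout=self.timeout)
        self.h.follow_redirects = False

        if self.auth_basic != []:
            self.h.add_credentials(self.auth_basic[0], self.auth_basic[1])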
Example #3
    def __init__(self, root):
        self.myls = lswww.lswww(root)  # lswww crawler rooted at the target URL
        self.root = self.myls.root
        self.server = urlparse.urlparse(self.root)[1]  # netloc: host[:port]
        self.myls.verbosity(1)
        socket.setdefaulttimeout(self.timeout)

        # HttpLib2 vars
        proxy = None

        if self.proxy != "":
            (proxy_type, proxy_usr, proxy_pwd, proxy_host, proxy_port, path, query, fragment) = httplib2.parse_proxy(
                self.proxy
            )
            proxy = httplib2.ProxyInfo(proxy_type, proxy_host, proxy_port, proxy_user=proxy_usr, proxy_pass=proxy_pwd)

        self.cookiejar = libcookie.libcookie(self.server)

        self.h = httplib2.Http(cache=None, timeout=self.timeout, proxy_info=proxy)
        self.h.follow_redirects = False

        if self.auth_basic != []:
            self.h.add_credentials(self.auth_basic[0], self.auth_basic[1])
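
Example #3 derives the ProxyInfo from a proxy URL with httplib2's parser; when the proxy endpoint is already known, it can also be built directly. A minimal sketch (the host and port are illustrative; PROXY_TYPE_HTTP comes from the socks module, which recent httplib2 releases bundle and older setups install separately as SocksiPy):

import httplib2
from httplib2 import socks

proxy = httplib2.ProxyInfo(socks.PROXY_TYPE_HTTP, "127.0.0.1", 8080)
h = httplib2.Http(cache=None, timeout=10, proxy_info=proxy)
h.follow_redirects = False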
Example #4
    def go(self, crawlerFile):
        proxy = None

        if self.proxy != "":
            (proxy_type, proxy_usr, proxy_pwd, proxy_host, proxy_port,
             path, query, fragment) = httplib2.parse_proxy(self.proxy)
            proxy = httplib2.ProxyInfo(proxy_type, proxy_host, proxy_port,
                                       proxy_user=proxy_usr, proxy_pass=proxy_pwd)

        self.h = httplib2.Http(cache=None, timeout=self.timeout,
                               proxy_info=proxy)
        self.h.follow_redirects = False

        self.cookiejar = libcookie.libcookie(self.server)
        if os.path.isfile(self.cookie):
            self.cookiejar.loadfile(self.cookie)

        if self.auth_basic != []:
            self.h.add_credentials(self.auth_basic[0], self.auth_basic[1])

        # load the saved crawler state if a file was passed in
        if crawlerFile is not None:
            if self.persister.isDataForUrl(crawlerFile) == 1:
                self.persister.loadXML(crawlerFile)
                self.tobrowse = self.persister.getToBrose()
                # TODO: change xml file for browsed urls
                self.browsed = self.persister.getBrowsed()
                self.forms = self.persister.getForms()
                self.uploads = self.persister.getUploads()
                print _("File") + " " + crawlerFile + " " + _("loaded, the scan continues") + ":"
                if self.verbose == 2:
                    print " * " + _("URLs to browse")
                    for x in self.tobrowse:
                        print "    + " + x
                    print " * " + _("URLs browsed")
                    for x in self.browsed.keys():
                        print "    + " + x
            else:
                print _("File") + " " + crawlerFile + " " + _("not found, WSP will scan again the web site")

        # browse while the URL list isn't empty; if the user stops the scan
        # with Ctrl+C, the URLs found so far are reported and saved to an XML file
        try:
            while len(self.tobrowse) > 0:
                lien = self.tobrowse.pop(0)
                if lien not in self.browsed and lien not in self.excluded:
                    headers = self.browse(lien)
                    if headers != {}:
                        if "link_encoding" not in headers:
                            if lien in self.link_encoding:
                                headers["link_encoding"] = self.link_encoding[lien]
                        self.browsed[lien] = headers
                        if self.verbose == 1:
                            sys.stderr.write('.')
                        elif self.verbose == 2:
                            print lien
                if self.scope == self.SCOPE_PAGE:
                    self.tobrowse = []
            self.saveCrawlerData()
            print ""
            print " " + _("Notice") + " "
            print "========"
            print _("This scan has been saved in the file") + " " + self.persister.CRAWLER_DATA_DIR + '/' + self.server + ".xml"
            print _("You can use it to perform attacks without scanning the web site again with the \"-k\" parameter")
        except KeyboardInterrupt:
            self.saveCrawlerData()
            print ""
            print " " + _("Notice") + " "
            print "========"
            print _("Scan stopped, the data has been saved in the file") + " " + self.persister.CRAWLER_DATA_DIR + '/' + self.server + ".xml"
            print _("To continue this scan, you should launch WSP with the \"-i\" parameter")
Example #5
import sys
import urllib
import urllib2
import libcookie


def _(s):
    # gettext fallback: identity "translation"
    return s


if len(sys.argv) < 4:
    sys.stderr.write("Usage python cookie.py <cookie_file> <url> <arg1=val1> ...\n")
    sys.exit(1)

cookiefile = sys.argv[1]
url = sys.argv[2]
data = sys.argv[3:]
liste = []
for l in data:
    # split only on the first "=" so values may themselves contain "="
    liste.append(tuple(l.split("=", 1)))
params = urllib.urlencode(liste)

txheaders = {"User-agent": "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"}

try:
    req = urllib2.Request(url, params, headers=txheaders)
    handle = urllib2.urlopen(req)
except IOError, e:
    print _("Error getting url"), url
    print e
    sys.exit(1)

lc = libcookie.libcookie(url)
lc.loadfile(cookiefile)
lc.add(handle, handle.read())
lc.save(cookiefile)
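
Run as a script, this posts one request and appends whatever cookies the server sets to the cookie file. The same flow can be wrapped for reuse; a minimal sketch under the libcookie API used above (the function name and the target URL/fields are illustrative):

import urllib
import urllib2
import libcookie

def record_cookies(cookiefile, url, fields):
    # POST the form fields, then store the cookies from the response.
    params = urllib.urlencode(fields)
    handle = urllib2.urlopen(urllib2.Request(url, params))
    lc = libcookie.libcookie(url)
    lc.loadfile(cookiefile)   # assumes the cookie file already exists
    lc.add(handle, handle.read())
    lc.save(cookiefile)

record_cookies("cookies.txt", "http://target.example/login.php",
               [("username", "admin"), ("password", "secret")])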