Example #1
class BerwickParser:
    comments_email_address = "*****@*****.**"

    def __init__(self, *args):

        self.authority_name = "Berwick-upon-Tweed Borough Council"
        self.authority_short_name = "Berwick"
        self.base_url = "http://www.berwick-upon-tweed.gov.uk/planning/register/wl/%s.htm"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)


    def getResultsByDayMonthYear(self, day, month, year):
        search_day = datetime.date(year, month, day)

        monday_before = search_day - datetime.timedelta(search_day.weekday())

        thursday = monday_before + datetime.timedelta(3)
        if search_day.weekday() > 3: # i.e. it is Friday, Saturday, or Sunday
            # We need to add a week
            thursday = thursday + datetime.timedelta(7)

        this_url = self.base_url %(thursday.strftime(search_date_format))
        # Now get the search page
        response = urllib2.urlopen(this_url)
        soup = BeautifulSoup(response.read())

        # Each app is stored in a table of its own. The tables don't have
        # any useful attributes, so we'll find all the NavigableString objects
        # which look like " Application Number:" and then look at the
        # tables they are in.

        nav_strings = soup.findAll(text=" Application Number:")

        for nav_string in nav_strings:
            application = PlanningApplication()

            application.council_reference = nav_string.findNext("p").string.strip()

            result_table = nav_string.findPrevious("table")

            application.date_received = datetime.datetime.strptime(result_table.find(text=" Registration Date: ").findNext("p").contents[0].strip(), reg_date_format)

            application.osgb_x = result_table.find(text=" Easting:").findNext("p").string.strip()
            application.osgb_y = result_table.find(text=" Northing:").findNext("p").string.strip()

            application.description = result_table.find(text=" Proposed Development:").findNext("p").string.strip()
            application.address = result_table.find(text=" Location:").findNext("p").string.strip()
            application.postcode = getPostcodeFromText(application.address)

            application.info_url = this_url

            application.comment_url = self.comments_email_address

            self._results.addApplication(application)

        return self._results

    def getResults(self, day, month, year):
        return self.getResultsByDayMonthYear(int(day), int(month), int(year)).displayXML()
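This fragment leans on module-level names (datetime, urllib2, BeautifulSoup, search_date_format, reg_date_format, PlanningApplication, PlanningAuthorityResults, getPostcodeFromText) defined elsewhere in the scraper package. The weekday arithmetic above finds the Thursday whose weekly list should cover the search day; a self-contained check of just that logic, lifted from the method:

import datetime

def list_thursday(search_day):
    # Thursday of the week containing search_day...
    monday_before = search_day - datetime.timedelta(search_day.weekday())
    thursday = monday_before + datetime.timedelta(3)
    # ...unless search_day falls on Friday/Saturday/Sunday, in which case
    # the relevant list is the following week's.
    if search_day.weekday() > 3:
        thursday += datetime.timedelta(7)
    return thursday

# 2008-05-23 was a Friday, so it rolls forward to the next week's Thursday.
assert list_thursday(datetime.date(2008, 5, 23)) == datetime.date(2008, 5, 29)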
Example #2
class CarmarthenshireParser:
    def __init__(self, *args):
        self.comments_email_address = "*****@*****.**"

        self.authority_name = "Carmarthenshire County Council"
        self.authority_short_name = "Carmarthenshire"
        self.base_url = "http://www.carmarthenshire.gov.uk/CCC_APPS/eng/plannaps/CCC_PlanningApplicationsResults.asp?datemode=range&in_lo_date=%(day)s%%2F%(month)s%%2F%(year)s&in_hi_date=%(day)s%%2F%(month)s%%2F%(year)s&SUBMIT=Search"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)


    def getResultsByDayMonthYear(self, day, month, year):
        search_day = datetime.date(year, month, day)

        # Now get the search page
        response = urllib2.urlopen(self.base_url %{"day": day,
                                                   "month": month,
                                                   "year": year,
                                                   })
        soup = BeautifulSoup(response.read())

        trs = soup.findAll("tr", valign="middle")

        count = 0
        for tr in trs:
            # The odd trs are just spacers
            if count % 2 == 0:
                application = PlanningApplication()

                tds = tr.findAll("td")
                
                application.date_received = search_day
                application.council_reference = tds[1].a.string
                application.address = tds[3].a.string
                application.postcode = getPostcodeFromText(application.address)
                
                # All the links in this <tr> go to the same place...
                application.info_url = urlparse.urljoin(self.base_url, tr.a['href'])

                # Still looking for description and comment url
                
                # For the description, we'll need the info page
                info_soup = BeautifulSoup(urllib2.urlopen(application.info_url).read())

                application.description = info_soup.find(text="Description").findNext("td").findNext("td").font.string

                # While we're here, let's get the OSGB grid ref
                application.osgb_x, application.osgb_y = info_soup.find(text="Grid Reference").findNext("td").font.string.split("-")

                # We'll have to use an email address for comments
                application.comment_url = self.comments_email_address

                self._results.addApplication(application)

            count += 1

        return self._results

    def getResults(self, day, month, year):
        return self.getResultsByDayMonthYear(int(day), int(month), int(year)).displayXML()
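The manual counter in the loop above exists only to skip the alternating spacer rows. A minimal illustration of the same pattern with enumerate, using dummy data standing in for the scraped <tr>s:

rows = ["app A", "spacer", "app B", "spacer", "app C"]
for count, row in enumerate(rows):
    if count % 2 == 1:
        continue  # the odd rows are just spacers
    print row     # only the even (application) rows are processed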
Example #3
class CrawleyParser:
    comment_url_template = "http://www.crawley.gov.uk/stellent/idcplg?IdcService=SS_GET_PAGE&nodeId=561&pageCSS=&pAppNo=%(pAppNo)s&pAppDocName=%(pAppDocName)s"
    
    def __init__(self, *args):

        self.authority_name = "Crawley Borough Council"
        self.authority_short_name = "Crawley"
        self.base_url =   "http://www.crawley.gov.uk/stellent/idcplg?IdcService=SS_GET_PAGE&nodeId=560&is_NextRow=1&accept=yes&strCSS=null&pApplicationNo=&pProposal=&pLocation=&pPostcode=&pWard=&pDateType=received&pDayFrom=%(dayFrom)s&pMonthFrom=%(monthFrom)s&pYearFrom=%(yearFrom)s&pDayTo=%(dayTo)s&pMonthTo=%(monthTo)s&pYearTo=%(yearTo)s&submit=Search"


        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)

    def getResultsByDayMonthYear(self, day, month, year):
        search_day = datetime.date(year, month, day)
        #- Crawley only allows searches from-to, so:

        search_url = self.base_url %{
            "dayFrom": day,
            "monthFrom": month,
            "yearFrom": year,
            "dayTo": day,
            "monthTo": month,
            "yearTo": year,
            }
        # Now get the search page
        response = urllib2.urlopen(search_url)
        soup = BeautifulSoup.BeautifulSoup(response.read())
        
        if soup.table: #- Empty result set has no table
            trs = soup.table.findAll("tr")[1:] # First one is just headers    
            for tr in trs:    
                tds = tr.findAll("td")
                application = PlanningApplication()         
                application.council_reference = tds[0].a.contents[0].strip().replace("&#47;", "/")
                application.info_url = urlparse.urljoin(self.base_url, tds[0].a['href'])

                info_qs = cgi.parse_qs(urlparse.urlsplit(application.info_url)[3])

                comment_qs = {
                  "pAppNo": application.council_reference,
                  "pAppDocName": info_qs["ssDocName"][0],
                  }
                application.comment_url = self.comment_url_template %comment_qs

                application.address = tds[1].string.strip()
                if tds[2].string: #- if postcode present, append it to the address too
                    application.postcode = tds[2].string.replace("&nbsp;", " ").strip()
                    application.address += ", " + application.postcode
                application.description = tds[3].string.strip()
                application.date_received = datetime.datetime(*(time.strptime(tds[4].string.strip(), date_format)[0:6]))
                self._results.addApplication(application)
        return self._results

    def getResults(self, day, month, year):
        return self.getResultsByDayMonthYear(int(day), int(month), int(year)).displayXML()
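The comment URL above is assembled by pulling ssDocName out of the info URL: urlparse.urlsplit(url)[3] is the query string, and cgi.parse_qs maps each parameter to a list of values. A self-contained illustration (the URL and document name are made up; the real ones come from the scraped page):

import cgi
import urlparse

info_url = "http://www.example.com/idcplg?IdcService=SS_GET_PAGE&ssDocName=APP_2008_0123"
info_qs = cgi.parse_qs(urlparse.urlsplit(info_url)[3])
print info_qs["ssDocName"][0]  # -> APP_2008_0123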
Example #4
class CalderdaleParser:
    def __init__(self, *args):
        self.authority_name = "Calderdale Council"
        self.authority_short_name = "Calderdale"
        self.base_url = "http://www.calderdale.gov.uk/environment/planning/search-applications/planapps.jsp?status=0&date1=%(date)s&date2=%(date)s&Search=Search"
        self.info_url = "http://www.calderdale.gov.uk/environment/planning/search-applications/planapps.jsp?app=%s&Search=Search"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)


    def getResultsByDayMonthYear(self, day, month, year):
        search_date = datetime.date(year, month, day)

        next_page_url = self.base_url %{"date": search_date.strftime(date_format)}

        while next_page_url:
            try:
                response = urllib2.urlopen(next_page_url)
            except urllib2.HTTPError:
                # This is what seems to happen if there are no apps
                break

            soup = BeautifulSoup(response.read())

            next_link = soup.find(text="Next")
            if next_link:
                next_page_url = urlparse.urljoin(self.base_url, next_link.parent['href'])
            else:
                next_page_url = None

            # There is an <h3> for each app that we can use 
            for h3 in soup.findAll("h3", {"class": "resultsnavbar"}):
                application = PlanningApplication()

                application.date_received = search_date
                application.council_reference = h3.string.split(": ")[1]
                application.description = h3.findNext("div").find(text="Proposal:").parent.nextSibling.strip()

                application.address = ', '.join(h3.findNext("div").find(text="Address of proposal:").parent.nextSibling.strip().split("\r"))
                application.postcode = getPostcodeFromText(application.address)

                application.comment_url = urlparse.urljoin(self.base_url, h3.findNext("div").find(text=re.compile("Comment on Application")).parent['href'])

                application.info_url = self.info_url %(urllib.quote(application.council_reference))

                application.osgb_x, application.osgb_y = h3.findNext("div").find(text="Grid Reference:").parent.nextSibling.strip().split()

                self._results.addApplication(application)

        return self._results


    def getResults(self, day, month, year):
        return self.getResultsByDayMonthYear(int(day), int(month), int(year)).displayXML()
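The while loop above is a follow-the-Next-link pager: fetch a page, process its applications, and continue while a "Next" anchor exists (an HTTPError on the first fetch is how this site signals an empty day). A stubbed sketch of that control flow, with a dict standing in for the site:

# Each (hypothetical) page maps to its contents and the next page, if any.
pages = {"page1": ("applications 1-10", "page2"),
         "page2": ("applications 11-14", None)}

next_page_url = "page1"
while next_page_url:
    contents, next_page_url = pages[next_page_url]
    print contents  # process this page, then follow the "Next" link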
Example #5
class ForestOfDeanParser:
    def __init__(self, *args):

        self.authority_name = "Forest of Dean District Council"
        self.authority_short_name = "Forest of Dean"
        self.base_url = "http://www.fdean.gov.uk/content.asp"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)


    def getResultsByDayMonthYear(self, day, month, year):
        search_date = datetime.date(year, month, day)

        search_data = urllib.urlencode(
            [
                ("parent_directory_id", "200"),
                ("nav", "679"),
                ("id", "13266"),
                ("RecStart", "1"),
                ("RecCount", "100"),
                ("SDate", search_date.strftime(date_format)),
                ("EDate", search_date.strftime(date_format)),
                ]
            )

        search_url = self.base_url + "?" + search_data

        response = urllib2.urlopen(search_url)
        soup = BeautifulSoup(response.read())

        results_table = soup.find("table", summary="List of planning applications that match your query")

        for tr in results_table.findAll("tr")[1:]:
            application = PlanningApplication()
            
            application.date_received = search_date
            
            tds = tr.findAll("td")

            application.council_reference = tds[0].a.string.strip()
            application.info_url = urlparse.urljoin(self.base_url, tds[0].a['href'])
            application.comment_url = application.info_url

            application.address = ' '.join(tds[1].string.strip().split())
            application.postcode = getPostcodeFromText(application.address)

            application.description = tds[2].string.strip()

            self._results.addApplication(application)

        return self._results

    def getResults(self, day, month, year):
        return self.getResultsByDayMonthYear(int(day), int(month), int(year)).displayXML()
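urllib.urlencode is given a list of pairs above rather than a dict, which preserves the parameter order in the resulting query string; a dict would make the order arbitrary, and some older CGI endpoints care:

import urllib

print urllib.urlencode([("SDate", "23/05/2008"), ("EDate", "23/05/2008")])
# -> SDate=23%2F05%2F2008&EDate=23%2F05%2F2008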
Example #6
class SolihullParser:

    def __init__(self, *args):

        self.authority_name = "Solihull Metropolitan Borough Council"
        self.authority_short_name = "Solihull"
        self.base_url = "http://www.solihull.gov.uk/planning/dc/weeklist.asp?SD=%s&ward=ALL"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)


    def getResultsByDayMonthYear(self, day, month, year):
        search_day = datetime.date(year, month, day)

        # What we actually need is the monday before the date searched for:
        monday_before = search_day - datetime.timedelta(search_day.weekday())

        # Now get the search page
        response = urllib2.urlopen(self.base_url %(monday_before.strftime(date_format)))
        soup = BeautifulSoup(response.read())

        result_tables = soup.findAll("table", width="98%", cellpadding="2")

        for table in result_tables:
            application = PlanningApplication()

            trs = table.findAll("tr")
            application.council_reference = trs[0].strong.string.strip()
            relative_info_url = trs[0].a['href']
            application.info_url = urlparse.urljoin(self.base_url, relative_info_url)

            application.address = trs[1].findAll("td")[1].string.strip()
            application.postcode = getPostcodeFromText(application.address)
            application.description = trs[2].findAll("td")[1].string.strip()

            # The received date is a dd/mm/yyyy string; parse it by hand.
            raw_date_recv = trs[3].findAll("td")[3].string.strip().split("/")
            # The weekly list covers several days, so skip applications that
            # weren't received on the target day.
            if int(raw_date_recv[0]) != day:
                continue
            application.date_received = datetime.date(int(raw_date_recv[2]), int(raw_date_recv[1]), int(raw_date_recv[0]))

            try:
                relative_comment_url = trs[5].findAll("td")[1].a['href']
                application.comment_url = urlparse.urljoin(self.base_url, relative_comment_url)
            except (IndexError, TypeError, KeyError):
                # Not every listing carries a comment link.
                application.comment_url = "No Comment URL."

            self._results.addApplication(application)

        return self._results

    def getResults(self, day, month, year):
        return self.getResultsByDayMonthYear(int(day), int(month), int(year)).displayXML()
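For well-formed dd/mm/yyyy input, the manual split-on-"/" parsing above agrees with time.strptime, as this self-contained check shows:

import datetime
import time

raw = "21/05/2008"
day, month, year = [int(x) for x in raw.split("/")]
assert datetime.date(year, month, day) == \
    datetime.date(*time.strptime(raw, "%d/%m/%Y")[0:3])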
Example #7
class LeicestershireParser:
    def __init__(self, *args):

        self.authority_name = "Leicestershire County Council"
        self.authority_short_name = "Leicestershire"
        self.base_url = "http://www.leics.gov.uk/index/environment/community_services_planning/planning_applications/index/environment/community_services_planning/planning_applications/eplanning_searchform/eplanning_resultpage.htm?sd=%(date)s&ed=%(date)s&kw=&map=f"
 
        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)


    def getResultsByDayMonthYear(self, day, month, year):
        search_date = datetime.date(year, month, day)

        response = urllib2.urlopen(self.base_url %{"date": search_date.strftime(search_date_format)})
        soup = BeautifulSoup.BeautifulSoup(response.read())

        if not soup.find(text=re.compile("No Results Found")):
            
            trs = soup.findAll("table", {"class": "dataTable"})[1].findAll("tr")[1:]

            for tr in trs:
                tds = tr.findAll("td")

                application = PlanningApplication()

                # We can fill in the date received without actually looking at the data
                application.date_received = search_date

                application.council_reference = tds[0].a.string.strip()
                application.info_url = urlparse.urljoin(self.base_url, tds[0].a['href'])
                application.address = ', '.join([x for x in tds[1].contents 
                                                 if isinstance(x, BeautifulSoup.NavigableString)])
                application.postcode = getPostcodeFromText(application.address)
                application.description = tds[2].string.strip()

                # To get the comment link we need to fetch the info page

                info_response = urllib2.urlopen(application.info_url)
                info_soup = BeautifulSoup.BeautifulSoup(info_response.read())

                base = info_soup.base['href']

                application.comment_url = urlparse.urljoin(base,
                                                           info_soup.find("a", target="Planning Application Consultation Form")['href'])

                self._results.addApplication(application)

        return self._results

    def getResults(self, day, month, year):
        return self.getResultsByDayMonthYear(int(day), int(month), int(year)).displayXML()
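The address join above keeps only the bare text nodes of the cell, dropping tags such as <br />. A self-contained check, assuming BeautifulSoup 3 (the version these scrapers use):

import BeautifulSoup

td = BeautifulSoup.BeautifulSoup("<td>1 High Street<br />Leicester</td>").td
print ', '.join([x for x in td.contents
                 if isinstance(x, BeautifulSoup.NavigableString)])
# -> 1 High Street, Leicester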
Example #8
class HounslowParser:
    def __init__(self, *args):

        self.authority_name = "London Borough of Hounslow"
        self.authority_short_name = "Hounslow"
        self.base_url = "http://planning.hounslow.gov.uk/planningv2/planning_summary.aspx?strWeekListType=SRCH&strRecTo=%(date)s&strRecFrom=%(date)s&strWard=ALL&strAppTyp=ALL&strWardTxt=All%%20Wards&strAppTypTxt=All%%20Application%%20Types&strArea=ALL&strAreaTxt=All%%20Areas&strStreet=ALL&strStreetTxt=All%%20Streets&strPC=&strLimit=500"
        # Limited to 500 cases - putting 1000 causes a default value of 50 to be used. 500 should be plenty.

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)

    def getResultsByDayMonthYear(self, day, month, year):
        search_day = datetime.date(year, month, day)

        # Now get the search page
        response = urllib2.urlopen(self.base_url % {"date": search_day.strftime(date_format)})
        soup = BeautifulSoup(response.read())

        # Each result is shown in a table of its own. The tables don't have
        # any nice attributes, but they all contain the NavigableString
        # "Application", and nothing else does...
        nav_strings = soup.findAll(text="Application")

        for nav_string in nav_strings:
            result_table = nav_string.findPrevious("table")

            application = PlanningApplication()
            application.date_received = search_day

            links = result_table.findAll("a")

            # We can get OSGB coordinates from the link to streetmap
            map_qs_dict = cgi.parse_qs(urlparse.urlsplit(links[0]["href"])[3])

            application.osgb_x = map_qs_dict.get("x", [None])[0]
            application.osgb_y = map_qs_dict.get("y", [None])[0]

            application.council_reference = links[1].string.strip()
            application.info_url = urlparse.urljoin(self.base_url, links[1]["href"])
            application.comment_url = urlparse.urljoin(self.base_url, links[2]["href"])

            application.address = " ".join(links[0].previous.strip().split())
            application.postcode = getPostcodeFromText(application.address)

            application.description = links[2].previous.strip()

            self._results.addApplication(application)

        return self._results

    def getResults(self, day, month, year):
        return self.getResultsByDayMonthYear(int(day), int(month), int(year)).displayXML()
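The address cleanup above is the standard idiom for collapsing arbitrary runs of whitespace (spaces, tabs, newlines) into single spaces:

address = "  12   Acacia\r\n Avenue "
print " ".join(address.strip().split())  # -> 12 Acacia Avenue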
Example #9
class KensingtonParser:

    def __init__(self, *args):

        self.authority_name = "The Royal Borough of Kensington and Chelsea"
        self.authority_short_name = "Kensington and Chelsea"
        self.base_url = "http://www.rbkc.gov.uk/Planning/scripts/weeklyresults.asp"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)


    def getResultsByDayMonthYear(self, day, month, year):
        search_day = datetime.date(year, month, day)

        # We want the Friday of the week being searched for, since the
        # site's weekly lists run up to a Friday end date.
        friday = search_day - datetime.timedelta(search_day.weekday()) + datetime.timedelta(4)

        # Not using urllib.urlencode as it insists on turning the "+" into "%2B"
        post_data = "WeekEndDate=%d%%2F%d%%2F%d&order=Received+Date&submit=search" %(friday.day, friday.month, friday.year)


        # Now get the search page
        response = urllib2.urlopen(self.base_url, post_data)
        soup = BeautifulSoup(response.read())

        trs = soup.find("table", summary="Planning Application search results table").findAll("tr")[1:]

        for tr in trs:
            application = PlanningApplication()

            tds = tr.findAll("td")

            # Not sure why these are entities. We'll convert them back.
            application.council_reference = tds[0].a.contents[1].strip().replace("&#47;", "/")
            application.info_url = urlparse.urljoin(self.base_url, tds[0].a['href'])
            application.comment_url = application.info_url

            application.date_received = datetime.datetime(*(time.strptime(tds[1].string.strip(), date_format)[0:6]))

            application.address = tds[2].string.strip()
            application.postcode = getPostcodeFromText(application.address)

            application.description = tds[3].string.strip()

            self._results.addApplication(application)
        
        return self._results

    def getResults(self, day, month, year):
        return self.getResultsByDayMonthYear(int(day), int(month), int(year)).displayXML()
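Passing a second argument to urllib2.urlopen, as above, turns the request into a POST with that string as the body. A quick offline check of that behaviour (illustrative URL only):

import urllib2

post_data = "WeekEndDate=23%2F05%2F2008&order=Received+Date&submit=search"
req = urllib2.Request("http://www.example.com/search", post_data)
print req.get_method()  # -> POST (it would be GET without the data argument)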
Example #10
class KingstonParser:
    comments_email_address = "*****@*****.**"

    def __init__(self, *args):
        self.authority_name = "Royal Borough of Kingston upon Thames"
        self.authority_short_name = "Kingston upon Thames"
        self.base_url = "http://maps.kingston.gov.uk/isis_main/planning/planning_summary.aspx?strWeekListType=SRCH&strRecTo=%(date)s&strRecFrom=%(date)s&strWard=ALL&strAppTyp=ALL&strWardTxt=All%%20Wards&strAppTypTxt=All%%20Application%%20Types&strStreets=ALL&strStreetsTxt=All%%20Streets&strLimit=500"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)


    def getResultsByDayMonthYear(self, day, month, year):
        search_day = datetime.date(year, month, day)

        # Now get the search page
        response = urllib2.urlopen(self.base_url %{"date": search_day.strftime(date_format)})
        soup = BeautifulSoup(response.read())

        # Each app is stored in a table of its own.
        # These tables don't have any nice distinguishing features,
        # but they do all contain a NavigableString "Application",
        # and nothing else in the page does.
        nav_strings = soup.findAll(text="Application")
        
        for nav_string in nav_strings:
            results_table = nav_string.findPrevious("table")

            application = PlanningApplication()
            application.date_received = search_day

            application.council_reference = results_table.a.string.strip()
            application.info_url = urlparse.urljoin(self.base_url, results_table.a['href'])
            application.address = results_table.findAll("td")[7].a.string.strip()

            application.postcode = getPostcodeFromText(application.address)
            application.description = results_table.findAll("td")[-1].contents[0].strip()

            # A few applications have comment urls, but most don't.
            # When they do, they have a case officer - I don't think we can
            # work out the other urls - even if they exist.
            # Best to use the email address.
            application.comment_url = self.comments_email_address

            self._results.addApplication(application)

        return self._results

    def getResults(self, day, month, year):
        return self.getResultsByDayMonthYear(int(day), int(month), int(year)).displayXML()
Example #11
class ExmoorParser:
    def __init__(self, *args):

        self.authority_name = "Exmoor National Park"
        self.authority_short_name = "Exmoor"
        self.base_url = "http://www.exmoor-nationalpark.gov.uk/planning_weekly_list.htm?weeklylist=%s"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)


    def getResultsByDayMonthYear(self, day, month, year):
        search_day = datetime.date(year, month, day)

        response = urllib2.urlopen(self.base_url %(search_day.strftime(search_date_format)))
        soup = BeautifulSoup(response.read())

        # The first <tr> contains headers
        trs = soup.table.findAll("tr")[1:]

        for tr in trs:
            application = PlanningApplication()

            tds = tr.findAll("td")

            application.date_received = datetime.datetime.strptime(tds[0].string, received_date_format).date()

            application.info_url = urllib.unquote(urllib.quote_plus(urlparse.urljoin(self.base_url, tds[1].a['href'])))
            application.council_reference = tds[1].a.string.strip()
            application.address = tds[2].a.string.strip()
            application.postcode = getPostcodeFromText(application.address)

            # Now fetch the info url

            info_response = urllib.urlopen(application.info_url)
            info_soup = BeautifulSoup(info_response.read())

            application.description = info_soup.find(text="Proposal:").findNext("td").string.strip()

            try:
                application.comment_url = urlparse.urljoin(self.base_url, info_soup.find(text="Comment").parent['href'])
            except (AttributeError, TypeError, KeyError):
                # The info page has no "Comment" link.
                application.comment_url = "No Comments"

            self._results.addApplication(application)

        return self._results

    def getResults(self, day, month, year):
        return self.getResultsByDayMonthYear(int(day), int(month), int(year)).displayXML()
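The urllib.unquote(urllib.quote_plus(...)) round trip above looks odd, but its net effect is just to replace any spaces in the href with "+": quote_plus encodes spaces as "+" and everything else as %XX escapes, and unquote then undoes the %XX escapes while leaving "+" alone:

import urllib

href = "http://www.example.com/planning app.htm"
print urllib.unquote(urllib.quote_plus(href))
# -> http://www.example.com/planning+app.htm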
Example #12
class BarnsleyParser:
    comments_email_address = "*****@*****.**"

    def __init__(self, *args):

        self.authority_name = "Barnsley Metropolitan Borough Council"
        self.authority_short_name = "Barnsley"
        self.base_url = "http://applications.barnsley.gov.uk/service/development/week_compact.asp?AppDate=%s"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)


    def getResultsByDayMonthYear(self, day, month, year):
        search_day = datetime.date(year, month, day)

        # What we actually need is the monday before the date searched for:
        monday_before = search_day - datetime.timedelta(search_day.weekday())

        # Now get the search page
        response = urllib2.urlopen(self.base_url %(monday_before.strftime(date_format)))
        soup = BeautifulSoup(response.read())

        result_tables = soup.findAll("table", align="Center", cellpadding="3")

        for table in result_tables:
            application = PlanningApplication()

            # We can set the date received and the comment url straight away.
            application.comment_url = self.comments_email_address

            trs = table.findAll("tr")

            application.council_reference = trs[0].a.string.strip()
            relative_info_url = trs[0].a['href']

            application.info_url = urlparse.urljoin(self.base_url, relative_info_url)

            application.date_received = monday_before

            application.address = trs[1].findAll("td")[1].string.strip()
            application.postcode = getPostcodeFromText(application.address)
            application.description = trs[2].findAll("td")[1].string.strip()

            self._results.addApplication(application)

        return self._results

    def getResults(self, day, month, year):
        return self.getResultsByDayMonthYear(int(day), int(month), int(year)).displayXML()
Example #13
class HarrowParser:
    def __init__(self, *args):

        self.authority_name = "London Borough of Harrow"
        self.authority_short_name = "Harrow"

        # This is a link to the last seven days applications
        # The day, month, and year arguments will be ignored.
        self.base_url = "http://www.harrow.gov.uk/www4/planning/dcweek1.asp"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)


    def getResultsByDayMonthYear(self, day, month, year):
        # Now get the search page
        response = urllib2.urlopen(self.base_url)

        soup = BeautifulSoup(response.read())

        # Each application contains the nav string "Application: "
        nav_strings = soup.findAll(text="Application: ")

        for nav_string in nav_strings:
            application = PlanningApplication()

            application.council_reference = nav_string.findPrevious("tr").findAll("td", limit=2)[1].string.strip()

            application.address = nav_string.findNext(text=location_re).split(":")[1].strip()
            application.postcode = getPostcodeFromText(application.address)

            application.description = nav_string.findNext(text="Proposal: ").findNext("td").string.strip()

            application.comment_url = urlparse.urljoin(self.base_url, nav_string.findNext(text="Proposal: ").findNext("a")['href'])

            application.date_received = datetime.datetime.strptime(nav_string.findNext(text=date_received_re).split(": ")[1], date_format).date()

            # FIXME: There is no appropriate info_url for the Harrow apps. 
            # I'll put the base url for the moment, but as that is
            # a list of apps from the last 7 days that will quickly be out of date.

            application.info_url = self.base_url
            
            self._results.addApplication(application)

        return self._results

    def getResults(self, day, month, year):
        return self.getResultsByDayMonthYear(int(day), int(month), int(year)).displayXML()
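location_re and date_received_re are module-level compiled regexes in the original source; BeautifulSoup's text filters accept compiled patterns as well as plain strings. An illustrative (hypothetical) pattern of the kind involved:

import re

# Hypothetical stand-in for the module-level location_re in the original file.
location_re = re.compile(r"Location: .*")
print bool(location_re.match("Location: 1 Station Road, Harrow"))  # -> True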
Example #14
    def __init__(self, *args):

        self.authority_name = "The Royal Borough of Kensington and Chelsea"
        self.authority_short_name = "Kensington and Chelsea"
        self.base_url = "http://www.rbkc.gov.uk/Planning/scripts/weeklyresults.asp"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)
Example #15
    def __init__(self, *args):

        self.authority_name = "Aberdeenshire Council"
        self.authority_short_name = "Aberdeenshire"
        self.base_url = "http://www.aberdeenshire.gov.uk/planning/apps/search.asp?startDateSearch=%(day)s%%2F%(month)s%%2F%(year)s&endDateSearch=%(day)s%%2F%(month)s%%2F%(year)s&Submit=Search"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)
Example #16
    def __init__(self, *args):

        self.authority_name = "London Borough of Wandsworth"
        self.authority_short_name = "Wandsworth"
        self.base_url = "http://www.wandsworth.gov.uk/gis/search/Search.aspx"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)
Example #17
    def __init__(self, *args):

        self.authority_name = "Isle of Wight Council"
        self.authority_short_name = "Isle of Wight"
        self.base_url = "http://www.iwight.com/council/departments/planning/appsdip/PlanAppSearch.aspx?__EVENTTARGET=lnkShowAll"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)
Example #18
    def __init__(self, *args):

        self.authority_name = "Shetland Islands Council"
        self.authority_short_name = "Shetland Islands"
        self.base_url = "http://www.shetland.gov.uk/planningcontrol/apps/apps.asp?time=14&Orderby=DESC&parish=All&Pref=&Address=&Applicant=&ApplicantBut=View&sortby=PlanRef&offset=%d"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)
Example #19
    def __init__(self, *args):

        self.authority_name = "Hampshire County Council"
        self.authority_short_name = "Hampshire"
        self.base_url = "http://www3.hants.gov.uk/planning/mineralsandwaste/planning-applications/applications/applications-open.htm"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)
Example #20
    def __init__(self, authority_name, authority_short_name, base_url, debug=False):
        HTMLParser.HTMLParser.__init__(self)

        self.authority_name = authority_name
        self.authority_short_name = authority_short_name
        self.base_url = base_url
        self.debug = debug

        self.search_url = urlparse.urljoin(self.base_url, "portal/servlets/ApplicationSearchServlet")

        self._comment_url = urlparse.urljoin(
            self.base_url, "portal/servlets/PlanningComments?REFNO=%(council_reference)s"
        )

        self._requested_date = None

        # 0 - no
        # 1 - maybe
        # 2 - yes
        # 3 - finished
        self._in_results_table = 0
        self._tr_count = 0
        self._td_count = 0
        self._data_list = []

        # this will hold the application we are currently working on.
        self._current_application = None

        # The object which stores our set of planning application results
        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)
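The flag and counter attributes above drive a classic HTMLParser state machine: the tag handlers flip the flags as elements open and close, and handle_data only records text while the right state is set. A minimal self-contained sketch of the pattern (not the original parser):

import HTMLParser

class TDCollector(HTMLParser.HTMLParser):
    def __init__(self):
        HTMLParser.HTMLParser.__init__(self)
        self._in_td = False  # state flag, like _in_results_table above
        self.cells = []

    def handle_starttag(self, tag, attrs):
        if tag == "td":
            self._in_td = True
            self.cells.append("")

    def handle_endtag(self, tag):
        if tag == "td":
            self._in_td = False

    def handle_data(self, data):
        if self._in_td:
            self.cells[-1] += data

parser = TDCollector()
parser.feed("<tr><td>08/00123/FUL</td><td>1 High Street</td></tr>")
print parser.cells  # -> ['08/00123/FUL', '1 High Street']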
Example #21
    def __init__(self, *args):

        self.authority_name = "Lichfield District Council"
        self.authority_short_name = "Lichfield"
        self.base_url = "http://www.lichfielddc.gov.uk/site/scripts/planning_list.php"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)
Example #22
    def __init__(self, *args):

        self.authority_name = "Leicestershire County Council"
        self.authority_short_name = "Leicestershire"
        self.base_url = "http://www.leics.gov.uk/index/environment/community_services_planning/planning_applications/index/environment/community_services_planning/planning_applications/eplanning_searchform/eplanning_resultpage.htm?sd=%(date)s&ed=%(date)s&kw=&map=f"
 
        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)
Example #23
    def __init__(self, *args):

        self.authority_name = "Forest of Dean District Council"
        self.authority_short_name = "Forest of Dean"
        self.base_url = "http://www.fdean.gov.uk/content.asp"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)
Example #24
    def __init__(self, *args):

        self.authority_name = "Berwick-upon-Tweed Borough Council"
        self.authority_short_name = "Berwick"
        self.base_url = "http://www.berwick-upon-tweed.gov.uk/planning/register/wl/%s.htm"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)
Example #25
    def __init__(self, *args):

        self.authority_name = "Barnsley Metropolitan Borough Council"
        self.authority_short_name = "Barnsley"
        self.base_url = "http://applications.barnsley.gov.uk/service/development/week_compact.asp?AppDate=%s"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)
Example #26
    def __init__(self, *args):
        self.authority_name = "Calderdale Council"
        self.authority_short_name = "Calderdale"
        self.base_url = "http://www.calderdale.gov.uk/environment/planning/search-applications/planapps.jsp?status=0&date1=%(date)s&date2=%(date)s&Search=Search"
        self.info_url = "http://www.calderdale.gov.uk/environment/planning/search-applications/planapps.jsp?app=%s&Search=Search"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)
Example #27
    def __init__(self, *args):

        self.authority_name = "Solihull Metropolitan Borough Council"
        self.authority_short_name = "Solihull"
        self.base_url = "http://www.solihull.gov.uk/planning/dc/weeklist.asp?SD=%s&ward=ALL"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)
Example #28
    def __init__(self):
        HTMLParser.HTMLParser.__init__(self)

        self._requested_date = None

        # We'll keep a count of the number of tables we have seen.
        # All the interesting stuff is in table 3
        self._table_count = 0

        # While inside table 3, we'll keep a count of the number of
        # <td>s we have seen. What is in which numbered <td> is detailed below.
        # 1 reference
        # 3 place and description
        # 5 date received
        # 2 and 4 are just padding
        self._td_count = 0

        # This is just a flag to say that we are now ready to get the reference
        # from the next bit of data
        self._get_reference = False

        self._data = ''

        # this will hold the application we are currently working on.
        self._current_application = None
        
        # The object which stores our set of planning application results
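        # NB: authority_name and authority_short_name are presumably
        # module-level constants in the original source file; they are not
        # arguments to this __init__ and not attributes of self.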
        self._results = PlanningAuthorityResults(authority_name, authority_short_name)
Example #29
    def __init__(self, *args):

        self.authority_name = "City of Westminster"
        self.authority_short_name = "Westminster"
        self.base_url = "http://www3.westminster.gov.uk/planningapplications/currentsearch-results.cfm"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)
Example #30
    def __init__(self, *args):

        self.authority_name = "Exmoor National Park"
        self.authority_short_name = "Exmoor"
        self.base_url = "http://www.exmoor-nationalpark.gov.uk/planning_weekly_list.htm?weeklylist=%s"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)