Example #1
import json

import requests
from pyquery import PyQuery as pq


# URL, SRC_URL and sanitize_html are assumed to be module-level constants/helpers.
def fetch_results():
    index = 0
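    # Keep requesting pages until the endpoint returns no content or no result boxes.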
    while True:
        content = requests.get(URL.format(index)).json()
        content = content['content']
        if len(content) == 0:
            break
        else:
            print('GOT {} chars'.format(len(content)))
        content = pq(content)
        boxes = content.find('.modal')
        if len(boxes) == 0:
            break
        else:
            print('GOT {} boxes'.format(len(boxes)))
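        # Each '.modal' box describes a single tender; pull its fields out of the markup.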
        for box in boxes:
            box = pq(box)
            description = pq(box.find('.moreInfoInner'))
            description = sanitize_html(description)
            print('GOT {}'.format(pq(box.find('#modal-title')).text()))
            yield dict(
                publication_id=0,
                tender_id='0',
                tender_type=None,
                tender_type_he=pq(
                    box.find('.generalInfo-jobs li:nth-child(1) span')).text(),
                decision=None,
                page_title=pq(box.find('#modal-title')).text(),
                page_url=SRC_URL,
                publisher=pq(box.find('.publisher_link')).text(),
                start_date=pq(
                    box.find('.generalInfo-jobs li:nth-child(2) span')).text(),
                description=description,
            )
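            # index feeds the next URL.format() call, advancing the pagination.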
            index += 1
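    # Row processor: fetch each row's page_url and attach the parsed details as row['parsed'].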
    def func(row):
        if row.get('parsed') is None:
            return
        url = row['page_url']
        print(url)
        page_text = requests.get(url).text
        page = pq(page_text)
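        # Each rule maps an output field to a CSS selector on the tender detail page.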
        rules = [
            ('decision', '#MMDTendersStatusField span'),
            ('start_date', '.PublishingDate'),
            ('claim_date', '.LastDateForSubmission'),
            ('description', '#ctl00_PlaceHolderMain_ctl13_ctl00__ControlWrapper_RichHtmlField'),
        ]
        output = {}
        for rule, selector in rules:
            elements = page.find(selector)
            if len(elements):
                output[rule] = sanitize_html(pq(elements[0])).strip()
            else:
                output[rule] = None

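        # Attached documents are embedded in the page inside the GovXDLFWrapperObj JavaScript variable.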
        files = [l for l in page_text.split('\n') if 'var GovXDLFWrapperObj = ' in l]
        if len(files) > 0:
            files = files[0].split('var GovXDLFWrapperObj = ')[1].split(';//]]>')[0]
            files = json.loads(files)['Items']
            files = [
                dict(
                    link=f['fileURL'],
                    description=f['FileTitle'],
                )
                for f in files
            ]
        output['documents'] = files
        row['parsed'] = output
Example #3
import requests
from pyquery import PyQuery as pq


# SEARCH_PAGE_RESULTS_URL, get_links and sanitize_html are assumed to be defined
# elsewhere in the module.
def get_decision_list():
    session = requests.Session()
    session.headers['User-Agent'] = 'datagov-external-client'
    response = session.get(SEARCH_PAGE_RESULTS_URL.format(skip=0)).json()
    results = response['results']
    count = 0
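    # Page through the search API, using the running result count as the skip offset.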
    while True:
        for result in results:
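            # 'Content' holds the decision body as HTML; collect its links and sanitize the markup.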
            content_pq = pq(result['Content']) if result['Content'] else None
            links = get_links(result['Content'], session)
            content_pq = sanitize_html(content_pq)

            yield {
                'text': content_pq,
                'linked_docs': links,
                'doc_published_date': result['DocPublishedDate'],
                'doc_update_date': result['DocUpdateDate'],
                'id': result['ItemUniqueId'],
                'office': (result['ConnectedOffices'][0]['Title']
                           if result.get('ConnectedOffices') else ''),
                'government': (result['PmoGovernmentDesc'][0]
                               if result.get('PmoGovernmentDesc') else
                               (result.get('PmoGovernment')[0]
                                if result.get('PmoGovernment') else None)),
                'policy_type': (result['PolicyTypeDesc'][0]
                                if result.get('PolicyTypeDesc') else ''),
                'procedure_number': result['ProcedureNumberNumeric'],
                'procedure_number_str': result['ProcedureNumber'],
                'publish_date': result['PublishDate'],
                'publish_date_prod': result['PublishProd'],
                'title': result['Title'],
                'unit': (result['UnitsDesc'][0] if result.get('UnitsDesc') else
                         (result.get('Units')[0] if result.get('Units') else None)),
                'update_date': result['UpdateDate'],
                'url_id': result['UrlName'],
                'score': 1,
            }
            count += 1
        response = session.get(
            SEARCH_PAGE_RESULTS_URL.format(skip=count)).json()
        results = response['results']
        if not results:
            return
Example #4
import requests
from pyquery import PyQuery as pq


# BASE_URL, all_links, claim_date_re, claim_date_re_parts and sanitize_html are
# assumed to be defined elsewhere in the scraper module.
def scraper():
    for link in all_links():
        url = BASE_URL + link
        main = pq(requests.get(url).text)
        title = pq(main.find('h1.main-title')[0]).text()
        text = pq(main.find('article.node'))
        start_date = text.find('div.field-type-datetime')
        start_date = pq(start_date).text()
        start_date = '/'.join(x for x in start_date.split('/'))  # note: splitting and re-joining on '/' leaves start_date unchanged
        description = sanitize_html(
            pq(text.find('.field-type-text-with-summary')))
        try:
            claim_date = pq(text.find('strong')[-1]).text()
            claim_date = claim_date_re.findall(claim_date)[0]
        except IndexError:
            try:
                claim_date = claim_date_re.findall(description)[-1]
            except IndexError:
                print('SKIPPING', url)
                continue
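        # Normalise the claim date: keep the numeric parts and promote two-digit years to the 2000s.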
        claim_date = [int(p) for p in claim_date_re_parts.findall(claim_date)]
        if claim_date[2] < 1000:
            claim_date[2] += 2000
        claim_date = '/'.join(str(p) for p in claim_date)

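        # Classify the tender type from Hebrew keywords in the page title.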
        tender_type = None
        if not tender_type:
            for x in ["מכרז ל", "במכרז ל", "למכרז ל", "הזמנה לקבלת הצעות"]:
                if x in title:
                    tender_type = 'office'
                    break
        if not tender_type:
            for x in ["פטור ממכרז", "בפטור"]:
                if x in title:
                    tender_type = 'exemptions'
                    break
        if not tender_type:
            for x in ["קול קורא", "הודעה על", "הליך לקבלת הצעות"]:
                if x in title:
                    tender_type = 'call_for_bids'
                    break
        if not tender_type:
            continue

        yield dict(
            page_url=url,
            page_title=title,
            start_date=start_date,
            claim_date=claim_date,
            description=description,
            tender_type=tender_type,
            publication_id=0,
            tender_type_he='קול קורא',
            tender_id='joint' + link.replace('/', '-'),
            publisher='הג׳וינט',
        )
Example #5
import logging
import time

from pyquery import PyQuery as pq


# s (a shared requests.Session), details_headers, sanitize_html and the names used
# by scraper() below are assumed to be defined elsewhere in the module.
def func(row):
    details = pq(s.get(row['page_url']).text)
    # details_headers maps output keys to CSS selectors; 'li' selectors yield lists.
    for key, selector, *_ in details_headers:
        if selector.endswith('li'):
            row[key] = [pq(x).text() for x in details.find(selector)]
        else:
            el = pq(details.find(selector))
            if key == 'description':
                row[key] = sanitize_html(el)
            else:
                row[key] = pq(el).text()
def scraper(gcd):
    for start_date, link in all_links(gcd):
        url = BASE_URL + link
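        # gcd.driver is assumed to be a Selenium-style WebDriver; wait briefly for the page to render.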
        gcd.driver.get(url)
        time.sleep(1)
        page = pq(gcd.driver.page_source)
        title = pq(page.find('.page-content-container h1')).text().strip()
        content = pq(page.find('.page-content-item'))
        description = sanitize_html(content)
        content = content.text().strip()
        if not content or not title:
            logging.warning('FAILED TO FIND TITLE OR CONTENT FOR %s', url)
            continue
        try:
            claim_date = claim_date_re.findall(content)[0]
        except IndexError:
            logging.warning('FAILED TO FIND CLAIM DATE for %s', url)
            claim_date = None

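        # Normalise the claim date into a slash-separated date with a four-digit year.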
        if claim_date:
            claim_date = [
                int(p) for p in claim_date_re_parts.findall(claim_date)
            ]
            if claim_date[2] < 1000:
                claim_date[2] += 2000
            claim_date = '/'.join(str(p) for p in claim_date)
        start_date = '/'.join(x for x in start_date.split('.'))

        yield dict(
            page_url=url,
            page_title=title,
            start_date=start_date,
            claim_date=claim_date,
            description=description,
            tender_type='call_for_bids',
            publication_id=0,
            tender_type_he='קול קורא',
            tender_id='kkl' + link.replace('/', '-'),
            publisher='הקרן הקיימת לישראל',
        )