def fetcher(url):
    if self.method == 'LOCAL':
        return "No case names fetched during tests."
    else:
        r = requests.get(
            url,
            allow_redirects=True,
            headers={'User-Agent': 'Juriscraper'},
            verify=certifi.where(),
        )
        r.raise_for_status()
        html_tree = html.fromstring(r.text)
        html_tree.make_links_absolute(self.url)

        plaintiff = ''
        defendant = ''
        try:
            plaintiff = html_tree.xpath(
                "//text()[contains(., 'Style:')]/ancestor::div[@class='span2']/following-sibling::div/text()"
            )[0]
            defendant = html_tree.xpath(
                "//text()[contains(., 'v.:')]/ancestor::div[@class='span2']/following-sibling::div/text()"
            )[0]
        except IndexError:
            logger.warn("No title or defendant found for {}".format(url))
        if defendant.strip():
            # If there's a defendant
            return titlecase('%s v. %s' % (plaintiff, defendant))
        else:
            return titlecase(plaintiff)
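# For orientation: a minimal, self-contained sketch of the markup the two
# XPath expressions above appear to expect. The fragment is hypothetical,
# reconstructed from the expressions themselves; real court pages may differ.
from lxml import html

fragment = html.fromstring(
    '<div class="row">'
    '  <div class="span2">Style:</div><div>Smith</div>'
    '  <div class="span2">v.:</div><div>Jones</div>'
    '</div>'
)
plaintiff = fragment.xpath(
    "//text()[contains(., 'Style:')]/ancestor::div[@class='span2']"
    "/following-sibling::div/text()"
)[0]  # -> 'Smith'
defendant = fragment.xpath(
    "//text()[contains(., 'v.:')]/ancestor::div[@class='span2']"
    "/following-sibling::div/text()"
)[0]  # -> 'Jones'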
def __init__(self, *args, **kwargs):
    logger.warn("Using DeferringList object which cannot be sorted until "
                "fetched. Note that in usual processing, the fetching "
                "happens before the sorting, so this is OK.")
    self._data = kwargs['seed']
    self._fetched_items = [False] * len(kwargs['seed'])
    self._fetching_function = kwargs['fetcher']
def __init__(self, *args, **kwargs):
    logger.warn("Using DeferringList object which cannot be sorted until "
                "fetched. Note that in usual processing, the fetching "
                "happens before the sorting, so this is OK.")
    logger.info("DeferringList has %s entries to fetch." %
                len(kwargs["seed"]))
    self._data = kwargs["seed"]
    self._fetched_items = [False] * len(kwargs["seed"])
    self._fetching_function = kwargs["fetcher"]
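# The two constructors above imply the seeding pattern: a list of detail-page
# URLs plus the fetcher closure. A minimal usage sketch under that assumption;
# the method name and seed attribute are hypothetical, only
# DeferringList(seed=..., fetcher=...) comes from the code above.
def _get_case_names(self):  # hypothetical scraper method
    def fetcher(url):
        return "stub"  # stands in for the real fetcher shown above
    return DeferringList(seed=self.case_detail_urls, fetcher=fetcher)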
def fetcher(url):
    if self.method == 'LOCAL':
        return "No case names fetched during tests."
    else:
        html_tree = self._get_html_tree_by_url(url, self.request_dict)
        plaintiff = ''
        defendant = ''
        try:
            plaintiff = html_tree.xpath(
                "//text()[contains(., 'Style:')]/ancestor::div[@class='span2']/following-sibling::div/text()"
            )[0]
            defendant = html_tree.xpath(
                "//text()[contains(., 'v.:')]/ancestor::div[@class='span2']/following-sibling::div/text()"
            )[0]
        except IndexError:
            logger.warn("No title or defendant found for {}".format(url))
        if defendant.strip():
            # If there's a defendant
            return titlecase('%s v. %s' % (plaintiff, defendant))
        else:
            return titlecase(plaintiff)
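# This version swaps the inline request/parse steps for a helper. Judging from
# the earlier inline fetcher, the helper presumably performs the same fetch,
# parse, and link-absolutization; an inferred sketch, not the real
# implementation:
def _get_html_tree_by_url(self, url, request_dict=None):
    r = requests.get(
        url,
        allow_redirects=True,
        headers={'User-Agent': 'Juriscraper'},
        verify=certifi.where(),
        **(request_dict or {})
    )
    r.raise_for_status()
    tree = html.fromstring(r.text)
    tree.make_links_absolute(url)
    return tree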
def scrape_court(self, site, full_crawl=False):
    download_error = False
    # Get the court object early for logging
    # opinions.united_states.federal.ca9_u --> ca9
    court_str = site.court_id.split('.')[-1].split('_')[0]
    court = Court.objects.get(pk=court_str)

    dup_checker = DupChecker(court, full_crawl=full_crawl)
    abort = dup_checker.abort_by_url_hash(site.url, site.hash)
    if not abort:
        if site.cookies:
            logger.info("Using cookies: %s" % site.cookies)
        for i, item in enumerate(site):
            msg, r = get_binary_content(
                item['download_urls'],
                site.cookies,
                site._get_adapter_instance(),
                method=site.method
            )
            if msg:
                logger.warn(msg)
                ErrorLog(log_level='WARNING', court=court,
                         message=msg).save()
                continue

            content = site.cleanup_content(r.content)

            current_date = item['case_dates']
            try:
                next_date = site[i + 1]['case_dates']
            except IndexError:
                next_date = None

            # request.content is sometimes a str, sometimes unicode, so
            # force it all to be bytes, pleasing hashlib.
            sha1_hash = hashlib.sha1(force_bytes(content)).hexdigest()
            onwards = dup_checker.press_on(
                Audio,
                current_date,
                next_date,
                lookup_value=sha1_hash,
                lookup_by='sha1'
            )
            if dup_checker.emulate_break:
                break

            if onwards:
                # Not a duplicate, carry on
                logger.info('Adding new document found at: %s' %
                            item['download_urls'].encode('utf-8'))
                dup_checker.reset()

                docket, audio_file, error = self.make_objects(
                    item, court, sha1_hash, content,
                )
                if error:
                    download_error = True
                    continue

                self.save_everything(
                    items={
                        'docket': docket,
                        'audio_file': audio_file,
                    },
                    index=False,
                )
                process_audio_file.apply_async(
                    (audio_file.pk,),
                    countdown=random.randint(0, 3600)
                )

                logger.info(
                    "Successfully added audio file {pk}: {name}".format(
                        pk=audio_file.pk,
                        name=item['case_names'].encode('utf-8')
                    )
                )

    # Update the hash if everything finishes properly.
    logger.info("%s: Successfully crawled oral arguments." % site.court_id)
    if not download_error and not full_crawl:
        # Only update the hash if no errors occurred.
        dup_checker.update_site_hash(site.hash)
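# The call sites above imply press_on's contract: a truthy return means "new
# item, carry on", and emulate_break is set when a duplicate trips a stopping
# threshold. A hypothetical stand-in capturing just that contract; the real
# DupChecker also weighs dates and duplicate counts.
class StubDupChecker(object):
    def __init__(self):
        self.emulate_break = False

    def press_on(self, model, current_date, next_date,
                 lookup_value=None, lookup_by='sha1'):
        is_dup = model.objects.filter(**{lookup_by: lookup_value}).exists()
        if is_dup and next_date is None:
            # Crude stand-in for the real date/count thresholds.
            self.emulate_break = True
        return not is_dup  # truthy means "not a duplicate, carry on"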
def scrape_court(self, site, full_crawl=False):
    download_error = False
    # Get the court object early for logging
    # opinions.united_states.federal.ca9_u --> ca9
    court_str = site.court_id.split('.')[-1].split('_')[0]
    court = Court.objects.get(pk=court_str)

    dup_checker = DupChecker(court, full_crawl=full_crawl)
    abort = dup_checker.abort_by_url_hash(site.url, site.hash)
    if not abort:
        if site.cookies:
            logger.info("Using cookies: %s" % site.cookies)
        for i in range(0, len(site.case_names)):
            msg, r = get_binary_content(site.download_urls[i],
                                        site.cookies,
                                        method=site.method)
            if msg:
                logger.warn(msg)
                ErrorLog(log_level='WARNING', court=court,
                         message=msg).save()
                continue

            current_date = site.case_dates[i]
            try:
                next_date = site.case_dates[i + 1]
            except IndexError:
                next_date = None

            # Make a hash of the data
            sha1_hash = hashlib.sha1(r.content).hexdigest()
            if court_str == 'nev' and \
                    site.precedential_statuses[i] == 'Unpublished':
                # Nevada's non-precedential cases have different SHA1
                # sums every time.
                onwards = dup_checker.should_we_continue_break_or_carry_on(
                    Document,
                    current_date,
                    next_date,
                    lookup_value=site.download_urls[i],
                    lookup_by='download_url')
            else:
                onwards = dup_checker.should_we_continue_break_or_carry_on(
                    Document,
                    current_date,
                    next_date,
                    lookup_value=sha1_hash,
                    lookup_by='sha1')

            if onwards == 'CONTINUE':
                # It's a duplicate, but we haven't hit any thresholds yet.
                continue
            elif onwards == 'BREAK':
                # It's a duplicate, and we hit a date or dup_count threshold.
                dup_checker.update_site_hash(sha1_hash)
                break
            elif onwards == 'CARRY_ON':
                # Not a duplicate, carry on
                logger.info('Adding new document found at: %s' %
                            site.download_urls[i])
                dup_checker.reset()

                cite, docket, doc = self.associate_meta_data_to_objects(
                    site, i, court, sha1_hash)

                # Make and associate the file object
                try:
                    cf = ContentFile(r.content)
                    extension = get_extension(r.content)
                    # See bitbucket issue #215 for why this must be
                    # lower-cased.
                    file_name = trunc(site.case_names[i].lower(), 75) + extension
                    doc.local_path.save(file_name, cf, save=False)
                except:
                    msg = 'Unable to save binary to disk. Deleted document: %s.\n%s' % \
                          (site.case_names[i], traceback.format_exc())
                    logger.critical(msg)
                    ErrorLog(log_level='CRITICAL', court=court,
                             message=msg).save()
                    download_error = True
                    continue

                # Save everything, but don't update Solr index yet
                self.save_everything(cite, docket, doc, index=False)
                random_delay = random.randint(0, 3600)
                extract_doc_content.delay(doc.pk,
                                          callback=subtask(extract_by_ocr),
                                          citation_countdown=random_delay)

                logger.info("Successfully added doc {pk}: {name}".format(
                    pk=doc.pk, name=site.case_names[i]))

    # Update the hash if everything finishes properly.
    logger.info("%s: Successfully crawled opinions." % site.court_id)
    if not download_error and not full_crawl:
        # Only update the hash if no errors occurred.
        dup_checker.update_site_hash(site.hash)
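# Worked example of the court-id parsing used at the top of scrape_court;
# the input value comes from the comment in the code above.
court_id = 'opinions.united_states.federal.ca9_u'
court_str = court_id.split('.')[-1].split('_')[0]  # -> 'ca9'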
def download_and_save():
    """This function is run in many threads simultaneously. Each thread
    runs so long as there are items in the queue. Once an item is found,
    it's downloaded and saved.

    The number of items that can be concurrently saved is determined by
    the number of threads that are running this function.
    """
    while True:
        item = queue.get()
        logger.info("Attempting to add item at: %s" % item['url'])
        try:
            msg, r = get_binary_content(
                item['url'],
                {},
            )
        except:
            logger.info("Unable to get item at: %s" % item['url'])
            queue.task_done()
            continue  # r is unbound here; move on to the next item.
        if msg:
            logger.warn(msg)
            queue.task_done()
            continue  # the download failed; don't touch r.content below.

        sha1_hash = hashlib.sha1(r.content).hexdigest()
        if Audio.objects.filter(sha1=sha1_hash).exists():
            # Simpsons did it! Try the next one.
            logger.info("Item already exists, moving to next item.")
            queue.task_done()
        else:
            # New item, onwards!
            logger.info('Adding new document found at: %s' % item['url'])
            audio_file = Audio(
                source='H',
                sha1=sha1_hash,
                case_name=item['case_name'],
                date_argued=item['date_argued'],
                download_url=item['url'],
                processing_complete=False,
            )
            if item['judges']:
                audio_file.judges = item['judges']
            if item['docket_number']:
                audio_file.docket_number = item['docket_number']

            court = Court.objects.get(pk=item['court_code'])
            docket = Docket(
                case_name=item['case_name'],
                court=court,
            )
            # Make and associate the file object
            try:
                cf = ContentFile(r.content)
                extension = get_extension(r.content)
                if extension not in ['.mp3', '.wma']:
                    extension = '.' + item['url'].rsplit('.', 1)[1]
                # See bitbucket issue #215 for why this must be
                # lower-cased.
                file_name = trunc(item['case_name'].lower(), 75) + extension
                audio_file.local_path_original_file.save(file_name, cf,
                                                         save=False)
            except:
                msg = 'Unable to save binary to disk. Deleted document: %s.\n%s' % \
                      (item['case_name'], traceback.format_exc())
                logger.critical(msg)
                queue.task_done()
                continue  # don't save objects whose binary failed to write

            docket.save()
            audio_file.docket = docket
            audio_file.save(index=False)

            random_delay = random.randint(0, 3600)
            process_audio_file.apply_async(
                (audio_file.pk,),
                countdown=random_delay
            )
            logger.info("Successfully added audio file %s: %s" %
                        (audio_file.pk, audio_file.case_name))
            queue.task_done()  # mark complete so queue.join() can return
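# The docstring above assumes a module-level queue drained by several worker
# threads. A minimal sketch of that setup, assuming the Python 2 standard
# library (Queue, threading) to match the unicode-era code in this section;
# the thread count and item dicts are illustrative, not from the source.
import threading
from Queue import Queue

queue = Queue()

def run_threaded_downloads(items, num_threads=4):
    for _ in range(num_threads):
        t = threading.Thread(target=download_and_save)
        t.daemon = True   # let the process exit once the queue is drained
        t.start()
    for item in items:
        queue.put(item)   # each item is a dict like those used above
    queue.join()          # returns once every item has seen task_done()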
def scrape_court(self, site, full_crawl=False):
    download_error = False
    # Get the court object early for logging
    # opinions.united_states.federal.ca9_u --> ca9
    court_str = site.court_id.split(".")[-1].split("_")[0]
    court = Court.objects.get(pk=court_str)

    dup_checker = DupChecker(court, full_crawl=full_crawl)
    abort = dup_checker.abort_by_url_hash(site.url, site.hash)
    if not abort:
        if site.cookies:
            logger.info("Using cookies: %s" % site.cookies)
        for i in range(0, len(site.case_names)):
            msg, r = get_binary_content(
                site.download_urls[i],
                site.cookies,
                site._get_adapter_instance(),
                method=site.method
            )
            if msg:
                logger.warn(msg)
                ErrorLog(log_level="WARNING", court=court,
                         message=msg).save()
                continue

            content = site.cleanup_content(r.content)

            current_date = site.case_dates[i]
            try:
                next_date = site.case_dates[i + 1]
            except IndexError:
                next_date = None

            # Make a hash of the data
            if isinstance(content, unicode):
                sha1_hash = hashlib.sha1(content.encode("utf-8")).hexdigest()
            else:
                sha1_hash = hashlib.sha1(content).hexdigest()
            if court_str == "nev" and \
                    site.precedential_statuses[i] == "Unpublished":
                # Nevada's non-precedential cases have different SHA1
                # sums every time.
                onwards = dup_checker.should_we_continue_break_or_carry_on(
                    Document,
                    current_date,
                    next_date,
                    lookup_value=site.download_urls[i],
                    lookup_by="download_url"
                )
            else:
                onwards = dup_checker.should_we_continue_break_or_carry_on(
                    Document,
                    current_date,
                    next_date,
                    lookup_value=sha1_hash,
                    lookup_by="sha1"
                )

            if onwards == "CONTINUE":
                # It's a duplicate, but we haven't hit any thresholds yet.
                continue
            elif onwards == "BREAK":
                # It's a duplicate, and we hit a date or dup_count
                # threshold.
                dup_checker.update_site_hash(sha1_hash)
                break
            elif onwards == "CARRY_ON":
                # Not a duplicate, carry on
                logger.info("Adding new document found at: %s" %
                            site.download_urls[i].encode("utf-8"))
                dup_checker.reset()

                cite, docket, doc = self.associate_meta_data_to_objects(
                    site, i, court, sha1_hash)

                # Make and associate the file object
                try:
                    cf = ContentFile(content)
                    extension = get_extension(content)
                    # See bitbucket issue #215 for why this must be
                    # lower-cased.
                    file_name = trunc(site.case_names[i].lower(), 75) + extension
                    doc.local_path.save(file_name, cf, save=False)
                except:
                    msg = ("Unable to save binary to disk. Deleted "
                           "document: %s.\n%s" %
                           (site.case_names[i], traceback.format_exc()))
                    logger.critical(msg.encode("utf-8"))
                    ErrorLog(log_level="CRITICAL", court=court,
                             message=msg).save()
                    download_error = True
                    continue

                # Save everything, but don't update Solr index yet
                self.save_everything(cite, docket, doc, index=False)
                random_delay = random.randint(0, 3600)
                extract_doc_content.delay(
                    doc.pk,
                    callback=subtask(extract_by_ocr),
                    citation_countdown=random_delay
                )

                logger.info("Successfully added doc {pk}: {name}".format(
                    pk=doc.pk, name=site.case_names[i].encode("utf-8")))

    # Update the hash if everything finishes properly.
    logger.info("%s: Successfully crawled opinions." % site.court_id)
    if not download_error and not full_crawl:
        # Only update the hash if no errors occurred.
        dup_checker.update_site_hash(site.hash)
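# The hand-rolled unicode check above is what the later press_on versions
# collapse into a single call with Django's force_bytes; a minimal
# illustration, assuming Django is available (as the other imports suggest).
import hashlib
from django.utils.encoding import force_bytes

for content in [u'caf\xe9', 'cafe']:  # unicode or str alike
    sha1_hash = hashlib.sha1(force_bytes(content)).hexdigest()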
def scrape_court(site, full_crawl=False):
    download_error = False
    # Get the court object early for logging
    # opinions.united_states.federal.ca9_u --> ca9
    court_str = site.court_id.split('.')[-1].split('_')[0]
    court = Court.objects.get(pk=court_str)

    dup_checker = DupChecker(site.court_id, full_crawl=full_crawl)
    abort = dup_checker.abort_by_hash(site.hash)
    if not abort:
        for i in range(0, len(site.case_names)):
            msg, r = get_binary_content(site.download_urls[i],
                                        site._get_cookies())
            if msg:
                # Check for a download error before touching r.content.
                logger.warn(msg)
                ErrorLog(log_level='WARNING', court=court,
                         message=msg).save()
                continue

            clean_content = site._cleanup_content(r.content)

            current_date = site.case_dates[i]
            try:
                next_date = site.case_dates[i + 1]
            except IndexError:
                next_date = None

            # Make a hash of the data. Need to convert unicode to binary
            # before hashing.
            if type(clean_content) == unicode:
                hash_content = clean_content.encode('utf-8')
            else:
                hash_content = clean_content
            sha1_hash = hashlib.sha1(hash_content).hexdigest()
            if court_str == 'nev' and \
                    site.precedential_statuses[i] == 'Unpublished':
                # Nevada's non-precedential cases have different SHA1
                # sums every time.
                onwards = dup_checker.should_we_continue_break_or_carry_on(
                    current_date,
                    next_date,
                    lookup_value=site.download_urls[i],
                    lookup_by='download_url'
                )
            else:
                onwards = dup_checker.should_we_continue_break_or_carry_on(
                    current_date,
                    next_date,
                    lookup_value=sha1_hash,
                    lookup_by='sha1'
                )

            if onwards == 'CONTINUE':
                # It's a duplicate, but we haven't hit any thresholds yet.
                continue
            elif onwards == 'BREAK':
                # It's a duplicate, and we hit a date or dup_count threshold.
                dup_checker.update_site_hash(sha1_hash)
                break
            elif onwards == 'CARRY_ON':
                # Not a duplicate, carry on
                logger.info('Adding new document found at: %s' %
                            site.download_urls[i])
                dup_checker.reset()

                # Make a citation
                cite = Citation(case_name=site.case_names[i])
                if site.docket_numbers:
                    cite.docket_number = site.docket_numbers[i]
                if site.neutral_citations:
                    cite.neutral_cite = site.neutral_citations[i]
                if site.west_citations:
                    cite.federal_cite_one = site.west_citations[i]
                if site.west_state_citations:
                    cite.west_state_cite = site.west_state_citations[i]

                # Make the document object
                doc = Document(source='C',
                               sha1=sha1_hash,
                               date_filed=site.case_dates[i],
                               court=court,
                               download_url=site.download_urls[i],
                               precedential_status=site.precedential_statuses[i])

                # Make and associate the file object
                try:
                    cf = ContentFile(clean_content)
                    extension = get_extension(r.content)
                    # See issue #215 for why this must be lower-cased.
                    file_name = trunc(site.case_names[i].lower(), 75) + extension
                    doc.local_path.save(file_name, cf, save=False)
                except:
                    msg = 'Unable to save binary to disk. Deleted document: %s.\n%s' % \
                          (cite.case_name, traceback.format_exc())
                    logger.critical(msg)
                    ErrorLog(log_level='CRITICAL', court=court,
                             message=msg).save()
                    download_error = True
                    continue

                if site.judges:
                    doc.judges = site.judges[i]
                if site.nature_of_suit:
                    doc.nature_of_suit = site.nature_of_suit[i]

                # Save everything, but don't update Solr index yet
                cite.save(index=False)
                doc.citation = cite
                doc.save(index=False)

                # Extract the contents asynchronously.
                extract_doc_content(doc.pk, callback=subtask(extract_by_ocr))

                logger.info("Successfully added doc %s: %s" %
                            (doc.pk, site.case_names[i]))

    # Update the hash if everything finishes properly.
    logger.info("%s: Successfully crawled." % site.court_id)
    if not download_error and not full_crawl:
        # Only update the hash if no errors occurred.
        dup_checker.update_site_hash(site.hash)
def scrape_court(self, site, full_crawl=False):
    download_error = False
    # Get the court object early for logging
    # opinions.united_states.federal.ca9_u --> ca9
    court_str = site.court_id.split('.')[-1].split('_')[0]
    court = Court.objects.get(pk=court_str)

    dup_checker = DupChecker(court, full_crawl=full_crawl)
    abort = dup_checker.abort_by_url_hash(site.url, site.hash)
    if not abort:
        if site.cookies:
            logger.info("Using cookies: %s" % site.cookies)
        for i in range(0, len(site.case_names)):
            msg, r = get_binary_content(site.download_urls[i],
                                        site.cookies,
                                        site._get_adapter_instance(),
                                        method=site.method)
            if msg:
                logger.warn(msg)
                ErrorLog(log_level='WARNING', court=court,
                         message=msg).save()
                continue

            content = site.cleanup_content(r.content)

            current_date = site.case_dates[i]
            try:
                next_date = site.case_dates[i + 1]
            except IndexError:
                next_date = None

            sha1_hash = hashlib.sha1(content).hexdigest()
            onwards = dup_checker.should_we_continue_break_or_carry_on(
                Audio,
                current_date,
                next_date,
                lookup_value=sha1_hash,
                lookup_by='sha1')

            if onwards == 'CONTINUE':
                # It's a duplicate, but we haven't hit any thresholds yet.
                continue
            elif onwards == 'BREAK':
                # It's a duplicate, and we hit a date or dup_count threshold.
                dup_checker.update_site_hash(sha1_hash)
                break
            elif onwards == 'CARRY_ON':
                # Not a duplicate, carry on
                logger.info('Adding new document found at: %s' %
                            site.download_urls[i])
                dup_checker.reset()

                docket, audio_file = self.associate_meta_data_to_objects(
                    site, i, court, sha1_hash)

                # Make and associate the file object
                try:
                    cf = ContentFile(content)
                    extension = get_extension(content)
                    if extension not in ['.mp3', '.wma']:
                        extension = '.' + site.download_urls[i].rsplit('.', 1)[1]
                    # See bitbucket issue #215 for why this must be
                    # lower-cased.
                    file_name = trunc(site.case_names[i].lower(), 75) + extension
                    audio_file.local_path_original_file.save(file_name, cf,
                                                             save=False)
                except:
                    msg = 'Unable to save binary to disk. Deleted document: %s.\n%s' % \
                          (site.case_names[i], traceback.format_exc())
                    logger.critical(msg)
                    ErrorLog(log_level='CRITICAL', court=court,
                             message=msg).save()
                    download_error = True
                    continue

                self.save_everything(docket, audio_file)
                random_delay = random.randint(0, 3600)
                process_audio_file.apply_async((audio_file.pk,),
                                               countdown=random_delay)

                logger.info("Successfully added audio file %s: %s" %
                            (audio_file.pk, site.case_names[i]))

    # Update the hash if everything finishes properly.
    logger.info("%s: Successfully crawled oral arguments." % site.court_id)
    if not download_error and not full_crawl:
        # Only update the hash if no errors occurred.
        dup_checker.update_site_hash(site.hash)
def scrape_court(self, site, full_crawl=False):
    download_error = False
    # Get the court object early for logging
    # opinions.united_states.federal.ca9_u --> ca9
    court_str = site.court_id.split('.')[-1].split('_')[0]
    court = Court.objects.get(pk=court_str)

    dup_checker = DupChecker(court, full_crawl=full_crawl)
    abort = dup_checker.abort_by_url_hash(site.url, site.hash)
    if not abort:
        if site.cookies:
            logger.info("Using cookies: %s" % site.cookies)
        for i, item in enumerate(site):
            msg, r = get_binary_content(
                item['download_urls'],
                site.cookies,
                site._get_adapter_instance(),
                method=site.method
            )
            if msg:
                logger.warn(msg)
                ErrorLog(log_level='WARNING', court=court,
                         message=msg).save()
                continue

            content = site.cleanup_content(r.content)

            current_date = item['case_dates']
            try:
                next_date = site[i + 1]['case_dates']
            except IndexError:
                next_date = None

            # request.content is sometimes a str, sometimes unicode, so
            # force it all to be bytes, pleasing hashlib.
            sha1_hash = hashlib.sha1(force_bytes(content)).hexdigest()
            if (court_str == 'nev' and
                    item['precedential_statuses'] == 'Unpublished'):
                # Nevada's non-precedential cases have different SHA1
                # sums every time.
                lookup_params = {'lookup_value': item['download_urls'],
                                 'lookup_by': 'download_url'}
            else:
                lookup_params = {'lookup_value': sha1_hash,
                                 'lookup_by': 'sha1'}

            onwards = dup_checker.press_on(Opinion, current_date, next_date,
                                           **lookup_params)
            if dup_checker.emulate_break:
                break

            if onwards:
                # Not a duplicate, carry on
                logger.info('Adding new document found at: %s' %
                            item['download_urls'].encode('utf-8'))
                dup_checker.reset()

                docket, opinion, cluster, error = self.make_objects(
                    item, court, sha1_hash, content
                )
                if error:
                    download_error = True
                    continue

                self.save_everything(
                    items={
                        'docket': docket,
                        'opinion': opinion,
                        'cluster': cluster
                    },
                    index=False
                )
                extract_doc_content.delay(
                    opinion.pk,
                    callback=subtask(extract_by_ocr),
                    citation_countdown=random.randint(0, 3600)
                )

                logger.info("Successfully added doc {pk}: {name}".format(
                    pk=opinion.pk,
                    name=item['case_names'].encode('utf-8'),
                ))

    # Update the hash if everything finishes properly.
    logger.info("%s: Successfully crawled opinions." % site.court_id)
    if not download_error and not full_crawl:
        # Only update the hash if no errors occurred.
        dup_checker.update_site_hash(site.hash)
def scrape_court(self, site, full_crawl=False):
    download_error = False
    # Get the court object early for logging
    # opinions.united_states.federal.ca9_u --> ca9
    court_str = site.court_id.split('.')[-1].split('_')[0]
    court = Court.objects.get(pk=court_str)

    dup_checker = DupChecker(court, full_crawl=full_crawl)
    abort = dup_checker.abort_by_url_hash(site.url, site.hash)
    if not abort:
        if site.cookies:
            logger.info("Using cookies: %s" % site.cookies)
        for i in range(0, len(site.case_names)):
            msg, r = get_binary_content(
                site.download_urls[i],
                site.cookies,
                method=site.method
            )
            if msg:
                logger.warn(msg)
                ErrorLog(log_level='WARNING', court=court,
                         message=msg).save()
                continue

            current_date = site.case_dates[i]
            try:
                next_date = site.case_dates[i + 1]
            except IndexError:
                next_date = None

            sha1_hash = hashlib.sha1(r.content).hexdigest()
            onwards = dup_checker.should_we_continue_break_or_carry_on(
                Audio,
                current_date,
                next_date,
                lookup_value=sha1_hash,
                lookup_by='sha1'
            )

            if onwards == 'CONTINUE':
                # It's a duplicate, but we haven't hit any thresholds yet.
                continue
            elif onwards == 'BREAK':
                # It's a duplicate, and we hit a date or dup_count threshold.
                dup_checker.update_site_hash(sha1_hash)
                break
            elif onwards == 'CARRY_ON':
                # Not a duplicate, carry on
                logger.info('Adding new document found at: %s' %
                            site.download_urls[i])
                dup_checker.reset()

                docket, audio_file = self.associate_meta_data_to_objects(
                    site, i, court, sha1_hash)
                audio_file.docket = docket

                # Make and associate the file object
                try:
                    cf = ContentFile(r.content)
                    extension = get_extension(r.content)
                    if extension not in ['.mp3', '.wma']:
                        extension = '.' + site.download_urls[i].rsplit('.', 1)[1]
                    # See bitbucket issue #215 for why this must be
                    # lower-cased.
                    file_name = trunc(site.case_names[i].lower(), 75) + extension
                    audio_file.local_path_original_file.save(file_name, cf,
                                                             save=False)
                except:
                    msg = 'Unable to save binary to disk. Deleted document: %s.\n%s' % \
                          (site.case_names[i], traceback.format_exc())
                    logger.critical(msg)
                    ErrorLog(log_level='CRITICAL', court=court,
                             message=msg).save()
                    download_error = True
                    continue

                self.save_everything(docket, audio_file)
                random_delay = random.randint(0, 3600)
                process_audio_file.apply_async(
                    (audio_file.pk,),
                    countdown=random_delay
                )

                logger.info("Successfully added audio file %s: %s" %
                            (audio_file.pk, site.case_names[i]))

    # Update the hash if everything finishes properly.
    logger.info("%s: Successfully crawled oral arguments." % site.court_id)
    if not download_error and not full_crawl:
        # Only update the hash if no errors occurred.
        dup_checker.update_site_hash(site.hash)
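# Worked example of the audio extension fallback used above: when the sniffed
# extension isn't .mp3/.wma, fall back to whatever follows the last dot in the
# download URL. The URL here is hypothetical.
url = 'http://example.com/arguments/2014-06-02_smith-v-jones.wma'
extension = '.' + url.rsplit('.', 1)[1]  # -> '.wma'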