def choices_to_csv(obj, field_name):
    """Produce a CSV of possible choices for a field on an object.

    :param obj: The object type you want to inspect
    :param field_name: The field on the object you want to get the choices for
    :return: A comma-separated list of possible values for the field
    """
    flattened = flatten_choices(obj._meta.get_field(field_name))
    # Keep only the human-readable label (second element) of each choice pair.
    labels = [label for _value, label in flattened]
    return oxford_join(labels, conjunction="or", separator=";")
def choices_to_csv(obj, field_name):
    """Return a delimited, readable list of a field's choice labels.

    :param obj: The object type you want to inspect
    :param field_name: The field on the object you want to get the choices for
    :return: A comma-separated list of possible values for the field
    """
    # NOTE(review): an identically-named function appears elsewhere in this
    # file; whichever definition comes later shadows the earlier one.
    field = obj._meta.get_field(field_name)
    choice_labels = []
    for _value, label in flatten_choices(field):
        choice_labels.append(label)
    return oxford_join(choice_labels, conjunction="or", separator=";")
def test_oxford(self) -> None:
    """Exercise oxford_join across sizes and keyword options.

    Fix: the original had stray text ("(self) -> None:") fused onto the
    "Custom conjunction" comment, which broke the method's syntax.
    """
    # Zero items
    self.assertEqual(oxford_join([]), "")
    # One item
    self.assertEqual(oxford_join(["a"]), "a")
    # Two items
    self.assertEqual(oxford_join(["a", "b"]), "a and b")
    # Three items
    self.assertEqual(oxford_join(["a", "b", "c"]), "a, b, and c")
    # Custom separator
    self.assertEqual(oxford_join(["a", "b", "c"], separator=";"), "a; b; and c")
    # Custom conjunction
    self.assertEqual(oxford_join(["a", "b", "c"], conjunction="or"), "a, b, or c")
def test_oxford_zero_items(self):
    """Joining an empty list produces the empty string."""
    result = oxford_join([])
    self.assertEqual(result, "")
def test_oxford_conjunction(self):
    """A custom conjunction replaces the default "and"."""
    joined = oxford_join(["a", "b", "c"], conjunction="or")
    self.assertEqual(joined, "a, b, or c")
def test_oxford_two_items(self):
    """Two items are joined by the bare conjunction, with no separator."""
    pair = ["a", "b"]
    self.assertEqual(oxford_join(pair), "a and b")
def test_oxford_three_items(self):
    """Three items use separators plus the Oxford comma before "and"."""
    joined = oxford_join(["a", "b", "c"])
    self.assertEqual(joined, "a, b, and c")
def test_oxford_zero_items(self):
    """An empty input list yields an empty string."""
    # NOTE(review): a test with this exact name appears elsewhere in this
    # file; within one TestCase the later definition shadows the earlier.
    self.assertEqual(oxford_join([]), "")
def test_oxford_one_item(self):
    """A single item is returned unchanged, with no joining text."""
    # NOTE(review): a test with this exact name appears elsewhere in this
    # file; within one TestCase the later definition shadows the earlier.
    only = oxford_join(["a"])
    self.assertEqual(only, "a")
def test_oxford_conjunction(self):
    """The conjunction keyword swaps "and" for another word."""
    # NOTE(review): a test with this exact name appears elsewhere in this
    # file; within one TestCase the later definition shadows the earlier.
    actual = oxford_join(["a", "b", "c"], conjunction="or")
    self.assertEqual(actual, "a, b, or c")
def test_oxford_separator(self):
    """A custom separator appears between items and before the conjunction."""
    # NOTE(review): a test with this exact name appears elsewhere in this
    # file; within one TestCase the later definition shadows the earlier.
    actual = oxford_join(["a", "b", "c"], separator=";")
    self.assertEqual(actual, "a; b; and c")
def test_oxford_three_items(self):
    """Three items get comma separators and an Oxford comma."""
    # NOTE(review): a test with this exact name appears elsewhere in this
    # file; within one TestCase the later definition shadows the earlier.
    triple = ["a", "b", "c"]
    self.assertEqual(oxford_join(triple), "a, b, and c")
def test_oxford_one_item(self):
    """One item comes back as-is."""
    result = oxford_join(["a"])
    self.assertEqual(result, "a")
def test_oxford_two_items(self):
    """Exactly two items join with "and" alone."""
    result = oxford_join(["a", "b"])
    self.assertEqual(result, "a and b")
def test_oxford_separator(self):
    """The separator keyword changes the delimiter between items."""
    result = oxford_join(["a", "b", "c"], separator=";")
    self.assertEqual(result, "a; b; and c")
def process_recap_zip(self, pk):
    """Process a zip uploaded from a PACER district court

    The general process is to use our existing infrastructure. We open the
    zip, identify the documents inside, and then associate them with the
    rest of our collection.

    :param self: A celery task object
    :param pk: The PK of the ProcessingQueue object to process
    :return: A dict with "new_pqs" (PKs of the ProcessingQueue objects
        created, one per file in the zip) and "tasks" (the enqueued
        process_recap_pdf results, so tests can wait() on them).
    """
    pq = ProcessingQueue.objects.get(pk=pk)
    mark_pq_status(pq, "", PROCESSING_STATUS.IN_PROGRESS)
    logger.info("Processing RECAP zip (debug is: %s): %s", pq.debug, pq)
    with ZipFile(pq.filepath_local.path, "r") as archive:
        # Security: Check for zip bombs. Reject the whole upload if any
        # single member would expand past the size cap.
        max_file_size = convert_size_to_bytes("200MB")
        for zip_info in archive.infolist():
            if zip_info.file_size < max_file_size:
                continue
            mark_pq_status(
                pq,
                "Zip too large; possible zip bomb. File in zip named %s "
                "would be %s bytes expanded."
                % (zip_info.filename, zip_info.file_size),
                PROCESSING_STATUS.INVALID_CONTENT,
            )
            return {"new_pqs": [], "tasks": []}

        # For each document in the zip, create a new PQ
        new_pqs = []
        tasks = []
        for file_name in archive.namelist():
            file_content = archive.read(file_name)
            f = SimpleUploadedFile(file_name, file_content)

            # Strip the ".pdf" suffix; names appear to follow "<doc>" or
            # "<doc>-<att>" (or "<doc>-main" for the main document).
            # NOTE(review): a name containing more than one hyphen would
            # make the two-value unpacking below raise ValueError — confirm
            # upstream guarantees at most one hyphen per filename.
            file_name = file_name.split(".pdf")[0]
            if "-" in file_name:
                doc_num, att_num = file_name.split("-")
                if att_num == "main":
                    att_num = None
            else:
                doc_num = file_name
                att_num = None

            if att_num:
                # An attachment, therefore nuke the pacer_doc_id value,
                # since it corresponds to the main doc only.
                pacer_doc_id = ""
            else:
                pacer_doc_id = pq.pacer_doc_id

            # Create a new PQ and enqueue it for processing
            new_pq = ProcessingQueue.objects.create(
                court=pq.court,
                uploader=pq.uploader,
                pacer_case_id=pq.pacer_case_id,
                pacer_doc_id=pacer_doc_id,
                document_number=doc_num,
                attachment_number=att_num,
                filepath_local=f,
                status=PROCESSING_STATUS.ENQUEUED,
                upload_type=UPLOAD_TYPE.PDF,
                debug=pq.debug,
            )
            new_pqs.append(new_pq.pk)
            tasks.append(process_recap_pdf.delay(new_pq.pk))

    # At the end, mark the pq as successful and return the PQ
    mark_pq_status(
        pq,
        "Successfully created ProcessingQueue objects: %s"
        % oxford_join(new_pqs),
        PROCESSING_STATUS.SUCCESSFUL,
    )
    # Returning the tasks allows tests to wait() for the PDFs to complete
    # before checking assertions.
    return {
        "new_pqs": new_pqs,
        "tasks": tasks,
    }