def test_image_quality_setting1(tmpdir):
    """SDK and CLI video uploads of the same folder yield the same image count."""
    tmpdir = Path(tmpdir)
    # Clean up any leftover projects from previous runs, then re-create.
    for stale in sa.search_projects(PROJECT_NAME1, return_metadata=True):
        sa.delete_project(stale)
    sdk_project = sa.create_project(PROJECT_NAME1, "test", "Vector")
    sa.upload_videos_from_folder_to_project(
        sdk_project, "./tests/sample_videos", target_fps=2
    )
    for stale in sa.search_projects(PROJECT_NAME2, return_metadata=True):
        sa.delete_project(stale)
    sa.create_project(PROJECT_NAME2, "test", "Vector")
    # Same upload via the CLI into the second project.
    subprocess.run(
        f'superannotate upload-videos --project "{PROJECT_NAME2}"'
        ' --folder ./tests/sample_videos --target-fps 2',
        check=True,
        shell=True
    )
    time.sleep(5)  # give the backend time to register the CLI upload
    assert len(sa.search_images(PROJECT_NAME1)) == len(
        sa.search_images(PROJECT_NAME2)
    )
def test_recursive_preannotations_folder(tmpdir):
    """Recursive pre-annotation upload covers images in subfolders too."""
    tmpdir = Path(tmpdir)
    stale_projects = sa.search_projects(
        TEMP_PROJECT_NAME + "2", return_metadata=True
    )
    for stale in stale_projects:
        sa.delete_project(stale)
    project = sa.create_project(TEMP_PROJECT_NAME + "2", "test", "Vector")
    sa.upload_images_from_folder_to_project(
        project,
        "./tests/sample_recursive_test",
        annotation_status="QualityCheck",
        recursive_subfolders=True
    )
    assert len(sa.search_images(project)) == 2
    sa.create_annotation_classes_from_classes_json(
        project, "./tests/sample_recursive_test/classes/classes.json"
    )
    sa.upload_preannotations_from_folder_to_project(
        project, "./tests/sample_recursive_test", recursive_subfolders=True
    )
    # One pre-annotation JSON should come back per uploaded image.
    for image_name in sa.search_images(project):
        sa.download_image_preannotations(project, image_name, tmpdir)
    assert len(list(tmpdir.glob("*.json"))) == 2
def test_basic_export(tmpdir):
    """Export a project and re-import the result; image counts must match."""
    tmpdir = Path(tmpdir)
    for stale in sa.search_projects(PROJECT_NAME, return_metadata=True):
        sa.delete_project(stale)
    project = sa.create_project(PROJECT_NAME, "t", "Vector")
    sa.upload_images_from_folder_to_project(
        project, PROJECT_FOLDER, annotation_status="InProgress"
    )
    original_count = len(sa.search_images(project))
    sa.create_annotation_classes_from_classes_json(
        project, PROJECT_FOLDER / "classes" / "classes.json"
    )
    sa.upload_annotations_from_folder_to_project(project, PROJECT_FOLDER)
    export = sa.prepare_export(project, include_fuse=True)
    sa.download_export(project, export, tmpdir)
    # Round-trip: upload the exported folder into a fresh project.
    import_name = PROJECT_NAME + " import"
    for stale in sa.search_projects(import_name, return_metadata=True):
        sa.delete_project(stale)
    reimported = sa.create_project(import_name, "f", "Vector")
    sa.upload_images_from_folder_to_project(
        reimported, tmpdir, annotation_status="InProgress"
    )
    assert len(sa.search_images(reimported)) == original_count
def test_preannotations_nonrecursive_s3_folder(tmpdir):
    """Non-recursive pre-annotation upload from an S3 bucket downloads cleanly."""
    tmpdir = Path(tmpdir)
    stale_projects = sa.search_projects(
        TEMP_PROJECT_NAME + "7", return_metadata=True
    )
    for stale in stale_projects:
        sa.delete_project(stale)
    project = sa.create_project(TEMP_PROJECT_NAME + "7", "test", "Vector")
    # Images are uploaded recursively, but pre-annotations below are not.
    sa.upload_images_from_folder_to_project(
        project,
        "sample_recursive_test",
        from_s3_bucket="superannotate-python-sdk-test",
        recursive_subfolders=True
    )
    assert len(sa.search_images(project)) == 2
    sa.create_annotation_classes_from_classes_json(
        project,
        "sample_recursive_test/classes/classes.json",
        from_s3_bucket="superannotate-python-sdk-test"
    )
    sa.upload_preannotations_from_folder_to_project(
        project,
        "sample_recursive_test",
        recursive_subfolders=False,
        from_s3_bucket="superannotate-python-sdk-test"
    )
    for image_name in sa.search_images(project):
        sa.download_image_preannotations(project, image_name, tmpdir)
def test_cli_image_upload_project_export(tmpdir):
    """CLI image upload (flat and recursive) and CLI export, with/without fuse."""
    tmpdir = Path(tmpdir)
    for stale in sa.search_projects(PROJECT_NAME, return_metadata=True):
        sa.delete_project(stale)
    subprocess.run(
        f'superannotate create-project --name "{PROJECT_NAME}"'
        ' --description gg --type Vector',
        check=True,
        shell=True
    )
    project = PROJECT_NAME
    sa.create_annotation_classes_from_classes_json(
        PROJECT_NAME, "./tests/sample_recursive_test/classes/classes.json"
    )
    # Flat upload: only the top-level jpg is picked up.
    subprocess.run(
        f'superannotate upload-images --project "{PROJECT_NAME}"'
        ' --folder ./tests/sample_recursive_test --extensions=jpg'
        ' --set-annotation-status QualityCheck',
        check=True,
        shell=True
    )
    time.sleep(1)
    assert len(sa.search_images(project)) == 1
    # Recursive upload adds the nested jpg as well.
    subprocess.run(
        f'superannotate upload-images --project "{PROJECT_NAME}"'
        ' --folder ./tests/sample_recursive_test --extensions=jpg --recursive',
        check=True,
        shell=True
    )
    time.sleep(1)
    assert len(sa.search_images(project)) == 2
    sa.upload_annotations_from_folder_to_project(
        project, "./tests/sample_recursive_test"
    )
    # Plain export: JSON only, no fuse/original images.
    subprocess.run(
        f'superannotate export-project --project "{PROJECT_NAME}" --folder {tmpdir}',
        check=True,
        shell=True
    )
    assert len(list(tmpdir.glob("*.json"))) == 1
    assert len(list(tmpdir.glob("*.jpg"))) == 0
    assert len(list(tmpdir.glob("*.png"))) == 0
    time.sleep(60)  # wait before requesting a second export
    # Export with fuse: jpg and png artifacts appear alongside the JSON.
    subprocess.run(
        f'superannotate export-project --project "{PROJECT_NAME}"'
        f' --folder {tmpdir} --include-fuse',
        check=True,
        shell=True
    )
    assert len(list(tmpdir.glob("*.json"))) == 1
    assert len(list(tmpdir.glob("*.jpg"))) == 1
    assert len(list(tmpdir.glob("*.png"))) == 1
def test_run_prediction():
    """Smart prediction: error cases and successful runs on Vector and Pixel projects.

    Bug fix: the original asserted ``str(e) == "..."`` where ``e`` is a pytest
    ``ExceptionInfo``; ``str(e)`` includes file/line context, so the equality
    could never hold.  Use ``e.value`` (the actual exception) instead, and a
    substring check to be robust to message framing.
    """
    upload_project(
        Path(PROJECT_PATH_VECTOR), PROJECT_NAME_VECTOR,
        "Test for ml functionality", "Vector"
    )
    upload_project(
        Path(PROJECT_PATH_PIXEL), PROJECT_NAME_PIXEL_PREDICTION,
        "Test for ml functionality", "Pixel"
    )
    # Provided images do not exist in the project.
    with pytest.raises(SABaseException) as e:
        sa.run_prediction(
            PROJECT_NAME_VECTOR, ["NonExistantImage.jpg"], MODEL_NAME
        )
    assert "No valid image names were provided" in str(e.value)
    # The 'project' argument must not span multiple projects.
    with pytest.raises(SABaseException) as e:
        sa.run_prediction(
            [PROJECT_NAME_VECTOR, PROJECT_NAME_PIXEL_PREDICTION],
            ["DoesntMatter.jpg"], MODEL_NAME
        )
    assert (
        "smart prediction cannot be run on images from different projects simultaneously"
        in str(e.value)
    )
    # Prediction runs on all available (valid) images; unknown names are
    # silently dropped, so succeeded + failed always equals the valid count.
    image_names_pixel = sa.search_images(PROJECT_NAME_PIXEL_PREDICTION)
    image_names_vector = sa.search_images(PROJECT_NAME_VECTOR)
    succeeded_imgs, failed_imgs = sa.run_prediction(
        PROJECT_NAME_VECTOR, image_names_vector[:4], MODEL_NAME
    )
    assert (len(succeeded_imgs) + len(failed_imgs)) == 4
    succeeded_imgs, failed_imgs = sa.run_prediction(
        PROJECT_NAME_PIXEL_PREDICTION, image_names_pixel[:4], MODEL_NAME
    )
    assert (len(succeeded_imgs) + len(failed_imgs)) == 4
    succeeded_imgs, failed_imgs = sa.run_prediction(
        PROJECT_NAME_PIXEL_PREDICTION, image_names_pixel[:4] + ["NA.jpg"],
        MODEL_NAME
    )
    assert (len(succeeded_imgs) + len(failed_imgs)) == 4
    succeeded_imgs, failed_imgs = sa.run_prediction(
        PROJECT_NAME_VECTOR, image_names_vector[:4] + ["NA.jpg"], MODEL_NAME
    )
    assert (len(succeeded_imgs) + len(failed_imgs)) == 4
def test_add_bbox_noinit(tmpdir):
    """Add bbox/polygon annotations to an image with no prior annotation JSON.

    NOTE(review): another test with this exact name exists later in this file;
    under pytest collection the later definition shadows this one — consider
    renaming.
    """
    tmpdir = Path(tmpdir)
    stale_projects = sa.search_projects(
        PROJECT_NAME_NOINIT, return_metadata=True
    )
    for stale in stale_projects:
        sa.delete_project(stale)
    project = sa.create_project(
        PROJECT_NAME_NOINIT, PROJECT_DESCRIPTION, "Vector"
    )
    sa.upload_images_from_folder_to_project(
        project, PATH_TO_SAMPLE_PROJECT, annotation_status="InProgress"
    )
    sa.create_annotation_classes_from_classes_json(
        project, PATH_TO_SAMPLE_PROJECT / "classes" / "classes.json"
    )
    sa.create_annotation_class(project, "test_add", "#FF0000")
    image_name = sa.search_images(project, "example_image_1")[0]
    sa.add_annotation_bbox_to_image(
        project, image_name, [10, 10, 500, 100], "test_add"
    )
    sa.add_annotation_polygon_to_image(
        project, image_name, [100, 100, 500, 500, 200, 300], "test_add"
    )
    annotations = sa.get_image_annotations(project, image_name)["annotation_json"]
    assert len(annotations) == 2
    export = sa.prepare_export(project, include_fuse=True)
    sa.download_export(project, export, tmpdir)
    assert len(list(Path(tmpdir).rglob("*.*"))) == 4
def test_vector_preannotation_upload_from_s3(tmpdir):
    """Vector pre-annotations uploaded from S3 download back one-per-image."""
    for stale in sa.search_projects(TEST_PROJECT3, return_metadata=True):
        sa.delete_project(stale)
    project = sa.create_project(TEST_PROJECT3, "hk_test", project_type="Vector")
    # Split the s3:// URL into bucket (netloc) and key prefix (path).
    s3_url = urlparse(f"s3://superannotate-python-sdk-test/{TEST_PROJECT_VECTOR}")
    bucket = s3_url.netloc
    prefix = s3_url.path[1:]  # drop the leading '/'
    sa.upload_images_from_folder_to_project(
        project, prefix, annotation_status="NotStarted", from_s3_bucket=bucket
    )
    sa.create_annotation_classes_from_classes_json(
        project, prefix + '/classes/classes.json', from_s3_bucket=bucket
    )
    assert sa.get_project_image_count(project) == 4
    sa.upload_preannotations_from_folder_to_project(
        project, TEST_PROJECT_VECTOR, from_s3_bucket=bucket
    )
    for image_name in sa.search_images(project):
        sa.download_image_preannotations(project, image_name, tmpdir)
    assert len(list(Path(tmpdir).glob("*.*"))) == 4
    sa.delete_project(project)
def test_preannotation_folder_upload_download_cli_vector_COCO(tmpdir):
    """CLI upload of COCO keypoint pre-annotations into a Vector project."""
    project_type = "Vector"
    name = "Example Project test vector2 preannotation cli upload coco vector"
    description = "test"
    from_folder = (
        "./tests/converter_test/COCO/input/toSuperAnnotate/keypoint_detection"
    )
    task = "keypoint_detection"
    dataset_name = "person_keypoints_test"
    for stale in sa.search_projects(name, return_metadata=True):
        sa.delete_project(stale)
    project = sa.create_project(name, description, project_type)
    sa.upload_images_from_folder_to_project(
        project, from_folder, annotation_status="InProgress"
    )
    subprocess.run(
        f'superannotatecli upload-preannotations --project "{name}"'
        f' --folder "{from_folder}" --format COCO --task {task}'
        f' --dataset-name {dataset_name}',
        check=True,
        shell=True
    )
    time.sleep(5)  # let the CLI upload settle
    expected_count = 2
    for image_name in sa.search_images(project):
        sa.download_image_preannotations(project, image_name, tmpdir)
    downloaded_count = len(list(Path(tmpdir).glob("*.json")))
    assert expected_count == downloaded_count
def test_direct_s3_upload_folder():
    """Upload directly from S3 into a project folder; counts must match S3."""
    for stale in sa.search_projects(TEST_PROJECT_NAME2, return_metadata=True):
        sa.delete_project(stale)
    project = sa.create_project(TEST_PROJECT_NAME2, "a", "Vector")
    print(project["id"])
    # Read AWS credentials straight from the shared credentials file.
    cred_lines = (Path.home() / ".aws" / "credentials").read_text().splitlines()
    access_key_id = cred_lines[1].split(" = ")[1]
    access_secret = cred_lines[2].split(" = ")[1]
    sa.create_folder(project, "folder1")
    project_folder = project["name"] + "/folder1"
    sa.upload_images_from_s3_bucket_to_project(
        project_folder, access_key_id, access_secret, S3_BUCKET, S3_FOLDER
    )
    # Enumerate the jpg/png keys actually present under the S3 prefix.
    s3_client = boto3.client('s3')
    paginator = s3_client.get_paginator('list_objects_v2')
    keys_on_s3 = []
    for page in paginator.paginate(Bucket=S3_BUCKET, Prefix=S3_FOLDER):
        for object_data in page.get('Contents', []):
            key = object_data['Key']
            if key[-4:] in [".jpg", ".png"]:
                keys_on_s3.append(key)
    assert len(keys_on_s3) == len(sa.search_images(project_folder))
def test_upload_images_from_public_to_project_with_image_name():
    """Uploading the same public URL under five distinct names keeps all five."""
    PROJECT_NAME = 'test_public_links_upload2'
    url = (
        'https://images.pexels.com/photos/3702354/pexels-photo-3702354.jpeg'
        '?auto=compress&cs=tinysrgb&dpr=2&h=650&w=940'
    )
    test_img_list = [url] * 5
    img_name_list = ['img1.jpg', 'img2.jpg', 'img3.jpg', 'img4.jpg', 'img5.jpg']
    if sa.search_projects(PROJECT_NAME) != []:
        sa.delete_project(PROJECT_NAME)
    proj_data = sa.create_project(PROJECT_NAME, "test", "Vector")
    (
        uploaded_urls, uploaded_filenames, duplicate_filenames,
        not_uploaded_urls
    ) = sa.upload_images_from_public_urls_to_project(
        proj_data,
        test_img_list,
        img_name_list,
        annotation_status='InProgress',
        image_quality_in_editor="original"
    )
    images_in_project = sa.search_images(
        proj_data, annotation_status='InProgress'
    )
    # Distinct target names mean no duplicates despite identical source URLs.
    assert len(uploaded_urls) == 5
    assert len(duplicate_filenames) == 0
    assert len(uploaded_filenames) == 5
    assert len(not_uploaded_urls) == 0
def test_preannotation_folder_upload_download_cli_pixel_object_COCO_folder(
        tmpdir):
    """CLI upload of COCO panoptic pre-annotations into a project subfolder."""
    project_type = "Pixel"
    name = (
        "Example Project folder test pixel1 preannotation cli upload"
        " coco object pixel"
    )
    description = "test"
    from_folder = (
        "./tests/converter_test/COCO/input/toSuperAnnotate/panoptic_segmentation"
    )
    task = "panoptic_segmentation"
    dataset_name = "panoptic_test"
    for stale in sa.search_projects(name, return_metadata=True):
        sa.delete_project(stale)
    project = sa.create_project(name, description, project_type)
    sa.create_folder(project, "folder1")
    # Target a subfolder via the "project/folder" path convention.
    project_with_folder = project["name"] + "/folder1"
    sa.upload_images_from_folder_to_project(
        project_with_folder, from_folder, annotation_status="InProgress"
    )
    subprocess.run(
        f'superannotatecli upload-preannotations'
        f' --project "{project_with_folder}" --folder "{from_folder}"'
        f' --format COCO --task {task} --dataset-name {dataset_name}',
        check=True,
        shell=True
    )
    time.sleep(5)  # let the CLI upload settle
    expected_count = 3
    for image_name in sa.search_images(project_with_folder):
        sa.download_image_preannotations(project_with_folder, image_name, tmpdir)
    downloaded_count = len(list(Path(tmpdir).glob("*.json")))
    assert expected_count == downloaded_count
def test_upload_images_from_public_urls_to_project():
    """Mixed valid/duplicate/broken public URLs: verify the four result buckets."""
    PROJECT_NAME = 'test_public_links_upload1'
    test_img_list = [
        'https://images.pexels.com/photos/3702354/pexels-photo-3702354.jpeg'
        '?auto=compress&cs=tinysrgb&dpr=2&h=650&w=940',
        'https://www.pexels.com/photo/5450829/download/',
        'https://www.pexels.com/photo/3702354/download/',
        'https://www.pexels.com/photo/3702354/download/',  # duplicate of previous
        'https://www.pexels.com/photo/3702354/dwnload/',   # broken path
        '',                                                # empty string
        'test_non_url'                                     # not a URL at all
    ]
    if sa.search_projects(PROJECT_NAME) != []:
        sa.delete_project(PROJECT_NAME)
    proj_data = sa.create_project(PROJECT_NAME, "test", "Vector")
    (
        uploaded_urls, uploaded_filenames, duplicate_filenames,
        not_uploaded_urls
    ) = sa.upload_images_from_public_urls_to_project(
        proj_data,
        test_img_list,
        annotation_status='InProgress',
        image_quality_in_editor="original"
    )
    images_in_project = sa.search_images(
        proj_data, annotation_status='InProgress'
    )
    # 3 good uploads, 1 duplicate skipped, 3 failures (bad path, empty, non-URL).
    assert len(uploaded_urls) == 3
    assert len(duplicate_filenames) == 1
    assert len(uploaded_filenames) == 3
    assert len(not_uploaded_urls) == 3
    for image_name in images_in_project:
        assert image_name in uploaded_filenames
def test_single_image_upload_bytesio(tmpdir):
    """Uploading a BytesIO image requires an explicit image_name."""
    tmpdir = Path(tmpdir)
    stale_projects = sa.search_projects(PROJECT_NAME_BYTES, return_metadata=True)
    for stale in stale_projects:
        sa.delete_project(stale)
    project = sa.create_project(PROJECT_NAME_BYTES, "test", "Vector")
    with open("./tests/sample_project_vector/example_image_1.jpg", "rb") as f:
        img = io.BytesIO(f.read())
    # Without image_name the SDK must refuse a non-Pathlike image.
    try:
        sa.upload_image_to_project(project, img, annotation_status="InProgress")
    except sa.SABaseException as e:
        assert e.message == "Image name img_name should be set if img is not Pathlike"
    else:
        assert False  # no exception raised — the SDK accepted an unnamed stream
    sa.upload_image_to_project(
        project, img, image_name="rr.jpg", annotation_status="InProgress"
    )
    images = sa.search_images(project)
    assert len(images) == 1
    uploaded = images[0]
    status = sa.annotation_status_int_to_str(
        sa.get_image_metadata(project, uploaded)["annotation_status"]
    )
    assert status == "InProgress"
    assert uploaded == "rr.jpg"
def test_annotation_download_upload(project_type, name, description,
                                    from_folder, tmpdir):
    """Round-trip annotation upload/download; JSON must match modulo classId.

    NOTE(review): a second test with this exact name appears later in this
    file; under pytest collection the later definition shadows this one.
    """
    for stale in sa.search_projects(name, return_metadata=True):
        sa.delete_project(stale)
    project = sa.create_project(name, description, project_type)
    sa.upload_images_from_folder_to_project(
        project, from_folder, annotation_status="NotStarted"
    )
    sa.create_annotation_classes_from_classes_json(
        project, from_folder / "classes" / "classes.json"
    )
    sa.upload_annotations_from_folder_to_project(project, from_folder)
    image_name = sa.search_images(project)[2]
    sa.download_image_annotations(project, image_name, tmpdir)
    downloaded_jsons = list(Path(tmpdir).glob("*.json"))
    downloaded_masks = list(Path(tmpdir).glob("*.png"))
    assert len(downloaded_jsons) == 1
    # Pixel projects carry a mask PNG alongside the JSON; Vector do not.
    assert len(downloaded_masks) == (1 if project_type == "Pixel" else 0)
    input_annotation_paths = sa.image_path_to_annotation_paths(
        from_folder / image_name, project_type
    )
    original_json = json.load(open(input_annotation_paths[0]))
    downloaded_json = json.load(open(downloaded_jsons[0]))
    # classId is assigned server-side, so strip it before comparing.
    for instance in original_json:
        instance.pop("classId", None)
    for instance in downloaded_json:
        instance.pop("classId", None)
    assert original_json == downloaded_json
    if project_type == "Pixel":
        assert filecmp.cmp(
            input_annotation_paths[1], downloaded_masks[0], shallow=False
        )
def test_preannotation_folder_upload_download_cli(project_type, name,
                                                  description, from_folder,
                                                  tmpdir):
    """CLI pre-annotation upload for a folder; download count must match input.

    Bug fix: the original passed a one-element *list* to ``subprocess.run``
    together with ``shell=True``.  On POSIX, list elements after the first are
    handed to the shell itself rather than the command, which silently breaks
    the moment a second element is added.  With ``shell=True`` the command
    must be a plain string (as the sibling CLI tests in this file do).
    """
    for stale in sa.search_projects(name, return_metadata=True):
        sa.delete_project(stale)
    project = sa.create_project(name, description, project_type)
    sa.upload_images_from_folder_to_project(
        project, from_folder, annotation_status="InProgress"
    )
    sa.create_annotation_classes_from_classes_json(
        project, from_folder / "classes" / "classes.json"
    )
    subprocess.run(
        f"superannotate upload-preannotations --project '{name}'"
        f" --folder '{from_folder}'",
        check=True,
        shell=True
    )
    time.sleep(5)  # let the CLI upload settle
    count_in = len(list(from_folder.glob("*.json")))
    for image_name in sa.search_images(project):
        sa.download_image_preannotations(project, image_name, tmpdir)
    count_out = len(list(Path(tmpdir).glob("*.json")))
    assert count_in == count_out
def test_image_move(tmpdir):
    """Moving an image: same-project moves are rejected; cross-project works."""
    tmpdir = Path(tmpdir)
    for stale in sa.search_projects(PROJECT_NAME_MOVE, return_metadata=True):
        sa.delete_project(stale)
    source_project = sa.create_project(PROJECT_NAME_MOVE, "test", "Vector")
    sa.upload_image_to_project(
        source_project,
        "./tests/sample_project_vector/example_image_1.jpg",
        annotation_status="InProgress")
    sa.upload_image_to_project(
        source_project,
        "./tests/sample_project_vector/example_image_2.jpg",
        annotation_status="InProgress")
    images = sa.search_images(source_project)
    assert len(images) == 2
    image_name = images[0]
    # Moving within the same project must raise.
    try:
        sa.move_image(source_project, image_name, source_project)
    except sa.SABaseException as e:
        assert e.message == "Cannot move image if source_project == destination_project."
    else:
        assert False  # same-project move was not rejected
    dest_name = PROJECT_NAME_MOVE + "dif"
    for stale in sa.search_projects(dest_name, return_metadata=True):
        sa.delete_project(stale)
    dest_project = sa.create_project(dest_name, "test", "Vector")
    sa.move_image(source_project, image_name, dest_project)
    # The image exists in the destination and is gone from the source.
    found_in_dest = sa.search_images(dest_project, image_name)
    assert len(found_in_dest) == 1
    assert found_in_dest[0] == image_name
    assert len(sa.search_images(source_project, image_name)) == 0
    assert len(sa.search_images(source_project)) == 1
def test_image_copy_mult(tmpdir):
    """Repeated in-project copies keep annotations, status and pin state."""
    tmpdir = Path(tmpdir)
    stale_projects = sa.search_projects(
        PROJECT_NAME_CPY_MULT, return_metadata=True
    )
    for stale in stale_projects:
        sa.delete_project(stale)
    project = sa.create_project(PROJECT_NAME_CPY_MULT, "test", "Vector")
    sa.upload_image_to_project(
        project,
        "./tests/sample_project_vector/example_image_1.jpg",
        annotation_status="InProgress")
    sa.create_annotation_classes_from_classes_json(
        project, "./tests/sample_project_vector/classes/classes.json")
    sa.upload_image_annotations(
        project, "example_image_1.jpg",
        "./tests/sample_project_vector/example_image_1.jpg___objects.json")
    sa.upload_image_to_project(
        project,
        "./tests/sample_project_vector/example_image_2.jpg",
        annotation_status="InProgress")
    sa.pin_image(project, "example_image_1.jpg")
    images = sa.search_images(project)
    assert len(images) == 2
    source_image = images[0]
    # Copy the same image three times into the same project.
    for _ in range(3):
        sa.copy_image(
            project,
            source_image,
            project,
            include_annotations=True,
            copy_annotation_status=True,
            copy_pin=True)
    assert len(sa.search_images(project)) == 5
    images = sa.search_images(project)
    # Each copy gets a "_(<n>)" suffix and preserves annotations and pin.
    for copy_index in range(1, 4):
        copy_name = f"example_image_1_({copy_index}).jpg"
        assert copy_name in images
        anns = sa.get_image_annotations(project, copy_name)
        assert anns["annotation_json"] is not None
        metadata = sa.get_image_metadata(project, copy_name)
        assert metadata["is_pinned"] == 1
def test_video(tmpdir):
    """Video upload via SDK, CLI and into a project folder gives equal counts."""
    tmpdir = Path(tmpdir)
    for stale in sa.search_projects(PROJECT_NAME1, return_metadata=True):
        sa.delete_project(stale)
    sdk_project = sa.create_project(PROJECT_NAME1, "test", "Vector")
    time.sleep(1)
    sa.create_annotation_class(sdk_project, "fr", "#FFAAAA")
    time.sleep(1)
    sa.create_annotation_class(sdk_project, "fr2", "#FFAACC")
    sa.upload_videos_from_folder_to_project(
        sdk_project, "./tests/sample_videos", target_fps=2
    )
    for stale in sa.search_projects(PROJECT_NAME2, return_metadata=True):
        sa.delete_project(stale)
    cli_project = sa.create_project(PROJECT_NAME2, "test", "Vector")
    subprocess.run(
        f'superannotatecli upload-videos --project "{PROJECT_NAME2}"'
        ' --folder ./tests/sample_videos --target-fps 2',
        check=True,
        shell=True
    )
    time.sleep(5)  # let the CLI upload settle
    sa.create_annotation_class(cli_project, "fr2", "#FFAACC")
    assert len(sa.search_images(PROJECT_NAME1)) == len(
        sa.search_images(PROJECT_NAME2)
    )
    # Upload into a subfolder of the CLI project and compare again.
    sa.create_folder(cli_project, "new folder")
    sa.upload_videos_from_folder_to_project(
        PROJECT_NAME2 + "/new folder", "./tests/sample_videos", target_fps=2
    )
    assert len(sa.search_images(PROJECT_NAME2 + "/new folder")) == len(
        sa.search_images(PROJECT_NAME2)
    )
def test_image_copy(tmpdir):
    """Copying an image appends "_(<n>)" in-project and keeps the name across projects."""
    tmpdir = Path(tmpdir)
    for stale in sa.search_projects(PROJECT_NAME_CPY, return_metadata=True):
        sa.delete_project(stale)
    project = sa.create_project(PROJECT_NAME_CPY, "test", "Vector")
    sa.upload_image_to_project(
        project,
        "./tests/sample_project_vector/example_image_1.jpg",
        annotation_status="InProgress")
    sa.upload_image_to_project(
        project,
        "./tests/sample_project_vector/example_image_2.jpg",
        annotation_status="InProgress")
    images = sa.search_images(project)
    assert len(images) == 2
    image_name = images[0]
    # First in-project copy produces "..._(1).jpg".
    sa.copy_image(project, image_name, project)
    assert len(sa.search_images(project)) == 3
    image_name = "example_image_1_(1).jpg"
    assert len(sa.search_images(project, image_name)) == 1
    # Copying the copy produces "..._(2).jpg".
    sa.copy_image(project, image_name, project)
    image_name = "example_image_1_(2).jpg"
    assert len(sa.search_images(project, image_name)) == 1
    dest_name = PROJECT_NAME_CPY + "dif"
    for stale in sa.search_projects(dest_name, return_metadata=True):
        sa.delete_project(stale)
    dest_project = sa.create_project(dest_name, "test", "Vector")
    # Cross-project copy keeps the image name unchanged.
    sa.copy_image(project, image_name, dest_project)
    found_in_dest = sa.search_images(dest_project, image_name)
    assert len(found_in_dest) == 1
    assert found_in_dest[0] == image_name
def test_annotation_download_upload(project_type, name, description,
                                    from_folder, tmpdir):
    """Round-trip annotation upload/download; JSON must match modulo server IDs.

    Improvements: removed the large block of commented-out dead code (the
    setup it performed is done by ``upload_project``), and factored the
    duplicated ID-stripping loops into a helper.
    """

    def _strip_server_ids(ann_json):
        # classId / attribute groupId / attribute id are assigned server-side
        # and differ between upload and download, so drop them before compare.
        for instance in ann_json["instances"]:
            instance.pop("classId", None)
            for attribute in instance["attributes"]:
                attribute.pop("groupId", None)
                attribute.pop("id", None)

    project = upload_project(from_folder, name, description, project_type)
    image = sa.search_images(project)[0]
    paths = sa.download_image_annotations(project, image, tmpdir)
    input_annotation_paths_after = sa.image_path_to_annotation_paths(
        tmpdir / image, project_type)
    assert paths[0] == str(input_annotation_paths_after[0])
    if project_type == "Pixel":
        assert paths[1] == str(input_annotation_paths_after[1])
    else:
        assert len(paths) == 1
    anns_json_in_folder = list(Path(tmpdir).glob("*.json"))
    anns_mask_in_folder = list(Path(tmpdir).glob("*.png"))
    assert len(anns_json_in_folder) == 1
    # Pixel projects carry a mask PNG alongside the JSON; Vector do not.
    assert len(anns_mask_in_folder) == (1 if project_type == "Pixel" else 0)
    input_annotation_paths = sa.image_path_to_annotation_paths(
        from_folder / image, project_type)
    json1 = json.load(open(input_annotation_paths[0]))
    json2 = json.load(open(anns_json_in_folder[0]))
    _strip_server_ids(json1)
    _strip_server_ids(json2)
    assert json1 == json2
    if project_type == "Pixel":
        assert filecmp.cmp(
            input_annotation_paths[1], anns_mask_in_folder[0], shallow=False)
def test_export_s3(tmpdir):
    """Export to an S3 bucket and locally; both destinations must match.

    Improvement: removed the large block of commented-out dead code (the
    project setup it performed is done by ``upload_project``).
    """
    # Clear any previous export artifacts under the S3 prefix.
    paginator = s3_client.get_paginator('list_objects_v2')
    response_iterator = paginator.paginate(Bucket=S3_BUCKET, Prefix=S3_PREFIX2)
    for response in response_iterator:
        if 'Contents' in response:
            for object_data in response['Contents']:
                s3_client.delete_object(Bucket=S3_BUCKET, Key=object_data['Key'])
    tmpdir = Path(tmpdir)
    project = upload_project(
        Path("./tests/sample_project_vector"),
        PROJECT_NAME_EXPORT,
        'test',
        'Vector',
        annotation_status='InProgress')
    for img in sa.search_images(project):
        sa.set_image_annotation_status(project, img, 'QualityCheck')
    new_export = sa.prepare_export(project, include_fuse=True)
    # Export straight into S3 ...
    sa.download_export(project, new_export, S3_PREFIX2, to_s3_bucket=S3_BUCKET)
    files = []
    response_iterator = paginator.paginate(Bucket=S3_BUCKET, Prefix=S3_PREFIX2)
    for response in response_iterator:
        if 'Contents' in response:
            for object_data in response['Contents']:
                files.append(object_data['Key'])
    # ... and into a local folder, then compare file counts.
    output_path = tmpdir / S3_PREFIX2
    output_path.mkdir()
    sa.download_export(project, new_export, output_path)
    local_files = list(output_path.rglob("*.*"))
    assert len(local_files) == len(files)
def test_from_s3_upload():
    """Upload jpg images and classes from an S3 bucket into a fresh project."""
    for stale in sa.search_projects(PROJECT_NAME_UPLOAD, return_metadata=True):
        sa.delete_project(stale)
    project = sa.create_project(PROJECT_NAME_UPLOAD, "hk", "Vector")
    # NOTE(review): S3_BUCKET is passed positionally here, unlike the other
    # tests which use from_s3_bucket= — confirm the third positional
    # parameter of create_annotation_classes_from_classes_json is the bucket.
    sa.create_annotation_classes_from_classes_json(
        project, "frex9/classes/classes.json", S3_BUCKET)
    sa.upload_images_from_folder_to_project(
        project,
        S3_PREFIX, ["jpg"],
        annotation_status="QualityCheck",
        from_s3_bucket=S3_BUCKET)
    assert len(sa.search_images(project)) == 4
def test_images_nonrecursive(tmpdir):
    """Non-recursive local upload picks up only the top-level image."""
    tmpdir = Path(tmpdir)
    stale_projects = sa.search_projects(
        TEMP_PROJECT_NAME + "9", return_metadata=True
    )
    for stale in stale_projects:
        sa.delete_project(stale)
    project = sa.create_project(TEMP_PROJECT_NAME + "9", "test", "Vector")
    sa.upload_images_from_folder_to_project(
        project, "./tests/sample_recursive_test", recursive_subfolders=False
    )
    # The subfolder image must not have been uploaded.
    assert len(sa.search_images(project)) == 1
def test_annotations_nonrecursive_s3_folder(tmpdir):
    """Non-recursive S3 annotation upload leaves subfolder images unannotated."""
    tmpdir = Path(tmpdir)
    stale_projects = sa.search_projects(
        TEMP_PROJECT_NAME + "5", return_metadata=True
    )
    for stale in stale_projects:
        sa.delete_project(stale)
    project = sa.create_project(TEMP_PROJECT_NAME + "5", "test", "Vector")
    # Images are uploaded recursively, but annotations below are not.
    sa.upload_images_from_folder_to_project(
        project,
        "sample_recursive_test",
        annotation_status="QualityCheck",
        from_s3_bucket="superannotate-python-sdk-test",
        recursive_subfolders=True
    )
    assert len(sa.search_images(project)) == 2
    sa.create_annotation_classes_from_classes_json(
        project,
        "sample_recursive_test/classes/classes.json",
        from_s3_bucket="superannotate-python-sdk-test"
    )
    sa.upload_annotations_from_folder_to_project(
        project,
        "sample_recursive_test",
        recursive_subfolders=False,
        from_s3_bucket="superannotate-python-sdk-test"
    )
    export = sa.prepare_export(project)
    time.sleep(1)
    sa.download_export(project, export, tmpdir)
    # Only the top-level image should carry instances in the export.
    non_empty = 0
    for json_file in tmpdir.glob("*.json"):
        exported = json.load(open(json_file))
        if exported.get("instances"):
            non_empty += 1
    assert non_empty == 1
def test_images_nonrecursive_s3(tmpdir):
    """Non-recursive S3 upload picks up only the top-level image."""
    tmpdir = Path(tmpdir)
    stale_projects = sa.search_projects(
        TEMP_PROJECT_NAME + "8", return_metadata=True
    )
    for stale in stale_projects:
        sa.delete_project(stale)
    project = sa.create_project(TEMP_PROJECT_NAME + "8", "test", "Vector")
    sa.upload_images_from_folder_to_project(
        project,
        "sample_recursive_test",
        from_s3_bucket="superannotate-python-sdk-test",
        recursive_subfolders=False
    )
    # The subfolder image must not have been uploaded.
    assert len(sa.search_images(project)) == 1
def test_single_image_upload_s3(tmpdir):
    """Single image upload from an S3 bucket keeps the requested status."""
    tmpdir = Path(tmpdir)
    for stale in sa.search_projects(PROJECT_NAME_S3, return_metadata=True):
        sa.delete_project(stale)
    project = sa.create_project(PROJECT_NAME_S3, "test", "Vector")
    sa.upload_image_to_project(
        project,
        "sample_project_vector/example_image_1.jpg",
        annotation_status="InProgress",
        from_s3_bucket="superannotate-python-sdk-test")
    images = sa.search_images(project)
    assert len(images) == 1
    uploaded = images[0]
    assert sa.get_image_metadata(project, uploaded)["annotation_status"] \
        == "InProgress"
def test_single_image_upload(tmpdir):
    """Single local image upload keeps the file name and requested status."""
    tmpdir = Path(tmpdir)
    stale_projects = sa.search_projects(PROJECT_NAME, return_metadata=True)
    print(stale_projects)
    for stale in stale_projects:
        sa.delete_project(stale)
    project = sa.create_project(PROJECT_NAME, "test", "Vector")
    sa.upload_image_to_project(
        project,
        "./tests/sample_project_vector/example_image_1.jpg",
        annotation_status="InProgress")
    images = sa.search_images(project)
    assert len(images) == 1
    uploaded = images[0]
    assert sa.get_image_metadata(project, uploaded)["annotation_status"] \
        == "InProgress"
    assert uploaded == "example_image_1.jpg"
def test_add_bbox_noinit(tmpdir):
    """Add bbox/polygon to an un-annotated image; export contains both instances.

    NOTE(review): an earlier test in this file has this exact name; this later
    definition shadows it under pytest collection — consider renaming one.
    """
    tmpdir = Path(tmpdir)
    stale_projects = sa.search_projects(
        PROJECT_NAME_NOINIT, return_metadata=True
    )
    for stale in stale_projects:
        sa.delete_project(stale)
    project = sa.create_project(
        PROJECT_NAME_NOINIT, PROJECT_DESCRIPTION, "Vector"
    )
    sa.upload_images_from_folder_to_project(
        project, PATH_TO_SAMPLE_PROJECT, annotation_status="InProgress"
    )
    sa.create_annotation_classes_from_classes_json(
        project, PATH_TO_SAMPLE_PROJECT / "classes" / "classes.json"
    )
    sa.create_annotation_class(project, "test_add", "#FF0000")
    image_name = sa.search_images(project, "example_image_1")[0]
    sa.add_annotation_bbox_to_image(
        project, image_name, [10, 10, 500, 100], "test_add"
    )
    sa.add_annotation_polygon_to_image(
        project, image_name, [100, 100, 500, 500, 200, 300], "test_add"
    )
    annotations_new = sa.get_image_annotations(project, image_name)["annotation_json"]
    assert len(annotations_new["instances"]) == 2
    export = sa.prepare_export(project, include_fuse=True)
    sa.download_export(project, export, tmpdir)
    # Exactly one exported JSON should carry instances — the two we added.
    non_empty = 0
    for json_file in tmpdir.glob("*.json"):
        exported = json.load(open(json_file))
        if "instances" in exported and len(exported["instances"]) > 0:
            non_empty += 1
            assert len(exported["instances"]) == 2
    assert non_empty == 1
def test_run_segmentation():
    """Smart segmentation: error cases and successful runs on a Pixel project.

    Bug fix: the original asserted ``str(e) == "..."`` where ``e`` is a pytest
    ``ExceptionInfo``; ``str(e)`` includes file/line context, so the equality
    could never hold.  Use ``e.value`` (the actual exception) instead, and a
    substring check to be robust to message framing.
    """
    model_auto = 'autonomous'
    model_generic = 'generic'
    upload_project(
        Path(PROJECT_PATH_PIXEL), PROJECT_NAME_PIXEL_SEGMENTATION,
        "Test for ml functionality", "Pixel"
    )
    image_names_pixel = sa.search_images(PROJECT_NAME_PIXEL_SEGMENTATION)
    # Segmentation is only supported for Pixel projects.
    with pytest.raises(SABaseException) as e:
        sa.run_segmentation(PROJECT_NAME_VECTOR, image_names_pixel, model_auto)
    assert "Operation not supported for given project type" in str(e.value)
    # Unknown model name must be rejected.
    with pytest.raises(SABaseException) as e:
        sa.run_segmentation(
            PROJECT_NAME_PIXEL_SEGMENTATION, image_names_pixel[:2],
            "NonExistantModel"
        )
    assert "Model Does not exist" in str(e.value)
    # Provided images do not exist in the project.
    with pytest.raises(SABaseException) as e:
        sa.run_segmentation(
            PROJECT_NAME_PIXEL_SEGMENTATION, ["NonExistantImage.jpg"],
            MODEL_NAME
        )
    assert "No valid image names were provided" in str(e.value)
    # Unknown names are dropped: succeeded + failed equals the valid count.
    succeeded_imgs, failed_imgs = sa.run_segmentation(
        PROJECT_NAME_PIXEL_SEGMENTATION, image_names_pixel[:4] + ["NA.jpg"],
        model_generic
    )
    assert (len(succeeded_imgs) + len(failed_imgs)) == 4
    succeeded_imgs, failed_imgs = sa.run_segmentation(
        PROJECT_NAME_PIXEL_SEGMENTATION, image_names_pixel[:4] + ["NA.jpg"],
        model_auto
    )
    assert (len(succeeded_imgs) + len(failed_imgs)) == 4