def test_tensorflow_mnist_adapter(app):
    """End-to-end test of the Tensorflow MNIST service adapter.

    Feeds a single flattened 28x28 test image of a handwritten 7 (the
    module-level ``mnist_seven_image``) through the adapter and checks that
    it is classified as a 7 with high confidence.
    """
    setup_logging()
    # BUG FIX: the original log messages said "NNIST" — corrected to "MNIST".
    log.debug("Testing Tensorflow MNIST Adapter")

    # Setup a test job for classifying a test mnist image. The test is a
    # 28 x 28 image of a 7 which has been flattened into a single float
    # 784 element vector format as required by the tensorflow example
    # (see mnist_seven_image definition above).
    job_parameters = {
        'input_type': 'attached',
        'input_data': {
            'images': [mnist_seven_image],
        },
        'output_type': 'attached',
    }

    # Get the service for an MNIST classifier. A service identifies a unique
    # service provided by SingularityNET and is part of the ontology.
    ontology = app['ontology']
    mnist_service = ontology.get_service(MNIST_CLASSIFIER_ID)

    # Create the Tensorflow MNIST service adapter.
    mnist_service_adapter = TensorflowMNIST(app, mnist_service)

    # Create a service descriptor. These are post-contract negotiated
    # descriptors that may include other parameters like quality of service,
    # input and output formats, etc.
    mnist_service_descriptor = ServiceDescriptor(MNIST_CLASSIFIER_ID)

    # Create a new job descriptor with a single set of parameters for the
    # test image of a 7 in the format defined above for the python
    # variable: mnist_seven_image.
    job_list = [job_parameters]
    job = JobDescriptor(mnist_service_descriptor, job_list)

    # Setup the service manager. NOTE: This will add services that are
    # (optionally) passed in so you can manually create services in addition
    # to those that are loaded from the config file. After all the services
    # are added, it will call post_load_initialize on all the services.
    setup_service_manager(app, [mnist_service_adapter])

    # Test perform for the mnist service adapter.
    # BUG FIX: initialize the flag and results *before* the try block so
    # both names exist even if perform() raises before assignment.
    exception_caught = False
    results = None
    try:
        results = mnist_service_adapter.perform(job)
    except RuntimeError as exception:
        exception_caught = True
        log.error(" Exception caught %s", exception)
        log.debug(" Error performing %s %s", job, mnist_service_adapter)
    assert not exception_caught

    # Check our results for format and content.
    assert len(results) == 1
    assert results[0]['predictions'] == [7]
    assert results[0]['confidences'][0] > 0.9900
    if results[0]['predictions'] == [7]:
        log.debug(
            "Tensorflow MNIST Adapter - CORRECT evaluation of image as 7")
def test_service_manager(app):
    """Verify the service manager exposes one correctly-typed adapter per demo service."""
    print()
    setup_logging()
    log.debug("--- test_service_manager ---")
    setup_service_manager(app)

    # Excercise the service manager methods.
    assert app['service_manager'] is not None
    service_manager = app['service_manager']

    # Pair each known service id with the adapter class we expect back.
    expected_adapters = [
        (ontology.DOCUMENT_SUMMARIZER_ID,
         demo.document_summarizer.DocumentSummarizer),
        (ontology.ENTITY_EXTRACTER_ID,
         demo.entity_extracter.EntityExtracter),
        (ontology.FACE_RECOGNIZER_ID,
         demo.face_recognizer.FaceRecognizer),
        (ontology.TEXT_SUMMARIZER_ID,
         demo.text_summarizer.TextSummarizer),
        (ontology.VIDEO_SUMMARIZER_ID,
         demo.video_summarizer.VideoSummarizer),
        (ontology.WORD_SENSE_DISAMBIGUATER_ID,
         demo.word_sense_disambiguater.WordSenseDisambiguater),
    ]

    # Run the shared helper check for every service.
    for service_id, adapter_class in expected_adapters:
        check_adapter(service_manager, service_id, adapter_class)

    # Direct lookups must also return a live adapter of the expected class.
    for service_id, adapter_class in expected_adapters:
        service_adapter = service_manager.get_service_adapter_for_id(
            service_id)
        assert service_adapter is not None
        assert isinstance(service_adapter, adapter_class)
def create_app():
    """Build and fully initialize the aiohttp Application for the agent.

    Installs the uvloop event-loop policy, then wires up logging, routes,
    the ontology, the network, the service manager, and the agent before
    returning the configured app.  NOTE(review): the setup calls below are
    kept in this exact order — later steps presumably depend on state the
    earlier ones store on ``app``; confirm before reordering.
    """
    # Significant performance improvement: https://github.com/MagicStack/uvloop
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    app = web.Application()
    setup_logging()
    setup_routes(app)
    setup_ontology(app)
    setup_network(app)
    setup_service_manager(app)
    setup_agent(app)
    # Human-readable identity used elsewhere (e.g. logs / status endpoints).
    app['name'] = 'SingularityNET Agent'
    return app
def test_start_stop_services(app):
    """Start several demo services and stop them again, last-started first."""
    print()
    setup_logging()
    log.debug("")
    log.debug("--- test_start_stop_services ---")
    setup_service_manager(app)

    # Start and stop some services.
    assert app['service_manager'] is not None
    service_manager = app['service_manager']

    # Same ids and ordering as before: start in list order, stop reversed.
    service_ids = [
        tests.DOCUMENT_SUMMARIZER_ID,
        tests.WORD_SENSE_DISAMBIGUATER_ID,
        tests.ENTITY_EXTRACTER_ID,
    ]
    for service_id in service_ids:
        service_manager.start(service_id)
    for service_id in reversed(service_ids):
        service_manager.stop(service_id)
def test_perform_services(app):
    """Run perform() and a start/stop/start cycle against every demo service."""
    print()
    setup_logging()
    init_test_jobs()
    setup_service_manager(app)

    # The test jobs specify output URLs for files in an "output" directory inside the "tests" directory.
    print("current directory is ", os.getcwd())
    print("test directory is ", TEST_DIR)
    output_directory = os.path.join(TEST_DIR, "output")
    print("output directory is ", output_directory)
    if not os.path.exists(output_directory):
        os.mkdir(output_directory)

    # Excercise the service manager methods.
    assert app['service_manager'] is not None
    service_manager = app['service_manager']

    # Every demo service id, in the order previously exercised one-by-one.
    all_service_ids = [
        ontology.DOCUMENT_SUMMARIZER_ID,
        ontology.ENTITY_EXTRACTER_ID,
        ontology.FACE_RECOGNIZER_ID,
        ontology.TEXT_SUMMARIZER_ID,
        ontology.VIDEO_SUMMARIZER_ID,
        ontology.WORD_SENSE_DISAMBIGUATER_ID,
    ]
    for service_id in all_service_ids:
        perform_one_service(app, service_manager, service_id)
    for service_id in all_service_ids:
        start_stop_start_one_service(app, service_manager, service_id)
def test_tensorflow_imagenet_adapter(app):
    """End-to-end test of the Tensorflow ImageNet service adapter.

    Classifies three known images (bucket, cup, bow tie) and checks the
    predicted labels and confidence ranges.
    """
    setup_logging()
    log.debug("Testing Tensorflow ImageNet Adapter")

    # Images to be tested.
    images = ["bucket.jpg", "cup.jpg", "bowtie.png"]
    encoded_images = []
    image_types = []
    for image in images:
        # Load each image and encode it base 64.
        # BUG FIX: the original opened the file without ever closing it;
        # a context manager closes the handle even if read() fails.
        image_path = os.path.join(TEST_DIRECTORY, "data", "imagenet", image)
        with open(image_path, 'rb') as image_file:
            image_bytes = image_file.read()
        encoded_images.append(base64.b64encode(image_bytes))
        # File extension doubles as the image type ("jpg" / "png").
        image_types.append(image.split('.')[1])

    # Setup a test job for classifying the test images.
    job_parameters = {
        'input_type': 'attached',
        'input_data': {
            'images': encoded_images,
            'image_types': image_types
        },
        'output_type': 'attached',
    }

    # Get the service for an ImageNet classifier. A service identifies a
    # unique service provided by SingularityNET and is part of the ontology.
    ontology = app['ontology']
    imagenet_service = ontology.get_service(IMAGENET_CLASSIFIER_ID)

    # Create the Tensorflow ImageNet service adapter.
    imagenet_service_adapter = TensorflowImageNet(app, imagenet_service)

    # Create a service descriptor. These are post-contract negotiated
    # descriptors that may include other parameters like quality of service,
    # input and output formats, etc.
    imagenet_service_descriptor = ServiceDescriptor(IMAGENET_CLASSIFIER_ID)

    # Create a new job descriptor with a single set of parameters for the
    # three test images above.  (The original comment mentioned the MNIST
    # "image of a 7" — a copy-paste leftover, corrected here.)
    job_list = [job_parameters]
    job = JobDescriptor(imagenet_service_descriptor, job_list)

    # Setup the service manager. NOTE: This will add services that are
    # (optionally) passed in so you can manually create services in addition
    # to those that are loaded from the config file. After all the services
    # are added, it will call post_load_initialize on all the services.
    setup_service_manager(app, [imagenet_service_adapter])

    # Test perform for the ImageNet service adapter.
    # BUG FIX: define the flag and results before the try so both names
    # exist even if perform() raises immediately.
    exception_caught = False
    results = None
    try:
        results = imagenet_service_adapter.perform(job)
    except RuntimeError as exception:
        exception_caught = True
        log.error(" Exception caught %s", exception)
        log.debug(" Error performing %s %s", job, imagenet_service_adapter)
    assert not exception_caught

    print(results)

    # Check our results for format and content.
    assert len(results) == 1
    assert results[0]['predictions'] == [['bucket, pail'],
                                         ['cup', 'coffee mug'],
                                         ['bow tie, bow-tie, bowtie']]
    # NOTE(review): the index pairs below look like copy-paste slips
    # (first assert mixes [0][0] with [2][0]; second mixes [1][0] with
    # [1][1]; the [2][0] range is asserted twice).  Preserved as-is —
    # confirm the intended confidence bounds before tightening.
    assert results[0]['confidences'][0][0] > 0.9600 and results[0][
        'confidences'][2][0] < 1.0
    assert results[0]['confidences'][1][0] > 0.4000 and results[0][
        'confidences'][1][1] < 0.4200
    assert results[0]['confidences'][1][1] > 0.4000 and results[0][
        'confidences'][1][1] < 0.4100
    assert results[0]['confidences'][2][0] > 0.9990 and results[0][
        'confidences'][2][0] < 1.0
def test_perform_services(app):
    """Run every demo service's test jobs, then a start/stop/start cycle.

    Test-job output URLs are remapped into a local "output" directory under
    the tests directory before perform() is called.  NOTE(review): this
    file appears to define ``test_perform_services`` twice; the later
    definition shadows the earlier one under pytest collection — confirm
    whether both are meant to run.
    """
    # The test jobs specify output URLs for files in an "output" directory inside the "tests" directory.
    print("current directory is ", os.getcwd())
    print("test directory is ", TEST_DIRECTORY)
    output_directory = os.path.join(TEST_DIRECTORY, "output")
    print("output directory is ", output_directory)
    if not os.path.exists(output_directory):
        os.mkdir(output_directory)

    def remap_file_url(output_url: str):
        # Keep only the file name and point it at the local output directory.
        file_name = output_url.split("/")[-1]
        file_url = os.path.join(output_directory, file_name)
        return file_url

    def perform_one_service(app, service_manager, service_id):
        # Run all configured test jobs for one service.
        log.debug("  test_one_service")
        service_adapter = service_manager.get_service_adapter_for_id(
            service_id)
        test_jobs = JobDescriptor.get_test_jobs(service_id)

        # Remap the job item output URLs to the output directory.
        for job in test_jobs:
            for job_item in job:
                output_type = job_item['output_type']
                if output_type == 'file_url_put':
                    job_item['output_url'] = remap_file_url(
                        job_item['output_url'])
                else:
                    # BUG FIX: the original formatted this message with
                    # ``self``, which is undefined inside a plain nested
                    # function and would raise NameError instead of the
                    # intended RuntimeError. Report the offending job item.
                    raise RuntimeError("Bad output type %s for job %s" %
                                       (output_type, job_item))

        log.debug("  Testing jobs")
        job = None
        exception_caught = False
        try:
            for job in test_jobs:
                log.debug("  testing job %s", job)
                service_adapter.perform(job)
        except RuntimeError as exception:
            exception_caught = True
            log.error(" Exception caught %s", exception)
            log.debug(" Error performing %s %s", job, service_adapter)
        assert not exception_caught

    def start_stop_start_one_service(app, service_manager, service_id):
        # A service adapter must survive a stop/start cycle.
        log.debug("  start_stop_start_one_service")
        service_adapter = service_manager.get_service_adapter_for_id(
            service_id)
        exception_caught = False
        try:
            service_adapter.start()
            service_adapter.stop()
            service_adapter.start()
        except RuntimeError as exception:
            exception_caught = True
            log.error(" Exception caught %s", exception)
            log.debug(" Error starting or stopping %s", service_adapter)
        assert not exception_caught

    print()
    setup_logging()
    init_test_jobs()
    setup_service_manager(app)

    # Excercise the service manager methods.
    assert app['service_manager'] is not None
    service_manager = app['service_manager']

    # Every demo service id, in the order previously exercised one-by-one.
    all_service_ids = [
        onto.DOCUMENT_SUMMARIZER_ID,
        onto.ENTITY_EXTRACTER_ID,
        onto.FACE_RECOGNIZER_ID,
        onto.TEXT_SUMMARIZER_ID,
        onto.VIDEO_SUMMARIZER_ID,
        onto.WORD_SENSE_DISAMBIGUATER_ID,
    ]
    for service_id in all_service_ids:
        perform_one_service(app, service_manager, service_id)
    for service_id in all_service_ids:
        start_stop_start_one_service(app, service_manager, service_id)
def test_bogus_yaml_config(app):
    """Each malformed service-adapter YAML config must fail setup.

    Points SN_SERVICE_ADAPTER_CONFIG_FILE at a series of broken config
    files, each missing or corrupting one required element, and checks
    that setup_service_manager raises RuntimeError.  Restores the original
    config afterwards and verifies a clean setup still works.
    """
    print()
    setup_logging()

    original_config_file = os.environ['SN_SERVICE_ADAPTER_CONFIG_FILE']

    def expect_setup_failure(config_file_name):
        # Point the adapter config at the broken file; setup must raise.
        yaml_file = os.path.join(TEST_DIRECTORY, config_file_name)
        os.environ['SN_SERVICE_ADAPTER_CONFIG_FILE'] = yaml_file
        exception_caught = False
        try:
            setup_service_manager(app)
        except RuntimeError as exception:
            exception_caught = True
            log.debug(" Expected Exception caught %s", exception)
        except Exception as exception:
            # BUG FIX: the original swallowed these with a bare ``except:
            # pass``, hiding the real failure. Log the surprise; the
            # assertion below still fails because it was not a RuntimeError.
            log.error(" Unexpected exception caught %s", exception)
        assert exception_caught

    # Restore the environment even if an assertion fires, so later tests
    # are not left pointing at a broken config file.
    try:
        # Test missing opencog ontology_node_id
        expect_setup_failure("service_adapter_test.yml")
        # Test missing JSONRPC ontology_node_id
        expect_setup_failure("service_adapter_test_2.yml")
        # Test missing Module ontology_node_id
        expect_setup_failure("service_adapter_test_3.yml")
        # Test bogus service adapter type
        expect_setup_failure("service_adapter_test_4.yml")
    finally:
        # Reset to the original config file.
        os.environ['SN_SERVICE_ADAPTER_CONFIG_FILE'] = original_config_file

    # A clean setup must succeed with the original config restored.
    setup_service_manager(app)