Example #1
    def sub_adapter_job(self, tag: str, sub_adapter: ModuleServiceAdapterABC, job: JobDescriptor):
        new_service_descriptor = ServiceDescriptor(sub_adapter.service)
        new_job = JobDescriptor(new_service_descriptor)
        for item_count, job_item in enumerate(job):

            # Just pass the inputs on directly to the subtasks.
            new_job_item = {}
            new_job_item['input_type'] = job_item['input_type']
            new_job_item['input_url'] = job_item['input_url']

            output_type = job_item['output_type']
            new_job_item['output_type'] = output_type

            # Transform the output URL so we can get separate output files for each sub-adapter
            # That way we can assemble the various parts at the end using these separate URLs.
            if output_type == 'file_url_put':
                output_url = job_item['output_url']
                sub_adapter_output_url = self.transform_output_url(tag, item_count, output_url)
            else:
                raise RuntimeError("Bad output type %s for job %s" % (output_type, job))
            new_job_item['output_url'] = sub_adapter_output_url
            new_job.append_job_item(new_job_item)

        return new_job
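The transform_output_url helper used above is not shown in this example. A minimal sketch of what such a transform might do, assuming it only needs to produce a distinct output URL per sub-adapter tag and item index (the name-mangling scheme is hypothetical):

    def transform_output_url(self, tag, item_count, output_url):
        # Hypothetical scheme: insert the sub-adapter tag and item index
        # before the file extension so each sub-adapter writes its own file,
        # e.g. "test_output.txt" -> "test_output.part1.0.txt".
        base, dot, extension = output_url.rpartition('.')
        if not dot:
            return "%s.%s.%d" % (output_url, tag, item_count)
        return "%s.%s.%d.%s" % (base, tag, item_count, extension)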
Example #2
def test_jobs():
    print()
    setup_logging()
    init_test_jobs()
    test_jobs = JobDescriptor.get_test_jobs(ontology.DOCUMENT_SUMMARIZER_ID)
    for job in test_jobs:
        service_id = 0
        if str(job) != "NO_JOB":
            service_id = ontology.DOCUMENT_SUMMARIZER_ID

        job_parameters = {
            'input_type': 'file',
            'input_url': 'http://test.com/inputs/test_input.txt',
            'output_type': 'file_url_put',
            'output_url': 'test_output.txt'
        }
        job_parameters_2 = {
            'input_type': 'file',
            'input_url': 'http://test.com/inputs/test_input_2.txt',
            'output_type': 'file_url_put',
            'output_url': 'test_output_2.txt'
        }

        # Create a new job descriptor with four sets of parameters.
        job_list = [
            job_parameters, job_parameters, job_parameters, job_parameters
        ]
        new_job = JobDescriptor(ServiceDescriptor(service_id), job_list)

        # All four job items added above use 'file' inputs; count them.
        file_count = 0
        for job_item in new_job:
            if job_item['input_type'] == 'file':
                file_count += 1
            else:
                file_count = 0
        assert (file_count == 4)

        # Exercise list item assignment, retrieval, deletion, and length.
        new_job[0] = job_parameters_2
        assert (new_job[0] == job_parameters_2)
        job_count = len(new_job)
        del new_job[1]
        assert (len(new_job) == job_count - 1)

        # Test equality and string conversion functions.
        last_job = new_job
        assert (last_job == new_job)
        assert (str(last_job) == str(new_job))

        test_jobs.append(new_job)
        total_jobs = len(test_jobs)
        test_jobs[0] = new_job
        del test_jobs[total_jobs - 1]

        # Check the string conversion with no ServiceDescriptor...
        new_job = JobDescriptor(None, [job_parameters])
        assert (str(new_job) != "")
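The assertions above suggest that JobDescriptor implements Python's sequence protocol. A hedged sketch of that container behavior, for illustration only (the real class lives in the SingularityNET codebase; this stand-in is an assumption):

class JobDescriptorSketch:
    # Illustrative stand-in, not the real JobDescriptor.
    def __init__(self, service_descriptor, job_items=None):
        self.service_descriptor = service_descriptor
        self.job_items = list(job_items or [])

    def __getitem__(self, index):          # new_job[0]; also enables iteration
        return self.job_items[index]

    def __setitem__(self, index, value):   # new_job[0] = job_parameters_2
        self.job_items[index] = value

    def __delitem__(self, index):          # del new_job[1]
        del self.job_items[index]

    def __len__(self):                     # len(new_job)
        return len(self.job_items)

    def append_job_item(self, job_item):
        self.job_items.append(job_item)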
Example #3
async def perform(service_node_id=None, job_params=None, context=None):
    service_descriptor = ServiceDescriptor(service_node_id)

    job = JobDescriptor(service_descriptor, job_params)
    app = context

    result = await perform_job(app, job)
    logging.debug('Result of perform was %s', result)
    return result
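A hedged usage sketch for driving perform from synchronous test code; the service id, job_parameters, and app values are assumptions standing in for whatever the surrounding test provides:

import asyncio

# Hypothetical invocation; ontology.DOCUMENT_SUMMARIZER_ID, job_parameters,
# and app are assumed to come from the surrounding test setup.
result = asyncio.run(perform(service_node_id=ontology.DOCUMENT_SUMMARIZER_ID,
                             job_params=[job_parameters],
                             context=app))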
Example #4
def test_tensorflow_mnist_adapter(app):
    setup_logging()
    log.debug("Testing Tensorflow NNIST Adapter")

    # Set up a test job for classifying a test MNIST image. The test input is a 28 x 28 image of a 7
    # that has been flattened into a single 784-element float vector, as required by the tensorflow
    # example (see the mnist_seven_image definition above).
    job_parameters = {
        'input_type': 'attached',
        'input_data': {
            'images': [mnist_seven_image],
        },
        'output_type': 'attached',
    }

    # Get the service for an MNIST classifier. A service identifies a unique service provided by
    # SingularityNET and is part of the ontology.
    ontology = app['ontology']
    mnist_service = ontology.get_service(MNIST_CLASSIFIER_ID)

    # Create the Tensorflow MNIST service adapter.
    mnist_service_adapter = TensorflowMNIST(app, mnist_service)

    # Create a service descriptor. These are post-contract negotiated descriptors that may include
    # other parameters like quality of service, input and output formats, etc.
    mnist_service_descriptor = ServiceDescriptor(MNIST_CLASSIFIER_ID)

    # Create a new job descriptor with a single set of parameters for the test image of a 7 in the
    # format defined above for the python variable: mnist_seven_image.
    job_list = [job_parameters]
    job = JobDescriptor(mnist_service_descriptor, job_list)

    # Set up the service manager. NOTE: this will add services that are (optionally) passed in,
    # so you can manually create services in addition to those loaded from the config file.
    # After all the services are added, it will call post_load_initialize on all the services.
    setup_service_manager(app, [mnist_service_adapter])

    # Test perform for the mnist service adapter.
    exception_caught = False
    try:
        results = mnist_service_adapter.perform(job)
    except RuntimeError as exception:
        exception_caught = True
        log.error("    Exception caught %s", exception)
        log.debug("    Error performing %s %s", job, mnist_service_adapter)
    assert not exception_caught

    # Check our results for format and content.
    assert len(results) == 1
    assert results[0]['predictions'] == [7]
    assert results[0]['confidences'][0] > 0.9900

    if results[0]['predictions'] == [7]:
        log.debug(
            "Tensorflow MNIST Adapter - CORRECT evaluation of image as 7")
Example #5
def init_test_jobs():
    test_jobs[tests.DOCUMENT_SUMMARIZER_ID] = []
    test_jobs[tests.WORD_SENSE_DISAMBIGUATER_ID] = []
    test_jobs[tests.FACE_RECOGNIZER_ID] = []
    test_jobs[tests.TEXT_SUMMARIZER_ID] = []
    test_jobs[tests.VIDEO_SUMMARIZER_ID] = []
    test_jobs[tests.ENTITY_EXTRACTER_ID] = []

    job_parameters = [
        {
            'input_type': 'file',
            'input_url': 'http://test.com/inputs/test_input.txt',
            'output_type': 'file_url_put',
            'output_url': 'test_output.txt'
        }
    ]

    job_parameters_2 = [
        {
            'input_type': 'file',
            'input_url': 'http://test.com/inputs/test_input_2.txt',
            'output_type': 'file_url_put',
            'output_url': 'test_output_2.txt'
        }
    ]

    # Create test jobs for the document summarizer.
    service_id = tests.DOCUMENT_SUMMARIZER_ID
    job = JobDescriptor(ServiceDescriptor(service_id), job_parameters)
    test_jobs[service_id].append(job)
    job = JobDescriptor(ServiceDescriptor(service_id), job_parameters_2)
    test_jobs[service_id].append(job)

    # Create test jobs for the word-sense disambiguator.
    service_id = tests.WORD_SENSE_DISAMBIGUATER_ID
    job = JobDescriptor(ServiceDescriptor(service_id), job_parameters)
    test_jobs[service_id].append(job)
    job = JobDescriptor(ServiceDescriptor(service_id), job_parameters_2)
    test_jobs[service_id].append(job)

    # Create test jobs for the face recognizer.
    service_id = tests.FACE_RECOGNIZER_ID
    job = JobDescriptor(ServiceDescriptor(service_id), job_parameters)
    test_jobs[service_id].append(job)

    # Create test jobs for the text summarizer.
    service_id = tests.TEXT_SUMMARIZER_ID
    job = JobDescriptor(ServiceDescriptor(service_id), job_parameters)
    test_jobs[service_id].append(job)

    # Create test jobs for the entity extractor.
    service_id = tests.ENTITY_EXTRACTER_ID
    job = JobDescriptor(ServiceDescriptor(service_id), job_parameters)
    test_jobs[service_id].append(job)
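The per-service blocks above all follow one pattern. A behavior-equivalent compact sketch, assuming the same test_jobs dict and parameter lists defined earlier:

def init_test_jobs_compact():
    # Map each service id to the parameter sets its test jobs are built from.
    jobs_by_service = {
        tests.DOCUMENT_SUMMARIZER_ID: [job_parameters, job_parameters_2],
        tests.WORD_SENSE_DISAMBIGUATER_ID: [job_parameters, job_parameters_2],
        tests.FACE_RECOGNIZER_ID: [job_parameters],
        tests.TEXT_SUMMARIZER_ID: [job_parameters],
        tests.VIDEO_SUMMARIZER_ID: [],
        tests.ENTITY_EXTRACTER_ID: [job_parameters],
    }
    for service_id, parameter_sets in jobs_by_service.items():
        test_jobs[service_id] = [
            JobDescriptor(ServiceDescriptor(service_id), params)
            for params in parameter_sets
        ]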
Example #6
def init_test_jobs():
    test_jobs[ontology.DOCUMENT_SUMMARIZER_ID] = []
    test_jobs[ontology.WORD_SENSE_DISAMBIGUATER_ID] = []
    test_jobs[ontology.FACE_RECOGNIZER_ID] = []
    test_jobs[ontology.TEXT_SUMMARIZER_ID] = []
    test_jobs[ontology.VIDEO_SUMMARIZER_ID] = []
    test_jobs[ontology.ENTITY_EXTRACTER_ID] = []

    # Job parameters are passed as lists of job items (see Example #5).
    job_parameters = [{
        'input_type': 'file',
        'input_url': 'http://test.com/inputs/test_input.txt',
        'output_type': 'file_url_put',
        'output_url': 'test_output.txt'
    }]
    job_parameters_2 = [{
        'input_type': 'file',
        'input_url': 'http://test.com/inputs/test_input_2.txt',
        'output_type': 'file_url_put',
        'output_url': 'test_output_2.txt'
    }]

    service_id = ontology.DOCUMENT_SUMMARIZER_ID
    job = JobDescriptor(ServiceDescriptor(service_id), job_parameters)
    test_jobs[service_id].append(job)
    job = JobDescriptor(ServiceDescriptor(service_id), job_parameters_2)
    test_jobs[service_id].append(job)

    service_id = ontology.WORD_SENSE_DISAMBIGUATER_ID
    job = JobDescriptor(ServiceDescriptor(service_id), job_parameters)
    test_jobs[service_id].append(job)
    job = JobDescriptor(ServiceDescriptor(service_id), job_parameters_2)
    test_jobs[service_id].append(job)

    service_id = ontology.FACE_RECOGNIZER_ID
    job = JobDescriptor(ServiceDescriptor(service_id), job_parameters)
    test_jobs[service_id].append(job)

    service_id = ontology.TEXT_SUMMARIZER_ID
    job = JobDescriptor(ServiceDescriptor(service_id), job_parameters)
    test_jobs[service_id].append(job)

    service_id = ontology.ENTITY_EXTRACTER_ID
    job = JobDescriptor(ServiceDescriptor(service_id), job_parameters)
    test_jobs[service_id].append(job)
Example #7
def test_tensorflow_imagenet_adapter(app):
    setup_logging()
    log.debug("Testing Tensorflow ImageNet Adapter")

    # images to be tested
    images = ["bucket.jpg", "cup.jpg", "bowtie.png"]
    encoded_images = []
    image_types = []

    for image in images:
        # Load each image and encode it as base64.
        image_path = os.path.join(TEST_DIRECTORY, "data", "imagenet", image)
        with open(image_path, 'rb') as image_file:
            image_bytes = image_file.read()
        encoded_images.append(base64.b64encode(image_bytes))
        image_types.append(image.split('.')[1])

    # Set up a test job for classifying the test images.
    job_parameters = {
        'input_type': 'attached',
        'input_data': {
            'images': encoded_images,
            'image_types': image_types
        },
        'output_type': 'attached',
    }

    # Get the service for an ImageNet classifier. A service identifies a unique service provided by
    # SingularityNET and is part of the ontology.
    ontology = app['ontology']
    imagenet_service = ontology.get_service(IMAGENET_CLASSIFIER_ID)

    # Create the Tensorflow ImageNet service adapter.
    imagenet_service_adapter = TensorflowImageNet(app, imagenet_service)

    # Create a service descriptor. These are post-contract negotiated descriptors that may include
    # other parameters like quality of service, input and output formats, etc.
    imagenet_service_descriptor = ServiceDescriptor(IMAGENET_CLASSIFIER_ID)

    # Create a new job descriptor with a single set of parameters covering all three test images
    # attached above.
    job_list = [job_parameters]
    job = JobDescriptor(imagenet_service_descriptor, job_list)

    # Set up the service manager. NOTE: this will add services that are (optionally) passed in,
    # so you can manually create services in addition to those loaded from the config file.
    # After all the services are added, it will call post_load_initialize on all the services.
    setup_service_manager(app, [imagenet_service_adapter])

    # Test perform for the ImageNet service adapter.
    exception_caught = False
    try:
        results = imagenet_service_adapter.perform(job)
    except RuntimeError as exception:
        exception_caught = True
        log.error("    Exception caught %s", exception)
        log.debug("    Error performing %s %s", job, imagenet_service_adapter)
    assert not exception_caught

    log.debug("Results: %s", results)

    # Check our results for format and content. Each confidence should fall in
    # its expected range for the predicted label.
    assert len(results) == 1
    assert results[0]['predictions'] == [['bucket, pail'],
                                         ['cup', 'coffee mug'],
                                         ['bow tie, bow-tie, bowtie']]
    confidences = results[0]['confidences']
    assert confidences[0][0] > 0.9600
    assert confidences[1][0] > 0.4000
    assert 0.4000 < confidences[1][1] < 0.4100
    assert 0.9990 < confidences[2][0] < 1.0
Example #8
async def can_perform(service_node_id=None, context=None):
    # Figure out what we are being asked to perform, and answer whether we can.
    service = ServiceDescriptor(service_node_id)
    app = context
    return await can_perform_service(app, service)
Example #9
async def internal_can_perform(app, service_node_id):
    # Figure out what we are being asked to perform, and answer whether we can.
    service = ServiceDescriptor(service_node_id)
    return await can_perform_service(app, service)
Example #10
async def internal_perform_job(app, service_node_id, job_params):
    service_descriptor = ServiceDescriptor(service_node_id)
    job = JobDescriptor(service_descriptor, job_params)
    result = await perform_job(app, job)
    return result
Example #11
async def internal_offer(app, service_node_id, price):
    service_descriptor = ServiceDescriptor(service_node_id)
    result = app['accounting'].incoming_offer(service_descriptor, price)
    return result
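The app['accounting'] component is referenced but not shown in these examples. A hypothetical test double sufficient to exercise internal_offer; the class name and accept-everything policy are assumptions:

class FakeAccounting:
    """Test double for the accounting component used by internal_offer."""

    def __init__(self):
        self.offers = []

    def incoming_offer(self, service_descriptor, price):
        # Record the offer and accept unconditionally (test-only behavior).
        self.offers.append((service_descriptor, price))
        return True

app = {'accounting': FakeAccounting()}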