Example #1
# Imports assumed for this excerpt from the runway-python test suite:
import pytest
from runway.model import RunwayModel
from runway.data_types import array, text
from runway.exceptions import MissingOptionError


def test_model_options_missing():
    rw = RunwayModel()
    @rw.setup(options={'initialization_array': array(item_type=text)})
    def setup(opts):
        pass

    # this will print to stderr still, but the test should pass
    with pytest.raises(SystemExit):
        with pytest.raises(MissingOptionError):
            rw.run(debug=True)
Example #2
def test_model_options_passed_as_arguments_to_run():

    # use a dict to share state across function scopes. This makes up for the
    # fact that Python 2.x doesn't have support for the 'nonlocal' keyword.
    closure = dict(setup_ran = False)

    rw = RunwayModel()
    @rw.setup(options={'initialization_array': array(item_type=text)})
    def setup(opts):
        assert opts['initialization_array'] == ['one', 'two', 'three']
        closure['setup_ran'] = True

    rw.run(debug=True, model_options={ 'initialization_array': ['one', 'two', 'three'] })
    assert closure['setup_ran'] == True
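On Python 3 the shared-dict workaround above is unnecessary: the 'nonlocal'
keyword lets the inner function rebind a variable in the enclosing scope. A
minimal sketch of the same test written that way (hypothetical, not part of
the original suite):

from runway.model import RunwayModel   # assumed import paths, as in Example #1
from runway.data_types import array, text

def test_model_options_py3():
    setup_ran = False

    rw = RunwayModel()

    @rw.setup(options={'initialization_array': array(item_type=text)})
    def setup(opts):
        nonlocal setup_ran  # rebind the enclosing variable directly
        setup_ran = True

    rw.run(debug=True,
           model_options={'initialization_array': ['one', 'two', 'three']})
    assert setup_ran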
Example #3
@runway.setup
def setup():
    global USE_CUDA
    if dlib.cuda.get_num_devices() > 0 and dlib.DLIB_USE_CUDA:
        USE_CUDA = True
        print('CUDA detected, using CNN model...')

# https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/examples/facerec_from_webcam_faster.py#L60
identify_face_inputs = {
    'input_image': image,
    'label_image': image,
    'match_tolerance': number(min=0.1, max=1.0, step=0.1, default=0.6)
}
identify_face_outputs = {
    'results': array(image_bounding_box),
}
@runway.command('identify_face', inputs=identify_face_inputs, outputs=identify_face_outputs)
def identify_face(model, args):
    input_arr = np.array(args['input_image'])
    label_arr = np.array(args['label_image'])
    input_locations = face_recognition.face_locations(input_arr, **get_model_kwargs())
    input_encodings = face_recognition.face_encodings(input_arr, known_face_locations=input_locations)

    global LAST_LABEL_IMAGE_ARR
    global LAST_LABEL_ENCODINGS
    label_encodings = None
    # if the label image has changed, update the label encodings; otherwise use
    # the cached version of the encodings
    if LAST_LABEL_IMAGE_ARR is None or not np.array_equal(LAST_LABEL_IMAGE_ARR, label_arr):
        label_encodings = face_recognition.face_encodings(label_arr)
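The excerpt above is cut off mid-function. A minimal, hypothetical sketch of
the caching pattern the comment describes, using the same globals (it assumes
LAST_LABEL_ENCODINGS holds the previously computed result):

import numpy as np
import face_recognition

LAST_LABEL_IMAGE_ARR = None
LAST_LABEL_ENCODINGS = None

def get_label_encodings(label_arr):
    """Recompute face encodings only when the label image changes."""
    global LAST_LABEL_IMAGE_ARR, LAST_LABEL_ENCODINGS
    if LAST_LABEL_IMAGE_ARR is None or not np.array_equal(LAST_LABEL_IMAGE_ARR, label_arr):
        # label image changed: recompute and update the cache
        LAST_LABEL_ENCODINGS = face_recognition.face_encodings(label_arr)
        LAST_LABEL_IMAGE_ARR = label_arr
    return LAST_LABEL_ENCODINGS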
Example #4
setup_options = {}


@runway.setup(options=setup_options)
def setup(opts):
    model = FaceTracker(opts)
    return model


@runway.command(
    name='find_faces',
    inputs={'input': image(description="The input image to analyze")},
    outputs={
        'ids': array(number, description="IDs of found faces"),
        'boxes': array(image_bounding_box, description="Bounding boxes of found faces")
    },
    description='Look for faces in the image')
def find_faces(model, args):
    output = model.process(args['input'])

    return {
        'ids': [o["index"] for o in output],
        'boxes': [o["box"] for o in output]
    }


if __name__ == '__main__':
    runway.run(host='0.0.0.0', port=8000, debug=True)
Example #5
from example_model import FaceTracker

setup_options = {}


@runway.setup(options=setup_options)
def setup(opts):
    model = FaceTracker(opts)
    return model


@runway.command(
    name='find_faces',
    inputs={'input': image(description="The input image to analyze")},
    outputs={
        'ids': array(number, description="IDs of found faces"),
        'boxes': array(image_bounding_box, description="Bounding boxes")
    },
    description='Look for faces in the image.')
def find_faces(model, args):
    output = model.process(args["input"])

    return {
        'ids': [o["index"] for o in output],
        'boxes': [o["box"] for o in output]
    }


if __name__ == '__main__':
    runway.run(host='0.0.0.0', port=8000, debug=True)
Example #6
import runway
from runway.data_types import array, text


@runway.setup(options={"seed_sentences": array(item_type=text, min_length=5)})
def setup(opts):
    for i in range(5):
        print("Sentence {} is \"{}\"".format(i + 1, opts["seed_sentences"][i]))


runway.run()

# curl -H "content-type: application/json" -d '{"seed_sentences": ["the", "sly", "fox", "is", "sly"]}' http://localhost:8000/setup
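The same setup request can also be issued from Python; a minimal sketch using
the third-party requests library (assumes the model server above is listening
on localhost:8000):

import requests

payload = {'seed_sentences': ['the', 'sly', 'fox', 'is', 'sly']}
resp = requests.post('http://localhost:8000/setup', json=payload)
print(resp.status_code, resp.text)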
Example #7
@runway.setup
def setup():
    """Initialize and return model."""
    return emotion_classifier.Model()


faces_description = "Bounding boxes of found faces"
detect_description = ("Detect faces in given image and return their "
                      "bounding boxes sorted largest to smallest")


@runway.command('detect',
                inputs={'image': image},
                outputs={
                    'faces':
                    array(item_type=image_bounding_box,
                          description=faces_description)
                },
                description=detect_description)
def detect(model, inputs):
    """Detect faces in a provided image and return their bounding boxes.

    Bounding boxes will be sorted largest to smallest.
    """
    image = inputs['image']

    # convert to a grayscale numpy array and detect faces
    gray_image = utils.grayscale_from_pil(image)
    faces = model.detect_faces(gray_image)

    # convert to runway style bounding boxes
    runway_faces = [
Example #8
def test_meta(capsys):

    rw = RunwayModel()

    @rw.setup(options={'initialization_array': array(item_type=text)})
    def setup(opts):
        pass

    kwargs_1 = {
        'inputs': {
            'image': image,
            'vector': vector(length=5)
        },
        'outputs': {
            'label': text
        }
    }

    @rw.command('command_1', **kwargs_1)
    def command_1(opts):
        pass

    kwargs_2 = {
        'description': 'This command is used for testing.',
        'inputs': {
            'any': any_type,
            'file': file
        },
        'outputs': {
            'number': number(min=10, max=100)
        }
    }

    @rw.command('command_2', **kwargs_2)
    def command_2(opts):
        pass

    expected_manifest = {
        'options': [{
            'minLength': 0,
            'type': 'array',
            'name': 'initialization_array',
            'description': None,
            'itemType': {
                'default': '',
                'minLength': 0,
                'type': 'text',
                'name': 'text_array_item',
                'description': None
            }
        }],
        'commands': [{
            'name': 'command_2',
            'description': 'This command is used for testing.',
            'inputs': [
                {
                    'type': 'any',
                    'name': 'any',
                    'description': None,
                },
                {
                    'type': 'file',
                    'name': 'file',
                    'description': None,
                },
            ],
            'outputs': [
                {
                    'name': 'number',
                    'min': 10,
                    'default': 10,
                    'max': 100,
                    'type': 'number',
                    'description': None
                },
            ]
        }, {
            'name': 'command_1',
            'description': None,
            'inputs': [
                {
                    'channels': 3,
                    'type': 'image',
                    'name': 'image',
                    'description': None,
                    'defaultOutputFormat': 'JPEG'
                },
                {
                    'samplingMean': 0,
                    'length': 5,
                    'type': 'vector',
                    'name': 'vector',
                    'samplingStd': 1,
                    'default': None,
                    'description': None
                },
            ],
            'outputs': [{
                'default': '',
                'minLength': 0,
                'type': 'text',
                'name': 'label',
                'description': None
            }]
        }]
    }

    # Setting RW_META=1 switches run() into meta mode: instead of starting the
    # server, it prints the model's manifest as JSON to stdout and exits.
    os.environ['RW_META'] = '1'

    rw.run(debug=True,
           model_options={'initialization_array': ['one', 'two', 'three']})
    std = capsys.readouterr()
    manifest = json.loads(std.out.strip('\n'))

    # DeepDiff is required here because Python 2 handles stdout encoding
    # strangely and because dict order is not guaranteed in Python 2. Getting
    # this comparison to work without a library proved to be a waste of time.
    diff = DeepDiff(manifest, expected_manifest, ignore_order=True)
    assert len(diff.keys()) == 0
    assert std.err == ''

    os.environ['RW_META'] = '0'
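The test above relies on the SDK's meta mode. A minimal, standalone sketch of
the same behavior (hypothetical script; the assumption, taken from the test,
is that with RW_META=1 set, run() prints the manifest to stdout and exits):

import os
import runway
from runway.data_types import text

@runway.command('echo', inputs={'msg': text}, outputs={'out': text})
def echo(model, args):
    return {'out': args['msg']}

if __name__ == '__main__':
    os.environ['RW_META'] = '1'  # assumption: enables manifest-only meta mode
    runway.run(debug=True)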
Example #9
@runway.setup
def setup():
    global USE_CUDA
    if dlib.cuda.get_num_devices() > 0 and dlib.DLIB_USE_CUDA:
        USE_CUDA = True
        print('CUDA detected, using CNN model...')

# https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/examples/facerec_from_webcam_faster.py#L60
identify_face_inputs = {
    'input_image': image,
    'label_image': image,
    'match_tolerance': number(min=0.1, max=1.0, step=0.1, default=0.6)
}
identify_face_outputs = {
    'results': array(item_type=any),
    'size': any
}
@runway.command('identify_face', inputs=identify_face_inputs, outputs=identify_face_outputs)
def identify_face(model, args):
    input_arr = np.array(args['input_image'])
    label_arr = np.array(args['label_image'])
    input_locations = face_recognition.face_locations(input_arr, **get_model_kwargs())
    input_encodings = face_recognition.face_encodings(input_arr, known_face_locations=input_locations)

    global LAST_LABEL_IMAGE_ARR
    global LAST_LABEL_ENCODINGS
    label_encodings = None
    # if the label image has changed, update the label encodings; otherwise use
    # the cached version of the encodings
    if LAST_LABEL_IMAGE_ARR is None or not np.array_equal(LAST_LABEL_IMAGE_ARR, label_arr):
        label_encodings = face_recognition.face_encodings(label_arr)
Example #10
    return model


desc = """\
Infers embeddings for input. Returns an array with the lines of
text and an array with the corresponding embeddings for each line.
"""


@runway.command(name='embed',
                inputs={
                    'text': text(),
                    'tokenize_sentences': boolean(default=True)
                },
                outputs={
                    'sentences': array(item_type=text),
                    'embeddings': array(item_type=vector(length=512))
                },
                description=desc)
def embed(model, args):
    if args['tokenize_sentences']:
        sentences = sent_tokenize(args['text'])
    else:
        sentences = args['text'].split("\n")
    print('[EMBED] Embedding {} sentences'.format(len(sentences)))
    results = model.embed(sentences)
    return {'sentences': sentences, 'embeddings': results}


if __name__ == '__main__':
    runway.run(host='0.0.0.0', port=8000)
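A hypothetical client call for the command above, assuming the SDK's usual
convention of exposing each command as an HTTP POST endpoint under its name
(mirroring the /setup curl example in Example #6):

import requests

payload = {'text': 'One sentence. Another sentence.', 'tokenize_sentences': True}
resp = requests.post('http://localhost:8000/embed', json=payload)
result = resp.json()
print(len(result['sentences']), 'sentences embedded')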
Example #11
@runway.setup(options=setup_options)
def setup(opts):
    msg = '[SETUP] Ran with options: seed = {}, truncation = {}'
    print(msg.format(opts['seed'], opts['truncation']))
    model = FaceTracker(opts)
    return model


# Every model needs to have at least one command. A command receives inputs
# and returns outputs. For a complete list of supported input and output data
# types, see: https://sdk.runwayml.com/en/latest/data_types.html
@runway.command(name='generate',
                inputs={'input': image()},
                outputs={
                    'ids': array(item_type=number),
                    'boxes': array(image_bounding_box)
                },
                description='Sends face ids found.')
def generate(model, args):
    faces = model.process(args['input'])
    return {
        'ids': [f["index"] for f in faces],
        'boxes': [f["location"] for f in faces]
    }


if __name__ == '__main__':
    # run the model server using the default network interface and ports,
    # displayed here for convenience
    runway.run(host='0.0.0.0', port=8000, debug=True)
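The array type used throughout these examples wraps another data type, and the
snippets construct it in two equivalent ways. A small illustrative sketch of
the forms seen above (hypothetical usage, not from any of the original files):

from runway.data_types import array, text, number, image_bounding_box

arr_of_text = array(item_type=text, min_length=5)  # keyword form, as in Example #6
arr_of_nums = array(item_type=number)              # keyword form, as in Example #11
arr_of_boxes = array(image_bounding_box)           # positional form, as above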
Example #12
import runway
from runway.data_types import number, text, image, array, image_bounding_box
from example_model import FaceTracker

setup_options = {}

@runway.setup(options=setup_options)
def setup(opts):
    model = FaceTracker(opts)
    return model

@runway.command(name='find_faces',
                inputs={'input': image(description="The input image to analyze")},
                outputs={
                    'ids': array(number, description="IDs of found faces"),
                    'boxes': array(image_bounding_box, description="Bounding boxes of found faces")
                },
                description='Look for faces in the image')
def find_faces(model, args):
    output = model.process(args['input'])

    return {
        'ids': [o["index"] for o in output],
        'boxes': [o["box"] for o in output]
    }

if __name__ == '__main__':
    runway.run(host='0.0.0.0', port=8000, debug=True)
Example #13
        runway.category(description="Pretrained checkpoints to use.",
                        choices=['celebAHQ-512', 'celebAHQ-256', 'celeba'],
                        default='celebAHQ-512')
    })
def setup(opts):
    model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    return model, tokenizer


@runway.command(name='sequence_score',
                inputs={
                    'line1': text(),
                    'next_line_candidates': text()
                },
                outputs={'scores': array(item_type=number)})
def sequence_score(setup_tuple, inputs):
    model, tokenizer = setup_tuple
    line1 = inputs['line1']
    outpath = line1[:5] + '.txt'
    outfile = open(outpath, 'w')
    next_line_candidates = inputs['next_line_candidates']
    candidates = [line.strip() for line in next_line_candidates.split('\n')]
    loss_scores = []
    for candidate in candidates:
        # may be better to concatenate *after* tokenization using the special [SEP] token
        combined = inputs['line1'] + ' ' + candidate
        input_tokens = tokenizer.encode(combined, add_special_tokens=True)
        input_ids = torch.tensor(input_tokens).unsqueeze(0)
        outputs = model(input_ids)
        sequence_loss = outputs[0][0][