    architecture = opts['architecture']
    print('[SETUP] Ran with architecture "{}"'.format(architecture))
    if architecture == 'Artistic':
        colorizer = get_image_colorizer(artistic=True)
    elif architecture == 'Stable':
        colorizer = get_image_colorizer(artistic=False)
    else:
        colorizer = get_video_colorizer().vis
    return colorizer


@runway.command(name='generate', inputs={
    'image': image(description='Image to colorize'),
    'render_factor': number(description=render_factor_description,
                            min=7, max=45, step=1, default=35)
}, outputs={'image': image(description='Colorized image')})
def generate(model, args):
    render_factor = args['render_factor']
    print('[GENERATE] Ran with render_factor "{}"'.format(render_factor))
    orig_image = args['image'].convert('RGB')
    model._clean_mem()
    # NOTE: the call below was truncated in the original; this completion
    # assumes DeOldify's filter API, filter(orig_image, filtered_image,
    # render_factor=...).
    output_image = model.filter.filter(orig_image, orig_image,
                                       render_factor=render_factor)
    return {'image': output_image}
import runway
from runway.data_types import number, text, image, array, image_bounding_box
from example_model import FaceTracker

setup_options = {}


@runway.setup(options=setup_options)
def setup(opts):
    model = FaceTracker(opts)
    return model


@runway.command(
    name='find_faces',
    inputs={'input': image(description="The input image to analyze")},
    outputs={
        'ids': array(number, description="IDs of found faces"),
        'boxes': array(image_bounding_box, description="bounding boxes of found faces")
    },
    description='Look for faces in the image')
def find_faces(model, args):
    output = model.process(args['input'])
    return {
        'ids': [o["index"] for o in output],
        'boxes': [o["box"] for o in output]
    }
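# For context, runway's image_bounding_box values are arrays of four floats
# [xmin, ymin, xmax, ymax] normalized to 0..1. Below is a minimal sketch of a
# stand-in for example_model.FaceTracker that satisfies the contract used by
# find_faces above; the "detection" is a hard-coded placeholder, not the real
# model.
class FaceTrackerStub:
    def __init__(self, opts):
        self.opts = opts

    def process(self, pil_image):
        w, h = pil_image.size
        # Pretend one face was found in the center of the image.
        box_px = (w * 0.25, h * 0.25, w * 0.75, h * 0.75)
        box = [box_px[0] / w, box_px[1] / h, box_px[2] / w, box_px[3] / h]
        return [{"index": 0, "box": box}]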
# supported configs. The setup function should return the model ready to be
# used.
setup_options = {
    'truncation': number(min=1, max=10, step=1, default=5,
                         description='Example input.'),
    'seed': number(min=0, max=1000000,
                   description='A seed used to initialize the model.')
}


@runway.setup(options=setup_options)
def setup(opts):
    msg = '[SETUP] Ran with options: seed = {}, truncation = {}'
    print(msg.format(opts['seed'], opts['truncation']))
    model = ExampleModel(opts)
    return model


inputs = {
    # 'file': file(extension=".zip"),
    'image': image(),
    'model': category(choices=["none", "random", "color", "bit/m-r101x1", "vgg16"],
                      default="color", description='Cluster model.'),
    'slices': number(min=5, max=30, step=5, default=10,
                     description='Number of slices.'),
    'vgg_depth': number(min=1, max=8, step=1, default=7,
                        description='VGG Feature Depth'),
}


# Every model needs to have at least one command. Every command allows you to
# send inputs and process outputs. To see a complete list of supported input
# and output data types: https://sdk.runwayml.com/en/latest/data_types.html
@runway.command(name='generate', inputs=inputs, outputs={
    'image': image(width=512, height=512),
    'info': text("hello")
}, description='Generates a clustered image based on the inputs.')
def generate(model, args):
    print('[GENERATE] Ran with image "{}"'.format(args['image']))
    # Generate a PIL or Numpy image based on the inputs, and return it
import runway
from runway.data_types import category, vector, image
from your_code import model


@runway.setup
def setup():
    return model()


sample_inputs = {
    "z": vector(length=512),
    "category": category(choices=["day", "night"])
}

sample_outputs = {"image": image(width=1024, height=1024)}


@runway.command("sample", inputs=sample_inputs, outputs=sample_outputs)
def sample(model, inputs):
    # The parameters passed to a function decorated by @runway.command() are:
    # 1. The return value of a function wrapped by @runway.setup(), usually a model
    # 2. The inputs sent with the HTTP request to the /<command_name> endpoint,
    #    as defined by the inputs keyword argument delivered to @runway.command().
    img = model.sample(z=inputs["z"], category=inputs["category"])
    return {"image": img}
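# A hedged sketch of calling the command above over HTTP once the server is
# running: each @runway.command() is exposed as a POST endpoint named after
# the command (runway.run() listens on port 9000 by default; the payload
# values here are illustrative).
import requests

payload = {
    "z": [0.0] * 512,   # vector(length=512) is sent as a JSON array of floats
    "category": "day"   # must be one of the declared category choices
}
resp = requests.post("http://localhost:9000/sample", json=payload)
print(resp.json()["image"][:64])  # images come back as base64-encoded URIs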
    vgg.to(device)
    decoder.to(device)
    content_tf = test_transform(0, False)
    style_tf = test_transform(0, False)
    return {
        'vgg': vgg,
        'decoder': decoder,
        'content_tf': content_tf,
        'style_tf': style_tf,
    }


@runway.command(name='generate', inputs={
    'content_image': image(description='Content Image'),
    'style_image': image(description='Style Image'),
    'preserve_color': boolean(description='Preserve content image color'),
    'alpha': number(description='Controls the degree of stylization',
                    min=0, max=1, step=0.01, default=1)
}, outputs={
    'image': image(description='Output image')
})
def generate(model, args):
    content_image = args['content_image'].convert('RGB')
    style_image = args['style_image'].convert('RGB')
    preserve_color = args['preserve_color']
    alpha = args['alpha']
    print('[GENERATE] Ran with preserve_color "{}", alpha "{}"'.format(preserve_color, alpha))
    vgg = model['vgg']
    decoder = model['decoder']
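# For context, AdaIN-style stylization (which the vgg/decoder pair above
# implements) re-normalizes content features so their channel-wise mean and
# standard deviation match those of the style features; `alpha` then blends
# the result with the original content features. A minimal sketch of the core
# operation; the function name is an assumption, not this repo's API.
import torch

def adaptive_instance_norm(content_feat, style_feat, eps=1e-5):
    # content_feat, style_feat: [N, C, H, W] feature maps from the encoder
    c_mean = content_feat.mean(dim=(2, 3), keepdim=True)
    c_std = content_feat.std(dim=(2, 3), keepdim=True) + eps
    s_mean = style_feat.mean(dim=(2, 3), keepdim=True)
    s_std = style_feat.std(dim=(2, 3), keepdim=True) + eps
    t = s_std * (content_feat - c_mean) / c_std + s_mean
    return t  # the caller blends: alpha * t + (1 - alpha) * content_feat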
                             max=1.0, step=.1,
                             description='Minimum scaling amount'),
    'transform_max': number(default=0.5, min=0.0, max=1.0, step=.1,
                            description='Maximum scaling amount')
}


@runway.command(
    name='generate',
    inputs=input_options,
    outputs={'image': image()},
    description='Use Lucid to visualize the layers and neurons of a specific ML network.')
def generate(model, args):
    print('[GENERATE] Ran with layer {} and neuron {}'.format(
        args['layer'], args['neuron']))
    layer_id = args['layer'].split(' ')[0]
    layer_neuron = '{}:{}'.format(layer_id, args['neuron'])
    s = int(args['size'])
    min_scale = args['transform_min']
    max_scale = args['transform_max']
    scale_offset = (max_scale - min_scale) * 10
    # https://github.com/tensorflow/lucid/issues/148
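# For reference, the "<layer>:<neuron>" string built in layer_neuron above is
# Lucid's standard objective syntax. A minimal sketch of rendering such an
# objective directly with Lucid's model zoo; the layer and channel values are
# illustrative, not this repo's configuration.
import lucid.modelzoo.vision_models as models
from lucid.optvis import render

inception = models.InceptionV1()
inception.load_graphdef()
images = render.render_vis(inception, "mixed4a:476")  # channel 476 of mixed4a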
# Copyright (c) 2021 Justin Pinkney

import runway
from runway.data_types import category, vector, image, number
import editor
import face_detection

edit_controls = {k: number(description=k, default=0, min=-20, max=20)
                 for k in editor.edits.keys()}
inputs = {'original': image()}
inputs.update(edit_controls)
outputs = {
    'image': image()
}


@runway.setup(options={
    'checkpoint': runway.file(extension='.pt', default="psp_ffhq_encode.pt"),
    'face_detector': runway.file(extension='.dat',
                                 default="shape_predictor_5_face_landmarks.dat"),
})
def setup(opts):
    checkpoint_path = opts['checkpoint']
    face_detection.MODEL_PATH = opts['face_detector']
    encoder, decoder, latent_avg = editor.load_model(checkpoint_path)
    manipulator = editor.manipulate_model(decoder)
    manipulator.edits = {editor.idx_dict[v[0]]: {v[1]: 0}
                         for k, v in editor.edits.items()}
    return encoder, decoder, latent_avg, manipulator


@runway.command('encode', inputs=inputs, outputs=outputs,
                description='Generate an image.')
        p_pro = GIFSmoothing(r=35, eps=0.001)
    else:
        from photo_smooth import Propagator
        p_pro = Propagator()
    if torch.cuda.is_available():
        p_wct.cuda(0)
    return {
        'p_wct': p_wct,
        'p_pro': p_pro,
    }


@runway.command(name='generate', inputs={
    'content': image(),
    'style': image()
}, outputs={'image': image()})
def generate(model, args):
    p_wct = model['p_wct']
    p_pro = model['p_pro']
    # TODO: Use image directly instead of saving to path
    content_image_path = '/tmp/content.png'
    style_image_path = '/tmp/style.png'
    args['content'].save(content_image_path, 'PNG')
    args['style'].save(style_image_path, 'PNG')
    output_image_path = '/tmp/output.png'
    process_stylization.stylization(
}


@runway.setup(options=setup_options)
def setup(opts):
    msg = '[SETUP] Ran with options: seed = {}, truncation = {}'
    print(msg.format(opts['seed'], opts['truncation']))
    model = FaceTracker(opts)
    return model


# Every model needs to have at least one command. Every command allows you to
# send inputs and process outputs. To see a complete list of supported input
# and output data types: https://sdk.runwayml.com/en/latest/data_types.html
@runway.command(name='generate', inputs={'input': image()}, outputs={
    'ids': array(item_type=number),
    'boxes': array(image_bounding_box)
}, description='Returns the ids and bounding boxes of found faces.')
def generate(model, args):
    faces = model.process(args['input'])
    return {
        'ids': [f["index"] for f in faces],
        'boxes': [f["location"] for f in faces]
    }


if __name__ == '__main__':
    # run the model server using the default network interface and ports,
    mdl = Pix2Pix256RGBA()
    return mdl


# Generate Image ########################################

def generate_from_PIL_img(mdl, img_pil):
    # converts a PIL image to a normalized tensor of dimension [1, sz, sz, 3]
    img_ten_in = ImgUtil.imgpil_to_imgten(img_pil)
    img_ten_out = mdl.generator(img_ten_in, training=True)
    # converts a normalized tensor of dimension [1, sz, sz, 3] to a PIL image
    return ImgUtil.imgten_to_imgpil(img_ten_out)


generate_command_inputs = {
    'image_in': image(width=256, height=256)
}
generate_command_outputs = {
    'image_out': image(width=256, height=256, channels=4)
}


@runway.command(name='generate',
                inputs=generate_command_inputs,
                outputs=generate_command_outputs,
                description='Translates the input image with the Pix2Pix generator.')
def generate(model, args):
    print('[GENERATE]\n image_in: "{}"'.format(args['image_in']))
    output_image = args['image_in']
    size_in = args['image_in'].size
    if model.generator:
        with torch.no_grad():
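# A hedged sketch of what ImgUtil.imgpil_to_imgten likely does, per the
# comments above (assuming pix2pix-style normalization to -1..1); this is an
# illustration, not the repo's actual implementation.
import numpy as np

def imgpil_to_imgten_sketch(img_pil):
    arr = np.asarray(img_pil, dtype=np.float32)  # [sz, sz, 3], values 0..255
    arr = (arr / 127.5) - 1.0                    # normalize to -1..1
    return arr[np.newaxis, ...]                  # [1, sz, sz, 3]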
labels = json.load(open('labels.json'))

normalize = transforms.Normalize(
    mean=[0.485, 0.456, 0.406],
    std=[0.229, 0.224, 0.225]
)
preprocess = transforms.Compose([
    transforms.Scale(256),  # deprecated; use transforms.Resize in newer torchvision
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize
])


@runway.setup
def setup():
    return models.squeezenet1_1(pretrained=True)


@runway.command('classify', inputs={'photo': image()}, outputs={'label': text()})
def classify(model, inputs):
    img = inputs['photo']
    img_tensor = preprocess(img)
    img_tensor.unsqueeze_(0)
    img_variable = Variable(img_tensor)
    fc_out = model(img_variable)
    label = labels[str(fc_out.data.numpy().argmax())]
    return {'label': label}


if __name__ == '__main__':
    runway.run()
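# Example request against the classify endpoint above (assuming the server's
# default port, 9000; the base64 payload is truncated for brevity):
# curl -H "content-type: application/json" \
#   -d '{ "photo": "data:image/jpeg;base64,/9j/4AAQSkZJRgA..." }' \
#   http://localhost:9000/classify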
# Check https://docs.runwayapp.ai/#/python-sdk to see a complete list of
# supported configs. The setup function should return the model ready to be
# used.
setup_options = {'checkpoint': runway.file(extension='.h5')}


@runway.setup(options=setup_options)
def setup(opts):
    model = Pix2Pix(opts)
    return model


# Every model needs to have at least one command. Every command allows you to
# send inputs and process outputs. To see a complete list of supported input
# and output data types: https://sdk.runwayml.com/en/latest/data_types.html
@runway.command(name='generate', inputs={
    'input_image': image()
}, outputs={
    'output_image': image()
}, description='Generates a predicted image based on the given input image.')
def generate(model, args):
    # Generate an output image based on the input image, and return it
    output_image = model.run_on_input(args['input_image'])
    return {
        'output_image': output_image
    }


if __name__ == '__main__':
    # run the model server using the default network interface and ports,
    # displayed here for convenience
    port = 8000
    print(f"Running on port {port}..")
    runway.run(host='0.0.0.0', port=port)  # assumed completion: matches the pattern in the other entrypoints
import runway
from runway.data_types import number, text, image
from cartoonize_model import CartoonizeModel

setup_options = {
    'model_path': text(description='Model path. Empty string = default model.'),
}


@runway.setup(options=setup_options)
def setup(opts):
    model = CartoonizeModel({
        'model_path': opts['model_path'] or 'WBCartoonization/test_code/saved_models'
    })
    return model


@runway.command(name='cartoonize', inputs={
    'image': image(),
    'resize': number(default=50, min=0, max=100, step=1)
}, outputs={
    'image': image()
}, description='Cartoonize.')
def cartoonize(model, args):
    output_image = model.cartoonize(args['image'], {
        'resize': args['resize'] / 100
    })
    return {
        'image': output_image
    }


if __name__ == '__main__':
    runway.run()  # assumed completion: the original is truncated after this guard
import runway
from runway.data_types import image

inputs = {"image": image(width=512, height=512)}
outputs = {"image": image(width=512, height=512)}


@runway.command("style_transfer", inputs=inputs, outputs=outputs)
def style_transfer(result_of_setup, args):
    # perform some transformation to the image, and then return it as a
    # PIL image or numpy array
    img = do_style_transfer(args["image"])
    return {"image": img}


runway.run()

# The endpoint name matches the registered command:
# curl -H "content-type: application/json" \
#   -d '{ "image": "data:image/jpeg;base64,/9j/4AAQSkZJRgA..." }' \
#   http://localhost:9000/style_transfer
"inception4b": model.inception4b, "inception4c": model.inception4c, "inception4d": model.inception4d, "inception4e": model.inception4e, "inception5a": model.inception5a, "inception5b": model.inception5b, } config = { "image_path": None, "layers": [model.inception4b], "custom_func": [None] } input_dict = { "image": image(), "octave_scale": number(step=0.05, min=1.0, max=1.7, default=1.2), "num_octaves": number(step=1, min=1, max=25, default=5), "iterations": number(step=1, min=1, max=100, default=14), "lr": number(step=1e-4, min=1e-9, max=1e-1, default=0.05), "max_rotation": number(step=0.1, min=0.0, max=1.5, default=0.9), "layer_index": number(step=1, min=0, max=len(layers), default=0), "channel_index": number(step=1, min=-1, max=511, default=0), "invert_mask": boolean(default=False) } @runway.setup def setup(): dreamy_boi = dreamer(model) return dreamy_boi
    runway.category(description="Pretrained checkpoints to use.",
                    choices=['skip'], default='skip')
})
def setup(opts):
    msg = '[SETUP] Running Model'
    print(msg)
    model = matcap_model.get_generator()
    return model


# Every model needs to have at least one command. Every command allows you to
# send inputs and process outputs. To see a complete list of supported input
# and output data types: https://sdk.runwayml.com/en/latest/data_types.html
@runway.command(name='generate',
                inputs={'patch': image(width=16, height=16)},
                outputs={'image': image(width=16, height=16)})
def generate(model, args):
    # Generate a PIL or Numpy image based on the input patch, and return it
    img = args['patch']
    img_tensor = torch.tensor(numpy.array(img)).float() / 256.0
    img = img_tensor
    hacky_workaround = [img_tensor]
    for i in range(99):
        hacky_workaround.append(torch.zeros(16, 16, 3) + 0.2)
    noise = torch.stack(hacky_workaround).float()
    noise = noise.unsqueeze(0).view(100, -1).float()
    input_image = Image.open("./input.png")
def sample(model, img):
    # Ensure the image is under 1024px on each side
    if img.size[0] > 1024 or img.size[1] > 1024:
        img.thumbnail((1024, 1024))
    return model.run(np.array(img)).convert("L")


@runway.setup(options={"onnx": number(default=0)})
def setup(opts):
    import basnet
    return basnet


@runway.command(name='paste',
                inputs={'image': image()},  # was the bare `image` type; runway expects an instance
                outputs={'image': image(channels=4)})
def paste(model, inputs):
    start = time.time()
    logging.info('generating mask...')
    img = inputs['image']
    if img.size[0] > 1024 or img.size[1] > 1024:
        img.thumbnail((1024, 1024))
    mask = sample(model, img)
    logging.info(' > compositing final image...')
    ref = inputs['image']
    empty = Image.new("RGBA", ref.size, 0)
    res = Image.composite(ref, empty, mask.resize(ref.size))
    # Print stats
    logging.info(f'Completed in {time.time() - start:.2f}s')
    # Commands must return a dict keyed by the declared outputs.
    return {'image': res}
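# For reference, PIL's Image.composite(a, b, mask) takes pixels from `a`
# where the mask is white and from `b` where it is black, which is how the
# BASNet mask cuts the subject out above. A self-contained toy example (all
# sizes and colors illustrative):
from PIL import Image, ImageDraw

fg = Image.new("RGBA", (64, 64), (255, 0, 0, 255))      # solid red "subject"
bg = Image.new("RGBA", (64, 64), 0)                     # fully transparent
mask = Image.new("L", (64, 64), 0)                      # black = keep bg
ImageDraw.Draw(mask).ellipse((8, 8, 56, 56), fill=255)  # white = keep fg
out = Image.composite(fg, bg, mask)                     # red disc on transparency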
        for softmax in probabilities
    ]
    return {
        'faces': runway_faces,
        'probabilities': probabilities,
        'most_likely_classes': most_likely_classes
    }


classify_description = ("Classify a given face image, returning probabilities and "
                        "the most likely class.\nProbabilities correspond to:"
                        f" {', '.join(emotion_classifier.EMOTIONS)}")


@runway.command('classify',
                inputs={'image': image(description="Cropped face image")},
                outputs={
                    'probabilities': vector(length=len(emotion_classifier.EMOTIONS),
                                            description="Probabilities of the "
                                            "corresponding face being each "
                                            "possible class"),
                    'most_likely_class': text(description="Most likely class of face")
                },
                description=classify_description)
def classify(model, inputs):
    """Classify the given face image.

    Returns probabilities of the face's emotion classification and the most
    likely class.
    """
def setup(opts):
    msg = '[SETUP] Ran with options: mode = {}, seed = {}, res = {}'
    print(msg.format(opts['mode'], opts['seed'], opts['resolution']))
    global res  # make the resolution visible at module scope
    res = opts['resolution']
    model = CPPNModel(opts)
    return model


# Every model needs to have at least one command. Every command allows you to
# send inputs and process outputs. To see a complete list of supported input
# and output data types: https://sdk.runwayml.com/en/latest/data_types.html
# sample_inputs = {'z': vector(length=3)}
sample_inputs = {'z1': number(min=-5., max=5., step=0.01, default=.125),
                 'z2': number(min=-5., max=5., step=0.01, default=2.125),
                 'scale': number(min=0.01, max=10, step=0.01, default=1.500)}
# NOTE: assumed module-level default; sample_outputs is evaluated at import
# time, before setup() has run, so `res` must already exist here.
res = 512
sample_outputs = {'image': image(width=res, height=res)}


@runway.command(name='generate', inputs=sample_inputs, outputs=sample_outputs)
def generate(model, inputs):
    # Generate a PIL or Numpy image based on the input vector, and return it
    output_image = model.run_on_input([inputs['z1'], inputs['z2'], inputs['scale']])
    return {'image': output_image}


if __name__ == '__main__':
    # run the model server using the default network interface and ports,
    # displayed here for convenience
    runway.run(host='0.0.0.0', port=8000)
from PIL import Image

# Setup the model, initialize weights, set the configs of the model, etc.
# Every model will have a different set of configurations and requirements.
# Check https://docs.runwayapp.ai/#/python-sdk to see a complete list of
# supported configs. The setup function should return the model ready to be
# used.
@runway.setup(options={'models': runway.file(extension='.zip')})
def setup(opts):
    model = Paint_MODEL(opts)
    return model


# Every model needs to have at least one command. Every command allows you to
# send inputs and process outputs. To see a complete list of supported input
# and output data types: https://sdk.runwayml.com/en/latest/data_types.html
@runway.command(name='paint',
                inputs={'input_image': image(width=1024, height=1024)},
                outputs={'output_image': image(width=1024, height=1024)})
def paint(model, args):
    # Generate a PIL or Numpy image based on the input image, and return it
    output_image = model.paint(args['input_image'])
    return {'output_image': output_image}


if __name__ == '__main__':
    # run the model server using the default network interface and ports,
    # displayed here for convenience
    runway.run(host='0.0.0.0', port=8000)
# NOTE: the original passed extension='.jpg' here, which cannot be right for
# a model checkpoint; '.h5' is an assumed stand-in.
@runway.setup(options={'checkpoint': runway.file(extension='.h5')})
def setup(opts):
    checkpoint_path = opts['checkpoint']
    print('[SETUP] Ran with checkpoint "{}"'.format(checkpoint_path))
    model = load_model_from_checkpoint(checkpoint_path)
    return model


# Every model needs to have at least one command. Every command allows you to
# send inputs and process outputs. To see a complete list of supported input
# and output data types: https://sdk.runwayml.com/en/latest/data_types.html
@runway.command(
    name='generate',
    inputs={'image': image()},
    outputs={'image': image(width=1200, height=1200)},
    description='Generates an output image from the input image.')
def generate(model, args):
    print('[GENERATE] Ran with input image "{}"'.format(args['image']))
    # Generate a PIL or Numpy image from the input image, and return it.
    # NOTE: the original read args['caption'] (not a declared input) and
    # returned runway.file(...) where an image is expected; this assumes the
    # model exposes run_on_input() like the other examples in this listing.
    output_image = model.run_on_input(args['image'])
    return {'image': output_image}


if __name__ == '__main__':
    # run the model server using the default network interface and ports,
    # displayed here for convenience
    runway.run(host='0.0.0.0', port=8000)

## Now that the model is running, open a new terminal and give it a command to
import runway
from runway.data_types import category, vector, image
from your_code import model


@runway.setup
def setup():
    return model()


sample_inputs = {
    "z": vector(length=512, description="The seed used to generate an output image."),
    "category": category(choices=["day", "night"])
}

sample_outputs = {
    "image": image(width=1024, height=1024)
}

# Descriptions are used to document each data type and @runway.command(); their
# values appear in the app as tooltips. Writing detailed descriptions for each
# model option goes a long way towards helping users learn how to interact with
# your model. Write descriptions as full sentences.
sample_description = "Generate a new image based on a z-vector and an output style: day or night."


@runway.command("sample",
                inputs=sample_inputs,
                outputs=sample_outputs,
                description=sample_description)
def sample(model, inputs):
    # The parameters passed to a function decorated by @runway.command() are:
    # 1. The return value of a function wrapped by @runway.setup(), usually a model.
    # 2. The inputs sent with the HTTP request to the /<command_name> endpoint,
    #    as defined by the inputs keyword argument delivered to @runway.command().
# supported configs. The setup function should return the model ready to be
# used.
setup_options = {
    'threshold': number(min=0, max=1, step=0.1, default=0.3),
    'checkpoint': runway.file(extension='.pth'),
    'mode': category(choices=["mask_only", "box_only", "both"], default="both"),
}


@runway.setup(options=setup_options)
def setup(opts):
    model = YOLACT_MODEL(opts)
    return model


# Every model needs to have at least one command. Every command allows you to
# send inputs and process outputs. To see a complete list of supported input
# and output data types: https://sdk.runwayml.com/en/latest/data_types.html
@runway.command(name='detect', inputs={
    'input_image': image(width=550, height=550)
}, outputs={
    'output_image': image(width=550, height=550)
})
def detect(model, args):
    # Run detection on the input image and return the annotated result
    output_image = model.detect(args['input_image'])
    return {
        'output_image': output_image
    }


if __name__ == '__main__':
    # run the model server using the default network interface and ports,
    # displayed here for convenience
    runway.run(host='0.0.0.0', port=8000)
@runway.setup(options=setup_options)
def setup(opts):
    msg = '[SETUP] Ran with options: seed = {}, truncation = {}'
    print(msg.format(opts['seed'], opts['truncation']))
    model = ExampleModel(opts)
    return model


# Every model needs to have at least one command. Every command allows you to
# send inputs and process outputs. To see a complete list of supported input
# and output data types: https://sdk.runwayml.com/en/latest/data_types.html
@runway.command(
    name='generate',
    inputs={'caption': text()},
    outputs={'image': image(width=512, height=512)},
    description='Generates a red square when the input text is "red".')
def generate(model, args):
    print('[GENERATE] Ran with caption value "{}"'.format(args['caption']))
    # Generate a PIL or Numpy image based on the input caption, and return it
    output_image = model.run_on_input(args['caption'])
    return {'image': output_image}


# NOTE: the keyword arguments below are an assumed reading of the original
# positional calls, e.g. number('', 10, 10, 0, 200).
@runway.command(name='add', inputs={
    'x': number(default=10, min=0, max=200, step=10),
    'y': number(default=40, min=0, max=200, step=10),
}, outputs={'addition': number()}, description='Adds two numbers.')
def add(model, args):
    # Assumed completion: the handler body is truncated in the original.
    return {'addition': args['x'] + args['y']}
@runway.setup(options=setup_options)
def setup(opts):
    model = DeblurHelper(opts)
    return model


# Every model needs to have at least one command. Every command allows you to
# send inputs and process outputs. To see a complete list of supported input
# and output data types: https://sdk.runwayml.com/en/latest/data_types.html
COUNT = 0


@runway.command(
    name='generate',
    inputs={'input': image(width=256, height=256)},
    outputs={'image': image(width=256, height=256)},
    description='Deblurs the input image.')
def generate(model, args):
    print('[GENERATE] Ran a new image "{}"'.format(time.time()))
    # Generate deblurred image
    output_image = model.run_on_input(args['input'])
    return {'image': output_image}


if __name__ == '__main__':
    # run the model server using the default network interface and ports,
    # displayed here for convenience
    runway.run(host='0.0.0.0', port=8000)
import runway
from runway.data_types import image
from image_composition import Image_composition


@runway.setup(options={})
def setup(opts):
    model = Image_composition()
    return model


@runway.command(name='composite', inputs={
    'background': image(channels=4),
    'foreground': image(channels=4)
}, outputs={'composition': image(channels=3)},
    description='Generates a composition of the two input images')
def composite(model, args):
    output_image = model.run_on_input(args['background'], args['foreground'])
    return {'composition': output_image}


if __name__ == '__main__':
    runway.run(host='0.0.0.0', port=9001)
# used.
setup_options = {
    'truncation': number(min=5, max=100, step=1, default=10),
    'seed': number(min=0, max=1000000)
}


@runway.setup(options=setup_options)
def setup(opts):
    msg = '[SETUP] Ran with options: seed = {}, truncation = {}'
    print(msg.format(opts['seed'], opts['truncation']))
    model = ExampleModel(opts)
    return model


# Every model needs to have at least one command. Every command allows you to
# send inputs and process outputs. To see a complete list of supported input
# and output data types: https://sdk.runwayml.com/en/latest/data_types.html
@runway.command(name='classify',
                inputs={'image': image(width=224, height=224)},
                outputs={'class_name': text()})
def classify(model, args):
    # Classify the input image and return the predicted class name
    class_name = model.classify(args['image'])
    return {'class_name': class_name}


if __name__ == '__main__':
    # run the model server using the default network interface and ports,
    # displayed here for convenience
    runway.run(host='0.0.0.0', port=8000)
import runway
from runway.data_types import image
from add_mask import Add_mask


@runway.setup(options={})
def setup(opts):
    model = Add_mask()
    return model


@runway.command(name='generate', inputs={
    'image': image(channels=3),
    'mask': image(channels=4)
}, outputs={'masked_image': image(channels=4)},
    description='Add an alpha mask to an image')
def generate(model, args):
    output_image = model.run_on_input(args['image'], args['mask'])
    return {'masked_image': output_image}


if __name__ == '__main__':
    runway.run(host='0.0.0.0', port=9001)
                         '--name', 'pretrained',
                         '--model', 'test',
                         '--no_dropout']).parse()
    opt.num_threads = 0         # test code only supports num_threads = 0
    opt.batch_size = 1          # test code only supports batch_size = 1
    opt.serial_batches = True   # disable data shuffling; comment this line if results on randomly chosen images are needed
    opt.no_flip = True          # no flip; comment this line if results on flipped images are needed
    opt.display_id = -1         # no visdom display; the test code saves the results to an HTML file
    opt.preprocess = 'none'     # don't resize to a square
    model = create_model(opt)
    model.setup(opt)
    return {'model': model, 'opt': opt}


@runway.command(name='generate', inputs={
    'image': image(description='Input image')
}, outputs={
    'image': image(description='Output image')
})
def generate(model, args):
    opt = model['opt']
    model = model['model']
    orig_image = args['image'].convert('RGB')
    orig_size = orig_image.size
    input_nc = opt.output_nc if opt.direction == 'BtoA' else opt.input_nc
    transform = get_transform(opt, grayscale=(input_nc == 1))
    A = transform(orig_image)
    input_obj = {'A': A.unsqueeze(0), 'A_paths': ''}
    model.set_input(input_obj)
    model.test()
    visuals = model.get_current_visuals()
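    # A hedged sketch of what likely follows the truncation above: the
    # pytorch-CycleGAN-and-pix2pix codebase converts entries of `visuals`
    # (an OrderedDict of tensors) to uint8 arrays with util.util.tensor2im.
    # The 'fake' key and the final resize are assumptions based on that
    # repo's test scripts, not this file's actual code.
    #     from util import util as cg_util
    #     from PIL import Image
    #     out = Image.fromarray(cg_util.tensor2im(visuals['fake']))
    #     out = out.resize(orig_size)
    #     return {'image': out}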