コード例 #1
0
    content_tf = test_transform(0, False)
    style_tf = test_transform(0, False)
    return {
      'vgg': vgg,
      'decoder': decoder,
      'content_tf': content_tf,
      'style_tf': style_tf,
    }


@runway.command(name='generate',
                inputs={
                  'content_image': image(description='Content Image'),
                  'style_image': image(description='Style Image'),
                  'preserve_color': boolean(description='Preserve content image color'),
                  'alpha': number(description='Controls the degree of stylization',
                                                 min=0, max=1, step=0.01, default=1)
                },
                outputs={ 'image': image(description='Output image') })
def generate(model, args):
    content_image = args['content_image'].convert('RGB')
    style_image = args['style_image'].convert('RGB')
    preserve_color = args['preserve_color']
    alpha = args['alpha']
    print('[GENERATE] Ran with preserve_color "{}". alpha "{}"'.format(preserve_color, alpha))

    vgg = model['vgg']
    decoder = model['decoder']
    content_tf = model['content_tf']
    style_tf = model['style_tf']
コード例 #2
0
# =========================================================================

# Import the Runway SDK. Please install it first with
# `pip install runway-python`.
import runway
from runway.data_types import number, text, image, boolean
from deblur2runway import DeblurHelper
import time
# Setup the model, initialize weights, set the configs of the model, etc.
# Every model will have a different set of configurations and requirements.
# Check https://docs.runwayapp.ai/#/python-sdk to see a complete list of
# supported configs. The setup function should return the model ready to be
# used.
# Options shown in the Runway setup UI and passed to @runway.setup below.
setup_options = {
    # NOTE(review): presumably restricts DeblurHelper to one GPU — confirm.
    'use_single_gpu': boolean(default=False),
}


@runway.setup(options=setup_options)
def setup(opts):
    """Build and return the deblurring model configured with *opts*."""
    return DeblurHelper(opts)


# Every model needs to have at least one command. Every command allows to send
# inputs and process outputs. To see a complete list of supported inputs and
# outputs data types: https://sdk.runwayml.com/en/latest/data_types.html
COUNT = 0

コード例 #3
0
    category(choices=[
        "conv2d0 (max:64)", "maxpool0 (max:64)", "conv2d1 (max:64)",
        "conv2d2 (max:192)", "maxpool1 (max:192)", "mixed3a (max:256)",
        "mixed3b (max:480)", "maxpool4 (max:480)", "mixed4a (max:508)",
        "mixed4b (max:512)", "mixed4c (max:512)", "mixed4d (max:528)",
        "mixed4e (max:832)", "maxpool10 (max:832)", "mixed5a (max:832)",
        "mixed5b (max:1024)"
    ],
             default="mixed5b (max:1024)",
             description='choose layer of network to visualize'),
    'neuron':
    number(default=0, min=0, max=1023, step=1, description='Neuron ID'),
    'size':
    number(default=128, min=128, max=1024, step=128, description='Image Size'),
    'transforms':
    boolean(default=False, description='Vary size of visualization'),
    'transform_min':
    number(default=0.3,
           min=0.0,
           max=1.0,
           step=.1,
           description='Minimum scaling amount'),
    'transform_max':
    number(default=0.5,
           min=0.0,
           max=1.0,
           step=.1,
           description='Maximum scaling amount')
}

# Configuration consumed by the dreamer: no source image path by default,
# a single target layer of the pretrained network, and no custom objective.
# NOTE(review): "custom_func" of [None] presumably selects the dreamer's
# default objective for that layer — confirm against the dreamer code.
config = {
    "image_path": None,
    "layers": [model.inception4b],
    "custom_func": [None]
}

# Input schema for the `generate` command (see @runway.command below).
# Ranges/defaults are the values exposed in the Runway UI; the semantic
# meaning of each knob lives in the dreamer implementation — confirm there.
input_dict = {
    "image": image(),
    "octave_scale": number(step=0.05, min=1.0, max=1.7, default=1.2),
    "num_octaves": number(step=1, min=1, max=25, default=5),
    "iterations": number(step=1, min=1, max=100, default=14),
    "lr": number(step=1e-4, min=1e-9, max=1e-1, default=0.05),
    "max_rotation": number(step=0.1, min=0.0, max=1.5, default=0.9),
    # layer_index is bounded by the number of available layers.
    "layer_index": number(step=1, min=0, max=len(layers), default=0),
    # NOTE(review): -1 presumably means "all channels" — confirm.
    "channel_index": number(step=1, min=-1, max=511, default=0),
    "invert_mask": boolean(default=False)
}


@runway.setup
def setup():
    """Wrap the global model in a dreamer instance and return it."""
    return dreamer(model)


@runway.command(name="generate", inputs=input_dict, outputs={"image": image()})
def generate(dreamy_boi, input):

    image_np = preprocess_numpy_img(
        np.array(input["image"]).astype(np.float32) / 255.0)
    """
コード例 #5
0
#setup_options = {
#    'seed': number(min=0, max=10000, step=1, default=101, description='Seed for the random number generator.'),
#}
#@runway.setup(options=setup_options)
def setup():
    """Create and return the neural-style model."""
    return NeuralStyle()


input_list = {
    'content_image':
    image,
    'style_image_1':
    image,
    'original_colors':
    boolean(default=False),
    'style_only':
    boolean(default=False),
    'max_iterations':
    number(min=50, max=1500, step=50, default=500, description='Iterations'),
    'content_layer':
    category(choices=[
        'conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1', 'conv3_2',
        'conv4_1', 'conv4_2', 'conv4_3', 'conv4_4', 'conv5_1', 'conv5_2'
    ],
             default='conv4_2',
             description='what VGG19 layer to use for content'),
    'style_scale':
    number(min=0.1,
           max=2.0,
           step=.05,
コード例 #6
0
    print('[SETUP] loading model...')
    model = UniSentenceEncXling()
    print('[SETUP] done.')
    return model


# User-facing description attached to the `embed` command below.
desc = """\
Infers embeddings for input. Returns an array with the lines of
text and an array with the corresponding embeddings for each line.
"""


@runway.command(name='embed',
                inputs={
                    'text': text(),
                    'tokenize_sentences': boolean(default=True)
                },
                outputs={
                    'sentences': array(item_type=text),
                    'embeddings': array(item_type=vector(length=512))
                },
                description=desc)
def embed(model, args):
    """Split the input into lines/sentences and embed each one.

    Uses NLTK sentence tokenization when requested, otherwise splits on
    newlines. Returns the sentence list alongside one embedding per entry.
    """
    raw_text = args['text']
    sentences = (sent_tokenize(raw_text)
                 if args['tokenize_sentences']
                 else raw_text.split("\n"))
    print('[EMBED] Embedding {} sentences'.format(len(sentences)))
    return {'sentences': sentences, 'embeddings': model.embed(sentences)}
コード例 #7
0
        number(default=24, min=1, max=120, step=1),
        'length_sec':
        number(default=2,
               min=1,
               max=10,
               step=0.2,
               description='Output video length in seconds.'),
        'effect_type':
        category(choices=effect_types,
                 default=effect_types[1],
                 description='Video effect.'),
        'effect_size':
        number(default=0.5, min=0, max=1, step=0.1),
        'reuse':
        boolean(
            default=False,
            description='Reuse depth map and continue from previous iteration.'
        ),
    },
    outputs={'image': image()},
    description='Cartoonize.')
def paint(model, args):
    x_shift = 0
    y_shift = 0
    z_shift = 0
    traj_type = 'double-straight-line'
    effect_type = args['effect_type']
    effect_size = args['effect_size'] * 2

    if effect_type == 'dolly-zoom-in':
        x_shift = 0.00
        y_shift = 0.00
コード例 #8
0
    "image_path": None,
    "layers": [model.inception4b],
    "custom_func": [None]
}

# Input schema for the `generate` command (see @runway.command below).
# Ranges/defaults are the values exposed in the Runway UI; the semantic
# meaning of each knob lives in the dreamer implementation — confirm there.
input_dict = {
    "image": image(),
    "octave_scale": number(step=0.05, min=1.0, max=1.7, default=1.2),
    "num_octaves": number(step=1, min=1, max=25, default=5),
    "iterations": number(step=1, min=1, max=100, default=14),
    "lr": number(step=1e-4, min=1e-9, max=1e-1, default=0.05),
    "max_rotation": number(step=0.1, min=0.0, max=1.5, default=0.9)
}

# Expose one boolean toggle per named layer in the generate-command inputs.
# Iterating `layers` directly avoids materializing a throwaway key list;
# this is safe because the loop mutates input_dict, not layers.
for layer_name in layers:
    input_dict[layer_name] = boolean(default=False)


@runway.setup
def setup():
    """Return a dreamer built around the globally loaded model."""
    return dreamer(model)


@runway.command(name="generate", inputs=input_dict, outputs={"image": image()})
def generate(dreamy_boi, input):

    image_np = preprocess_numpy_img(
        np.array(input["image"]).astype(np.float32) / 255.0)
    """
    generate mask