# Example 1
import runway
from run import restore
from PIL import Image


@runway.command(
    'translate',
    inputs={
        'source_imgs':
        runway.image(description='input image to be translated'),
        'scratch_remove':
        runway.boolean(default=False, description='remove scratches'),
    },
    outputs={'image': runway.image(description='out image after restoration')})
def translate(model, inputs):
    """Restore the input image (optionally removing scratches) and return it.

    `restore` presumably writes its result to disk and returns the path —
    TODO confirm against run.py.
    """
    result_path = restore(inputs['source_imgs'], inputs['scratch_remove'])
    # Open via a context manager and force a full decode so the file handle
    # is closed before returning (the original leaked an open file object
    # and relied on PIL's lazy loading).
    with open(result_path, 'rb') as f:
        im = Image.open(f)
        im.load()
    return im


if __name__ == '__main__':
    # Start the Runway model server for this example on port 8889.
    runway.run(port=8889)
# Example 2
import runway
import cv2
import torch
import numpy as np
import download_checkpoint
from PIL import Image


@runway.command(
    'translate',
    inputs={
        'source_imgs':
        runway.image(description='input image to be translated'),
        'large': runway.boolean(default=True, description='use large model'),
    },
    outputs={
        'image':
        runway.image(
            description='output image containing the translated result')
    })
def translate(midas, inputs):
    """Prepare the input image and select a MiDaS depth model by size.

    NOTE(review): this function declares an 'image' output but never
    returns anything, and never runs `midas` on `input_batch` — the body
    looks truncated at the snippet boundary; the inference + output code
    is missing.
    """
    # Runway hands the image over as a PIL image; convert to a numpy array.
    cv_image = np.array(inputs['source_imgs'])
    # NOTE(review): PIL arrays are RGB, so COLOR_BGR2RGB here actually
    # produces BGR — verify which channel order the transform expects.
    img = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
    if inputs['large']:
        # Large model: use the large transform/checkpoint pair.
        transform = download_checkpoint.transforml
        input_batch = transform(img)
        # Rebinds the `midas` parameter — the caller-supplied model is ignored.
        midas = download_checkpoint.midasl
    else:
        # Small model variant.
        transform = download_checkpoint.transformS
        input_batch = transform(img)
        midas = download_checkpoint.midasS
@runway.setup(options={'checkpoint': runway.file(extension='.pth')})
def setup(opts):
    """Load the network weights from the given checkpoint and wrap them
    in a Predictor bound to the best available device."""
    use_cuda = torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')
    net = _load_model(MODEL, opts['checkpoint'])
    return Predictor(net, device)


# Runway input specification for the 'harmonize' command.
inputs = {
    'composite_image': runway.image,
    'foreground_mask': runway.image,
    'transfer_color': runway.boolean(
        default=True,
        description=
        "Transfer colors back to source image for high-resolution output"),
    'transfer_resolution': runway.number(
        default=512,
        min=256,
        max=1024,
        step=128,
        description="Which resolution to transfer colors from"),
}

# Runway output specification for the 'harmonize' command.
outputs = {'harmonized_image': runway.image}


@runway.command('harmonize', inputs=inputs, outputs=outputs)
def harmonize(model, inputs):
# Example 4

def generate_image(generator, latent_vector):
    """Decode an 18x512 latent vector with the generator and return the
    result as a 512x512 RGB PIL image."""
    dlatents = latent_vector.reshape((1, 18, 512))
    generator.set_dlatents(dlatents)
    raw_pixels = generator.generate_images()[0]
    image = PIL.Image.fromarray(raw_pixels, 'RGB')
    return image.resize((512, 512))


# ENCODING

# Runway I/O specs for the 'encode' command.
generate_inputs_1 = {
    'portrait': runway.image(),
    'iterations': runway.number(min=1, max=5000, default=10, step=1.0),
    'encode': runway.boolean(default=False),
}
generate_outputs_1 = {'image': runway.image(width=512, height=512)}

# Running count of encode requests and cache of discovered latent vectors.
encodeCount = 0
latent_vectors = []


@runway.command('encode', inputs=generate_inputs_1, outputs=generate_outputs_1)
def find_in_space(model, inputs):
    # NOTE(review): the body appears truncated in this snippet — only the
    # global declarations survive; the actual latent-space encoding logic
    # (and the return of the output image) is missing.
    global generated_dlatents
    global prevIterations
import runway
import numpy as np
import argparse
import torch
from process_order import draw
import os 
from PIL import Image
import shutil

@runway.command(
    'translate',
    inputs={
        'source_imgs':
        runway.image(description='input image to be translated'),
        # BUG FIX: the original dict contained a bare
        # `runway.boolean(default=True)` with no key, which is a
        # SyntaxError. It is given a name here; the handler currently
        # ignores it — TODO confirm the intended option name.
        'enabled':
        runway.boolean(default=True),
    },
    outputs={'image': runway.image(description='output image containing the translated result')})
def translate(learn, inputs):
    """Save the input image to disk, run `draw` on it, and return the
    resulting image from ./output/temp/result.jpg."""
    os.makedirs('images', exist_ok=True)
    inputs['source_imgs'].save('images/temp.jpg')
    paths = os.path.join('images', 'temp.jpg')
    draw(paths)
    pathout = "./output/temp/result.jpg"
    # Read the result fully into memory before ./output/ is deleted below,
    # so the returned image does not depend on a removed file (PIL loads
    # lazily by default).
    with open(pathout, 'rb') as f:
        img = Image.open(f)
        img.load()
    shutil.rmtree('./output/')
    return img



if __name__ == '__main__':
    # Start the Runway model server for this example on port 8889.
    runway.run(port=8889)