Example no. 1
        # append the next batch of stroke parameters and re-render the canvas
        PARAMS = np.concatenate([PARAMS, v], axis=1)
        CANVAS_tmp = pt._render(PARAMS, save_jpgs=False, save_video=False)
        # split the rendered canvas into grid patches for the next pass
        CANVAS_tmp = utils.img2patches(CANVAS_tmp, pt.m_grid + 1,
                                       pt.net_G.out_size).to(device)

    pt._save_stroke_params(PARAMS)
    final_rendered_image = pt._render(PARAMS, save_jpgs=False, save_video=True)

    return final_rendered_image


@runway.command(
    "translate",
    inputs={
        "source_imgs":
        runway.image(description="input image to be translated"),
        "Strokes":
        runway.number(min=100,
                      max=700,
                      default=100,
                      description="number of strokes"),
    },
    outputs={
        "image":
        runway.image(
            description="output image containing the translated result")
    },
)
def translate(learn, inputs):
    os.makedirs("images", exist_ok=True)
    inputs["source_imgs"].save("images/temp.jpg")
Example no. 2
# def setup(opts):
# 	# Initialize generator and perceptual model
# 	tflib.init_tf()
# 	model = opts['checkpoint']
# 	print("open model %s" % model)
# 	with open(model, 'rb') as file:
# 		G, D, Gs = pickle.load(file)
# 	Gs.print_layers()
# 	global generator
# 	generator = Generator(Gs, batch_size=1, randomize_noise=False)
# 	perceptual_model = PerceptualModel(256, layer=9, batch_size=1)
# 	perceptual_model.build_perceptual_model(generator.generated_image)
# 	return Gs

generate_inputs = {
    'portrait': runway.image(),
    'iterations': runway.number(min=1, max=5000, default=10, step=1.0),
    'age': runway.number(min=-30, max=20, default=4, step=0.2)
}
# generate_outputs = {
# 	'latent_vector': runway.file()
# }

generate_outputs = {
    #	'generated': runway.image(width=512, height=512)
    # 'vector': runway.vector(length=512)
    "hextext": runway.text
}


# NOTE: stacking this decorator directly above @runway.setup would register
# setup() itself as the 'encode' command; the matching 'encode' handler is
# not part of this fragment.
# @runway.command('encode', inputs=generate_inputs, outputs=generate_outputs)

@runway.setup(options={'checkpoint': runway.file(extension='.pkl')})
def setup(opts):
    tflib.init_tf()
    with open(opts['checkpoint'], 'rb') as file:
        _, _, Gs = pickle.load(file, encoding='latin1')
    return Gs


#@runway.command('project', inputs=project_inputs, outputs={'images': runway.array(item_type=runway.image, max_length=10)})
@runway.command('project',
                inputs={
                    'projectionImage':
                    runway.image(min_width=1024,
                                 min_height=1024,
                                 max_width=1024,
                                 max_height=1024),
                    'steps':
                    runway.number(min=10, max=1000, default=200)
                },
                outputs={'image': runway.image})
def project(model, inputs):
    im = inputs['projectionImage']
    # create the working directories if they do not already exist
    for d in ('./projection', './projection/imgs',
              './projection/records', './projection/out'):
        os.makedirs(d, exist_ok=True)
Example no. 4
                                       layer=9,
                                       batch_size=1)
    perceptual_model.build_perceptual_model(generator.generated_image)
    return Gs


def generate_image(generator, latent_vector):
    # reshape to StyleGAN's (1, 18, 512) extended latent layout and decode
    latent_vector = latent_vector.reshape((1, 18, 512))
    generator.set_dlatents(latent_vector)
    img_array = generator.generate_images()[0]
    img = PIL.Image.fromarray(img_array, 'RGB')
    return img.resize((512, 512))


generate_inputs = {
    'portrait': runway.image(),
    'iterations': runway.number(min=1, max=5000, default=10, step=1.0),
    'age': runway.number(min=-30, max=20, default=4, step=0.2)
}

# generate_outputs = {
# 	'latent_vector': runway.file()
# }

generate_outputs = {
    'generated': runway.image(width=512, height=512)
    # 'vector': runway.vector(length=512)
    # "hextext": runway.text
}

Example no. 5

@runway.setup(options={})
def setup(opts):
    use_gpu = True if torch.cuda.is_available() else False
    # Load the model from the Pytorch Hub
    model = torch.hub.load('facebookresearch/pytorch_GAN_zoo:hub',
                           'DCGAN',
                           pretrained=True,
                           useGPU=use_gpu)
    return model


@runway.command('generate',
                inputs={'z': runway.vector(length=64, sampling_std=0.5)},
                outputs={'image': runway.image(width=64, height=64)})
def generate(model, inputs):
    # Generate ♾ infinite ♾ images
    z = inputs['z']
    latents = torch.from_numpy(z.reshape((1, 64))).float()
    # Generate one image from the input latent vector
    with torch.no_grad():
        generated_image = model.test(latents)
    generated_image = generated_image.clamp(min=-1, max=1)
    generated_image = (generated_image + 1.0) * 255 / 2.0
    # Now generated_image contains our generated image! 🌞
    return {
        'image': generated_image[0].permute(1, 2, 0).numpy().astype(np.uint8)
    }
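
For a quick local check of the handler without the Runway server, it can be called directly. The following is a minimal sketch, assuming the same torch.hub checkpoint as setup() above:

import numpy as np
import torch

# load the same hub model that setup() returns (sketch only, not original code)
model = torch.hub.load('facebookresearch/pytorch_GAN_zoo:hub', 'DCGAN',
                       pretrained=True, useGPU=torch.cuda.is_available())
# sample a latent matching runway.vector(length=64, sampling_std=0.5)
z = np.random.normal(0, 0.5, size=64).astype(np.float32)
out = generate(model, {'z': z})
print(out['image'].shape)  # expected (64, 64, 3)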
Example no. 6
def inference(learner, input_arr):
    im = Image(pil2tensor(input_arr, np.float32).div(255))
    result = learner.predict(im)
    np_img = result[2].data.numpy()[1]
    return np_img


@runway.command('mask',
                inputs={
                    'image':
                    runway.image,
                    'threshold':
                    runway.number(default=0.5, min=0, max=1, step=0.001)
                },
                outputs={'image': runway.image(channels=4)})
def mask(learner, inputs):
    inp = inputs['image'].convert('RGB')
    original_size = inp.size
    inp_resized = inp.resize((512, 512))
    mask1 = inference(learner, inp_resized)
    mask2 = np.fliplr(inference(learner, np.fliplr(inp_resized)))
    mask = (mask1 + mask2) / 2
    mask[mask > inputs['threshold']] = 255
    mask = resize(mask, (original_size[1], original_size[0]),
                  anti_aliasing=False).astype(np.uint8)
    masked = np.concatenate((np.array(inp), np.expand_dims(mask, -1)), axis=2)
    return masked


@runway.setup(options={'checkpoint': runway.file})
def setup(opts):
    # build the Background Matting generator and load the trained weights
    netM = ResnetConditionHR(input_nc=(3, 3, 1, 4),
                             output_nc=4,
                             n_blocks1=7,
                             n_blocks2=3)
    netM = nn.DataParallel(netM)
    netM.load_state_dict(torch.load(opts['checkpoint']))
    netM.cuda()
    netM.eval()
    cudnn.benchmark = True
    return netM


inputs = {
    'input_subject':
    runway.image(description='An input image with the subject.'),
    'input_background':
    runway.image(
        description='The background of the input image without the subject.'),
    'input_segmentation':
    runway.image(description='Segmentation image of the input image'),
    'target_background':
    runway.image(description='Target background image'),
}


@runway.command('generate', inputs=inputs, outputs={'output': runway.image})
def generate(model, inputs):
    netM = model
    reso = (512, 512)  # input resolution to the network
    # original input image
Example no. 8
# 	generator = Generator(Gs, batch_size=1, randomize_noise=False)
# 	return Gs

def generate_image(generator, latent_vector):
	latent_vector = latent_vector.reshape((1, 18, 512))
	generator.set_dlatents(latent_vector)
	img_array = generator.generate_images()[0]
	img = PIL.Image.fromarray(img_array, 'RGB')
	return img.resize((512, 512))   

generate_inputs = {
	'age': runway.number(min=-500, max=500, default=6, step=0.1),
}

generate_outputs = {
	'image': runway.image(width=512, height=512),
}

@runway.command('generat3r', inputs=generate_inputs, outputs=generate_outputs)
def move_and_show(model, inputs):
	# latent_vector_1 and latent_vector_2 are assumed to be defined earlier
	# in the original file; blend the two latents by averaging
	latent_vector = (latent_vector_1 + latent_vector_2) / 2
	# load direction
	age_direction = np.load('ffhq_dataset/latent_directions/age.npy')
	direction = age_direction
	# model = generator
	coeff = inputs['age']/5.0
	new_latent_vector = latent_vector.copy()
	new_latent_vector[:8] = (latent_vector + coeff*direction)[:8]
	image = generate_image(model, new_latent_vector)
	#ax[i].set_title('Coeff: %0.1f' % coeff)
	#plt.show()
	return {'image': image}
Example no. 9
    tflib.init_tf()
    model = opts['checkpoint']
    print("open model %s" % model)
    with open(model, 'rb') as file:
        G, D, Gs = pickle.load(file)
    Gs.print_layers()
    # expose these to the 'encode' command below
    global generator, perceptual_model
    generator = Generator(Gs, batch_size=1, randomize_noise=False)
    perceptual_model = PerceptualModel(opts['image dimensions'],
                                       layer=9,
                                       batch_size=1)
    perceptual_model.build_perceptual_model(generator.generated_image)
    return Gs


generate_inputs = {
    'portrait': runway.image(),
    'iterations': runway.number(min=1, max=5000, default=10, step=1.0)
}

# generate_outputs = {
# 	'latent_vector': runway.file()
# }

generate_outputs = {'generated': runway.image(width=512, height=512)}


@runway.command('encode', inputs=generate_inputs, outputs=generate_outputs)
def find_in_space(model, inputs):
    names = ["looking at you!"]
    perceptual_model.set_reference_images(inputs['portrait'])
    print("image loaded")
import runway
import numpy as np
import argparse
import torch
from process_order import draw
import os 
from PIL import Image
import shutil

@runway.command('translate',
                inputs={
                    'source_imgs':
                    runway.image(description='input image to be translated'),
                    # the original dict was missing a key for this boolean;
                    # 'use_default_order' is a hypothetical placeholder name
                    'use_default_order':
                    runway.boolean(default=True),
                },
                outputs={
                    'image':
                    runway.image(
                        description='output image containing the translated result')
                })
def translate(learn, inputs):
    os.makedirs('images', exist_ok=True)
    inputs['source_imgs'].save('images/temp.jpg')
    paths = os.path.join('images','temp.jpg')
    draw(paths)
    pathout = "./output/temp/result.jpg"
    img = Image.open(open(pathout, 'rb'))
    shutil.rmtree('./output/')
    return img



if __name__ == '__main__':
    runway.run(port=8889)
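
Since the handler only shells through process_order.draw and PIL, it can also be exercised directly, without the HTTP server. A minimal sketch, assuming the repository layout above and a hypothetical input.jpg (the unused boolean input is omitted):

from PIL import Image

# direct call to translate() above; the first argument is unused by its body
result = translate(None, {'source_imgs': Image.open('input.jpg')})
result.save('translated.jpg')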
Example no. 11
# 	global generator
# 	generator = Generator(Gs, batch_size=1, randomize_noise=False)
# 	return Gs

def generate_image(generator, latent_vector):
	latent_vector = latent_vector.reshape((1, 18, 512))
	generator.set_dlatents(latent_vector)
	img_array = generator.generate_images()[0]
	img = PIL.Image.fromarray(img_array, 'RGB')
	return img.resize((512, 512))   


# ENCODING

generate_inputs_1 = {
	'portrait': runway.image(),
	'iterations': runway.number(min=1, max=5000, default=10, step=1.0)
}
generate_outputs_1 = {
	"text": runway.text
}

encodeCount = 0
latent_vectors = []
prevIterations = -1  # sentinel so the first call always encodes

@runway.command('encode', inputs=generate_inputs_1, outputs=generate_outputs_1)
def find_in_space(model, inputs):
	global generated_dlatents
	global prevIterations
	s2 = "Did not encode."
	if (inputs['iterations'] != prevIterations):
Example no. 12
from PIL import Image
import shutil
# imports required by the code below
import os
import sys
import runway
from subprocess import call

os.chdir("./tool/")


def run_cmd(command):
    try:
        print(command)
        call(command, shell=True)
    except KeyboardInterrupt:
        print("Process interrupted")
        sys.exit(1)
    

@runway.command('removal',
                inputs={
                    'source': runway.image(description='input image'),
                    'mask': runway.image(description='mask image')
                },
                outputs={'image': runway.image})
def removal(models, inputs):
    os.makedirs('../images', exist_ok=True)
    os.makedirs('../mask', exist_ok=True)

    inputs['source'].save('../images/00000.png')
    inputs['mask'].save('../mask/00000.png')

    stage_1_command = ("python video_completion.py"
                       + " --mode object_removal"
                       + " --path ../images"
                       + " --path_mask ../mask"
                       + " --outroot ../result/temp_removal"
Example no. 13
import runway
import numpy as np
import torch
from torchvision.transforms.functional import to_tensor, to_pil_image
from PIL import Image

model = torch.jit.load('model.pth').cuda().eval()

new_width = 1024
new_height = 1024


@runway.command(
    'translate',
    inputs={
        'front_imgs': runway.image(description='input image to be translated'),
        'back_imgs': runway.image(description='back image to be translated')
    },
    outputs={
        'image':
        runway.image(
            description='output image containing the translated result')
    })
def translate(learn, inputs):
    srcimg = inputs['front_imgs'].resize((new_width, new_height),
                                         Image.LANCZOS)
    bgrimg = inputs['back_imgs'].resize((new_width, new_height),
                                        Image.LANCZOS)
    src = to_tensor(srcimg).cuda().unsqueeze(0)
    bgr = to_tensor(bgrimg).cuda().unsqueeze(0)
    if src.size(2) <= 2048 and src.size(3) <= 2048:
Example no. 14
    global img_placeholder
    global preds
    global g
    h, w = 480, 640
    img_shape = (h, w, 3)
    batch_shape = (1,) + img_shape
    g = tf.get_default_graph()
    sess = tf.Session(graph=g)
    img_placeholder = tf.placeholder(tf.float32, shape=batch_shape, name='img_placeholder')
    preds = transform.net(img_placeholder)
    load_checkpoint(os.path.join(options['checkpoint_path'], 'fns.ckpt'), sess)
    return sess


@runway.command('stylize',
    inputs={'image': runway.image(channels=4), 'background_image': runway.image(channels=4)},
    outputs={'output': runway.image(channels=4)})
def stylize(sess, inp):
    img = inp['image']
    background_image = inp['background_image'].resize(img.size)
    alpha_mask = img.getchannel("A")
    img = img.convert('RGB')
    original_size = img.size
    img = np.array(img.resize((640, 480)))
    img = np.expand_dims(img, 0)
    with g.as_default():
        output = sess.run(preds, feed_dict={img_placeholder: img})
    output = np.clip(output[0], 0, 255).astype(np.uint8)
    output = Image.fromarray(output).resize(original_size)
    output.putalpha(alpha_mask)
    composite = Image.alpha_composite(background_image, output)
    return composite
Example no. 15
# 	generator = Generator(Gs, batch_size=1, randomize_noise=False)
# 	return Gs


def generate_image(generator, latent_vector):
    latent_vector = latent_vector.reshape((1, 18, 512))
    generator.set_dlatents(latent_vector)
    img_array = generator.generate_images()[0]
    img = PIL.Image.fromarray(img_array, 'RGB')
    return img.resize((512, 512))


# ENCODING

generate_inputs_1 = {
    'portrait': runway.image(),
    'iterations': runway.number(min=1, max=5000, default=10, step=1.0),
    'encode': runway.boolean(default=False)
}
generate_outputs_1 = {'image': runway.image(width=512, height=512)}

encodeCount = 0
latent_vectors = []
# latent_vectors.append(preLoad1)
# latent_vectors.append(preLoad2)
# latent_vectors.append(preLoad1)
# latent_vectors.append(preLoad2)


@runway.command('encode', inputs=generate_inputs_1, outputs=generate_outputs_1)
def find_in_space(model, inputs):
Example no. 16
@runway.setup(options={})
def setup(opts):
    try:
        torch.set_grad_enabled(False)
        torch.backends.cudnn.enabled = True
        exec(open('./3d-ken-burns/common.py', 'r').read())
        exec(open('./3d-ken-burns/models/disparity-estimation.py', 'r').read())
        exec(open('./3d-ken-burns/models/disparity-adjustment.py', 'r').read())
        exec(open('./3d-ken-burns/models/disparity-refinement.py', 'r').read())
        exec(open('./3d-ken-burns/models/pointcloud-inpainting.py', 'r').read())
        return process_load
    except RunwayError as e:
        print(e.code, e.message)
        print(e.to_response())

processingInput = {'image': runway.image(description="photograph")}
processingOutput = {'video': runway.array(item_type=runway.file,
                                          description="3d effect video")}

@runway.command('process', inputs=processingInput, outputs=processingOutput)
def process(model, inputs):
    try:
        numpyImage = numpy.array(inputs['image'])
        intWidth = numpyImage.shape[1]
        intHeight = numpyImage.shape[0]
        dblRatio = float(intWidth) / float(intHeight)
        intWidth = min(int(1024 * dblRatio), 1024)
        intHeight = min(int(1024 / dblRatio), 1024)
        numpyImage = cv2.resize(src=numpyImage, dsize=(intWidth, intHeight), fx=0.0, fy=0.0, interpolation=cv2.INTER_AREA)
Example no. 17
        return parser


opt = Options().parse()


@runway.setup(options={'checkpoints_root': runway.file})
def setup(opts):
    opt.checkpoints_dir = os.path.join(opts['checkpoints_root'], 'checkpoints')
    model = Pix2PixModel(opt)
    model.eval()
    return model


@runway.command('convert',
                inputs={'input': runway.image(channels=1)},
                outputs={'output': runway.image})
def convert(model, inputs):
    img = np.array(inputs['input'])
    h, w = img.shape[0:2]
    img = Image.fromarray(img)
    params = get_params(opt, (w, h))
    transform_label = get_transform(opt,
                                    params,
                                    method=Image.NEAREST,
                                    normalize=False)
    label_tensor = transform_label(img).unsqueeze(0)
    data = {'label': label_tensor, 'instance': label_tensor, 'image': None}
    generated = model(data, mode='inference')
    output = util.tensor2im(generated[0])
    output = Image.fromarray(output)
Example no. 18
import runway
import numpy as np
import argparse
import torch
from torchvision import transforms
import os.path
from inference_img import imgint
from PIL import Image
import cv2

sample_inputs = {
    'source_imgs': runway.image(description='input image to be translated'),
    'target': runway.image(description='target image to interpolate toward'),
    'amount': runway.number(min=0, max=16, default=0)
}


@runway.command(
    'translate',
    inputs=sample_inputs,
    outputs={
        'image':
        runway.image(
            description='output image containing the translated result')
    })
def translate(learn, inputs):
    listimg, h, w = imgint(inputs['source_imgs'], inputs['target'])
    i = int(inputs['amount'])
    cvimg = (listimg[i][0] * 255).byte().cpu().numpy().transpose(1, 2, 0)[:h, :w]
    img = cv2.cvtColor(cvimg, cv2.COLOR_BGR2RGB)
    return img
Example no. 19
import runway
from run import restore
from PIL import Image


@runway.command(
    'translate',
    inputs={
        'source_imgs':
        runway.image(description='input image to be translated'),
        'scratch_remove':
        runway.boolean(default=False, description='remove scratches'),
    },
    outputs={'image': runway.image(description='output image after restoration')})
def translate(model, inputs):
    image = restore(inputs['source_imgs'], inputs['scratch_remove'])
    im = Image.open(open(image, 'rb'))
    return im


if __name__ == '__main__':
    runway.run(port=8889)
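
runway.run() serves each @runway.command as an HTTP endpoint, so a complete script like this one can be smoke-tested with a plain POST request. The sketch below assumes the runway-python wire format (JSON bodies, images as base64 data URIs) and a hypothetical sample.jpg:

import base64
import requests

with open('sample.jpg', 'rb') as f:
    data_uri = 'data:image/jpeg;base64,' + base64.b64encode(f.read()).decode()

resp = requests.post('http://localhost:8889/translate',
                     json={'source_imgs': data_uri, 'scratch_remove': True})
resp.raise_for_status()
print(resp.json()['image'][:50])  # the output image also comes back as a data URI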
Example no. 20
    return out



@runway.setup(options={'checkpoint': runway.file(extension='.pkl')})
def setup(opts):
    tflib.init_tf()
    with open(opts['checkpoint'], 'rb') as file:
        _, _, Gs = pickle.load(file, encoding='latin1')
    return Gs




#@runway.command('project', inputs=project_inputs, outputs={'images': runway.array(item_type=runway.image, max_length=10)})
@runway.command('project',
                inputs={
                    'projectionImage':
                    runway.image(min_width=1024,
                                 min_height=1024,
                                 max_width=1024,
                                 max_height=1024),
                    'steps':
                    runway.number(min=10, max=1000, default=200)
                },
                outputs={'image': runway.image})
def project(model, inputs):
    im = inputs['projectionImage']
    # create the working directories if they do not already exist
    for d in ('./projection', './projection/imgs',
              './projection/records', './projection/out'):
        os.makedirs(d, exist_ok=True)

    if os.path.isfile('./projection/imgs/project.png'):
        os.remove('./projection/imgs/project.png')

    for f in os.listdir('./projection/records/'):
Example no. 21
@runway.setup(options={
    'checkpoint':
    runway.file(extension='.pkl', description='checkpoint file')
})
def setup(opts):
    path = Path(".")
    learn = load_learner(path, opts['checkpoint'])
    return learn


@runway.command(
    'translate',
    inputs={
        'source_imgs':
        runway.image(description='input image to be translated'),
    },
    outputs={
        'image':
        runway.image(
            description='output image containing the translated result')
    })
def translate(learn, inputs):
    img_t = T.ToTensor()(inputs['source_imgs'])
    img_fast = Image(img_t)
    p, img_hr, b = learn.predict(img_fast)
    return np.uint8(np.clip(image2np(img_hr), 0, 1) * 255)


if __name__ == '__main__':
    runway.run(port=8889)
Example no. 22
lineCategory = runway.category(
    description="Number of lines of characters. Improves accurary.",
    choices=['single', 'multiple'],
    default='multiple')


@runway.setup(options={'lines': lineCategory})
def setup(opts):
    model = CnOcr()
    if opts['lines'] == 'multiple':
        return model.ocr
    return model.ocr_for_single_line


classifyInput = {
    'image': runway.image(description="Image with Chinese characters.")
}
classifyOutput = {
    'characters':
    runway.array(item_type=runway.array(item_type=runway.text),
                 description="2D character array.")
}


@runway.command('classify', inputs=classifyInput, outputs=classifyOutput)
def classify(model, inputs):
    image = numpy.array(inputs['image'])
    res = model(image)
    return {'characters': res}
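
For a quick sanity check, classify() can be called directly with a PIL image. A minimal sketch, assuming cnocr is installed and using a hypothetical chinese_sample.png; with the older CnOcr API each detected line comes back as a list of single characters:

from PIL import Image
from cnocr import CnOcr

ocr_fn = CnOcr().ocr  # the 'multiple'-lines variant chosen in setup()
result = classify(ocr_fn, {'image': Image.open('chinese_sample.png')})
for line in result['characters']:
    print(''.join(line))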

Example no. 23
    print('Loading model...')
    custom_objects = {
        'BilinearUpSampling2D': BilinearUpSampling2D,
        'depth_loss_function': depth_loss_function
    }
    graph = tf.get_default_graph()
    model = load_model(opts['model_file'],
                       custom_objects=custom_objects,
                       compile=False)
    print('Model loaded')
    return graph, model


@runway.command('predict_depth',
                inputs={'image': runway.image},
                outputs={'depth_image': runway.image(channels=1)})
def predict_depth(graph_and_model, inputs):
    graph, model = graph_and_model
    img = inputs['image']
    original_size = img.size
    img = np.clip(np.asarray(img.resize((640, 480)), dtype=float) / 255, 0, 1)
    img = np.expand_dims(img, 0)
    with graph.as_default():
        outputs = predict(model, img)
    return Image.fromarray(np.uint8(np.squeeze(outputs) * 255),
                           'L').resize(original_size)


if __name__ == '__main__':
    runway.run(debug=True, model_options={'model_file': 'nyu.h5'})