Example #1
import runway
import numpy as np
import torch


# The option type below is an assumption; adjust it to the PGAN checkpoint
# names you actually use (e.g. 'celebAHQ-512').
@runway.setup(options={'checkpoint': runway.text(default='celebAHQ-512')})
def setup(opts):
    checkpoint = opts['checkpoint']
    use_gpu = torch.cuda.is_available()
    # Load the model from the PyTorch Hub
    model = torch.hub.load('facebookresearch/pytorch_GAN_zoo:hub',
                           'PGAN', model_name=checkpoint,
                           pretrained=True, useGPU=use_gpu)
    return model


@runway.command('generate',
                inputs={'z': runway.vector(length=512, sampling_std=0.5)},
                outputs={'image': runway.image})
def generate(model, inputs):
    # Generate ♾ infinite ♾ images
    z = inputs['z']
    latents = z.reshape((1, 512))  # must match the declared vector length
    latents = torch.from_numpy(latents)
    with torch.no_grad():
        generated_image = model.test(latents.float())
    generated_image = generated_image.clamp(min=-1, max=1)
    generated_image = ((generated_image + 1.0) * 255 / 2.0)
    # Now generated_image contains our generated image!
    return generated_image[0].permute(1, 2, 0).cpu().numpy().astype(np.uint8)


if __name__ == '__main__':
    runway.run(port=5232)
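
For reference, runway.run() serves each @runway.command as an HTTP route (here POST /generate). A minimal client sketch for this example, assuming the runway-python SDK's JSON conventions (vectors as number arrays, images returned as base64 data URIs):

import base64
import io

import numpy as np
import requests
from PIL import Image

# Hypothetical client; the port matches the server above.
z = np.random.normal(0, 0.5, size=512).tolist()
resp = requests.post('http://localhost:5232/generate', json={'z': z})
data_uri = resp.json()['image']  # assumed: 'data:image/jpeg;base64,...'
image = Image.open(io.BytesIO(base64.b64decode(data_uri.split(',')[1])))
image.save('generated.png')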

Example #2
import runway
import numpy as np
from ISR.models import RDN


@runway.setup(options={'checkpoint': runway.file(extension='.hdf5')})
def setup(opts):
    rdn = RDN(arch_params={'C':6, 'D':20, 'G':64, 'G0':64, 'x':2})
    rdn.model.load_weights(opts['checkpoint'])
    return rdn
    

@runway.command('upscale', inputs={'image': runway.image}, outputs={'upscaled': runway.image})
def upscale(rdn, inputs):
    width, height = inputs['image'].size
    # Large inputs are upscaled patch-by-patch to keep memory use bounded.
    if width >= 1000 or height >= 1000:
        return rdn.predict(np.array(inputs['image']), by_patch_of_size=256)
    else:
        return rdn.predict(np.array(inputs['image']))


if __name__ == '__main__':
    runway.run(port=4231)
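
The same route convention applies to image inputs; a client sketch for the upscaler, assuming runway-python's base64 data URI encoding for images:

import base64

import requests

# Hypothetical client for the server above.
with open('input.jpg', 'rb') as f:
    data_uri = 'data:image/jpeg;base64,' + base64.b64encode(f.read()).decode()
resp = requests.post('http://localhost:4231/upscale', json={'image': data_uri})
upscaled = resp.json()['upscaled']  # base64 data URI of the result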

Example #3
    sess = tf.Session(graph=g)
    img_placeholder = tf.placeholder(tf.float32, shape=batch_shape, name='img_placeholder')
    preds = transform.net(img_placeholder)
    load_checkpoint(os.path.join(options['checkpoint_path'], 'fns.ckpt'), sess)
    return sess


@runway.command('stylize',
    inputs={'image': runway.image(channels=4), 'background_image': runway.image(channels=4)},
    outputs={'output': runway.image(channels=4)})
def stylize(sess, inp):
    img = inp['image']
    background_image = inp['background_image'].resize(img.size)
    alpha_mask = img.getchannel("A")
    img = img.convert('RGB')
    original_size = img.size
    img = np.array(img.resize((640, 480)))
    img = np.expand_dims(img, 0)
    with g.as_default():
        output = sess.run(preds, feed_dict={img_placeholder: img})
    output = np.clip(output[0], 0, 255).astype(np.uint8)
    output = Image.fromarray(output).resize(original_size)
    output.putalpha(alpha_mask)
    composite = Image.alpha_composite(background_image, output)
    return dict(output=composite)


if __name__ == '__main__':
    runway.run(model_options={'checkpoint_path': 'models/ckpt_hokusai_b20_e4_cw15'})
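
The masking trick above (stylize the RGB channels, re-attach the source alpha, then composite over a background) also works standalone with PIL; the file names here are hypothetical:

from PIL import Image

fg = Image.open('subject.png').convert('RGBA')
bg = Image.open('background.png').convert('RGBA').resize(fg.size)
alpha = fg.getchannel('A')

stylized = fg.convert('RGB')  # stand-in for the style-transfer network
stylized.putalpha(alpha)      # only the subject stays opaque
Image.alpha_composite(bg, stylized).save('composite.png')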


Example #4
                outputs={ 'image': image(description='Output image') })
def generate(model, args):
    content_image = args['content_image'].convert('RGB')
    style_image = args['style_image'].convert('RGB')
    preserve_color = args['preserve_color']
    alpha = args['alpha']
    print('[GENERATE] Ran with preserve_color "{}". alpha "{}"'.format(preserve_color, alpha))

    vgg = model['vgg']
    decoder = model['decoder']
    content_tf = model['content_tf']
    style_tf = model['style_tf']

    content = content_tf(content_image)
    style = style_tf(style_image)
    if preserve_color:
        style = coral(style, content)
    style = style.to(device).unsqueeze(0)
    content = content.to(device).unsqueeze(0)
    with torch.no_grad():
        output = style_transfer(vgg, decoder, content, style, alpha)
    ndarr = output[0].mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()

    return {
        'image': Image.fromarray(ndarr)
    }


if __name__ == '__main__':
    runway.run(host='0.0.0.0', port=8888)
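
The alpha input is the usual AdaIN content/style trade-off. A minimal sketch of that interpolation in feature space (not the full style_transfer pipeline above, which also runs the VGG encoder and the decoder):

import torch

def adain(content_feat, style_feat, eps=1e-5):
    # Re-normalize content features to the style's channel-wise statistics.
    c_mean = content_feat.mean(dim=(2, 3), keepdim=True)
    c_std = content_feat.std(dim=(2, 3), keepdim=True) + eps
    s_mean = style_feat.mean(dim=(2, 3), keepdim=True)
    s_std = style_feat.std(dim=(2, 3), keepdim=True)
    return s_std * (content_feat - c_mean) / c_std + s_mean

def blend(content_feat, style_feat, alpha):
    # alpha=1 -> fully stylized features, alpha=0 -> plain content features.
    t = adain(content_feat, style_feat)
    return alpha * t + (1 - alpha) * content_feat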

Example #5
    )
    sess.run([tf.local_variables_initializer()])
    init_fn(sess)
    return sess

stylize_inputs = {
    'content_image': runway.image,
    'style_image': runway.image,
    'interpolation_weight': runway.number(default=0.5, min=0, max=1, step=0.1)
}

@runway.command('stylize', inputs=stylize_inputs, outputs={'image': runway.image})
def stylize(sess, inputs):
    content_img_np = np.array(inputs['content_image'])
    style_image_np = np.array(inputs['style_image'])
    interpolation_weight = inputs['interpolation_weight']
    # Style parameters predicted from the content image itself act as the
    # "identity" endpoint, so interpolation weight 0 roughly returns the input.
    identity_params = sess.run(bottleneck_feat, feed_dict={style_img_ph: content_img_np})
    style_params = sess.run(bottleneck_feat, feed_dict={style_img_ph: style_image_np})
    stylized_image_res = sess.run(
        stylized_images,
        feed_dict={
            bottleneck_feat: identity_params * (1 - interpolation_weight) + style_params * interpolation_weight,
            content_img_ph: content_img_np
        })
    out = (stylized_image_res*255.0).astype(np.uint8)
    return out[0]


if __name__ == '__main__':
    runway.run(port=8540)

Example #6
from PIL import Image
import cv2

sample_inputs = {
    'source_imgs': runway.image(description='source image to be translated'),
    'target': runway.image(description='target image to translate toward'),
    'amount': runway.number(min=0, max=16, default=0)
}


@runway.command(
    'translate',
    inputs=sample_inputs,
    outputs={
        'image':
        runway.image(
            description='output image containing the translated result')
    })
def translate(learn, inputs):
    listimg, h, w = imgint(inputs['source_imgs'], inputs['target'])
    i = int(inputs['amount'])  # index into the returned image list
    cvimg = (listimg[i][0] * 255).byte().cpu().numpy().transpose(1, 2, 0)[:h, :w]
    img = cv2.cvtColor(cvimg, cv2.COLOR_BGR2RGB)
    im_pil = Image.fromarray(img)
    return im_pil


if __name__ == '__main__':
    runway.run(port=8889)
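
An aside: the cv2.cvtColor call above only reverses the channel order, so a dependency-free equivalent is a slice on the last axis:

import numpy as np
from PIL import Image

bgr = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)  # stand-in image
rgb = bgr[..., ::-1].copy()  # BGR -> RGB; .copy() gives PIL a contiguous buffer
im_pil = Image.fromarray(rgb)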

Example #7
                           'DCGAN',
                           pretrained=True,
                           useGPU=use_gpu)
    return model


@runway.command('generate',
                inputs={'z': runway.vector(length=64, sampling_std=0.5)},
                outputs={'image': runway.image(width=64, height=64)})
def generate(model, inputs):
    # Generate ♾ infinite ♾ images
    z = inputs['z']
    latents = z.reshape((1, 64))
    latents = torch.from_numpy(latents)
    # Generate one image from the supplied latent vector
    with torch.no_grad():
        generated_image = model.test(latents.float())
    generated_image = generated_image.clamp(min=-1, max=1)
    generated_image = ((generated_image + 1.0) * 255 / 2.0)
    # Now generated_image contains our generated image! 🌞
    return {
        'image': generated_image[0].permute(1, 2, 0).cpu().numpy().astype(np.uint8)
    }


if __name__ == '__main__':
    runway.run(port=5133)

Example #8
    h, w = 480, 640
    img_shape = (h, w, 3)
    batch_shape = (1, ) + img_shape
    g = tf.get_default_graph()
    sess = tf.Session(graph=g)
    img_placeholder = tf.placeholder(tf.float32,
                                     shape=batch_shape,
                                     name='img_placeholder')
    preds = transform.net(img_placeholder)
    load_checkpoint(os.path.join(options['checkpoint_path'], 'fns.ckpt'), sess)
    return sess


@runway.command('stylize',
                inputs={'image': runway.image},
                outputs={'output': runway.image})
def stylize(sess, inp):
    img = inp['image']
    original_size = img.size
    img = np.array(img.resize((640, 480)))
    img = np.expand_dims(img, 0)
    with g.as_default():
        output = sess.run(preds, feed_dict={img_placeholder: img})
    output = np.clip(output[0], 0, 255).astype(np.uint8)
    output = Image.fromarray(output).resize(original_size)
    return dict(output=output)


if __name__ == '__main__':
    runway.run(model_options={'checkpoint_path': 'models/Cubist'})

Example #9
}

command_outputs = {'output': runway.image}


@runway.command('convert', inputs=command_inputs, outputs=command_outputs)
def convert(model, inputs):
    img = inputs['semantic_map']
    original_size = img.size
    img = img.resize((opt.load_size, opt.load_size))
    params = get_params(opt, img.size)
    transform_label = get_transform(opt,
                                    params,
                                    method=Image.NEAREST,
                                    normalize=False)
    label_tensor = transform_label(img) * 255.0
    label_tensor[label_tensor == 255.0] = 0
    data = {
        'label': label_tensor.unsqueeze(0),
        'instance': label_tensor.unsqueeze(0),
        'image': None
    }
    generated = model(data, mode='inference')
    output = util.tensor2im(generated[0])
    output = Image.fromarray(output).resize(original_size)
    return output


if __name__ == '__main__':
    runway.run(port=5132)

Example #10
def translate(models, inputs):
    global aligned_target
    global target_embedding
    global last_target
    fd = models['fd']
    fp = models['fp']
    idet = models['idet']
    fv = models['fv']
    generator = models['generator']
    source = np.array(inputs['source'])
    target = np.array(inputs['target'])
    src, mask, aligned_im, (x0, y0, x1, y1), landmarks = utils.get_src_inputs(
        source, fd, fp, idet)
    if last_target is None or not np.array_equal(last_target, target):
        aligned_target, target_embedding = utils.get_tar_inputs([target], fd,
                                                                fv)
    last_target = target
    out = generator.inference(src, mask, aligned_target, target_embedding)
    result_face = np.squeeze(((out[0] + 1) * 255 / 2).astype(np.uint8))
    result_img = utils.post_process_result(source, fd, result_face, aligned_im,
                                           src, x0, y0, x1, y1, landmarks)
    return result_img


if __name__ == "__main__":
    # runway.run()
    runway.run(model_options={
        'encoder': './weights/encoder.h5',
        'decoder': './weights/decoder.h5'
    })
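
The globals above act as a small cache: the target embedding is recomputed only when the target image actually changes between calls. The same idea in a self-contained sketch (embed_fn is a hypothetical stand-in for the face-verification model):

import numpy as np

_last_input = None
_cached = None

def embed_cached(image, embed_fn):
    # Recompute embed_fn(image) only when the pixels change between calls.
    global _last_input, _cached
    if _last_input is None or not np.array_equal(_last_input, image):
        _cached = embed_fn(image)
        _last_input = image.copy()
    return _cached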

Example #11
def setup():
    cfg_from_file('cfg/coco_attn2.yml')
    cfg.CUDA = torch.cuda.is_available()
    wordtoix, ixtoword = word_index()
    print('Loading Model...')
    text_encoder, netG = models(len(wordtoix))
    print('Models Loaded')
    seed = 100
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if cfg.CUDA:
        torch.cuda.manual_seed_all(seed)
    return (wordtoix, ixtoword, text_encoder, netG)


@runway.command('generate',
                inputs={'caption': runway.text},
                outputs={'result': runway.image})
def generate_command(model, inp):
    wordtoix, ixtoword, text_encoder, netG = model
    caption = inp["caption"]
    img = generate(caption, wordtoix, ixtoword, text_encoder, netG, False)
    if img is None:
        img = PIL.Image.new('RGB', (256, 256), color='black')
    return dict(result=img)


if __name__ == "__main__":
    runway.run(port=9000, debug=True)

Example #12
        global_feature = sess.run(model["global_f_ph"], feed_dict={model["img_ph"]: img})
        
        h_np = np.zeros([batch_size, 1024])
        c_np = np.zeros([batch_size, 1024])

        while True:
            action_np, h_np, c_np = sess.run(
                (action, h, c),
                feed_dict={
                    model["img_ph"]: img,
                    model["global_f_ph"]: global_feature,
                    model["h_ph"]: h_np,
                    model["c_ph"]: c_np
                })
            ratios, terminals = command2action(action_np, ratios, terminals)
            bbox = generate_bbox(origin_image, ratios)
            if np.sum(terminals) == batch_size:
                return bbox

            img = crop_input(origin_image, bbox)

    xmin, ymin, xmax, ymax = auto_cropping([ip_img - 0.5], model["sess"],
                                           model["action"], model["h"],
                                           model["c"])[0]

    return {"output_image": im[ymin:ymax, xmin:xmax]}

if __name__ == "__main__":
    runway.run(model_options={"checkpoint" : "vfn_rl.pkl"})




    
def segment_humans(model, inputs):
    frame = np.array(inputs["input_image"])

    #image = frame[...,::-1]
    h, w = frame.shape[0], frame.shape[1]

    # Predict mask
    X, pad_up, pad_left, h_new, w_new = utils.preprocessing(frame, expected_size=320, pad_value=0)

    with torch.no_grad():
        if torch.cuda.is_available():
            X = X.cuda()
        mask = model(X)
        mask = mask[..., pad_up: pad_up + h_new, pad_left: pad_left + w_new]
        mask = F.interpolate(mask, size=(h, w), mode='bilinear', align_corners=True)
        mask = F.softmax(mask, dim=1)
        # channel 1 of the 2-class softmax is the human-probability map
        mask = mask[0, 1, ...].cpu().numpy()

    mask = 255*mask
    mask = np.expand_dims(mask, axis=2)
    image_alpha = np.concatenate((frame, mask), axis=2)
    return image_alpha.astype(np.uint8)

if __name__ == "__main__":
    runway.run(model_options={"backbone": "mobilenetv2",
                              "checkpoint": "../asdas/UNet_MobileNetV2.pth"})

Example #14
    print("\nPerforming object detection:")
    input_imgs = Variable(input_img.type(Tensor))

    # Get detections
    with torch.no_grad():
        detections = model["model"](input_imgs)
        detections = non_max_suppression(detections, 0.8, 0.4)

    bboxes = []
    class_preds = []
    scores = []
    # Collect bounding boxes, labels, and confidence scores of detections
    if detections is not None:
        # Rescale boxes to original image
        detections = rescale_boxes(detections[0], 416)

        unique_labels = detections[:, -1].cpu().numpy()
        class_preds = [model["classes"][int(i)] for i in unique_labels]

        op = detections.cpu().numpy()
        for i in range(len(op)):
            bboxes.append(op[i, 0:4])
            scores.append(op[i, 5])

    return dict(bboxes=bboxes, classes=class_preds, scores=scores)


if __name__ == "__main__":
    runway.run(model_options={"checkpoint_dir": "./checkpoint"})

Example #15
	generator.set_dlatents(latent_vector)
	img_array = generator.generate_images()[0]
	img = PIL.Image.fromarray(img_array, 'RGB')
	return img.resize((512, 512))   

generate_inputs = {
	'age': runway.number(min=-500, max=500, default=6, step=0.1),
}

generate_outputs = {
	'image': runway.image(width=512, height=512),
}

@runway.command('generat3r', inputs=generate_inputs, outputs=generate_outputs)
def move_and_show(model, inputs):
	# Blend the two source latents by averaging them
	latent_vector = (latent_vector_1 + latent_vector_2) / 2
	# Load the learned age direction
	age_direction = np.load('ffhq_dataset/latent_directions/age.npy')
	direction = age_direction
	coeff = inputs['age'] / 5.0
	new_latent_vector = latent_vector.copy()
	# Shift only the first 8 style layers (coarse/mid features)
	new_latent_vector[:8] = (latent_vector + coeff * direction)[:8]
	image = generate_image(model, new_latent_vector)
	return {'image': image}

if __name__ == '__main__':
	runway.run(debug=True)
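
The [:8] slice above applies the direction only to the first 8 of StyleGAN's style layers, steering coarse and mid-level features while leaving fine detail mostly untouched. The arithmetic in isolation (shapes assume an 18x512 FFHQ-style latent):

import numpy as np

latent = np.random.randn(18, 512).astype(np.float32)     # stand-in latent
direction = np.random.randn(18, 512).astype(np.float32)  # e.g. the age direction

coeff = 6 / 5.0  # mirrors inputs['age'] / 5.0 above
edited = latent.copy()
edited[:8] = (latent + coeff * direction)[:8]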

Example #16
@runway.setup(options={'checkpoints_root': runway.file(is_directory=True)})
def setup(opts):
    opt.name = opts['checkpoints_root'].split('/')[-1]
    opt.checkpoints_dir = os.path.join(opts['checkpoints_root'], '..')
    model = create_model(opt)
    return model


@runway.command('generate',
                inputs={'image': runway.image},
                outputs={'image': runway.image})
def generate(model, inputs):
    label = inputs['image']
    params = get_params(opt, label.size)
    transform_label = get_transform(opt,
                                    params,
                                    method=Image.NEAREST,
                                    normalize=False)
    label_tensor = transform_label(label)
    label_tensor = label_tensor.unsqueeze(0)
    generated = model.inference(label_tensor, None)
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    im = util.tensor2im(generated.data[0])
    return Image.fromarray(im)


if __name__ == '__main__':
    runway.run(host='0.0.0.0',
               port=8888,
               model_options={'checkpoints_root': './checkpoints/Boston'})

Example #17
inputs = {'noise_vector': vector(length=128, description='A random seed.')}
outputs = {'image': image(width=512, height=512)}


# The @runway.command() decorator is used to create interfaces to call functions
# remotely via an HTTP endpoint. This lets you send data to, or get data from,
# your model. Each command creates an HTTP route that the Runway app will use
# to communicate with your model (e.g. POST /generate). Multiple commands
# can be defined for the same model.
@runway.command('generate',
                inputs=inputs,
                outputs=outputs,
                description='Generate an image.')
def generate(model, input_args):
    # Functions wrapped by @runway.command() receive two arguments:
    # 1. Whatever is returned by a function wrapped by @runway.setup(),
    #    usually a model.
    # 2. The input arguments sent by the remote caller via HTTP. These values
    #    match the schema defined by inputs.
    # The input key must match the schema declared in inputs above.
    z = input_args['noise_vector']
    return model.generate(z)


# The runway.run() function triggers a call to the function wrapped by
# @runway.setup() passing model_options as its single argument. It also
# creates an HTTP server that listens for and fulfills remote requests that
# trigger commands.
if __name__ == '__main__':
    runway.run(host='0.0.0.0', port=9000, model_options={'model_size': 'big'})

Example #18
    "ear_r": [215, 175, 125],
    "eye_g": [220, 180, 210],
    "neck_l": [125, 125, 255]
}

to_tensor = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])


@runway.command('parse',
                inputs={'image': runway.image},
                outputs={
                    'parsed_face':
                    runway.segmentation(label_to_id=label_to_id,
                                        label_to_color=label_to_color)
                })
def parse(model, inputs):
    image = inputs['image'].resize((512, 512), Image.BILINEAR)
    img = to_tensor(image)
    img = torch.unsqueeze(img, 0)
    if torch.cuda.is_available():
        img = img.cuda()
    out = model(img)[0]
    parsing = out.squeeze(0).cpu().detach().numpy().argmax(0)
    return parsing.astype(np.uint8)


if __name__ == '__main__':
    runway.run(model_options={'checkpoint': './79999_iter.pth'})

Example #19
def process_tile(model, tile):
    img = np.transpose(tile[:, :, [2, 1, 0]], (2, 0, 1))
    img = torch.from_numpy(img).float()
    img = img.unsqueeze(0)
    img = img.to(device)
    output = model(img).data.squeeze().float().cpu().clamp_(0, 1).numpy()
    output = np.transpose(output[[2, 1, 0], :, :], (1, 2, 0))
    output = resize(output, tile.shape)
    return output


@runway.command(name='upscale',
                inputs={'image': runway.image},
                outputs={'upscaled': runway.image})
def upscale(model, inputs):
    img = np.array(inputs['image'])
    img = img * 1.0 / 255
    # Split into overlapping 512x512 tiles; pyramid weighting blends the
    # overlapping predictions so tile seams are hidden in the merged result.
    tiler = ImageSlicer(img.shape,
                        tile_size=(512, 512),
                        tile_step=(256, 256),
                        weight='pyramid')
    tiles = [process_tile(model, tile) for tile in tiler.split(img)]
    output = tiler.merge(tiles)
    output = (output * 255.0).round().astype('uint8')
    return dict(upscaled=output)


if __name__ == '__main__':
    runway.run(port=4323)

Example #20
        'mask': 1
    },
                        label_to_color={
                            'background': [0, 0, 0],
                            'mask': [255, 255, 255]
                        })
}


@runway.command('inpaint',
                inputs=command_inputs,
                outputs={'inpainted': runway.image})
def inpaint(output_image, inputs):
    image = inputs['image']
    original_size = image.size
    image = np.array(image.resize((256, 256)))
    image = np.expand_dims(image, 0)
    mask = np.array(inputs['mask'].resize((256, 256)))
    mask = mask * 255
    mask = np.stack((mask, ) * 3, axis=-1)
    mask = np.expand_dims(mask, 0)
    feed_dict = {input_image: np.concatenate([image, mask], axis=2)}
    with g.as_default():
        result = sess.run(output_image, feed_dict=feed_dict)
    return Image.fromarray(result[0][:, :, ::-1]).resize(original_size)


if __name__ == '__main__':
    runway.run(
        port=5232,
        model_options={'checkpoint_dir': './model_logs/release_places2_256'})

Example #21
    im = inputs['projectionImage']
    os.makedirs('./projection/imgs', exist_ok=True)
    os.makedirs('./projection/records', exist_ok=True)
    os.makedirs('./projection/out', exist_ok=True)

    if os.path.isfile('./projection/imgs/project.png'):
        os.remove('./projection/imgs/project.png')

    for f in os.listdir('./projection/records/'):
        if os.path.isfile(os.path.join('./projection/records/', f)):
            os.remove(os.path.join('./projection/records/', f))

    im.save('./projection/imgs/project.png')

    dataset_tool.create_from_images("./projection/records/", "./projection/imgs/", True)

    output = get_projected_real_images("records", "./projection/", 1, 10, inputs['steps'], model)

    # return the last item
    return output[-1]


if __name__ == '__main__':
    runway.run(model_options={ 'checkpoint': 'network-snapshot-001383.pkl' })

Example #22
}


@runway.command('inpaint',
                inputs=command_inputs,
                outputs={'output': runway.image})
def inpaint(model, inputs):
    output, input_image_tf, input_mask_tf = model
    image = inputs['image']
    original_size = image.size
    image = np.array(image.resize((256, 256)), dtype=np.float32)
    mask = np.array(inputs['mask'].resize((256, 256)), dtype=np.float32)
    mask = np.expand_dims(mask, -1)
    result = sess.run(output,
                      feed_dict={
                          input_image_tf: np.expand_dims(image, 0),
                          input_mask_tf: np.expand_dims(mask, 0)
                      })
    result = result[0][:, :, ::-1]
    result = imresize(result, original_size[::-1])
    mask = np.array(inputs['mask'].resize(original_size), dtype=np.float32)
    mask = np.stack([mask, mask, mask], -1)
    masked_result = mask * result
    masked_result += (1 - mask) * np.array(inputs['image'])
    return masked_result.astype(np.uint8)


if __name__ == "__main__":
    runway.run(model_options={
        'checkpoint_dir': 'checkpoints/places2_512x680_freeform'
    })

Example #23
    result = learner.predict(im)
    np_img = result[2].data.numpy()[1]
    return np_img


@runway.command('mask',
                inputs={
                    'image': runway.image,
                    'threshold': runway.number(default=0.5, min=0, max=1, step=0.001)
                },
                outputs={'image': runway.image(channels=4)})
def mask(learner, inputs):
    inp = inputs['image'].convert('RGB')
    original_size = inp.size
    inp_resized = inp.resize((512, 512))
    # Test-time augmentation: average the prediction on the image and on its
    # horizontal flip for a smoother mask.
    mask1 = inference(learner, inp_resized)
    mask2 = np.fliplr(inference(learner, np.fliplr(inp_resized)))
    mask = (mask1 + mask2) / 2
    mask[mask > inputs['threshold']] = 255
    mask = resize(mask, (original_size[1], original_size[0]),
                  anti_aliasing=False).astype(np.uint8)
    masked = np.concatenate((np.array(inp), np.expand_dims(mask, -1)), axis=2)
    return masked


if __name__ == '__main__':
    runway.run(port=7843,
               model_options={'checkpoint': './512x512_resnet34_2.pkl'})

Example #24
    sess = gpt2.start_tf_sess()
    gpt2.load_gpt2(sess, run_name=run_name)
    # Return the session so commands receive it as their first argument.
    return sess


# Every model needs to have at least one command. Every command allows to send
# inputs and process outputs. To see a complete list of supported inputs and
# outputs data types: https://sdk.runwayml.com/en/latest/data_types.html
@runway.command(name='generate',
                inputs={'caption': text()},
                outputs={'script': text()})
def generate(sess, args):
    print('[GENERATE] Ran with caption value "{}"'.format(args['caption']))

    samples = gpt2.generate(sess,
                            nsamples=1,
                            return_as_list=True,
                            prefix=f"({args['caption']})",
                            include_prefix=True,
                            length=200,
                            temperature=0.7,
                            run_name=run_name)
    # Strip the "(caption)" prefix: its length plus the two parentheses.
    return {'script': samples[0][len(args['caption']) + 2:]}


if __name__ == '__main__':
    # run the model server using the default network interface and ports,
    # displayed here for convenience
    runway.run(host='0.0.0.0',
               port=8000,
               model_options={'run_name': 'first_345m_run'})
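
As with the image models above, the generate command is just an HTTP route; a minimal client sketch, assuming runway-python's JSON conventions:

import requests

resp = requests.post('http://localhost:8000/generate',
                     json={'caption': 'INT. LIVING ROOM - NIGHT'})
print(resp.json()['script'])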