Example #1
    print('Restored from {}'.format(ckpt.model_checkpoint_path))

    return sess


def convert_rect(rect, width, height):
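    """Normalize a pixel-space (x, y, w, h) rect to relative [0, 1] coordinates."""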
    x = rect[0] / width
    y = rect[1] / height
    w = rect[2] / width
    h = rect[3] / height
    return x, y, w, h


caption_inputs = {
    'image': runway.image,
    'max_detections': runway.number(default=10, min=1, max=50, step=1)
}

caption_outputs = {
    'bboxes': runway.array(runway.image_bounding_box),
    'classes': runway.array(runway.text),
    'scores': runway.array(runway.number)
}


@runway.command('caption', inputs=caption_inputs, outputs=caption_outputs)
def caption(sess, inp):
    img = np.array(inp['image'])
    width = img.shape[1]
    height = img.shape[0]
    scores, boxes, captions = im_detect(sess, net, img, None, use_box_at=-1)
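
# Note: this snippet is truncated. Runway model scripts conventionally finish
# by starting the model server (a pattern from the runway SDK, not shown in
# the original source):
#
#     if __name__ == '__main__':
#         runway.run()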
Example #2
import pickle
import dnnlib.tflib as tflib
import config
import matplotlib.pyplot as plt
from encoder.generator_model import Generator
from encoder.perceptual_model import PerceptualModel
import runway

prevIterations = -1
generated_dlatents = 0


@runway.setup(
    options={
        'checkpoint': runway.file(extension='.pkl'),
        'image dimensions': runway.number(min=128,
                                          max=1024,
                                          default=512,
                                          step=128)
    })
def setup(opts):
    # Initialize generator and perceptual model
    global perceptual_model
    global generator
    tflib.init_tf()
    model = opts['checkpoint']
    print("open model %s" % model)
    with open(model, 'rb') as file:
        G, D, Gs = pickle.load(file)
    Gs.print_layers()
    generator = Generator(Gs, batch_size=1, randomize_noise=False)
    perceptual_model = PerceptualModel(opts['image dimensions'],
                                       layer=9,
                                       batch_size=1)
    perceptual_model.build_perceptual_model(generator.generated_image)
    return Gs
Example #3
    new_latent_vector[:8] = (latent_vector + coeff*direction)[:8]
    return new_latent_vector

@runway.setup(options={'checkpoint': runway.file(extension='.pth')})
def setup(opts):
    global Gs
    if opts['checkpoint'] is None or opts['checkpoint'] == '':
        opts['checkpoint'] = 'checkpoint/Gs.pth'
    state = torch.load(opts['checkpoint'], map_location=device)
    Gs = models.load(state, device)
    Gs.to(device)
    return Gs

generate_inputs = {
    'z': runway.vector(512, sampling_std=0.5),
    'truncation': runway.number(min=0, max=1, default=0.8, step=0.01),
    'hair_color': runway.category(choices=hairs),
    'hair_coeff': runway.number(min=-5, max=5, default=0, step=0.01),
    'eyes_color': runway.category(choices=eyes),
    'eyes_coeff': runway.number(min=-5, max=5, default=0, step=0.01),
}

@runway.command('generate', inputs=generate_inputs, outputs={'image': runway.image})
def convert(model, inputs):
    truncation = inputs['truncation']
    Gs.set_truncation(truncation_psi=truncation)
    qlatents = torch.Tensor(inputs['z']).reshape(1, 512).to(device=device, dtype=torch.float32)
    dlatents = Gs.G_mapping(qlatents)
    shifted_dlatents = shift_latents(dlatents, inputs)
    generated = Gs(dlatents=shifted_dlatents)
    images = utils.tensor_to_PIL(generated)
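
# The helper whose tail appears at the top of this example is cut off. A
# minimal sketch consistent with that tail (the name and signature here are
# hypothetical):
#
#     def shift_latent(latent_vector, direction, coeff):
#         new_latent_vector = latent_vector.clone()
#         # Only the first 8 dlatent layers are shifted along the direction.
#         new_latent_vector[:8] = (latent_vector + coeff * direction)[:8]
#         return new_latent_vector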
Example #4
tags = "Neutral Smiling"
tags = tags.split()
align_images.main()
encode_images.main()
@runway.setup(options={'checkpoint': runway.file(extension='.pkl')})
def setup(opts):
    global Gs
    tflib.init_tf()
    with open(opts['checkpoint'], 'rb') as file:
        G, D, Gs = pickle.load(file)
    return Gs


generate_inputs = {
    'image': runway.image,
    'coeff': runway.number(min=-20, max=20, default=0.0, step=0.01),
    'feature': runway.category(choices=tags, default=tags[1])
}

@runway.command('convert', inputs=generate_inputs, outputs={'image': runway.image})
def convert(model, inputs):
    img = np.array(inputs['image'])
    coeff = inputs['coeff']
    feature = inputs['feature']
    z_addition = coeff * z_manipulate[tags.index(feature)]
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
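    # `rects` presumably comes from a face-detection step elided from this snippet.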
    if len(rects) == 0 or not model_ready:
        print('nothing found')
        return dict(output=img)
    img = align.main()
    encoded = encoder.main()
    img = np.array(Image.fromarray(img).convert('RGB'))
    output = np.clip(img, 0, 255).astype(np.uint8)
Example #5
def setup(opts):
    global Gs
    tflib.init_tf()
    if opts['checkpoint'] is None:
        opts['checkpoint'] = 'checkpoints\\network-snapshot-000600.pkl'
    with open(opts['checkpoint'], 'rb') as file:
        _G, _D, Gs = pickle.load(file, encoding='latin1')
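    # Resample the synthesis network's per-layer noise inputs so stochastic
    # detail varies between model loads.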
    noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]
    rnd = np.random.RandomState()
    tflib.set_vars({var: rnd.randn(*var.shape.as_list()) for var in noise_vars})
    return Gs


generate_inputs = {
    'z': runway.vector(256, sampling_std=0.5),
    'truncation': runway.number(min=0, max=1, default=0.8, step=0.01)
}

for i in range(4):
    for j in range(4):
        generate_inputs.update({'style_%i/%i' %(i, j): runway.number(min=0, max=1, default=0.5, step=0.01)})

@runway.command('generate', inputs=generate_inputs, outputs={'image': runway.image})
def convert(model, inputs):
    z = inputs['z']
    truncation = inputs['truncation']
    latent = z.reshape((1, 256))
    style_mix = np.zeros([4, 4])
    for i in range(4):
        for j in range(4):
            style_mix[i, j] = inputs['style_%i/%i' %(i, j)]
Example #6
    with open(opts['checkpoint'], 'rb') as file:
        _G, _D, Gs = pickle.load(file, encoding='latin1')
    noise_vars = [
        var for name, var in Gs.components.synthesis.vars.items()
        if name.startswith('noise')
    ]
    rnd = np.random.RandomState()
    tflib.set_vars(
        {var: rnd.randn(*var.shape.as_list())
         for var in noise_vars})
    return Gs


generate_inputs = {
    'z': runway.vector(512, sampling_std=0.5),
    'truncation': runway.number(min=0, max=1, default=0.8, step=0.01)
}


@runway.command('generate',
                inputs=generate_inputs,
                outputs={'image': runway.image})
def convert(model, inputs):
    z = inputs['z']
    truncation = inputs['truncation']
    latents = z.reshape((1, 512))
    images = model.run(latents,
                       None,
                       truncation_psi=truncation,
                       randomize_noise=False,
                       output_transform=fmt)
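
# `fmt` is not defined in this truncated snippet; a reasonable assumption,
# matching NVIDIA's StyleGAN reference code, is a module-level transform:
#
#     fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)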
Example #7
# 	return Gs


def generate_image(generator, latent_vector):
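    """Run one (18, 512) dlatent through the generator and return a 512x512 PIL image."""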
    latent_vector = latent_vector.reshape((1, 18, 512))
    generator.set_dlatents(latent_vector)
    img_array = generator.generate_images()[0]
    img = PIL.Image.fromarray(img_array, 'RGB')
    return img.resize((512, 512))


# ENCODING

generate_inputs_1 = {
    'portrait': runway.image(),
    'iterations': runway.number(min=1, max=5000, default=10, step=1.0),
    'encode': runway.boolean(default=False)
}
generate_outputs_1 = {'image': runway.image(width=512, height=512)}

encodeCount = 0
latent_vectors = []
# latent_vectors.append(preLoad1)
# latent_vectors.append(preLoad2)
# latent_vectors.append(preLoad1)
# latent_vectors.append(preLoad2)


@runway.command('encode', inputs=generate_inputs_1, outputs=generate_outputs_1)
def find_in_space(model, inputs):
    global generated_dlatents
Example #8
# 	# Initialize generator and perceptual model
# 	tflib.init_tf()
# 	model = opts['checkpoint']
# 	print("open model %s" % model)
# 	with open(model, 'rb') as file:
# 		G, D, Gs = pickle.load(file)
# 	Gs.print_layers()
# 	global generator
# 	generator = Generator(Gs, batch_size=1, randomize_noise=False)
# 	perceptual_model = PerceptualModel(256, layer=9, batch_size=1)
# 	perceptual_model.build_perceptual_model(generator.generated_image)
# 	return Gs

generate_inputs = {
    'portrait': runway.image(),
    'iterations': runway.number(min=1, max=5000, default=10, step=1.0),
    'age': runway.number(min=-30, max=20, default=4, step=0.2)
}
# generate_outputs = {
# 	'latent_vector': runway.file()
# }

generate_outputs = {
    #	'generated': runway.image(width=512, height=512)
    # 'vector': runway.vector(length=512)
    "hextext": runway.text
}


@runway.command('encode', inputs=generate_inputs, outputs=generate_outputs)
def find_in_space(model, inputs):

Example #9

inputs = {
    'composite_image':
    runway.image,
    'foreground_mask':
    runway.image,
    'transfer_color':
    runway.boolean(
        default=True,
        description=
        "Transfer colors back to source image for high-resolution output"),
    'transfer_resolution':
    runway.number(default=512,
                  min=256,
                  max=1024,
                  step=128,
                  description="Which resolution to transfer colors from")
}

outputs = {'harmonized_image': runway.image}


@runway.command('harmonize', inputs=inputs, outputs=outputs)
def harmonize(model, inputs):

    og_image = np.array(inputs["composite_image"])
    og_mask = np.array(inputs["foreground_mask"])

    # Re-shape inputs to transfer resolution
    image_size = og_image.shape[:2]
Example #10

class BaseConfig(Config):
    NAME = "baseConfig"
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    NUM_CLASSES = 1 + 1  # Background + pasta
    STEPS_PER_EPOCH = 100
    DETECTION_MIN_CONFIDENCE = 0.7


config = BaseConfig()

setup_options = {
    'checkpoint': runway.file(extension='.h5'),
    'min_confidence': runway.number(min=0, max=1, step=.1, default=.7),
}


@runway.setup(options=setup_options)
def setup(opts):
    config.DETECTION_MIN_CONFIDENCE = opts['min_confidence']
    model = modellib.MaskRCNN(mode="inference",
                              config=config,
                              model_dir='logs')
    model.load_weights(opts['checkpoint'], by_name=True)
    return model


@runway.command('detect',
                inputs={'image': runway.image},
Example #11
                                               context=context,
                                               batch_size=1,
                                               temperature=temperature,
                                               top_k=top_k)
    saver = tf.train.Saver()
    ckpt = tf.train.latest_checkpoint(opts['checkpoint_dir'])
    saver.restore(sess, ckpt)

    g = tf.get_default_graph()
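    # Finalizing the graph prevents request handlers from adding new ops;
    # `g` is evidently module-global, since generate() below reuses it.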
    g.finalize()
    return sess, enc, context, length_ph


command_inputs = {
    'prompt': runway.text,
    'seed': runway.number(default=0, min=0, max=999, step=1),
    'sequence_length': runway.number(default=128, min=1, max=256, step=1)
}


@runway.command('generate',
                inputs=command_inputs,
                outputs={'text': runway.text})
def generate(model, inputs):
    with g.as_default():
        sess, enc, context, length_ph = model
        seed = inputs['seed']
        np.random.seed(seed)
        tf.set_random_seed(seed)
        context_tokens = enc.encode(inputs['prompt'])
        out = sess.run(output,
Example #12
import pickle
from tqdm import tqdm
import PIL.Image
import numpy as np
import dnnlib
import dnnlib.tflib as tflib
import config
import matplotlib.pyplot as plt
from encoder.generator_model import Generator
from encoder.perceptual_model import PerceptualModel
import runway

prevIterations = -1
generated_dlatents = 0

@runway.setup(options={'checkpoint': runway.file(extension='.pkl'), 'image dimensions': runway.number(min=128, max=1024, default=512, step=128)})
def setup(opts):
	# Initialize generator and perceptual model
	global perceptual_model
	global generator
	tflib.init_tf()
	model = opts['checkpoint']
	print("open model %s" % model)
	with open(model, 'rb') as file:
		G, D, Gs = pickle.load(file)
	Gs.print_layers()	
	generator = Generator(Gs, batch_size=1, randomize_noise=False)		
	perceptual_model = PerceptualModel(opts['image dimensions'], layer=9, batch_size=1)
	perceptual_model.build_perceptual_model(generator.generated_image)
	return Gs
Example #13
# 	generator = Generator(Gs, batch_size=1, randomize_noise=False)
# 	return Gs

def generate_image(generator, latent_vector):
	latent_vector = latent_vector.reshape((1, 18, 512))
	generator.set_dlatents(latent_vector)
	img_array = generator.generate_images()[0]
	img = PIL.Image.fromarray(img_array, 'RGB')
	return img.resize((512, 512))   


# ENCODING

generate_inputs_1 = {
	'portrait': runway.image(),
	'iterations': runway.number(min=1, max=5000, default=10, step=1.0)
}
generate_outputs_1 = {
	"text": runway.text
}

encodeCount = 0
latent_vectors = []

@runway.command('encode', inputs=generate_inputs_1, outputs=generate_outputs_1)
def find_in_space(model, inputs):
	global generated_dlatents
	global prevIterations
	s2 = "Did not encode."
	if (inputs['iterations'] != prevIterations):
		generator.reset_dlatents()
Example #14
                                       pt.net_G.out_size).to(device)

    pt._save_stroke_params(PARAMS)
    final_rendered_image = pt._render(PARAMS, save_jpgs=False, save_video=True)

    return final_rendered_image


@runway.command(
    "translate",
    inputs={
        "source_imgs":
        runway.image(description="input image to be translated"),
        "Strokes":
        runway.number(min=100,
                      max=700,
                      default=100,
                      description="number of strokes"),
    },
    outputs={
        "image":
        runway.image(
            description="output image containing the translated result")
    },
)
def translate(learn, inputs):
    os.makedirs("images", exist_ok=True)
    inputs["source_imgs"].save("images/temp.jpg")
    paths = os.path.join("images", "temp.jpg")
    args.img_path = paths
    args.max_m_strokes = inputs["Strokes"]
    pt = ProgressivePainter(args=args)
Example #15

@runway.setup
def setup():
    init_fn = slim.assign_from_checkpoint_fn(
        './arbitrary_style_transfer/model.ckpt',
        slim.get_variables_to_restore()
    )
    sess.run([tf.local_variables_initializer()])
    init_fn(sess)
    return sess

stylize_inputs = {
    'content_image': runway.image,
    'style_image': runway.image,
    'interpolation_weight': runway.number(default=0.5, min=0, max=1, step=0.1)
}

@runway.command('stylize', inputs=stylize_inputs, outputs={'image': runway.image})
def stylize(sess, inputs):
    content_img_np = np.array(inputs['content_image'])
    style_image_np = np.array(inputs['style_image'])
    interpolation_weight = inputs['interpolation_weight']
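    # Compute style embeddings ("bottleneck features") for the content image
    # itself and for the style image, then blend them linearly below.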
    identity_params = sess.run(bottleneck_feat, feed_dict={style_img_ph: content_img_np})
    style_params = sess.run(bottleneck_feat, feed_dict={style_img_ph: style_image_np})
    stylized_image_res = sess.run(
        stylized_images,
        feed_dict={
            bottleneck_feat: identity_params * (1 - interpolation_weight) + style_params * interpolation_weight,
            content_img_ph: content_img_np
        })

Example #16
    params = argparse.Namespace(**importlib.import_module(config_file).params)
    model = BPEmbVaeSampler(lang=params.bpemb['lang'],
                            vs=params.bpemb['vs'],
                            dim=params.bpemb['dim'],
                            decode_from=model_path,
                            params=params,
                            cuda=use_gpu)
    return model


@runway.command('generate',
                inputs={
                    'z':
                    runway.vector(length=64),
                    'temperature':
                    runway.number(default=0.5, min=0.05, max=2.0, step=0.05)
                },
                outputs={'out': runway.text})
def generate(model, inputs):
    z = torch.from_numpy(inputs['z']).float().unsqueeze(0).to(model.device)
    temperature = inputs['temperature']
    with torch.no_grad():
        return model.sample(z, temperature)[0]


@runway.command('reconstruct',
                inputs={
                    'in':
                    runway.text,
                    'temperature':
                    runway.number(default=0.5, min=0.05, max=2.0, step=0.05)
Example #17
    neighbors = find_nearest(excluded_words, result, id_to_word, faiss_index,
                             num_results)
    return ', '.join(neighbors)

setup_options = {
    'word_vector_dimensions': category(
        choices=['50', '100', '200', '300'],
        default='100',
        description='The number of dimensions used to represent each word in the latent '\
            'space. Higher dimensions increase accuracy but take longer to run and use '\
            'more memory.'
    ),
    'number_of_words': number(
        min=100,
        max=400000,
        default=400000,
        description='The number of words in the corpus. More words create more variety '\
            'but take longer to run.'
    )
}


@runway.setup(options=setup_options)
def setup(opts):
    dimensions = int(opts['word_vector_dimensions'])
    vector_file = 'data/glove/glove.6B.{}d.txt'.format(dimensions)
    df, labels_array = build_word_vector_matrix(vector_file,
                                                opts['number_of_words'])
    word_to_id, id_to_word = get_label_dictionaries(labels_array)
    faiss_index = faiss.IndexFlatL2(dimensions)
    faiss_index.add(df)
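
# A minimal sketch of the `find_nearest` helper called at the top of this
# example (a hypothetical reconstruction; `result` is assumed to be a query
# vector, and excluded words are filtered out of the hits):
#
#     def find_nearest(excluded_words, vector, id_to_word, faiss_index, num_results):
#         query = np.asarray(vector, dtype='float32').reshape(1, -1)
#         _, ids = faiss_index.search(query, num_results + len(excluded_words))
#         words = [id_to_word[i] for i in ids[0]]
#         return [w for w in words if w not in excluded_words][:num_results]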
Example #18
    global Gs
    print('init')
    tflib.init_tf()
    print('reading pickle file')
    with open(opts['checkpoint'], 'rb') as file:
        _G, _D, Gs = pickle.load(file, encoding='latin1')
    print('Create noise')
    noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]
    rnd = np.random.RandomState()
    tflib.set_vars({var: rnd.randn(*var.shape.as_list()) for var in noise_vars})
    return Gs


generate_inputs = {
    'z': runway.vector(512, sampling_std=0.5),
    'label': runway.number(min=0, max=100000, default=0, step=1), # generate random labels
    'scale': runway.number(min=-2, max=2, default=0, step=0.05),  # magnitude of labels - 0 = no labels
    'truncation': runway.number(min=0, max=1, default=1, step=0.1)
}

@runway.command('generate', inputs=generate_inputs, outputs={'image': runway.image})
def convert(model, inputs):
    z = inputs['z']
    print(z)
    label = int(inputs['label'])
    scale = inputs['scale']
    truncation = inputs['truncation']
    latents = z.reshape((1, 512))
    labels = scale * np.random.RandomState(label).randn(167)
    labels = labels.reshape((1,167)).astype(np.float32)
    images = model.run(latents, labels, truncation_psi=truncation, randomize_noise=False, output_transform=fmt)

Example #19
    tflib.init_tf()
    with open(opts['checkpoint'], 'rb') as file:
        _, _, Gs = pickle.load(file, encoding='latin1')
    return Gs


#@runway.command('project', inputs=project_inputs, outputs={'images': runway.array(item_type=runway.image, max_length=10)})
@runway.command('project',
                inputs={
                    'projectionImage':
                    runway.image(min_width=1024,
                                 min_height=1024,
                                 max_width=1024,
                                 max_height=1024),
                    'steps':
                    runway.number(min=10, max=1000, default=200)
                },
                outputs={'image': runway.image})
def project(model, inputs):
    im = inputs['projectionImage']
    for sub in ('imgs', 'records', 'out'):
        os.makedirs(os.path.join('./projection', sub), exist_ok=True)

    if os.path.isfile('./projection/imgs/project.png'):
        os.remove('./projection/imgs/project.png')