Example #1
  dim_z = input_z.shape.as_list()[1]  # latent vector dimensionality
  vocab_size = input_y.shape.as_list()[1]  # number of class categories

  initializer = tf.global_variables_initializer()
  sess = tf.Session()
  sess.run(initializer)

  return sess, vocab_size, dim_z, input_z, input_y, input_trunc, output
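
# `truncated_z_sample` (used below) is defined above the truncated portion of
# this snippet. A minimal sketch of the helper as it appears in the public
# BigGAN demo notebook (an assumption, not code recovered from this source;
# numpy is presumed imported as np):
from scipy.stats import truncnorm

def truncated_z_sample(batch_size, dim_z, truncation=1., seed=None):
  # Sample z from a standard normal truncated to [-2, 2], then scale by the
  # truncation value to trade diversity for sample quality.
  state = None if seed is None else np.random.RandomState(seed)
  values = truncnorm.rvs(-2, 2, size=(batch_size, dim_z), random_state=state)
  return truncation * values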


@biggan.command('sample', inputs={'truncation': 'float', 'category': 'text', 'seed': 'integer'}, outputs={'generatedOutput': 'image', 'z': 'vector'})
def sample_cmd(model, inp):
  (sess, vocab_size, dim_z, input_z, input_y, input_trunc, output) = model
  z = truncated_z_sample(1, dim_z, inp['truncation'], seed=inp['seed'])
  y = CATEGORIES.index(inp['category'])
  ims = sample(sess, z, y, vocab_size, input_z, input_y, input_trunc, output)
  return dict(generatedOutput=ims[0], z=z[0])


@biggan.command('generateFromVector', inputs={'z': 'vector', 'category': 'text'}, outputs={'generatedOutput': 'image'})
def generate_from_vector(model, inp):
  (sess, vocab_size, dim_z, input_z, input_y, input_trunc, output) = model
  z = np.array(inp['z']).reshape(1, -1)  # batch of one latent: shape (1, dim_z)
  y = CATEGORIES.index(inp['category'])
  ims = sample(sess, z, y, vocab_size, input_z, input_y, input_trunc, output)
  return dict(generatedOutput=ims[0])


if __name__ == '__main__':
    biggan.run()
Example #2
    model = 'checkpoints/karras2019stylegan-ffhq-1024x1024.pkl'
    print("open model %s" % model)
    with open(model, 'rb') as file:
        G, D, Gs = pickle.load(file)
    return Gs
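
# `fmt` (used as output_transform below) is defined above the truncated
# portion of this snippet. In the official StyleGAN code it is typically
# defined as follows (an assumption, not code recovered from this source):
import dnnlib.tflib as tflib
fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)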


@stylegan.command('convert',
                  inputs={
                      'z': 'vector',
                      'truncation': 'float'
                  },
                  outputs={'output': 'image'})
def convert(Gs, inp):
    truncation = inp['truncation']
    # e.g. np.random.RandomState(1000).randn(1, *Gs.input_shapes[0][1:])
    latents = np.array(inp['z']).reshape((1, 512))
    # labels = np.zeros([latents.shape[0]] + Gs.input_shapes[1][1:])
    images = Gs.run(latents,
                    None,
                    truncation_psi=truncation,
                    randomize_noise=False,
                    output_transform=fmt)
    output = np.clip(images[0], 0, 255).astype(np.uint8)
    return dict(output=output)


if __name__ == '__main__':
    stylegan.run()
Example #3
from maskrcnn_benchmark.config import cfg
from predictor import COCODemo
import numpy as np
import torch
from runway import RunwayModel

masrcnn = RunwayModel()

@masrcnn.setup
def setup():
    config_file = "configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml"

    cfg.merge_from_file(config_file)
    cfg.merge_from_list(["MODEL.DEVICE", "cpu"])

    coco_demo = COCODemo(
        cfg,
        min_image_size=800,
        confidence_threshold=0.7,
    )
    return coco_demo

@masrcnn.command('mask', inputs={'image': 'image'}, outputs={'image': 'image'})
def mask(model, inp):
    img = np.array(inp['image'])
    output = model.run_on_opencv_image(img)  # use the COCODemo instance returned by setup()
    return dict(image=output)

if __name__ == "__main__":
    masrcnn.run()
Example #4
    h, w = 480, 640
    img_shape = (h, w, 3)
    batch_shape = (1, ) + img_shape
    g = tf.Graph()
    # as_default() and device() return context managers; they only take
    # effect when entered with `with`.
    with g.as_default(), g.device('/gpu:0'):
        soft_config = tf.ConfigProto(allow_soft_placement=True)
        soft_config.gpu_options.allow_growth = True
        sess = tf.Session(config=soft_config)
        img_placeholder = tf.placeholder(tf.float32,
                                         shape=batch_shape,
                                         name='img_placeholder')
        preds = transform.net(img_placeholder)
        load_checkpoint(models[idx_model]["ckpt"], sess)
        # Return the placeholder and output tensor as well; the command
        # below cannot reach setup's locals.
        return dict(sess=sess, img_placeholder=img_placeholder, preds=preds)


@faststyletransfer.command('convert',
                           inputs={'image': 'image'},
                           outputs={'output': 'image'})
def upscale(model, inp):
    img = np.array(inp['image'])
    img = np.expand_dims(img, 0)  # add batch dimension: HWC -> NHWC
    output = model['sess'].run(model['preds'],
                               feed_dict={model['img_placeholder']: img})
    output = np.clip(output[0], 0, 255).astype(np.uint8)
    return dict(output=output)


if __name__ == '__main__':
    faststyletransfer.run()
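
# `load_checkpoint` (called in the setup above) is defined in the truncated
# portion of this snippet. A minimal sketch of how fast-style-transfer
# restores weights (an assumption, not code recovered from this source;
# `os` and `tf` imports are presumed):
def load_checkpoint(checkpoint, sess):
    saver = tf.train.Saver()
    if os.path.isdir(checkpoint):
        # A directory: restore the latest checkpoint listed in its state file.
        ckpt = tf.train.get_checkpoint_state(checkpoint)
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        # A single checkpoint file.
        saver.restore(sess, checkpoint)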
Example #5

@pgan.setup
def setup(alpha=0.5):
    global Gs
    tf.InteractiveSession()
    batch_size = 8
    model = 'network-snapshot-itpgan.pkl'
    print("open model %s" % model)
    with open(model, 'rb') as file:
        G, D, Gs = pickle.load(file)
    return Gs


@pgan.command('convert', inputs={'z': 'vector'}, outputs={'output': 'image'})
def convert(Gs, inp):
    # e.g. np.random.RandomState(1000).randn(1, *Gs.input_shapes[0][1:])
    latents = np.array(inp['z']).reshape((1, 512))
    labels = np.zeros([latents.shape[0]] + Gs.input_shapes[1][1:])
    images = Gs.run(latents, labels)
    images = np.clip(np.rint((images + 1.0) / 2.0 * 255.0), 0.0,
                     255.0).astype(np.uint8)  # [-1,1] => [0,255]
    images = images.transpose(0, 2, 3, 1)  # NCHW => NHWC
    output = images[0]  # already clipped uint8; the second clip was redundant
    return dict(output=output)


if __name__ == '__main__':
    pgan.run()
Example #6
    model = infer_engine.initialize_model_from_cfg(weights)
    # Module-level so the command below can reach it; only `model` is returned.
    global dummy_coco_dataset
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    return model
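
# The setup above is truncated; in Detectron's tools/infer_simple.py the
# configuration that precedes initialize_model_from_cfg looks roughly like
# this (a sketch; `config_path` and `weights` are placeholder names, not
# values from the original):
from detectron.core.config import assert_and_infer_cfg, cfg, merge_cfg_from_file

def build_detectron_model(config_path, weights):
    merge_cfg_from_file(config_path)
    cfg.NUM_GPUS = 1
    assert_and_infer_cfg(cache_urls=False)
    return infer_engine.initialize_model_from_cfg(weights)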


@detectron.command('detect',
                   inputs={'image': 'image'},
                   outputs={'output': 'image'})
def detect(model, inp):
    im = np.array(inp['image'])
    with c2_utils.NamedCudaScope(0):
        cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
            model, im, None)  #, timers=timers)
    #results = get_result_json(cls_boxes, cls_segms, cls_keyps, thresh=args.save_thresh, dataset=dummy_coco_dataset)
    im_new = vis_utils.vis_one_image_opencv(im[:, :, ::-1],
                                            cls_boxes,
                                            segms=cls_segms,
                                            keypoints=cls_keyps,
                                            thresh=0.9,
                                            kp_thresh=2,
                                            show_box=True,
                                            dataset=dummy_coco_dataset,
                                            show_class=True)
    out = np.array(im_new)
    output = np.clip(out, 0, 255).astype(np.uint8)
    return dict(output=output)


if __name__ == '__main__':
    detectron.run()
Example #7
    saver = tf.train.Saver()
    path = opts['styleCheckpoint']
    model_name = [
        p for p in os.listdir(path) if os.path.isdir(os.path.join(path, p))
    ][0]
    checkpoint_dir = os.path.join(path, model_name, 'checkpoint_long')
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
    saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
    return dict(sess=sess, input_photo=input_photo, output_photo=output_photo)


@st.command('stylize',
            inputs={'contentImage': 'image'},
            outputs={'stylizedImage': 'image'})
def stylize(model, inp):
    img = inp['contentImage']
    img = np.array(img)
    img = img / 127.5 - 1.  # [0, 255] -> [-1, 1]
    img = np.expand_dims(img, axis=0)
    img = model['sess'].run(model['output_photo'],
                            feed_dict={model['input_photo']: img})
    img = np.clip((img + 1.) * 127.5, 0, 255)  # [-1, 1] -> [0, 255]; clip before the uint8 cast to avoid wrap-around
    img = img.astype('uint8')
    img = img[0]
    return dict(stylizedImage=img)


if __name__ == '__main__':
    st.run()
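
# stylize() above maps pixel ranges by hand; factored into hypothetical
# helpers (the names are illustrative, not from the original), the round
# trip is:
def to_model_range(img):
    # uint8 [0, 255] -> float [-1, 1], the input range of tanh-output models
    return img / 127.5 - 1.

def to_uint8_range(img):
    # float [-1, 1] -> uint8 [0, 255], clipped to avoid wrap-around
    return np.clip((img + 1.) * 127.5, 0, 255).astype('uint8')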
Example #8
# set up the Runway model wrapper (served by glow.run() below)
glow = RunwayModel()
model_ready = False

@glow.setup
def setup():
    global model_ready
    print('setup model')
    model_ready = True
    return None


@glow.command('convert', inputs={'image': 'image'}, outputs={'output': 'image'})
def detect(model, inp):
    img = np.array(inp['image'])
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 2)
    if len(rects) == 0 or not model_ready:
        print('nothing found')
        return dict(output=img)
    # Shift the latent along the 'Attractive' attribute direction.
    z_addition = 0.95 * z_manipulate[tags.index('Attractive')]
    img = warper.align(img[:, :, ::-1], gray, rects[0], z_addition)[:, :, ::-1]
    img = np.array(Image.fromarray(img).convert('RGB'))
    output = np.clip(img, 0, 255).astype(np.uint8)
    return dict(output=output)


if __name__ == '__main__':
    glow.run()
Example #9
    opt.norm = 'batch'
    opt.input_nc = 3
    opt.output_nc = 1
    opt.which_model_netG = 'resnet_9blocks'
    opt.no_dropout = True
    model = create_model(opt)
    return model


@photosketch.command('convert',
                     inputs={'image': 'image'},
                     outputs={'output': 'image'})
def convert(model, inp):
    img = np.array(inp['image'])
    img = img / 255.  # uint8 [0, 255] -> float [0, 1]
    h, w = img.shape[0:2]
    img = np.transpose(img, (2, 0, 1))  # HWC -> CHW
    img = np.expand_dims(img, 0)  # add batch dimension
    img = torch.from_numpy(img).float()  # .to(device)
    data = {'A_paths': '', 'A': img, 'B': img}
    model.set_input(data)
    model.test()
    output = util.tensor2im(model.fake_B)
    output = Image.fromarray(output.astype('uint8'))
    output = output.convert('RGB')
    return dict(output=output)


if __name__ == '__main__':
    photosketch.run()
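
# `util.tensor2im` comes from the pix2pix/CycleGAN codebase that PhotoSketch
# builds on; a minimal sketch of its behavior (an assumption, not code
# recovered from this source; numpy and torch are presumed imported):
def tensor2im(tensor):
    # First image of the batch: CHW float in [-1, 1].
    arr = tensor[0].detach().cpu().float().numpy()
    if arr.shape[0] == 1:  # single channel -> replicate to RGB
        arr = np.tile(arr, (3, 1, 1))
    # CHW -> HWC, then [-1, 1] -> [0, 255].
    arr = (np.transpose(arr, (1, 2, 0)) + 1) / 2.0 * 255.0
    return arr.astype(np.uint8)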