Example #1
File: model.py Project: clxie/BigGAN
import sys
import json
import tensorflow as tf
import tensorflow_hub as hub
from runway import RunwayModel
from utils import truncated_z_sample, sample, CATEGORIES


biggan = RunwayModel()


@biggan.setup
def setup(architecture='256x256'):
  module_path = 'https://tfhub.dev/deepmind/biggan-{}/2'.format(architecture.split('x')[0])

  module = hub.Module(module_path)
  inputs = {k: tf.placeholder(v.dtype, v.get_shape().as_list(), k)
            for k, v in module.get_input_info_dict().items()}
  output = module(inputs)

  input_z = inputs['z']
  input_y = inputs['y']
  input_trunc = inputs['truncation']

  dim_z = input_z.shape.as_list()[1]
  vocab_size = input_y.shape.as_list()[1]

  initializer = tf.global_variables_initializer()
  sess = tf.Session()
  sess.run(initializer)
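
# Hypothetical continuation, not part of the original snippet: the excerpt is
# cut off inside setup(). Assuming setup() ends with `return sess`, and that
# the helpers imported above from utils behave like their counterparts in
# DeepMind's BigGAN demo notebook, a generate command in the style of the
# other examples might look like this.
import numpy as np  # assumed available


@biggan.command('generate',
                inputs={'z': 'vector', 'truncation': 'float', 'category': 'text'},
                outputs={'output': 'image'})
def generate(sess, inp):
  truncation = inp['truncation']
  z = np.array(inp['z']).reshape((1, -1))
  label = CATEGORIES.index(inp['category'])  # CATEGORIES imported from utils
  images = sample(sess, z, label, truncation=truncation)  # assumed signature
  return dict(output=images[0])


if __name__ == '__main__':
  biggan.run()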
Example #2
File: model.py Project: genekogan/stylegan
import pickle
import numpy as np
import tensorflow as tf
import PIL.Image
import random
import dnnlib
import dnnlib.tflib as tflib
import config
from runway import RunwayModel

stylegan = RunwayModel()

fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)


@stylegan.setup
def setup(alpha=0.5):
    global Gs
    tflib.init_tf()
    model = 'checkpoints/karras2019stylegan-ffhq-1024x1024.pkl'
    print("open model %s" % model)
    with open(model, 'rb') as file:
        G, D, Gs = pickle.load(file)
    return Gs


@stylegan.command('convert',
                  inputs={
                      'z': 'vector',
                      'truncation': 'float'
                  },
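
# Hypothetical completion, not part of the original snippet: the decorator
# above is cut off mid-call. Mirroring the PGAN example below and StyleGAN's
# usual Gs.run(...) call, the full command might read:
@stylegan.command('convert',
                  inputs={'z': 'vector', 'truncation': 'float'},
                  outputs={'output': 'image'})
def convert(Gs, inp):
    latents = np.array(inp['z']).reshape((1, 512))
    # fmt (defined above) converts the NCHW float output to NHWC uint8
    images = Gs.run(latents, None, truncation_psi=inp['truncation'],
                    randomize_noise=False, output_transform=fmt)
    return dict(output=PIL.Image.fromarray(images[0]))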
Example #3
from maskrcnn_benchmark.config import cfg
from predictor import COCODemo
import numpy as np
import torch
from runway import RunwayModel

maskrcnn = RunwayModel()

@maskrcnn.setup
def setup():
    config_file = "configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml"

    cfg.merge_from_file(config_file)
    cfg.merge_from_list(["MODEL.DEVICE", "cpu"])

    coco_demo = COCODemo(
        cfg,
        min_image_size=800,
        confidence_threshold=0.7,
    )
    return coco_demo

@maskrcnn.command('mask', inputs={'image': 'image'}, outputs={'image': 'image'})
def mask(model, inp):
    img = np.array(inp['image'])
    output = model.run_on_opencv_image(img)
    return dict(image=output)

if __name__ == "__main__":
    maskrcnn.run()
Example #4
    "style": "styles/wu4.jpg"
}, {
    "ckpt": "models/ckpt_elsalahi_b20_e4_cw05/fns.ckpt",
    "style": "styles/elsalahi2.jpg"
}, {
    "ckpt": "models/scream/scream.ckpt",
    "style": "styles/the_scream.jpg"
}, {
    "ckpt": "models/udnie/udnie.ckpt",
    "style": "styles/udnie.jpg"
}, {
    "ckpt": "models/ckpt_maps3_b5_e2_cw10_tv1_02/fns.ckpt",
    "style": "styles/maps3.jpg"
}]

faststyletransfer = RunwayModel()
idx_model = 0


def load_checkpoint(checkpoint, sess):
    saver = tf.train.Saver()
    try:
        saver.restore(sess, checkpoint)
        return True
    except Exception as e:
        print("checkpoint %s not loaded correctly: %s" % (checkpoint, e))
        return False


@faststyletransfer.setup
def setup(alpha=0.5):
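    # Hypothetical continuation, not part of the original snippet: setup() is
    # cut off here. After the transform network is built (elided; it comes
    # from the project's own modules), the first checkpoint could be restored
    # with the helper above. `models` is a stand-in name for the checkpoint
    # list whose opening (and name) is cut off at the top of this excerpt.
    global sess
    sess = tf.Session()
    checkpoint = models[idx_model]['ckpt']
    if not load_checkpoint(checkpoint, sess):
        raise RuntimeError('checkpoint %s failed to load' % checkpoint)
    return sess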
Example #5
import pickle
import numpy as np
import tensorflow as tf
import PIL.Image
import random
from runway import RunwayModel

pgan = RunwayModel()


@pgan.setup
def setup(alpha=0.5):
    global Gs
    tf.InteractiveSession()
    batch_size = 8
    model = 'network-snapshot-itpgan.pkl'
    print("open model %s" % model)
    with open(model, 'rb') as file:
        G, D, Gs = pickle.load(file)
    return Gs


@pgan.command('convert', inputs={'z': 'vector'}, outputs={'output': 'image'})
def convert(Gs, inp):
    latents = np.array(inp['z']).reshape((1, 512))
    # alternative: np.random.RandomState(1000).randn(1, *Gs.input_shapes[0][1:])
    labels = np.zeros([latents.shape[0]] + Gs.input_shapes[1][1:])
    images = Gs.run(latents, labels)
    images = np.clip(np.rint((images + 1.0) / 2.0 * 255.0), 0.0, 255.0).astype(np.uint8)  # [-1,1] => [0,255]
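    # Hypothetical ending, not part of the original snippet: Gs.run returns
    # NCHW arrays, so transpose to HWC and wrap the first image for Runway.
    images = images.transpose(0, 2, 3, 1)
    return dict(output=PIL.Image.fromarray(images[0]))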
Example #6
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from runway import RunwayModel
from detectron.core.config import assert_and_infer_cfg
from detectron.core.config import cfg
from detectron.core.config import merge_cfg_from_file
from detectron.utils.io import cache_url
import detectron.core.test_engine as infer_engine
import detectron.datasets.dummy_datasets as dummy_datasets
import detectron.utils.c2 as c2_utils
import detectron.utils.vis as vis_utils
import detectron.utils.keypoints as keypoint_utils
import pycocotools.mask as mask_util
from detectron.utils.vis import convert_from_cls_format

c2_utils.import_detectron_ops()
#cv2.ocl.setUseOpenCL(False)

detectron = RunwayModel()


def get_result_json(boxes, segms, keypoints, thresh=0.7, dataset=None):
    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)
    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return
    dataset_keypoints, _ = keypoint_utils.get_keypoints()
    if segms is not None:
        masks = mask_util.decode(segms)
    # The upstream Detectron code displays largest-to-smallest by area to
    # reduce occlusion; this version sorts by detection score instead
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-boxes[:, 4])
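    # Hypothetical continuation, not part of the original snippet: emit one
    # JSON-serializable record per confident detection. `classes` comes from
    # convert_from_cls_format above; the field names are illustrative.
    results = []
    for i in sorted_inds:
        score = boxes[i, 4]
        if score < thresh:
            continue
        results.append({
            'class': dataset.classes[classes[i]] if dataset else int(classes[i]),
            'score': float(score),
            'bbox': boxes[i, :4].tolist(),
        })
    return results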
Example #7
import os
import numpy as np
import tensorflow as tf
from runway import RunwayModel
from module import encoder, decoder
from glob import glob

st = RunwayModel()


@st.setup(options={'styleCheckpoint': 'checkpoint'})
def setup(opts):
    sess = tf.Session()
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    with tf.name_scope('placeholder'):
        input_photo = tf.placeholder(dtype=tf.float32,
                                     shape=[1, None, None, 3],
                                     name='photo')
    input_photo_features = encoder(image=input_photo,
                                   options={'gf_dim': 32},
                                   reuse=False)
    output_photo = decoder(features=input_photo_features,
                           options={'gf_dim': 32},
                           reuse=False)
    saver = tf.train.Saver()
    path = opts['styleCheckpoint']
    model_name = [
        p for p in os.listdir(path) if os.path.isdir(os.path.join(path, p))
    ][0]
    checkpoint_dir = os.path.join(path, model_name, 'checkpoint_long')
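    # Hypothetical continuation, not part of the original snippet: restore the
    # newest checkpoint in checkpoint_dir and hand the session plus the graph
    # endpoints to the command handlers (the returned shape is an assumption).
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    saver.restore(sess, ckpt.model_checkpoint_path)
    return dict(sess=sess, input_photo=input_photo, output_photo=output_photo)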
Example #8
File: server.py Project: kaichehung/glow
        return image



# imports implied by this truncated snippet; FaceWarper and z_manipulate
# are defined elsewhere in server.py
import numpy as np
import cv2
import dlib
from runway import RunwayModel

# load face detection and warping
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
warper = FaceWarper(predictor, desiredFaceWidth=256, desiredLeftEye=(0.371, 0.480))

# tags that can be modified
tags = "5_o_Clock_Shadow Arched_Eyebrows Attractive Bags_Under_Eyes Bald Bangs Big_Lips Big_Nose Black_Hair Blond_Hair Blurry Brown_Hair Bushy_Eyebrows Chubby Double_Chin Eyeglasses Goatee Gray_Hair Heavy_Makeup High_Cheekbones Male Mouth_Slightly_Open Mustache Narrow_Eyes No_Beard Oval_Face Pale_Skin Pointy_Nose Receding_Hairline Rosy_Cheeks Sideburns Smiling Straight_Hair Wavy_Hair Wearing_Earrings Wearing_Hat Wearing_Lipstick Wearing_Necklace Wearing_Necktie Young"
tags = tags.split()

# launch Runway
glow = RunwayModel()
model_ready = False

@glow.setup
def setup():
    global model_ready
    print('setup model')
    model_ready = True
    return None


@glow.command('convert', inputs={'image': 'image'}, outputs={'output': 'image'})
def detect(sess, inp):
    img = np.array(inp['image'])
    z_addition = 0.95 * z_manipulate[tags.index('Attractive')]
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
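    # Hypothetical continuation, not part of the original snippet: locate a
    # face with the dlib detector created above and align it with the warper.
    # FaceWarper's API is a guess; in the full project the aligned face would
    # then be encoded, shifted by z_addition in latent space, and decoded.
    faces = detector(gray, 1)
    if len(faces) == 0:
        return dict(output=img)
    aligned = warper.align(img, gray, faces[0])  # assumed method name
    return dict(output=aligned)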
Example #9
import numpy as np
import torch
from PIL import Image
from options.test_options import TestOptions
from models.models import create_model
from runway import RunwayModel
import util.util as util

photosketch = RunwayModel()


@photosketch.setup
def setup():
    global opt
    opt = TestOptions().parse()
    opt.nThreads = 1
    opt.batchSize = 1
    opt.serial_batches = True
    opt.no_flip = True
    opt.name = 'pretrained'
    opt.checkpoints_dir = '.'
    opt.model = 'pix2pix'
    opt.which_direction = 'AtoB'
    opt.norm = 'batch'
    opt.input_nc = 3
    opt.output_nc = 1
    opt.which_model_netG = 'resnet_9blocks'
    opt.no_dropout = True
    model = create_model(opt)
    return model
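
# Hypothetical continuation, not part of the original snippet: the excerpt
# ends after setup(). Mirroring the other examples, a convert command might
# look like this; model.netG and util.tensor2im are assumptions based on the
# pix2pix codebase, not lines from this project.
@photosketch.command('convert', inputs={'image': 'image'}, outputs={'output': 'image'})
def convert(model, inp):
    img = np.array(inp['image']).astype(np.float32) / 255.0
    img = (img - 0.5) / 0.5                               # scale to [-1, 1]
    tensor = torch.from_numpy(img.transpose(2, 0, 1)).unsqueeze(0)
    with torch.no_grad():
        fake = model.netG(tensor)                         # assumed attribute
    return dict(output=util.tensor2im(fake))              # assumed helper


if __name__ == '__main__':
    photosketch.run()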