Code Example #1
    # Copy each checkpoint variable into the corresponding graph variable.
    assign_ops = list(
        map(
            lambda x: tf.assign(
                x,
                tf.contrib.framework.load_variable(config.load_model_dir,
                                                   x.name)), vars_list))
    sess.run(assign_ops)
    return output, input_image_tf, input_mask_tf


command_inputs = {
    'image': runway.image,
    'mask': runway.segmentation(
        label_to_id={'background': 0, 'mask': 1},
        label_to_color={'background': [0, 0, 0], 'mask': [255, 255, 255]})
}


@runway.command('inpaint',
                inputs=command_inputs,
                outputs={'output': runway.image})
def inpaint(model, inputs):
    output, input_image_tf, input_mask_tf = model
    image = inputs['image']
    original_size = image.size
    image = np.array(image.resize((256, 256)), dtype=np.float32)
    mask = np.array(inputs['mask'].resize((256, 256)), dtype=np.float32)
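
The excerpt above stops right after the image and mask have been resized. As a standalone, illustrative sketch (not the project's code), the two-label 'mask' input can be turned into the 0/255, three-channel float mask that inpainting graphs like this one commonly expect. The file name, the assumption that the mask arrives as a single-channel label map with the 'mask' class stored as pixel value 1, and the three-channel convention are all assumptions here:

import numpy as np
from PIL import Image

# Hypothetical label map saved from the 'mask' segmentation input above:
# pixel value 1 marks the region to inpaint, 0 is background.
seg = Image.open('mask.png').resize((256, 256), Image.NEAREST)
binary = (np.array(seg) == 1).astype(np.float32)           # H x W in {0, 1}
mask = np.repeat(binary[..., None] * 255.0, 3, axis=-1)    # H x W x 3 in {0, 255}
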
Code Example #2
    model.eval()
    model.to(device)
    print("Model:", CONFIG.MODEL.NAME)

    return Dict({
        'model': model,
        'device': device,
        'config': CONFIG,
        'postprocessor': postprocessor
    })


@runway.command(
    'mask_all',
    inputs={'image': runway.image},
    outputs={'image': runway.segmentation(label_to_id=label_to_id)})
def mask_all(model, inputs):
    labelmap = run_model(model, inputs).astype(np.uint8)
    return {'image': labelmap}


@runway.command('mask_one',
                inputs={
                    'image': runway.image,
                    'class': runway.category(choices=classes_list)
                },
                outputs={'image': runway.image})
def mask_one(model, inputs):
    labelmap = run_model(model, inputs)
    labelmap = 255.0 * np.array(
        labelmap == classes_list.index(inputs['class']))
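
The listing cuts off before mask_one returns. A plausible ending, mirroring how mask_all packages its result above and assuming (this is an assumption, not the project's code) that the runway.image output accepts the 0/255 array directly, would be:

    return {'image': labelmap.astype(np.uint8)}
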
Code Example #3
    "ear_r": [215, 175, 125],
    "eye_g": [220, 180, 210],
    "neck_l": [125, 125, 255]
}

to_tensor = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])


@runway.command('parse',
                inputs={'image': runway.image},
                outputs={
                    'parsed_face':
                    runway.segmentation(label_to_id=label_to_id,
                                        label_to_color=label_to_color)
                })
def parse(model, inputs):
    image = inputs['image'].resize((512, 512), Image.BILINEAR)
    img = to_tensor(image)
    img = torch.unsqueeze(img, 0)
    img = img.cuda()
    out = model(img)[0]
    parsing = out.squeeze(0).cpu().detach().numpy().argmax(0)
    return parsing.astype(np.uint8)


if __name__ == '__main__':
    runway.run(model_options={'checkpoint': './79999_iter.pth'})
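
The excerpts in this section all share the same skeleton: @runway.setup loads weights once, one or more @runway.command functions run inference, and runway.run() starts the model server. Below is a minimal, generic sketch of that pattern, not taken from any of the projects here; load_my_model and run_inference are hypothetical helpers standing in for project-specific code.

import numpy as np
import runway

label_to_id = {'background': 0, 'person': 1}  # illustrative labels only


@runway.setup(options={'checkpoint_dir': runway.file(is_directory=True)})
def setup(opts):
    # Load whatever object the commands below need and return it.
    # load_my_model is a hypothetical helper, not part of the runway SDK.
    return load_my_model(opts['checkpoint_dir'])


@runway.command('segment',
                inputs={'image': runway.image},
                outputs={'segmentation': runway.segmentation(label_to_id=label_to_id)})
def segment(model, inputs):
    # run_inference is likewise hypothetical; it should return an H x W
    # array of class indices for the input PIL image.
    labelmap = run_inference(model, inputs['image'])
    return {'segmentation': labelmap.astype(np.uint8)}


if __name__ == '__main__':
    runway.run()
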
Code Example #4
File: runway_model.py  Project: yanngraf/SPADE-COCO
label_to_color = {
    'unlabeled': (0, 0, 0),
    'grass': (29, 195, 49),
    'sky': (95, 219, 255),
    'clouds': (170, 170, 170),
    'sea': (54, 62, 167),
    'river': (0, 57, 150),
    'tree': (140, 104, 47),
    'mountain': (60, 55, 50)
}

command_inputs = {
    'semantic_map':
    runway.segmentation(label_to_id=label_to_id,
                        label_to_color=label_to_color,
                        default_label='unlabeled',
                        width=640,
                        height=360)
}

command_outputs = {'output': runway.image}


@runway.command('convert', inputs=command_inputs, outputs=command_outputs)
def convert(model, inputs):
    img = inputs['semantic_map']
    original_size = img.size
    img = img.resize((opt.load_size, opt.load_size))
    params = get_params(opt, img.size)
    transform_label = get_transform(opt,
                                    params,
Code Example #5
    model.eval()
    return model

label_to_color = {
    'unlabeled': (0, 0, 0),
    'grass': (29, 195, 49),
    'sky-other': (95, 219, 255),
    'clouds': (170, 170, 170),
    'sea': (54, 62, 167),
    'river': (0, 57, 150),
    'tree': (140, 104, 47),
    'mountain': (60, 55, 50)
}

command_inputs = {
    'semantic_map': runway.segmentation(label_to_id=label_to_id, label_to_color=label_to_color, default_label='sky-other', width=640, height=360)
}

command_outputs = {
    'output': runway.image
}

@runway.command('convert', inputs=command_inputs, outputs=command_outputs)
def convert(model, inputs):
    img = inputs['semantic_map']
    original_size = img.size
    img = img.resize((opt.load_size, opt.load_size))
    params = get_params(opt, img.size)
    transform_label = get_transform(opt, params, method=Image.NEAREST, normalize=False)
    label_tensor = transform_label(img) * 255.0
    label_tensor[label_tensor == 255.0] = 0
Code Example #6
File: runway_model.py  Project: yining1023/DeepLabV3
]

label_to_id = {label: i for i, label in enumerate(labels)}


@runway.setup(options={'checkpoint_dir': runway.file(is_directory=True)})
def setup(opts):
    return DeepLabModel(opts['checkpoint_dir'])


@runway.command('segment',
                inputs={'image': runway.image},
                outputs={
                    'segmentation':
                    runway.segmentation(
                        label_to_id=label_to_id,
                        label_to_color={'background': [0, 0, 0]})
                })
def segment(model, inputs):
    _, seg_map = model.run(inputs['image'])
    return seg_map.astype(np.uint8)


@runway.command('mask_person',
                inputs={'image': runway.image},
                outputs={'masked_image': runway.image})
def mask(model, inputs):
    _, seg_map = model.run(inputs['image'])
    mask = np.stack((seg_map, ) * 4, axis=-1)
    masked = np.array(inputs['image'].resize(seg_map.shape[::-1]))
    masked = np.dstack((masked, np.full(masked.shape[:-1], 255)))
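
mask_person is also cut short. As a standalone illustration of the same idea (not the project's code, and assuming a binary person mask of the same size as the photo), a segmentation map can be applied to an image through its alpha channel; the file names are hypothetical:

import numpy as np
from PIL import Image

image = np.array(Image.open('photo.png').convert('RGBA'))    # H x W x 4
person = np.array(Image.open('person_mask.png'))              # H x W, 0 or 1
image[..., 3] = (person > 0).astype(np.uint8) * 255           # transparent outside the mask
Image.fromarray(image).save('masked.png')
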
Code Example #7
    "mouth": [255, 150, 0],
    "neck": [255, 225, 120],
    "r_ear": [255, 125, 125],
    "l_ear": [200, 100, 100],
    "cloth": [0, 255, 0],
    "hat": [0, 150, 80],
    "ear_r": [215, 175, 125],
    "eye_g": [220, 180, 210],
    "neck_l": [125, 125, 255]
}

command_inputs = {
    'semantic_map':
    runway.segmentation(label_to_id=label_to_id,
                        label_to_color=label_to_color,
                        default_label='background',
                        width=256,
                        height=256),
    'style_image':
    runway.image
}

command_outputs = {'output': runway.image}


@runway.command("generate_face",
                inputs=command_inputs,
                outputs=command_outputs,
                description="Generates a face using SPADE")
def generate_face(sess_out, inputs):
    original_size = inputs['semantic_map'].size