Example #1
def network_test(args):
    # set device
    device = torch.device('cuda' if args.gpu_no >= 0 else 'cpu')
    
    # load check point
    check_point = torch.load(args.check_point)

    # load network
    network = AvatarNet(args.layers)
    network.load_state_dict(check_point['state_dict'])
    network = network.to(device)

    # load target images
    content_img = imload(args.content, args.imsize, args.cropsize).to(device)
    style_imgs = [imload(style, args.imsize, args.cropsize, args.cencrop).to(device) for style in args.style]
    masks = None
    if args.mask:
        masks = [maskload(mask).to(device) for mask in args.mask]

    # stylize image
    with torch.no_grad():
        stylized_img = network(content_img, style_imgs, args.style_strength,
                               args.patch_size, args.patch_stride, masks,
                               args.interpolation_weights, args.preserve_color, False)

    imsave(stylized_img, 'stylized_image.jpg')

    return None
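
Note: the snippet above only consumes an args namespace and does not show how it is built. The sketch below is a hypothetical argparse setup covering the attributes that network_test reads; the flag spellings and default values are illustrative assumptions, not the project's actual parser.

import argparse

def build_parser_sketch():
    # Hypothetical parser: attribute names mirror what network_test reads from
    # args, but the flag names and defaults here are guesses, not the real ones.
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu_no', type=int, default=0)
    parser.add_argument('--check_point', type=str, required=True)
    parser.add_argument('--content', type=str, required=True)
    parser.add_argument('--style', type=str, nargs='+', required=True)
    parser.add_argument('--mask', type=str, nargs='*', default=None)
    parser.add_argument('--imsize', type=int, default=512)
    parser.add_argument('--cropsize', type=int, default=None)
    parser.add_argument('--cencrop', action='store_true')
    parser.add_argument('--layers', type=int, nargs='+', default=[1, 6, 11, 20])
    parser.add_argument('--style_strength', type=float, default=1.0)
    parser.add_argument('--patch_size', type=int, default=3)
    parser.add_argument('--patch_stride', type=int, default=1)
    parser.add_argument('--interpolation_weights', type=float, nargs='+', default=None)
    parser.add_argument('--preserve_color', action='store_true')
    return parser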
Example #2
def evaluate_network(args):
    device = torch.device('cuda' if args.gpu_no >= 0 else 'cpu')

    if args.load_path is None:
        raise RuntimeError("Need a model to load !!")
    check_point = torch.load(args.load_path)

    transfer_network = Style_Transfer_Network().to(device)
    transfer_network.load_state_dict(check_point['state_dict'])

    content_img = imload(args.content, args.imsize, args.cropsize,
                         args.cencrop).to(device)
    style_imgs = [
        imload(_style, args.imsize, args.cropsize, args.cencrop).to(device)
        for _style in args.style
    ]

    masks = None
    if args.mask:
        masks = [maskload(mask).to(device) for mask in args.mask]

    with torch.no_grad():
        stylized_img = transfer_network(content_img, style_imgs,
                                        args.style_strength,
                                        args.interpolation_weights, masks,
                                        args.preserve_color)

    imsave(stylized_img, 'stylized_image.jpg')
    return stylized_img
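
Note: evaluate_network can also be exercised without a command-line parser. The call below is a hypothetical invocation built with types.SimpleNamespace; the file names and numeric values are placeholders chosen only to show which attributes the function expects.

from types import SimpleNamespace

# Hypothetical invocation; paths and values are placeholders, not project defaults.
args = SimpleNamespace(
    gpu_no=0,
    load_path='check_point.pth',
    content='content.jpg',
    style=['style_a.jpg', 'style_b.jpg'],
    imsize=512,
    cropsize=None,
    cencrop=False,
    mask=None,
    style_strength=1.0,
    interpolation_weights=[0.5, 0.5],  # blend the two styles equally
    preserve_color=False,
)
stylized = evaluate_network(args)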
Example #3
def evaluate_network(args):
    # set device
    device = torch.device('cuda' if args.gpu_no >= 0 else 'cpu')

    # load checkpoint and restore the generator weights
    check_point = torch.load(args.check_point)
    network = Generator(args.unet_flag).to(device).eval()
    network.load_state_dict(check_point['g_state_dict'])

    # load the input image
    image = imload(args.image, args.imsize, args.cropsize,
                   args.cencrop).to(device)

    # run inference without tracking gradients
    with torch.no_grad():
        output = network(image)

    imsave(output, 'output.jpg')
Example #4
import numpy as np

from hw import maxpool2d, conv2d_full
from utils import imload, imsave_grayscale

kodim = imload('kodim20.png')

k1 = np.array([[-1., 1., 3.], [1., 5., -1.], [-3., -6., 1.]])
k2 = np.array([[-1., 4., 3.], [-17., 17., -10.], [-3., -4., 1.]])
k3 = np.array([[7., 19., 0.], [-7., 0., -19.], [0., 3., -3.]])

stack1 = np.stack([k1, k1, k3], axis=2)
stack2 = np.stack([k3, k1, k3], axis=2)
stack3 = np.stack([k3, k2, k2], axis=2)

test = conv2d_full(kodim, [stack1, stack2, stack3], stride=3)
test = maxpool2d(test, size=7, stride=5)

imsave_grayscale(test, 'online_checkoff.png')
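
Note: the hw module used above is not shown. A minimal NumPy sketch consistent with the maxpool2d(test, size=7, stride=5) call is given below, assuming an input of shape (H, W) or (H, W, C) and no padding; the actual homework implementation may differ.

import numpy as np

def maxpool2d_sketch(x, size, stride):
    # Max-pool over the two leading spatial dimensions with a square window.
    # Assumes x has shape (H, W) or (H, W, C) and applies no padding.
    h, w = x.shape[:2]
    out_h = (h - size) // stride + 1
    out_w = (w - size) // stride + 1
    out = np.empty((out_h, out_w) + x.shape[2:], dtype=x.dtype)
    for i in range(out_h):
        for j in range(out_w):
            window = x[i * stride:i * stride + size,
                       j * stride:j * stride + size]
            out[i, j] = window.max(axis=(0, 1))
    return out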
Example #5
if __name__ == "__main__":
    # arguments
    parser = build_parser()
    args = parser.parse_args()

    # ImageNet class index to label
    ## ref: https://discuss.pytorch.org/t/imagenet-classes/4923/2
    idx_to_label = json.load(open('imagenet_class_index.json'))
    idx_to_label = {int(key): value[1] for key, value in idx_to_label.items()}

    # set device
    device = torch.device('cuda:%d' %
                          args.gpu_no if args.gpu_no >= 0 else 'cpu')
    network = CAM(args.network).to(device)
    network.eval()
    image = imload(args.image, args.imsize, args.cropsize).to(device)

    # make class activation map
    with torch.no_grad():
        prob, cls, cam = network(image, topk=args.topk)

        # tensor to pil image
        img_pil = imshow(image)
        img_pil.save(args.save_path + "input.jpg")

        for k in range(args.topk):
            print("Predict '%s' with %2.4f probability" %
                  (idx_to_label[cls[k]], prob[k]))
            cam_ = cam[k].squeeze().cpu().data.numpy()
            cam_pil = array_to_cam(cam_)
            cam_pil.save(args.save_path + "cam_class__%s_prob__%2.4f.jpg" %
                         (idx_to_label[cls[k]], prob[k]))
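
Note: array_to_cam is defined elsewhere in the repository and is not shown here. A plausible stand-in, assuming it only normalizes the activation map and applies a matplotlib colormap to produce a PIL heatmap, is sketched below; the real helper may also resize or blend the map with the input image.

import numpy as np
from PIL import Image
from matplotlib import cm

def array_to_cam_sketch(arr):
    # Normalize a 2D activation map to [0, 1], colorize it with the jet
    # colormap, and return it as an RGB PIL image.
    arr = (arr - arr.min()) / (arr.max() - arr.min() + 1e-8)
    heat = (cm.jet(arr)[..., :3] * 255).astype(np.uint8)
    return Image.fromarray(heat)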
Example #6
    args = get_arguments()

    device = torch.device('cpu' if -1 in args.gpu_no else 'cuda')

    network, _, _, _ = load_network(args, device)
    predictor = Prediction(network=network,
                           topk=args.topk,
                           scale_factor=args.scale_factor,
                           conf_th=args.conf_th,
                           nms=args.nms,
                           nms_th=args.nms_th,
                           normalized_coord=args.normalized_coord).to(device)
    predictor.eval()

    # single image prediction
    img_ten, img_pil, origin_size = imload(args.data, args.pretrained,
                                           args.imsize)
    box_ten, cls_ten, score_ten = predictor(img_ten.to(device))
    box_lst, cls_lst, score_lst = box_ten[0].tolist(), cls_ten[0].tolist(
    ), score_ten[0].tolist()

    # clamp outside image
    box_lst = [
        list(map(lambda x: max(0, min(x, args.imsize)), box))
        for box in box_lst
    ]

    # draw box, class and score per prediction
    for i, (box, cls, score) in enumerate(zip(box_lst, cls_lst, score_lst)):
        img_pil = draw_box(img_pil, box, color=CLASS2COLOR[cls])
        if args.fontsize > 0:
            text = '%s: %1.2f' % (INDEX2CLASS[cls], score)
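
Note: draw_box and the text rendering that follows are project helpers not included in this excerpt. A minimal Pillow-based sketch of what such a box-drawing helper typically does is shown below, assuming boxes arrive as [x_min, y_min, x_max, y_max] pixel coordinates; it is illustrative only.

from PIL import ImageDraw

def draw_box_sketch(img_pil, box, color, width=2):
    # Draw one rectangle on the PIL image in place and return the image.
    # box is assumed to be [x_min, y_min, x_max, y_max] in pixel coordinates.
    draw = ImageDraw.Draw(img_pil)
    draw.rectangle(box, outline=color, width=width)
    return img_pil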
Example #7
                
    return input_image


if __name__ == '__main__':
    # get arguments
    parser = build_parser()
    args = parser.parse_args()
    
    # gpu device set
    if args.cuda_device_no >= 0:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device_no)
    device = torch.device('cuda' if args.cuda_device_no >= 0 else 'cpu')
    
    # load target images
    content_image = imload(args.content, args.imsize, args.cropsize)
    content_image = content_image.to(device)

    style_image = imload(args.style, args.imsize, args.cropsize)
    style_image = style_image.to(device)

    # load pre-trained vgg
    vgg = get_vgg_feature_network(args.vgg_flag)
    vgg = vgg.to(device)

    # stylize image
    output_image = stylize_image(vgg=vgg, device=device,
                                 content_image=content_image, style_image=style_image,
                                 content_weight=args.content_weight, style_weight=args.style_weight,
                                 tv_weight=args.tv_weight,
                                 content_layers=args.content_layers, style_layers=args.style_layers,
                                 learning_rate=args.lr, iterations=args.iterations,
                                 noise_content_ratio=args.noise_content_ratio)