Example no. 1
def stylize(args):

    if args.content_type == "pi":
        # camera.start_preview()
        # sleep(5)
        # camera.capture('/home/pi/Desktop/image.jpg')
        # camera.stop_preview()
        # load the captured frame: the transform below expects a PIL image, not a path
        content_image = utils.load_image('/home/pi/Desktop/image.jpg',
                                         scale=args.content_scale)
    else:
        content_image = utils.load_image(args.content_image,
                                         scale=args.content_scale)

    tstart = time.time()

    content_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Lambda(lambda x: x.mul(255))])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0)

    with torch.no_grad():
        style_model = TransformerNet()
        state_dict = torch.load(args.model)
        # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
        for k in list(state_dict.keys()):
            if re.search(r'in\d+\.running_(mean|var)$', k):
                del state_dict[k]
        style_model.load_state_dict(state_dict)

        output = style_model(content_image)
    utils.save_image(args.output_image, output[0])

    tstop = time.time()
    print("Inference time : " + str(1000 * (tstop - tstart)) + " ms")
Example no. 2
def stylize(args):
    device = torch.device("cuda" if args.cuda else "cpu")

    content_image = utils.load_image(args.content_image, scale=args.content_scale)
    content_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)

    if args.model.endswith(".onnx"):
        output = stylize_onnx_caffe2(content_image, args)
    else:
        with torch.no_grad():
            style_model = TransformerNet()
            state_dict = torch.load(args.model)
            # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
            for k in list(state_dict.keys()):
                if re.search(r'in\d+\.running_(mean|var)$', k):
                    del state_dict[k]
            style_model.load_state_dict(state_dict)
            style_model.to(device)
            if args.export_onnx:
                assert args.export_onnx.endswith(".onnx"), "Export model file should end with .onnx"
                output = torch.onnx._export(style_model, content_image, args.export_onnx).cpu()
            else:
                output = style_model(content_image).cpu()
    utils.save_image(args.output_image, output[0])
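A minimal invocation sketch for the function above (the SimpleNamespace stand-in for argparse's parsed arguments is hypothetical; the attribute names mirror exactly what stylize() reads):

from types import SimpleNamespace

args = SimpleNamespace(
    cuda=False,                       # run on CPU
    content_image="input.jpg",        # path to the content image
    content_scale=None,               # optional downscale factor for utils.load_image
    model="saved_models/mosaic.pth",  # trained TransformerNet checkpoint (.pth, not .onnx)
    export_onnx=None,                 # skip the ONNX export branch
    output_image="output.jpg",        # where the stylized result is written
)
stylize(args)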
Example no. 3
def stylize(**kwargs):
    '''
    Generate a picture in the style of style_picture.jpg.
    '''
    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)

    content_image = tv.datasets.folder.default_loader(opt.content_path)

    content_transform = tv.transforms.Compose([
        tv.transforms.ToTensor(),  # scales values to (0, 1)
        tv.transforms.Lambda(lambda x: x * 255)
    ])  # rescale values to (0, 255)
    content_image = content_transform(content_image)
    content_image = Variable(content_image.unsqueeze(0), volatile=True)

    style_model = TransformerNet().eval()  # switch to eval mode
    style_model.load_state_dict(
        t.load(opt.model_path, map_location=lambda _s, _: _s))

    if opt.use_gpu:
        content_image = content_image.cuda()
        style_model.cuda()

    output = style_model(content_image)
    output_data = output.cpu().data[0]
    tv.utils.save_image((output_data / 255).clamp(min=0, max=1),
                        opt.result_path)
Example no. 4
def stylize(**kwargs):
    opt = Config()

    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)

    # preprocess the input image
    content_image = tv.datasets.folder.default_loader(opt.content_path)
    content_transform = tv.transforms.Compose(
        [tv.transforms.ToTensor(),
         tv.transforms.Lambda(lambda x: x.mul(255))])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0)
    content_image = Variable(content_image, volatile=True)

    # model setup
    style_model = TransformerNet().eval()
    style_model.load_state_dict(
        t.load(opt.model_path, map_location=lambda _s, _: _s))

    if opt.use_gpu:
        content_image = content_image.cuda()
        style_model.cuda()

    # style transfer and save the output
    output = style_model(content_image)
    output_data = output.cpu().data[0]
    tv.utils.save_image((output_data / 255).clamp(min=0, max=1),
                        opt.result_path)
Example no. 5
def evaluate(args):
    # device = torch.device('cuda' if args.cuda and torch.cuda.is_available() else 'cpu')

    model = TransformerNet()
    state_dict = torch.load(args.model)

    if args.gpus is not None:
        model = nn.DataParallel(model, device_ids=args.gpus)
    else:
        model = nn.DataParallel(model)
    model.load_state_dict(state_dict)
    if args.cuda:
        model.cuda()

    with torch.no_grad():
        for root, dirs, filenames in os.walk(args.input_dir):
            for filename in filenames:
                if utils.is_image_file(filename):
                    impath = osp.join(root, filename)
                    img = utils.load_image(impath)
                    img = img.unsqueeze(0)
                    if args.cuda:
                        img = img.cuda()  # .cuda() is not in-place; reassign
                    rec_img = model(img)
                    if args.cuda:
                        rec_img = rec_img.cpu()
                        img = img.cpu()
                    save_path = osp.join(args.output_dir, filename)
                    # utils.save_image(rec_img[0], save_path)
                    utils.save_image_preserv_length(rec_img[0], img[0],
                                                    save_path)
Example no. 6
    def convert(self, orig_image):
        #pil_img = Image.fromarray(orig_image)
        pil_img = orig_image

        content_transform = transforms.Compose(
            [transforms.ToTensor(),
             transforms.Lambda(lambda x: x.mul(255))])
        content_image = content_transform(pil_img)
        content_image = content_image.unsqueeze(0).to(self.device)

        with torch.no_grad():
            style_model = TransformerNet()
            state_dict = torch.load(self.model)
            # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
            for k in list(state_dict.keys()):
                if re.search(r'in\d+\.running_(mean|var)$', k):
                    del state_dict[k]
            style_model.load_state_dict(state_dict)
            style_model.to(self.device)

            rospy.loginfo('stylizing image ...')
            output = style_model(content_image).cpu()
            img = output[0].clone().clamp(0, 255).numpy()
            img = img.transpose(1, 2, 0).astype("uint8")

            img = cv2.addWeighted(orig_image, self.alpha,
                                  img[0:orig_image.shape[0], 0:orig_image.shape[1]],
                                  1 - self.alpha, 0.0)

            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            return img
Example no. 7
def stylize(args):
    device = torch.device("cuda" if args.cuda else "cpu")

    content_image = utils.load_image(args.content_image,
                                     scale=args.content_scale)
    content_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Lambda(lambda x: x.mul(255))])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)
    style_id = torch.LongTensor([args.style_id]).to(device)

    with torch.no_grad():
        import time
        start = time.time()
        style_model = TransformerNet(style_num=args.style_num)
        state_dict = torch.load(args.model)
        style_model.load_state_dict(state_dict)
        style_model.to(device)
        output = style_model([content_image, style_id]).cpu()
        end = time.time()
        print('Time={}'.format(end - start))
    if args.export_onnx:
        assert args.export_onnx.endswith(
            ".onnx"), "Export model file should end with .onnx"
        # export with the tensors built above (content_image_t and style_t were undefined)
        output = torch.onnx._export(style_model, [content_image, style_id],
                                    args.export_onnx,
                                    input_names=['input_image', 'style_index'],
                                    output_names=['output_image']).cpu()

    utils.save_image(
        'output/' + args.output_image + '_style' + str(args.style_id) + '.jpg',
        output[0])
Example no. 8
def stylize(**kwargs):
    """
    perform style transfer
    """
    opt = Config()

    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)
    device = t.device('cuda') if opt.use_gpu else t.device('cpu')

    # input image preprocess
    content_image = tv.datasets.folder.default_loader(opt.content_path)
    content_transform = tv.transforms.Compose(
        [tv.transforms.ToTensor(),
         tv.transforms.Lambda(lambda x: x.mul(255))])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device).detach()

    # model setup
    style_model = TransformerNet().eval()
    style_model.load_state_dict(
        t.load(opt.model_path, map_location=lambda _s, _: _s))
    style_model.to(device)

    # style transfer and save output
    output = style_model(content_image)
    output_data = output.cpu().data[0]
    tv.utils.save_image((output_data / 255).clamp(min=0, max=1),
                        opt.result_path)
Example no. 9
    def __init__(self, image_queue, output_queue, engine_id, log_flag=True):
        super(StyleVideoApp, self).__init__(image_queue, output_queue, engine_id)
        self.log_flag = log_flag
        self.is_first_image = True
        self.dir_path = os.getcwd()
        self.model = self.dir_path + '/../models/the_scream.model'
        self.path = self.dir_path + '/../models/'
        print('MODEL PATH {}'.format(self.path))

        # initialize model
        self.style_model = TransformerNet()
        self.style_model.load_state_dict(torch.load(self.model))
        self.style_model.cuda()
        self.style_type = "the-scream"
        self.content_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Lambda(lambda x: x.mul(255))
        ])
        wtr_mrk4 = cv2.imread('../wtrMrk.png', -1)  # the watermark is of dimension 30x120
        self.mrk, _, _, mrk_alpha = cv2.split(wtr_mrk4)  # the RGB channels are equivalent
        self.alpha = mrk_alpha.astype(float) / 255
        #self.mrk = cv2.merge((mrk_ch,mrk_ch,mrk_ch))
        #self.alpha = cv2.merge((mrk_alpha,mrk_alpha,mrk_alpha))
 
        print('FINISHED INITIALISATION')
Example no. 10
def stylize_one(style_model_path, target_image):
    content_image = utils.load_image(target_image, scale=4)
    # print('content_image', content_image)
    content_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0)
    with torch.no_grad():

        style_model = TransformerNet()
        state_dict = torch.load(style_model_path)
        # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
        # for k in list(state_dict.keys()):
        #     if re.search(r'in\d+\.running_(mean|var)$', k):
        #         del state_dict[k]

        style_model.load_state_dict(state_dict)
        output = style_model(content_image)
        data = output[0]
        torchvision.utils.save_image(data, './1.png', normalize=True)
        img = data.clone().clamp(0, 255).numpy()
        img = img.transpose(1, 2, 0).astype("uint8")
        img = Image.fromarray(img)
    return img
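A hypothetical one-liner showing how stylize_one might be called (both paths are placeholders):

stylize_one('saved_models/candy.pth', 'content.jpg').save('stylized.jpg')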
Example no. 11
def stylize(args):
    device = torch.device("cuda" if args.is_cuda else "cpu")

    content_image = utils.load_image(args.content_image, scale=args.content_scale)
    content_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)

    with torch.no_grad():
        style_model = TransformerNet()
        state_dict = torch.load(args.model)
        # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
        # for k in list(state_dict.keys()):
        #     if re.search(r'in\d+\.running_(mean|var)$', k):
        #         del state_dict[k]
        style_model.load_state_dict(state_dict)
        style_model.to(device)

        output = style_model(content_image).cpu()
    utils.save_image(args.output_image, output[0])
Example no. 12
def run_feedforward_texture_transfer(args):

    print('running feedforward neural style transfer...')

    content_image = load_image(args.content_image,
                               mask=False,
                               size=args.image_size,
                               scale=None,
                               square=False)
    content_image = preprocess(content_image)

    input_image = content_image

    in_channels = 3

    stylizing_net = TransformerNet(in_channels)
    state_dict = torch.load(args.style_model)

    # checkpoint keys use dots (e.g. in1.running_mean); the original \/ pattern never matched
    for k in list(state_dict.keys()):
        if re.search(r'in\d+\.running_(mean|var)$', k):
            del state_dict[k]
    del k
    stylizing_net.load_state_dict(state_dict)
    stylizing_net = stylizing_net.to(device)

    output = stylizing_net(input_image)

    if args.original_colors == 1:
        output = original_colors(content_image.cpu(), output)
    save_image(filename=args.output_image, data=output.detach())
Example no. 13
def stylize_one(style_model_path, target_image):
    content_image = utils.load_image(target_image)
    # print('content_image', content_image)
    content_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Lambda(lambda x: x.mul(255))])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)
    with torch.no_grad():

        style_model = TransformerNet()
        state_dict = torch.load(style_model_path)

        for k in list(state_dict.keys()):
            if re.search(r'in\d+\.running_(mean|var)$', k):
                del state_dict[k]

        style_model.load_state_dict(state_dict)
        style_model.to(device)
        output = style_model(content_image)
        data = output[0].clamp(0, 255)
        # torchvision.utils.save_image(data, './1.png', normalize=True)
        img = data.cpu().clone().numpy()  # already clamped above
        img = img.transpose(1, 2, 0).astype("uint8")
        img = Image.fromarray(img)
    return img, data
Example no. 14
def st_fns():
    tstart = time.time()

    content_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Lambda(lambda x: x.mul(255))])
    content_image = content_transform(load_img())
    content_image = content_image.unsqueeze(0)

    with torch.no_grad():
        style_model = TransformerNet()
        state_dict = torch.load("../saved_models/" + combo2.value + ".pth")
        # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
        for k in list(state_dict.keys()):
            if re.search(r'in\d+\.running_(mean|var)$', k):
                del state_dict[k]
        style_model.load_state_dict(state_dict)

        output = style_model(content_image)
    shot_time = time.strftime("-%Y%m%d-%H%M%S")
    utilsIm.save_image(input_box1.value + "/image_st_" + shot_time + ".jpg",
                       output[0])

    image_st = cv2.imread(input_box1.value + "/image_st_" + shot_time + ".jpg")
    window_name = "image_st_" + shot_time + ".jpg"
    cv2.imshow(window_name, image_st)

    tstop = time.time()
    print("Inference time : " + str(1000 * (tstop - tstart)) + " ms")
Example no. 15
def stylize(args):
    device = torch.device("cpu")

    content_image = utils.load_image(args['content_image'], scale=None)
    content_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)

    if args['model'].endswith(".onnx"):
        output = stylize_onnx_caffe2(content_image, args)
    else:
        with torch.no_grad():
            style_model = TransformerNet()
            state_dict = torch.load(args['model'])
            # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
            for k in list(state_dict.keys()):
                if re.search(r'in\d+\.running_(mean|var)$', k):
                    del state_dict[k]
            style_model.load_state_dict(state_dict)
            style_model.to(device)
            output = style_model(content_image).cpu()
    utils.save_image(args["output_image"], output[0])
Example no. 16
def stylize(args):
    #content_image = utils.tensor_load_rgbimage(args.content_image, scale = args.content_scale)
    #content_image = content_image.unsqueeze(0)
    content_image = None
    if args.srcnn:
        content_image = utils.tensor_load_rgbimage(args.content_image,
                                                   scale=args.upsample)
    else:
        content_image = utils.tensor_load_rgbimage(args.content_image)
    content_image.unsqueeze_(0)
    if args.cuda:
        content_image = content_image.cuda()
    content_image = Variable(utils.preprocess_batch(content_image),
                             volatile=True)

    style_model = None
    if args.srcnn:
        style_model = SRCNN()
    else:
        style_model = TransformerNet(args.arch)
    ##style_model = TransformerNet()
    style_model.load_state_dict(torch.load(args.model))

    if args.cuda:
        style_model.cuda()

    output = style_model(content_image)
    utils.tensor_save_bgrimage(output.data[0], args.output_image, args.cuda)
Example no. 17
def vectorize(args):
    size = args.size
    # vectors = np.zeros((size, size, 2), dtype=np.float32)
    # for y in range(size):
    #     for x in range(size):
    #         xx = float(x - size / 2)
    #         yy = float(y - size / 2)
    #         rsq = xx ** 2 + yy ** 2
    #         if (rsq == 0):
    #             vectors[y, x, 0] = 1
    #             vectors[y, x, 1] = 1
    #         else:
    #             vectors[y, x, 0] = -yy / rsq
    #             vectors[y, x, 1] = xx / rsq
    # vectors = NormalizVectrs(vectors)

    device = torch.device("cuda" if args.cuda else "cpu")

    content_image = Image.open(args.content_image).convert('L')
    content_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Lambda(lambda x: x.mul(255))])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0)
    content_image = utils.subtract_imagenet_mean_batch(content_image)
    content_image = content_image.to(device)

    with torch.no_grad():
        vectorize_model = TransformerNet()
        state_dict = torch.load(args.saved_model)
        # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
        for k in list(state_dict.keys()):
            if re.search(r'in\d+\.running_(mean|var)$', k):
                del state_dict[k]
        vectorize_model.load_state_dict(state_dict)
        vectorize_model.to(device)
        output = vectorize_model(content_image)

    target = dataset.hdf5_loader(args.target_vector)
    target_transform = transforms.ToTensor()
    target = target_transform(target)
    target = target.unsqueeze(0).to(device)

    cosine_loss = torch.nn.CosineEmbeddingLoss()
    label = torch.ones(1, 1, args.size, args.size).to(device)
    loss = cosine_loss(output, target, label)
    print(loss.item())

    output = output.cpu().clone().numpy()[0].transpose(1, 2, 0)
    output = NormalizVectrs(output)
    lic(output, "output.jpg")

    target = target.cpu().clone().numpy()[0].transpose(1, 2, 0)
    lic(target, "target.jpg")
Example no. 18
def stylize(args):
    if args.model.endswith(".onnx"):
        return stylize_onnx_caffe2(args)

    content_image = utils.tensor_load_rgbimage(args.content_image,
                                               scale=args.content_scale)
    content_image = content_image.unsqueeze(0)

    if args.cuda:
        content_image = content_image.cuda()
    content_image = Variable(utils.preprocess_batch(content_image),
                             requires_grad=False)
    style_model = TransformerNet()
    state_dict = torch.load(args.model)

    #    removed_modules = ['in2']
    in_names = [
        "in1.scale", "in1.shift", "in2.scale", "in2.shift", "in3.scale",
        "in3.shift", "res1.in1.scale", "res1.in1.shift", "res1.in2.scale",
        "res1.in2.shift", "res2.in1.scale", "res2.in1.shift", "res2.in2.scale",
        "res2.in2.shift", "res3.in1.scale", "res3.in1.shift", "res3.in2.scale",
        "res3.in2.shift", "res4.in1.scale", "res4.in1.shift", "res4.in2.scale",
        "res4.in2.shift", "res5.in1.scale", "res5.in1.shift", "res5.in2.scale",
        "res5.in2.shift", "in4.scale", "in4.shift", "in5.scale", "in5.shift"
    ]

    #   kl = list(state_dict.keys())
    #   for k in kl:

    # rename legacy InstanceNorm parameters: scale -> weight, shift -> bias
    for k in in_names:
        state_dict[k.replace("scale", "weight").replace("shift", "bias")] = state_dict.pop(k)

    style_model.load_state_dict(state_dict)

    if args.cuda:
        style_model.cuda()

    if args.half:
        style_model.half()
        content_image = content_image.half()

    if args.export_onnx:
        assert args.export_onnx.endswith(
            ".onnx"), "Export model file should end with .onnx"
        output = torch.onnx._export(style_model, content_image,
                                    args.export_onnx)
    else:
        output = style_model(content_image)

    if args.half:
        output = output.float()

    utils.tensor_save_bgrimage(output.data[0], args.output_image, args.cuda)
Example no. 19
def stylize(args):
    content_image = load_image_eval(args.content_image)
    with flow.no_grad():
        style_model = TransformerNet()
        state_dict = flow.load(args.model)
        style_model.load_state_dict(state_dict)
        style_model.to("cuda")
        output = style_model(
            flow.Tensor(content_image).clamp(0, 255).to("cuda"))
    print(args.output_image)
    cv2.imwrite(args.output_image, recover_image(output.numpy()))
Example no. 20
def stylize(**kwargs):
    opt = Config()

    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)

    style_model = TransformerNet().cuda()
    style_model.load_state_dict(t.load(opt.model_path, ))

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    ])
    video = cv2.VideoCapture(opt.video_path)
    frames = list()
    # read the video content from file
    # frames per second of the video
    fps = video.get(cv2.CAP_PROP_FPS)
    # width of the video frames
    frame_width = 640
    # height of the video frames
    frame_height = 360
    # total number of frames
    frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))

    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    out = cv2.VideoWriter('./ablation_4_16_.mp4', fourcc, fps,
                          (frame_width, frame_height))
    n = 0
    while video.isOpened():
        ret, frame = video.read()
        if not ret:
            break
        n += 1
        frame = cv2.resize(frame, (640, 360))
        # print(ret,frame.shape)
        cv2.imwrite('./ablation/ori/temp%d.jpg' % (n), frame)
        content_image = tv.datasets.folder.default_loader(
            './ablation/ori/temp%d.jpg' % (n))
        content_image = transform(content_image)
        content_image = content_image.unsqueeze(0).cuda()

        output = style_model(content_image)
        # output = utils.normalize_batch(output)
        tv.utils.save_image(
            (output.data.cpu()[0] * 0.225 + 0.45).clamp(min=0, max=1),
            './ablation/4/temp%d.jpg' % (n))
        image = cv2.imread('./ablation/4/temp%d.jpg' % (n))
        out.write(image)
        sys.stdout.write('\r>> Converting image %d/%d' % (n, frame_count))
        sys.stdout.flush()

    video.release()
    cv2.destroyAllWindows()
Example no. 21
def get_model(model_name):
    """Returns model
    Args:
        model_name: name of the model to load
    Returns:
        pytorch model
    """
    style_model = TransformerNet()
    state_dict = torch.load(MODEL_PATH[model_name])
    style_model.load_state_dict(state_dict)
    return style_model.eval().to(device)
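A hypothetical usage sketch for get_model (the "mosaic" key, the input path, and the preprocessing are assumptions; the transform mirrors the other examples on this page, and device is the module-level global the snippet relies on):

from PIL import Image
from torchvision import transforms
import torch

model = get_model("mosaic")  # assumes "mosaic" is a key in MODEL_PATH

preprocess = transforms.Compose([
    transforms.ToTensor(),
    transforms.Lambda(lambda x: x.mul(255))
])
x = preprocess(Image.open("input.jpg").convert("RGB")).unsqueeze(0).to(device)

with torch.no_grad():
    y = model(x).cpu()

# convert the (C, H, W) float tensor back to an 8-bit image
img = y[0].clamp(0, 255).numpy().transpose(1, 2, 0).astype("uint8")
Image.fromarray(img).save("output.jpg")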
Example no. 22
def multi_style(path, width=320, device=device):
    model_iter = itertools.cycle(os.listdir(path))
    model_file = next(model_iter)
    print(f'Using {model_file} ')
    model_path = os.path.join(path, model_file)
    model = TransformerNet()
    model.load_state_dict(read_state_dict(model_path))
    model.to(device)

    monitor_width = get_monitors()[0].width
    monitor_height = get_monitors()[0].height

    vs = VideoStream(src=0).start()
    time.sleep(2.0)
    timer = Timer()
    last_update = int(time.time())
    while True:
        frame = vs.read()
        if frame is None:
            frame = np.random.randint(0,
                                      255, (int(width / 1.5), width, 3),
                                      dtype=np.uint8)
        frame = cv2.flip(frame, 1)
        frame = resize(frame, width=width)

        # Style the frame
        img = style_frame(frame, model, device).numpy()
        img = np.clip(img, 0, 255)
        img = img.astype(np.uint8)

        img = img.transpose(1, 2, 0)
        img = cv2.resize(img[:, :, ::-1], (monitor_width, monitor_height))

        # print(img.shape)
        cv2.imshow("Output", img)
        timer()

        # Determine whether the n key has been pressed or the time since the last
        # rotation exceeds the defined rotation constraint
        key = cv2.waitKey(1) & 0xFF
        time_since_last_update = int(time.time()) - last_update

        rotate_by_key = key == ord("n")
        rotate_by_time = (time_since_last_update >= rotate_time)

        if rotate_by_time or rotate_by_key:
            model_file = next(model_iter)
            print(f'Using {model_file} ')
            model_path = os.path.join(path, model_file)
            model.load_state_dict(read_state_dict(model_path))
            model.to(device)
            last_update = int(time.time())
        elif key == ord("q"):
            break
Example no. 23
def load_model(model_path):
    with torch.no_grad():
        style_model = TransformerNet()
        state_dict = torch.load(model_path)
        # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
        for k in list(state_dict.keys()):
            if re.search(r'in\d+\.running_(mean|var)$', k):
                del state_dict[k]
        style_model.load_state_dict(state_dict)
        style_model.to(device)
        style_model.eval()
        return style_model
Example no. 24
class SetupModel(object):
    model = TransformerNet()

    def __init__(self, f):
        self.f = f

        self.model.load_state_dict(getWeights())
        self.model.to(device)
        self.model.eval()

    def __call__(self, *args, **kwargs):
        return self.f(*args, **kwargs)
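A hypothetical sketch of how this decorator class might be used (handle_request and its body are assumptions): the weights load once at decoration time, and every call passes straight through to the wrapped function, which can rely on SetupModel.model being ready.

@SetupModel
def handle_request(image_batch):
    # image_batch: a preprocessed 4-D tensor, as in the stylize examples above
    with torch.no_grad():
        return SetupModel.model(image_batch).cpu()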
 def __init__(self, csv_file, transform=None, modelname = "mosaic"):
     self.frame = pd.read_csv(csv_file, header=None)
     self.transform = transform
     self.modelname = modelname
     
     self.style_model = TransformerNet()
     modelpath = "saved_models/"+self.modelname+".pth"
     state_dict = torch.load(modelpath)
     for k in list(state_dict.keys()):
         if re.search(r'in\d+\.running_(mean|var)$', k):
             del state_dict[k]
     self.style_model.load_state_dict(state_dict)
     self.style_model.to(device)        
Example no. 26
def init():
    global model
    #model_path = os.path.join('picasso.pth')
    model_path = Model.get_model_path('picasso.pth')

    model = TransformerNet()
    state_dict = torch.load(model_path)
    # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
    for k in list(state_dict.keys()):
        if re.search(r'in\d+\.running_(mean|var)$', k):
            del state_dict[k]
    model.load_state_dict(state_dict)
    model.eval()
Example no. 27
def get_output(trained_model, content_image):
    with torch.no_grad():
        style_model = TransformerNet()
        state_dict = torch.load(trained_model)
        # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
        for k in list(state_dict.keys()):
            if re.search(r'in\d+\.running_(mean|var)$', k):
                del state_dict[k]
        style_model.load_state_dict(state_dict)
        style_model.to(device)
        output = style_model(content_image).cpu()
        # utils.save_image(args.output_image, output[0])
    return output
Example no. 28
def stylize():

    if combo1.value == "Picamera":
        from picamera import PiCamera
        camera = PiCamera()
        camera.resolution = (640, 480)
        camera.start_preview()
        sleep(5)
        img_time = time.strftime("image-%Y%m%d-%H%M%S")
        img = '/home/pi/Desktop/' + img_time + '.jpg'
        camera.capture(img)
        camera.stop_preview()
        content_image = utilsIm.load_image(img, scale=args.content_scale)
    elif combo1.value == "USB Camera":
        from cv2 import VideoCapture
        cam = VideoCapture(0)
        s, img = cam.read()
        shot_time = time.strftime("image-%Y%m%d-%H%M%S")
        shot = '/home/pi/Desktop/' + shot_time + '.jpg'
        imwrite(shot, img)


#         content_image = utilsIm.load_image(shot, scale=args.content_scale)
    else:
        content_image = utilsIm.load_image(input_box2.value,
                                           scale=slider.value)
        #content_image = utilsIm.load_image("C:/Users/K/Pictures/uda/led.jpg", scale=slider.value)

    tstart = time.time()

    content_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Lambda(lambda x: x.mul(255))])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0)

    with torch.no_grad():
        style_model = TransformerNet()
        state_dict = torch.load("../saved_models/" + combo2.value + ".pth")
        # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
        for k in list(state_dict.keys()):
            if re.search(r'in\d+\.running_(mean|var)$', k):
                del state_dict[k]
        style_model.load_state_dict(state_dict)

        output = style_model(content_image)
    utilsIm.save_image(input_box1.value + "/image_ui.jpg", output[0])

    tstop = time.time()
    print("Inference time : " + str(1000 * (tstop - tstart)) + " ms")
Example no. 29
def multi_style(path,
                width=320,
                device=device,
                cycle_length=np.inf,
                half_precision=False,
                rotate=0):
    model_iter = itertools.cycle(os.listdir(path))
    model_file = next(model_iter)
    print(f'Using {model_file} ')
    model_path = os.path.join(path, model_file)
    model = TransformerNet()
    model.load_state_dict(read_state_dict(model_path))
    model.to(device)
    if half_precision:
        model.half()
    vs = VideoStream(src=0).start()
    time.sleep(2.0)
    timer = Timer()
    cycle_begin = time.time()
    while True:
        frame = vs.read()
        if frame is None:
            frame = np.random.randint(0,
                                      255, (int(width / 1.5), width, 3),
                                      dtype=np.uint8)
        frame = cv2.flip(frame, 1)
        frame = resize(frame, width=width)

        # Style the frame
        img = style_frame(frame, model, device, half_precision)
        img = cv2.resize(img[:, :, ::-1], (640, 480))

        # rotate
        if rotate > 0:
            img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
        elif rotate < 0:
            img = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)
        # print(img.shape)
        cv2.imshow("Output", img)
        timer()
        key = cv2.waitKey(1) & 0xFF
        if key == ord("n") or (time.time() - cycle_begin) > cycle_length:
            model_file = next(model_iter)
            print(f'Using {model_file} ')
            model_path = os.path.join(path, model_file)
            model.load_state_dict(read_state_dict(model_path))
            model.to(device)
            cycle_begin = time.time()
        elif key == ord("q"):
            break
Example no. 30
def load_model(model_path):
    print('loading model')
    with torch.no_grad():
        style_model = TransformerNet()
        state_dict = torch.load(model_path)
        # remove the saved deprecated running_* keys in InstanceNorm
        # from the checkpoint
        for k in list(state_dict.keys()):
            if re.search(r'in\d+\.running_(mean|var)$', k):
                del state_dict[k]
        style_model.load_state_dict(state_dict)
        style_model.to(device)
        style_model.eval()
        return style_model